1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> // for uint64_t
23 #include <assert.h>
24
25 #include "emu_if.h" //emulator interface
26
27 #include <sys/mman.h>
28
29 #ifdef __i386__
30 #include "assem_x86.h"
31 #endif
32 #ifdef __x86_64__
33 #include "assem_x64.h"
34 #endif
35 #ifdef __arm__
36 #include "assem_arm.h"
37 #endif
38
39 #define MAXBLOCK 4096
40 #define MAX_OUTPUT_BLOCK_SIZE 262144
41 #define CLOCK_DIVIDER 2
42
43 struct regstat
44 {
45   signed char regmap_entry[HOST_REGS];
46   signed char regmap[HOST_REGS];
47   uint64_t was32;
48   uint64_t is32;
49   uint64_t wasdirty;
50   uint64_t dirty;
51   uint64_t u;
52   uint64_t uu;
53   u_int wasconst;
54   u_int isconst;
55   uint64_t constmap[HOST_REGS];
56 };
57
58 struct ll_entry
59 {
60   u_int vaddr;
61   u_int reg32;
62   void *addr;
63   struct ll_entry *next;
64 };
65
66   u_int start;
67   u_int *source;
68   u_int pagelimit;
69   char insn[MAXBLOCK][10];
70   u_char itype[MAXBLOCK];
71   u_char opcode[MAXBLOCK];
72   u_char opcode2[MAXBLOCK];
73   u_char bt[MAXBLOCK];
74   u_char rs1[MAXBLOCK];
75   u_char rs2[MAXBLOCK];
76   u_char rt1[MAXBLOCK];
77   u_char rt2[MAXBLOCK];
78   u_char us1[MAXBLOCK];
79   u_char us2[MAXBLOCK];
80   u_char dep1[MAXBLOCK];
81   u_char dep2[MAXBLOCK];
82   u_char lt1[MAXBLOCK];
83   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
84   static uint64_t gte_rt[MAXBLOCK];
85   static uint64_t gte_unneeded[MAXBLOCK];
86   static int gte_reads_flags; // gte flag read encountered
87   int imm[MAXBLOCK];
88   u_int ba[MAXBLOCK];
89   char likely[MAXBLOCK];
90   char is_ds[MAXBLOCK];
91   char ooo[MAXBLOCK];
92   uint64_t unneeded_reg[MAXBLOCK];
93   uint64_t unneeded_reg_upper[MAXBLOCK];
94   uint64_t branch_unneeded_reg[MAXBLOCK];
95   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
96   uint64_t p32[MAXBLOCK];
97   uint64_t pr32[MAXBLOCK];
98   signed char regmap_pre[MAXBLOCK][HOST_REGS];
99   signed char regmap[MAXBLOCK][HOST_REGS];
100   signed char regmap_entry[MAXBLOCK][HOST_REGS];
101   uint64_t constmap[MAXBLOCK][HOST_REGS];
102   struct regstat regs[MAXBLOCK];
103   struct regstat branch_regs[MAXBLOCK];
104   signed char minimum_free_regs[MAXBLOCK];
105   u_int needed_reg[MAXBLOCK];
106   uint64_t requires_32bit[MAXBLOCK];
107   u_int wont_dirty[MAXBLOCK];
108   u_int will_dirty[MAXBLOCK];
109   int ccadj[MAXBLOCK];
110   int slen;
111   u_int instr_addr[MAXBLOCK];
112   u_int link_addr[MAXBLOCK][3];
113   int linkcount;
114   u_int stubs[MAXBLOCK*3][8];
115   int stubcount;
116   u_int literals[1024][2];
117   int literalcount;
118   int is_delayslot;
119   int cop1_usable;
120   u_char *out;
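  // jump_in[]    - known-clean compiled blocks, indexed by physical page
  // jump_out[]   - sites of direct jumps into a page, so those links can be
  //                unpatched when the page is invalidated
  // jump_dirty[] - blocks that may have been overwritten, indexed by virtual
  //                page and re-verified (verify_dirty) before being reused
  // hash_table   - each bin caches two {vaddr, compiled address} pairs for
  //                fast lookup; see get_addr_ht()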
121   struct ll_entry *jump_in[4096];
122   struct ll_entry *jump_out[4096];
123   struct ll_entry *jump_dirty[4096];
124   u_int hash_table[65536][4]  __attribute__((aligned(16)));
125   char shadow[1048576]  __attribute__((aligned(16)));
126   void *copy;
127   int expirep;
128 #ifndef PCSX
129   u_int using_tlb;
130 #else
131   static const u_int using_tlb=0;
132 #endif
133   static u_int sp_in_mirror;
134   u_int stop_after_jal;
135   extern u_char restore_candidate[512];
136   extern int cycle_count;
137
138   /* registers that may be allocated */
139   /* 1-31 gpr */
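  /* 0 is the MIPS r0 (always zero); 32 and up are the emulator-internal
     registers defined below */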
140 #define HIREG 32 // hi
141 #define LOREG 33 // lo
142 #define FSREG 34 // FPU status (FCSR)
143 #define CSREG 35 // Coprocessor status
144 #define CCREG 36 // Cycle count
145 #define INVCP 37 // Pointer to invalid_code
146 #define MMREG 38 // Pointer to memory_map
147 #define ROREG 39 // ram offset (if rdram!=0x80000000)
148 #define TEMPREG 40
149 #define FTEMP 40 // FPU temporary register
150 #define PTEMP 41 // Prefetch temporary register
151 #define TLREG 42 // TLB mapping offset
152 #define RHASH 43 // Return address hash
153 #define RHTBL 44 // Return address hash table address
154 #define RTEMP 45 // JR/JALR address register
155 #define MAXREG 45
156 #define AGEN1 46 // Address generation temporary register
157 #define AGEN2 47 // Address generation temporary register
158 #define MGEN1 48 // Maptable address generation temporary register
159 #define MGEN2 49 // Maptable address generation temporary register
160 #define BTREG 50 // Branch target temporary register
161
162   /* instruction types */
163 #define NOP 0     // No operation
164 #define LOAD 1    // Load
165 #define STORE 2   // Store
166 #define LOADLR 3  // Unaligned load
167 #define STORELR 4 // Unaligned store
168 #define MOV 5     // Move 
169 #define ALU 6     // Arithmetic/logic
170 #define MULTDIV 7 // Multiply/divide
171 #define SHIFT 8   // Shift by register
172 #define SHIFTIMM 9// Shift by immediate
173 #define IMM16 10  // 16-bit immediate
174 #define RJUMP 11  // Unconditional jump to register
175 #define UJUMP 12  // Unconditional jump
176 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
177 #define SJUMP 14  // Conditional branch (regimm format)
178 #define COP0 15   // Coprocessor 0
179 #define COP1 16   // Coprocessor 1
180 #define C1LS 17   // Coprocessor 1 load/store
181 #define FJUMP 18  // Conditional branch (floating point)
182 #define FLOAT 19  // Floating point unit
183 #define FCONV 20  // Convert integer to float
184 #define FCOMP 21  // Floating point compare (sets FSREG)
185 #define SYSCALL 22// SYSCALL
186 #define OTHER 23  // Other
187 #define SPAN 24   // Branch/delay slot spans 2 pages
188 #define NI 25     // Not implemented
189 #define HLECALL 26// PCSX fake opcodes for HLE
190 #define COP2 27   // Coprocessor 2 move
191 #define C2LS 28   // Coprocessor 2 load/store
192 #define C2OP 29   // Coprocessor 2 operation
193 #define INTCALL 30// Call interpreter to handle rare corner cases
194
195   /* stubs */
196 #define CC_STUB 1
197 #define FP_STUB 2
198 #define LOADB_STUB 3
199 #define LOADH_STUB 4
200 #define LOADW_STUB 5
201 #define LOADD_STUB 6
202 #define LOADBU_STUB 7
203 #define LOADHU_STUB 8
204 #define STOREB_STUB 9
205 #define STOREH_STUB 10
206 #define STOREW_STUB 11
207 #define STORED_STUB 12
208 #define STORELR_STUB 13
209 #define INVCODE_STUB 14
210
211   /* branch codes */
212 #define TAKEN 1
213 #define NOTTAKEN 2
214 #define NULLDS 3
215
216 // asm linkage
217 int new_recompile_block(int addr);
218 void *get_addr_ht(u_int vaddr);
219 void invalidate_block(u_int block);
220 void invalidate_addr(u_int addr);
221 void remove_hash(int vaddr);
222 void jump_vaddr();
223 void dyna_linker();
224 void dyna_linker_ds();
225 void verify_code();
226 void verify_code_vm();
227 void verify_code_ds();
228 void cc_interrupt();
229 void fp_exception();
230 void fp_exception_ds();
231 void jump_syscall();
232 void jump_syscall_hle();
233 void jump_eret();
234 void jump_hlecall();
235 void jump_intcall();
236 void new_dyna_leave();
237
238 // TLB
239 void TLBWI_new();
240 void TLBWR_new();
241 void read_nomem_new();
242 void read_nomemb_new();
243 void read_nomemh_new();
244 void read_nomemd_new();
245 void write_nomem_new();
246 void write_nomemb_new();
247 void write_nomemh_new();
248 void write_nomemd_new();
249 void write_rdram_new();
250 void write_rdramb_new();
251 void write_rdramh_new();
252 void write_rdramd_new();
253 extern u_int memory_map[1048576];
254
255 // Needed by assembler
256 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
257 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
258 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
259 void load_all_regs(signed char i_regmap[]);
260 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
261 void load_regs_entry(int t);
262 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
263
264 int tracedebug=0;
265
266 //#define DEBUG_CYCLE_COUNT 1
267
268 void nullf() {}
269 //#define assem_debug printf
270 //#define inv_debug printf
271 #define assem_debug nullf
272 #define inv_debug nullf
273
274 static void tlb_hacks()
275 {
276 #ifndef DISABLE_TLB
277   // Goldeneye hack
278   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
279   {
280     u_int addr;
281     int n;
282     switch (ROM_HEADER->Country_code&0xFF) 
283     {
284       case 0x45: // U
285         addr=0x34b30;
286         break;                   
287       case 0x4A: // J 
288         addr=0x34b70;    
289         break;    
290       case 0x50: // E 
291         addr=0x329f0;
292         break;                        
293       default: 
294         // Unknown country code
295         addr=0;
296         break;
297     }
298     u_int rom_addr=(u_int)rom;
299     #ifdef ROM_COPY
300     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
301     // in the lower 4G of memory to use this hack.  Copy it if necessary.
302     if((void *)rom>(void *)0xffffffff) {
303       munmap(ROM_COPY, 67108864);
304       if(mmap(ROM_COPY, 12582912,
305               PROT_READ | PROT_WRITE,
306               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
307             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
308       memcpy(ROM_COPY,rom,12582912);
309       rom_addr=(u_int)ROM_COPY;
310     }
311     #endif
312     if(addr) {
313       for(n=0x7F000;n<0x80000;n++) {
314         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
315       }
316     }
317   }
318 #endif
319 }
320
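// Map a virtual address to an index into the jump_in/jump_out/jump_dirty
// tables.  Pages 0-2047 cover RAM directly (with the PCSX RAM mirrors folded
// together); everything else is folded into entries 2048-4095 so the tables
// stay a fixed 4096 entries long.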
321 static u_int get_page(u_int vaddr)
322 {
323 #ifndef PCSX
324   u_int page=(vaddr^0x80000000)>>12;
325 #else
326   u_int page=vaddr&~0xe0000000;
327   if (page < 0x1000000)
328     page &= ~0x0e00000; // RAM mirrors
329   page>>=12;
330 #endif
331 #ifndef DISABLE_TLB
332   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
333 #endif
334   if(page>2048) page=2048+(page&2047);
335   return page;
336 }
337
338 static u_int get_vpage(u_int vaddr)
339 {
340   u_int vpage=(vaddr^0x80000000)>>12;
341 #ifndef DISABLE_TLB
342   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
343 #endif
344   if(vpage>2048) vpage=2048+(vpage&2047);
345   return vpage;
346 }
347
348 // Get address from virtual address
349 // This is called from the recompiled JR/JALR instructions
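// Lookup order: the clean list (jump_in) for the page first, then the dirty
// list (jump_dirty), where a block is only reused if verify_dirty() shows the
// source is unchanged; if neither hits, the block is recompiled.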
350 void *get_addr(u_int vaddr)
351 {
352   u_int page=get_page(vaddr);
353   u_int vpage=get_vpage(vaddr);
354   struct ll_entry *head;
355   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
356   head=jump_in[page];
357   while(head!=NULL) {
358     if(head->vaddr==vaddr&&head->reg32==0) {
359   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
360       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
361       ht_bin[3]=ht_bin[1];
362       ht_bin[2]=ht_bin[0];
363       ht_bin[1]=(int)head->addr;
364       ht_bin[0]=vaddr;
365       return head->addr;
366     }
367     head=head->next;
368   }
369   head=jump_dirty[vpage];
370   while(head!=NULL) {
371     if(head->vaddr==vaddr&&head->reg32==0) {
372       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
373       // Don't restore blocks which are about to expire from the cache
374       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
375       if(verify_dirty(head->addr)) {
376         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
377         invalid_code[vaddr>>12]=0;
378         inv_code_start=inv_code_end=~0;
379         memory_map[vaddr>>12]|=0x40000000;
380         if(vpage<2048) {
381 #ifndef DISABLE_TLB
382           if(tlb_LUT_r[vaddr>>12]) {
383             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
384             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
385           }
386 #endif
387           restore_candidate[vpage>>3]|=1<<(vpage&7);
388         }
389         else restore_candidate[page>>3]|=1<<(page&7);
390         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
391         if(ht_bin[0]==vaddr) {
392           ht_bin[1]=(int)head->addr; // Replace existing entry
393         }
394         else
395         {
396           ht_bin[3]=ht_bin[1];
397           ht_bin[2]=ht_bin[0];
398           ht_bin[1]=(int)head->addr;
399           ht_bin[0]=vaddr;
400         }
401         return head->addr;
402       }
403     }
404     head=head->next;
405   }
406   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
407   int r=new_recompile_block(vaddr);
408   if(r==0) return get_addr(vaddr);
409   // Execute in unmapped page, generate pagefault exception
410   Status|=2;
411   Cause=(vaddr<<31)|0x8;
412   EPC=(vaddr&1)?vaddr-5:vaddr;
413   BadVAddr=(vaddr&~1);
414   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
415   EntryHi=BadVAddr&0xFFFFE000;
416   return get_addr_ht(0x80000000);
417 }
418 // Look up address in hash table first
419 void *get_addr_ht(u_int vaddr)
420 {
421   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
422   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
423   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
424   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
425   return get_addr(vaddr);
426 }
427
428 void *get_addr_32(u_int vaddr,u_int flags)
429 {
430 #ifdef FORCE32
431   return get_addr(vaddr);
432 #else
433   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
434   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
435   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
436   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
437   u_int page=get_page(vaddr);
438   u_int vpage=get_vpage(vaddr);
439   struct ll_entry *head;
440   head=jump_in[page];
441   while(head!=NULL) {
442     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
443       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
444       if(head->reg32==0) {
445         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
446         if(ht_bin[0]==-1) {
447           ht_bin[1]=(int)head->addr;
448           ht_bin[0]=vaddr;
449         }else if(ht_bin[2]==-1) {
450           ht_bin[3]=(int)head->addr;
451           ht_bin[2]=vaddr;
452         }
453         //ht_bin[3]=ht_bin[1];
454         //ht_bin[2]=ht_bin[0];
455         //ht_bin[1]=(int)head->addr;
456         //ht_bin[0]=vaddr;
457       }
458       return head->addr;
459     }
460     head=head->next;
461   }
462   head=jump_dirty[vpage];
463   while(head!=NULL) {
464     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
465       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
466       // Don't restore blocks which are about to expire from the cache
467       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
468       if(verify_dirty(head->addr)) {
469         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
470         invalid_code[vaddr>>12]=0;
471         inv_code_start=inv_code_end=~0;
472         memory_map[vaddr>>12]|=0x40000000;
473         if(vpage<2048) {
474 #ifndef DISABLE_TLB
475           if(tlb_LUT_r[vaddr>>12]) {
476             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
477             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
478           }
479 #endif
480           restore_candidate[vpage>>3]|=1<<(vpage&7);
481         }
482         else restore_candidate[page>>3]|=1<<(page&7);
483         if(head->reg32==0) {
484           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
485           if(ht_bin[0]==-1) {
486             ht_bin[1]=(int)head->addr;
487             ht_bin[0]=vaddr;
488           }else if(ht_bin[2]==-1) {
489             ht_bin[3]=(int)head->addr;
490             ht_bin[2]=vaddr;
491           }
492           //ht_bin[3]=ht_bin[1];
493           //ht_bin[2]=ht_bin[0];
494           //ht_bin[1]=(int)head->addr;
495           //ht_bin[0]=vaddr;
496         }
497         return head->addr;
498       }
499     }
500     head=head->next;
501   }
502   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
503   int r=new_recompile_block(vaddr);
504   if(r==0) return get_addr(vaddr);
505   // Execute in unmapped page, generate pagefault exception
506   Status|=2;
507   Cause=(vaddr<<31)|0x8;
508   EPC=(vaddr&1)?vaddr-5:vaddr;
509   BadVAddr=(vaddr&~1);
510   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
511   EntryHi=BadVAddr&0xFFFFE000;
512   return get_addr_ht(0x80000000);
513 #endif
514 }
515
516 void clear_all_regs(signed char regmap[])
517 {
518   int hr;
519   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
520 }
521
522 signed char get_reg(signed char regmap[],int r)
523 {
524   int hr;
525   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
526   return -1;
527 }
528
529 // Find a register that is available for two consecutive cycles
530 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
531 {
532   int hr;
533   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
534   return -1;
535 }
536
537 int count_free_regs(signed char regmap[])
538 {
539   int count=0;
540   int hr;
541   for(hr=0;hr<HOST_REGS;hr++)
542   {
543     if(hr!=EXCLUDE_REG) {
544       if(regmap[hr]<0) count++;
545     }
546   }
547   return count;
548 }
549
550 void dirty_reg(struct regstat *cur,signed char reg)
551 {
552   int hr;
553   if(!reg) return;
554   for (hr=0;hr<HOST_REGS;hr++) {
555     if((cur->regmap[hr]&63)==reg) {
556       cur->dirty|=1<<hr;
557     }
558   }
559 }
560
561 // If we dirty the lower half of a 64 bit register which is now being
562 // sign-extended, we need to dump the upper half.
563 // Note: Do this only after completion of the instruction, because
564 // some instructions may need to read the full 64-bit value even if
565 // overwriting it (eg SLTI, DSRA32).
566 static void flush_dirty_uppers(struct regstat *cur)
567 {
568   int hr,reg;
569   for (hr=0;hr<HOST_REGS;hr++) {
570     if((cur->dirty>>hr)&1) {
571       reg=cur->regmap[hr];
572       if(reg>=64) 
573         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
574     }
575   }
576 }
577
578 void set_const(struct regstat *cur,signed char reg,uint64_t value)
579 {
580   int hr;
581   if(!reg) return;
582   for (hr=0;hr<HOST_REGS;hr++) {
583     if(cur->regmap[hr]==reg) {
584       cur->isconst|=1<<hr;
585       cur->constmap[hr]=value;
586     }
587     else if((cur->regmap[hr]^64)==reg) {
588       cur->isconst|=1<<hr;
589       cur->constmap[hr]=value>>32;
590     }
591   }
592 }
593
594 void clear_const(struct regstat *cur,signed char reg)
595 {
596   int hr;
597   if(!reg) return;
598   for (hr=0;hr<HOST_REGS;hr++) {
599     if((cur->regmap[hr]&63)==reg) {
600       cur->isconst&=~(1<<hr);
601     }
602   }
603 }
604
605 int is_const(struct regstat *cur,signed char reg)
606 {
607   int hr;
608   if(reg<0) return 0;
609   if(!reg) return 1;
610   for (hr=0;hr<HOST_REGS;hr++) {
611     if((cur->regmap[hr]&63)==reg) {
612       return (cur->isconst>>hr)&1;
613     }
614   }
615   return 0;
616 }
617 uint64_t get_const(struct regstat *cur,signed char reg)
618 {
619   int hr;
620   if(!reg) return 0;
621   for (hr=0;hr<HOST_REGS;hr++) {
622     if(cur->regmap[hr]==reg) {
623       return cur->constmap[hr];
624     }
625   }
626   printf("Unknown constant in r%d\n",reg);
627   exit(1);
628 }
629
630 // Least soon needed registers
631 // Look at the next ten instructions and see which registers
632 // will be used.  Try not to reallocate these.
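// On return, hsn[r] holds the distance (in instructions) from i to the next
// use of register r within the lookahead window; entries for registers not
// used in the window are left as the caller set them.  A smaller value means
// the register is needed sooner.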
633 void lsn(u_char hsn[], int i, int *preferred_reg)
634 {
635   int j;
636   int b=-1;
637   for(j=0;j<9;j++)
638   {
639     if(i+j>=slen) {
640       j=slen-i-1;
641       break;
642     }
643     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
644     {
645       // Don't go past an unconditional jump
646       j++;
647       break;
648     }
649   }
650   for(;j>=0;j--)
651   {
652     if(rs1[i+j]) hsn[rs1[i+j]]=j;
653     if(rs2[i+j]) hsn[rs2[i+j]]=j;
654     if(rt1[i+j]) hsn[rt1[i+j]]=j;
655     if(rt2[i+j]) hsn[rt2[i+j]]=j;
656     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
657       // Stores can allocate zero
658       hsn[rs1[i+j]]=j;
659       hsn[rs2[i+j]]=j;
660     }
661     // On some architectures stores need invc_ptr
662     #if defined(HOST_IMM8)
663     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
664       hsn[INVCP]=j;
665     }
666     #endif
667     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
668     {
669       hsn[CCREG]=j;
670       b=j;
671     }
672   }
673   if(b>=0)
674   {
675     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
676     {
677       // Follow first branch
678       int t=(ba[i+b]-start)>>2;
679       j=7-b;if(t+j>=slen) j=slen-t-1;
680       for(;j>=0;j--)
681       {
682         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
683         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
684         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
685         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
686       }
687     }
688     // TODO: preferred register based on backward branch
689   }
690   // Delay slot should preferably not overwrite branch conditions or cycle count
691   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
692     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
693     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
694     hsn[CCREG]=1;
695     // ...or hash tables
696     hsn[RHASH]=1;
697     hsn[RHTBL]=1;
698   }
699   // Coprocessor load/store needs FTEMP, even if not declared
700   if(itype[i]==C1LS||itype[i]==C2LS) {
701     hsn[FTEMP]=0;
702   }
703   // Load L/R also uses FTEMP as a temporary register
704   if(itype[i]==LOADLR) {
705     hsn[FTEMP]=0;
706   }
707   // Also SWL/SWR/SDL/SDR
708   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
709     hsn[FTEMP]=0;
710   }
711   // Don't remove the TLB registers either
712   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
713     hsn[TLREG]=0;
714   }
715   // Don't remove the miniht registers
716   if(itype[i]==UJUMP||itype[i]==RJUMP)
717   {
718     hsn[RHASH]=0;
719     hsn[RHTBL]=0;
720   }
721 }
722
723 // We only want to allocate registers if we're going to use them again soon
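// Returns 1 if register r is read again within the next few (up to 9)
// instructions and is not marked unneeded there; returns 0 otherwise,
// including when the preceding instruction exits the block.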
724 int needed_again(int r, int i)
725 {
726   int j;
727   int b=-1;
728   int rn=10;
729   
730   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
731   {
732     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
733       return 0; // Don't need any registers if exiting the block
734   }
735   for(j=0;j<9;j++)
736   {
737     if(i+j>=slen) {
738       j=slen-i-1;
739       break;
740     }
741     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
742     {
743       // Don't go past an unconditional jump
744       j++;
745       break;
746     }
747     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
748     {
749       break;
750     }
751   }
752   for(;j>=1;j--)
753   {
754     if(rs1[i+j]==r) rn=j;
755     if(rs2[i+j]==r) rn=j;
756     if((unneeded_reg[i+j]>>r)&1) rn=10;
757     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
758     {
759       b=j;
760     }
761   }
762   /*
763   if(b>=0)
764   {
765     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
766     {
767       // Follow first branch
768       int o=rn;
769       int t=(ba[i+b]-start)>>2;
770       j=7-b;if(t+j>=slen) j=slen-t-1;
771       for(;j>=0;j--)
772       {
773         if(!((unneeded_reg[t+j]>>r)&1)) {
774           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
775           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
776         }
777         else rn=o;
778       }
779     }
780   }*/
781   if(rn<10) return 1;
782   return 0;
783 }
784
785 // Try to match register allocations at the end of a loop with those
786 // at the beginning
787 int loop_reg(int i, int r, int hr)
788 {
789   int j,k;
790   for(j=0;j<9;j++)
791   {
792     if(i+j>=slen) {
793       j=slen-i-1;
794       break;
795     }
796     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
797     {
798       // Don't go past an unconditional jump
799       j++;
800       break;
801     }
802   }
803   k=0;
804   if(i>0){
805     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
806       k--;
807   }
808   for(;k<j;k++)
809   {
810     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
811     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
812     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
813     {
814       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
815       {
816         int t=(ba[i+k]-start)>>2;
817         int reg=get_reg(regs[t].regmap_entry,r);
818         if(reg>=0) return reg;
819         //reg=get_reg(regs[t+1].regmap_entry,r);
820         //if(reg>=0) return reg;
821       }
822     }
823   }
824   return hr;
825 }
826
827
828 // Allocate every register, preserving source/target regs
829 void alloc_all(struct regstat *cur,int i)
830 {
831   int hr;
832   
833   for(hr=0;hr<HOST_REGS;hr++) {
834     if(hr!=EXCLUDE_REG) {
835       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
836          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
837       {
838         cur->regmap[hr]=-1;
839         cur->dirty&=~(1<<hr);
840       }
841       // Don't need zeros
842       if((cur->regmap[hr]&63)==0)
843       {
844         cur->regmap[hr]=-1;
845         cur->dirty&=~(1<<hr);
846       }
847     }
848   }
849 }
850
851
852 void div64(int64_t dividend,int64_t divisor)
853 {
854   lo=dividend/divisor;
855   hi=dividend%divisor;
856   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
857   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
858 }
859 void divu64(uint64_t dividend,uint64_t divisor)
860 {
861   lo=dividend/divisor;
862   hi=dividend%divisor;
863   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
864   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
865 }
866
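// 64x64->128-bit multiply done in 32-bit chunks (for DMULT/DMULTU): the low
// 64 bits of the product go to lo, the high 64 bits to hi.  mult64 treats the
// operands as signed, multu64 as unsigned.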
867 void mult64(uint64_t m1,uint64_t m2)
868 {
869    unsigned long long int op1, op2, op3, op4;
870    unsigned long long int result1, result2, result3, result4;
871    unsigned long long int temp1, temp2, temp3, temp4;
872    int sign = 0;
873    
874    if ((int64_t)m1 < 0)
875      {
876     op2 = -m1;
877     sign = 1 - sign;
878      }
879    else op2 = m1;
880    if ((int64_t)m2 < 0)
881      {
882     op4 = -m2;
883     sign = 1 - sign;
884      }
885    else op4 = m2;
886    
887    op1 = op2 & 0xFFFFFFFF;
888    op2 = (op2 >> 32) & 0xFFFFFFFF;
889    op3 = op4 & 0xFFFFFFFF;
890    op4 = (op4 >> 32) & 0xFFFFFFFF;
891    
892    temp1 = op1 * op3;
893    temp2 = (temp1 >> 32) + op1 * op4;
894    temp3 = op2 * op3;
895    temp4 = (temp3 >> 32) + op2 * op4;
896    
897    result1 = temp1 & 0xFFFFFFFF;
898    result2 = temp2 + (temp3 & 0xFFFFFFFF);
899    result3 = (result2 >> 32) + temp4;
900    result4 = (result3 >> 32);
901    
902    lo = result1 | (result2 << 32);
903    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
904    if (sign)
905      {
906     hi = ~hi;
907     if (!lo) hi++;
908     else lo = ~lo + 1;
909      }
910 }
911
912 void multu64(uint64_t m1,uint64_t m2)
913 {
914    unsigned long long int op1, op2, op3, op4;
915    unsigned long long int result1, result2, result3, result4;
916    unsigned long long int temp1, temp2, temp3, temp4;
917    
918    op1 = m1 & 0xFFFFFFFF;
919    op2 = (m1 >> 32) & 0xFFFFFFFF;
920    op3 = m2 & 0xFFFFFFFF;
921    op4 = (m2 >> 32) & 0xFFFFFFFF;
922    
923    temp1 = op1 * op3;
924    temp2 = (temp1 >> 32) + op1 * op4;
925    temp3 = op2 * op3;
926    temp4 = (temp3 >> 32) + op2 * op4;
927    
928    result1 = temp1 & 0xFFFFFFFF;
929    result2 = temp2 + (temp3 & 0xFFFFFFFF);
930    result3 = (result2 >> 32) + temp4;
931    result4 = (result3 >> 32);
932    
933    lo = result1 | (result2 << 32);
934    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
935    
936   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
937   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
938 }
939
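// Helpers for the unaligned doubleword loads (LDL/LDR): merge the bytes
// fetched from memory with the bytes of the destination register that the
// instruction must leave untouched.  'bits' encodes the address alignment;
// ldl_merge keeps the low 'bits' bits of the original value and places the
// loaded data above them, ldr_merge performs the mirror-image merge.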
940 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
941 {
942   if(bits) {
943     original<<=64-bits;
944     original>>=64-bits;
945     loaded<<=bits;
946     original|=loaded;
947   }
948   else original=loaded;
949   return original;
950 }
951 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
952 {
953   if(bits^56) {
954     original>>=64-(bits^56);
955     original<<=64-(bits^56);
956     loaded>>=bits^56;
957     original|=loaded;
958   }
959   else original=loaded;
960   return original;
961 }
962
963 #ifdef __i386__
964 #include "assem_x86.c"
965 #endif
966 #ifdef __x86_64__
967 #include "assem_x64.c"
968 #endif
969 #ifdef __arm__
970 #include "assem_arm.c"
971 #endif
972
973 // Add virtual address mapping to linked list
974 void ll_add(struct ll_entry **head,int vaddr,void *addr)
975 {
976   struct ll_entry *new_entry;
977   new_entry=malloc(sizeof(struct ll_entry));
978   assert(new_entry!=NULL);
979   new_entry->vaddr=vaddr;
980   new_entry->reg32=0;
981   new_entry->addr=addr;
982   new_entry->next=*head;
983   *head=new_entry;
984 }
985
986 // Add virtual address mapping for 32-bit compiled block
987 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
988 {
989   ll_add(head,vaddr,addr);
990 #ifndef FORCE32
991   (*head)->reg32=reg32;
992 #endif
993 }
994
995 // Check if an address is already compiled
996 // but don't return addresses which are about to expire from the cache
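// (The shift/compare below estimates how far a block sits ahead of the
//  current output pointer in the translation cache, which is recycled as a
//  ring of size 2^TARGET_SIZE_2; blocks the expiry sweep will reach soon
//  are treated as misses.)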
997 void *check_addr(u_int vaddr)
998 {
999   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
1000   if(ht_bin[0]==vaddr) {
1001     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1002       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
1003   }
1004   if(ht_bin[2]==vaddr) {
1005     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1006       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1007   }
1008   u_int page=get_page(vaddr);
1009   struct ll_entry *head;
1010   head=jump_in[page];
1011   while(head!=NULL) {
1012     if(head->vaddr==vaddr&&head->reg32==0) {
1013       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1014         // Update existing entry with current address
1015         if(ht_bin[0]==vaddr) {
1016           ht_bin[1]=(int)head->addr;
1017           return head->addr;
1018         }
1019         if(ht_bin[2]==vaddr) {
1020           ht_bin[3]=(int)head->addr;
1021           return head->addr;
1022         }
1023         // Insert into hash table with low priority.
1024         // Don't evict existing entries, as they are probably
1025         // addresses that are being accessed frequently.
1026         if(ht_bin[0]==-1) {
1027           ht_bin[1]=(int)head->addr;
1028           ht_bin[0]=vaddr;
1029         }else if(ht_bin[2]==-1) {
1030           ht_bin[3]=(int)head->addr;
1031           ht_bin[2]=vaddr;
1032         }
1033         return head->addr;
1034       }
1035     }
1036     head=head->next;
1037   }
1038   return 0;
1039 }
1040
1041 void remove_hash(int vaddr)
1042 {
1043   //printf("remove hash: %x\n",vaddr);
1044   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1045   if(ht_bin[2]==vaddr) {
1046     ht_bin[2]=ht_bin[3]=-1;
1047   }
1048   if(ht_bin[0]==vaddr) {
1049     ht_bin[0]=ht_bin[2];
1050     ht_bin[1]=ht_bin[3];
1051     ht_bin[2]=ht_bin[3]=-1;
1052   }
1053 }
1054
1055 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1056 {
1057   struct ll_entry *next;
1058   while(*head) {
1059     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1060        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1061     {
1062       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1063       remove_hash((*head)->vaddr);
1064       next=(*head)->next;
1065       free(*head);
1066       *head=next;
1067     }
1068     else
1069     {
1070       head=&((*head)->next);
1071     }
1072   }
1073 }
1074
1075 // Remove all entries from linked list
1076 void ll_clear(struct ll_entry **head)
1077 {
1078   struct ll_entry *cur;
1079   struct ll_entry *next;
1080   if((cur=*head)!=NULL) {
1081     *head=0;
1082     while(cur) {
1083       next=cur->next;
1084       free(cur);
1085       cur=next;
1086     }
1087   }
1088 }
1089
1090 // Dereference the pointers and remove them if they match
1091 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1092 {
1093   while(head) {
1094     int ptr=get_pointer(head->addr);
1095     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1096     if(((ptr>>shift)==(addr>>shift)) ||
1097        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1098     {
1099       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1100       u_int host_addr=(u_int)kill_pointer(head->addr);
1101       #ifdef __arm__
1102         needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1103       #endif
1104     }
1105     head=head->next;
1106   }
1107 }
1108
1109 // This is called when we write to a compiled block (see do_invstub)
1110 void invalidate_page(u_int page)
1111 {
1112   struct ll_entry *head;
1113   struct ll_entry *next;
1114   head=jump_in[page];
1115   jump_in[page]=0;
1116   while(head!=NULL) {
1117     inv_debug("INVALIDATE: %x\n",head->vaddr);
1118     remove_hash(head->vaddr);
1119     next=head->next;
1120     free(head);
1121     head=next;
1122   }
1123   head=jump_out[page];
1124   jump_out[page]=0;
1125   while(head!=NULL) {
1126     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1127     u_int host_addr=(u_int)kill_pointer(head->addr);
1128     #ifdef __arm__
1129       needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1130     #endif
1131     next=head->next;
1132     free(head);
1133     head=next;
1134   }
1135 }
1136
1137 static void invalidate_block_range(u_int block, u_int first, u_int last)
1138 {
1139   u_int page=get_page(block<<12);
1140   //printf("first=%d last=%d\n",first,last);
1141   invalidate_page(page);
1142   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1143   assert(last<page+5);
1144   // Invalidate the adjacent pages if a block crosses a 4K boundary
1145   while(first<page) {
1146     invalidate_page(first);
1147     first++;
1148   }
1149   for(first=page+1;first<last;first++) {
1150     invalidate_page(first);
1151   }
1152   #ifdef __arm__
1153     do_clear_cache();
1154   #endif
1155   
1156   // Don't trap writes
1157   invalid_code[block]=1;
1158 #ifndef DISABLE_TLB
1159   // If there is a valid TLB entry for this page, remove write protect
1160   if(tlb_LUT_w[block]) {
1161     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1162     // CHECK: Is this right?
1163     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1164     u_int real_block=tlb_LUT_w[block]>>12;
1165     invalid_code[real_block]=1;
1166     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1167   }
1168   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1169 #endif
1170
1171   #ifdef USE_MINI_HT
1172   memset(mini_ht,-1,sizeof(mini_ht));
1173   #endif
1174 }
1175
1176 void invalidate_block(u_int block)
1177 {
1178   u_int page=get_page(block<<12);
1179   u_int vpage=get_vpage(block<<12);
1180   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1181   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1182   u_int first,last;
1183   first=last=page;
1184   struct ll_entry *head;
1185   head=jump_dirty[vpage];
1186   //printf("page=%d vpage=%d\n",page,vpage);
1187   while(head!=NULL) {
1188     u_int start,end;
1189     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1190       get_bounds((int)head->addr,&start,&end);
1191       //printf("start: %x end: %x\n",start,end);
1192       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1193         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1194           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1195           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1196         }
1197       }
1198 #ifndef DISABLE_TLB
1199       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1200         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1201           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1202           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1203         }
1204       }
1205 #endif
1206     }
1207     head=head->next;
1208   }
1209   invalidate_block_range(block,first,last);
1210 }
1211
1212 void invalidate_addr(u_int addr)
1213 {
1214 #ifdef PCSX
1215   //static int rhits;
1216   // this check is done by the caller
1217   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1218   u_int page=get_page(addr);
1219   if(page<2048) { // RAM
1220     struct ll_entry *head;
1221     u_int addr_min=~0, addr_max=0;
1222     int mask=RAM_SIZE-1;
1223     int pg1;
1224     inv_code_start=addr&~0xfff;
1225     inv_code_end=addr|0xfff;
1226     pg1=page;
1227     if (pg1>0) {
1228       // must check previous page too because of spans..
1229       pg1--;
1230       inv_code_start-=0x1000;
1231     }
1232     for(;pg1<=page;pg1++) {
1233       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1234         u_int start,end;
1235         get_bounds((int)head->addr,&start,&end);
1236         if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
1237           if(start<addr_min) addr_min=start;
1238           if(end>addr_max) addr_max=end;
1239         }
1240         else if(addr<start) {
1241           if(start<inv_code_end)
1242             inv_code_end=start-1;
1243         }
1244         else {
1245           if(end>inv_code_start)
1246             inv_code_start=end;
1247         }
1248       }
1249     }
1250     if (addr_min!=~0) {
1251       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1252       inv_code_start=inv_code_end=~0;
1253       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1254       return;
1255     }
1256     else {
1257       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);//rhits);
1258     }
1259     //rhits=0;
1260     if(page!=0) // FIXME: don't know what's up with page 0 (Klonoa)
1261       return;
1262   }
1263 #endif
1264   invalidate_block(addr>>12);
1265 }
1266
1267 // This is called when loading a save state.
1268 // Anything could have changed, so invalidate everything.
1269 void invalidate_all_pages()
1270 {
1271   u_int page,n;
1272   for(page=0;page<4096;page++)
1273     invalidate_page(page);
1274   for(page=0;page<1048576;page++)
1275     if(!invalid_code[page]) {
1276       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1277       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1278     }
1279   #ifdef __arm__
1280   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1281   #endif
1282   #ifdef USE_MINI_HT
1283   memset(mini_ht,-1,sizeof(mini_ht));
1284   #endif
1285   #ifndef DISABLE_TLB
1286   // TLB
1287   for(page=0;page<0x100000;page++) {
1288     if(tlb_LUT_r[page]) {
1289       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1290       if(!tlb_LUT_w[page]||!invalid_code[page])
1291         memory_map[page]|=0x40000000; // Write protect
1292     }
1293     else memory_map[page]=-1;
1294     if(page==0x80000) page=0xC0000;
1295   }
1296   tlb_hacks();
1297   #endif
1298 }
1299
1300 // Add an entry to jump_out after making a link
1301 void add_link(u_int vaddr,void *src)
1302 {
1303   u_int page=get_page(vaddr);
1304   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1305   int *ptr=(int *)(src+4);
1306   assert((*ptr&0x0fff0000)==0x059f0000);
1307   ll_add(jump_out+page,vaddr,src);
1308   //int ptr=get_pointer(src);
1309   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1310 }
1311
1312 // If a code block was found to be unmodified (bit was set in
1313 // restore_candidate) and it remains unmodified (bit is clear
1314 // in invalid_code) then move the entries for that 4K page from
1315 // the dirty list to the clean list.
1316 void clean_blocks(u_int page)
1317 {
1318   struct ll_entry *head;
1319   inv_debug("INV: clean_blocks page=%d\n",page);
1320   head=jump_dirty[page];
1321   while(head!=NULL) {
1322     if(!invalid_code[head->vaddr>>12]) {
1323       // Don't restore blocks which are about to expire from the cache
1324       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1325         u_int start,end;
1326         if(verify_dirty((int)head->addr)) {
1327           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1328           u_int i;
1329           u_int inv=0;
1330           get_bounds((int)head->addr,&start,&end);
1331           if(start-(u_int)rdram<RAM_SIZE) {
1332             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1333               inv|=invalid_code[i];
1334             }
1335           }
1336           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1337             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1338             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1339             if(addr<start||addr>=end) inv=1;
1340           }
1341           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1342             inv=1;
1343           }
1344           if(!inv) {
1345             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1346             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1347               u_int ppage=page;
1348 #ifndef DISABLE_TLB
1349               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1350 #endif
1351               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1352               //printf("page=%x, addr=%x\n",page,head->vaddr);
1353               //assert(head->vaddr>>12==(page|0x80000));
1354               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1355               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1356               if(!head->reg32) {
1357                 if(ht_bin[0]==head->vaddr) {
1358                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1359                 }
1360                 if(ht_bin[2]==head->vaddr) {
1361                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1362                 }
1363               }
1364             }
1365           }
1366         }
1367       }
1368     }
1369     head=head->next;
1370   }
1371 }
1372
1373
1374 void mov_alloc(struct regstat *current,int i)
1375 {
1376   // Note: Don't need to actually alloc the source registers
1377   if((~current->is32>>rs1[i])&1) {
1378     //alloc_reg64(current,i,rs1[i]);
1379     alloc_reg64(current,i,rt1[i]);
1380     current->is32&=~(1LL<<rt1[i]);
1381   } else {
1382     //alloc_reg(current,i,rs1[i]);
1383     alloc_reg(current,i,rt1[i]);
1384     current->is32|=(1LL<<rt1[i]);
1385   }
1386   clear_const(current,rs1[i]);
1387   clear_const(current,rt1[i]);
1388   dirty_reg(current,rt1[i]);
1389 }
1390
1391 void shiftimm_alloc(struct regstat *current,int i)
1392 {
1393   clear_const(current,rs1[i]);
1394   clear_const(current,rt1[i]);
1395   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1396   {
1397     if(rt1[i]) {
1398       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1399       else lt1[i]=rs1[i];
1400       alloc_reg(current,i,rt1[i]);
1401       current->is32|=1LL<<rt1[i];
1402       dirty_reg(current,rt1[i]);
1403     }
1404   }
1405   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1406   {
1407     if(rt1[i]) {
1408       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1409       alloc_reg64(current,i,rt1[i]);
1410       current->is32&=~(1LL<<rt1[i]);
1411       dirty_reg(current,rt1[i]);
1412     }
1413   }
1414   if(opcode2[i]==0x3c) // DSLL32
1415   {
1416     if(rt1[i]) {
1417       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1418       alloc_reg64(current,i,rt1[i]);
1419       current->is32&=~(1LL<<rt1[i]);
1420       dirty_reg(current,rt1[i]);
1421     }
1422   }
1423   if(opcode2[i]==0x3e) // DSRL32
1424   {
1425     if(rt1[i]) {
1426       alloc_reg64(current,i,rs1[i]);
1427       if(imm[i]==32) {
1428         alloc_reg64(current,i,rt1[i]);
1429         current->is32&=~(1LL<<rt1[i]);
1430       } else {
1431         alloc_reg(current,i,rt1[i]);
1432         current->is32|=1LL<<rt1[i];
1433       }
1434       dirty_reg(current,rt1[i]);
1435     }
1436   }
1437   if(opcode2[i]==0x3f) // DSRA32
1438   {
1439     if(rt1[i]) {
1440       alloc_reg64(current,i,rs1[i]);
1441       alloc_reg(current,i,rt1[i]);
1442       current->is32|=1LL<<rt1[i];
1443       dirty_reg(current,rt1[i]);
1444     }
1445   }
1446 }
1447
1448 void shift_alloc(struct regstat *current,int i)
1449 {
1450   if(rt1[i]) {
1451     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1452     {
1453       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1454       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1455       alloc_reg(current,i,rt1[i]);
1456       if(rt1[i]==rs2[i]) {
1457         alloc_reg_temp(current,i,-1);
1458         minimum_free_regs[i]=1;
1459       }
1460       current->is32|=1LL<<rt1[i];
1461     } else { // DSLLV/DSRLV/DSRAV
1462       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1463       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1464       alloc_reg64(current,i,rt1[i]);
1465       current->is32&=~(1LL<<rt1[i]);
1466       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1467       {
1468         alloc_reg_temp(current,i,-1);
1469         minimum_free_regs[i]=1;
1470       }
1471     }
1472     clear_const(current,rs1[i]);
1473     clear_const(current,rs2[i]);
1474     clear_const(current,rt1[i]);
1475     dirty_reg(current,rt1[i]);
1476   }
1477 }
1478
1479 void alu_alloc(struct regstat *current,int i)
1480 {
1481   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1482     if(rt1[i]) {
1483       if(rs1[i]&&rs2[i]) {
1484         alloc_reg(current,i,rs1[i]);
1485         alloc_reg(current,i,rs2[i]);
1486       }
1487       else {
1488         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1489         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1490       }
1491       alloc_reg(current,i,rt1[i]);
1492     }
1493     current->is32|=1LL<<rt1[i];
1494   }
1495   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1496     if(rt1[i]) {
1497       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1498       {
1499         alloc_reg64(current,i,rs1[i]);
1500         alloc_reg64(current,i,rs2[i]);
1501         alloc_reg(current,i,rt1[i]);
1502       } else {
1503         alloc_reg(current,i,rs1[i]);
1504         alloc_reg(current,i,rs2[i]);
1505         alloc_reg(current,i,rt1[i]);
1506       }
1507     }
1508     current->is32|=1LL<<rt1[i];
1509   }
1510   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1511     if(rt1[i]) {
1512       if(rs1[i]&&rs2[i]) {
1513         alloc_reg(current,i,rs1[i]);
1514         alloc_reg(current,i,rs2[i]);
1515       }
1516       else
1517       {
1518         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1519         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1520       }
1521       alloc_reg(current,i,rt1[i]);
1522       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1523       {
1524         if(!((current->uu>>rt1[i])&1)) {
1525           alloc_reg64(current,i,rt1[i]);
1526         }
1527         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1528           if(rs1[i]&&rs2[i]) {
1529             alloc_reg64(current,i,rs1[i]);
1530             alloc_reg64(current,i,rs2[i]);
1531           }
1532           else
1533           {
1534             // Is it really worth it to keep 64-bit values in registers?
1535             #ifdef NATIVE_64BIT
1536             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1537             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1538             #endif
1539           }
1540         }
1541         current->is32&=~(1LL<<rt1[i]);
1542       } else {
1543         current->is32|=1LL<<rt1[i];
1544       }
1545     }
1546   }
1547   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1548     if(rt1[i]) {
1549       if(rs1[i]&&rs2[i]) {
1550         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1551           alloc_reg64(current,i,rs1[i]);
1552           alloc_reg64(current,i,rs2[i]);
1553           alloc_reg64(current,i,rt1[i]);
1554         } else {
1555           alloc_reg(current,i,rs1[i]);
1556           alloc_reg(current,i,rs2[i]);
1557           alloc_reg(current,i,rt1[i]);
1558         }
1559       }
1560       else {
1561         alloc_reg(current,i,rt1[i]);
1562         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1563           // DADD used as move, or zeroing
1564           // If we have a 64-bit source, then make the target 64 bits too
1565           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1566             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1567             alloc_reg64(current,i,rt1[i]);
1568           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1569             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1570             alloc_reg64(current,i,rt1[i]);
1571           }
1572           if(opcode2[i]>=0x2e&&rs2[i]) {
1573             // DSUB used as negation - 64-bit result
1574             // If we have a 32-bit register, extend it to 64 bits
1575             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1576             alloc_reg64(current,i,rt1[i]);
1577           }
1578         }
1579       }
1580       if(rs1[i]&&rs2[i]) {
1581         current->is32&=~(1LL<<rt1[i]);
1582       } else if(rs1[i]) {
1583         current->is32&=~(1LL<<rt1[i]);
1584         if((current->is32>>rs1[i])&1)
1585           current->is32|=1LL<<rt1[i];
1586       } else if(rs2[i]) {
1587         current->is32&=~(1LL<<rt1[i]);
1588         if((current->is32>>rs2[i])&1)
1589           current->is32|=1LL<<rt1[i];
1590       } else {
1591         current->is32|=1LL<<rt1[i];
1592       }
1593     }
1594   }
1595   clear_const(current,rs1[i]);
1596   clear_const(current,rs2[i]);
1597   clear_const(current,rt1[i]);
1598   dirty_reg(current,rt1[i]);
1599 }
1600
1601 void imm16_alloc(struct regstat *current,int i)
1602 {
1603   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1604   else lt1[i]=rs1[i];
1605   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1606   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1607     current->is32&=~(1LL<<rt1[i]);
1608     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1609       // TODO: Could preserve the 32-bit flag if the immediate is zero
1610       alloc_reg64(current,i,rt1[i]);
1611       alloc_reg64(current,i,rs1[i]);
1612     }
1613     clear_const(current,rs1[i]);
1614     clear_const(current,rt1[i]);
1615   }
1616   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1617     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1618     current->is32|=1LL<<rt1[i];
1619     clear_const(current,rs1[i]);
1620     clear_const(current,rt1[i]);
1621   }
1622   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1623     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1624       if(rs1[i]!=rt1[i]) {
1625         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1626         alloc_reg64(current,i,rt1[i]);
1627         current->is32&=~(1LL<<rt1[i]);
1628       }
1629     }
1630     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1631     if(is_const(current,rs1[i])) {
1632       int v=get_const(current,rs1[i]);
1633       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1634       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1635       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1636     }
1637     else clear_const(current,rt1[i]);
1638   }
1639   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1640     if(is_const(current,rs1[i])) {
1641       int v=get_const(current,rs1[i]);
1642       set_const(current,rt1[i],v+imm[i]);
1643     }
1644     else clear_const(current,rt1[i]);
1645     current->is32|=1LL<<rt1[i];
1646   }
1647   else {
1648     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1649     current->is32|=1LL<<rt1[i];
1650   }
1651   dirty_reg(current,rt1[i]);
1652 }
1653
1654 void load_alloc(struct regstat *current,int i)
1655 {
1656   clear_const(current,rt1[i]);
1657   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1658   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1659   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1660   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1661     alloc_reg(current,i,rt1[i]);
1662     assert(get_reg(current->regmap,rt1[i])>=0);
1663     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1664     {
1665       current->is32&=~(1LL<<rt1[i]);
1666       alloc_reg64(current,i,rt1[i]);
1667     }
1668     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1669     {
1670       current->is32&=~(1LL<<rt1[i]);
1671       alloc_reg64(current,i,rt1[i]);
1672       alloc_all(current,i);
1673       alloc_reg64(current,i,FTEMP);
1674       minimum_free_regs[i]=HOST_REGS;
1675     }
1676     else current->is32|=1LL<<rt1[i];
1677     dirty_reg(current,rt1[i]);
1678     // If using TLB, need a register for pointer to the mapping table
1679     if(using_tlb) alloc_reg(current,i,TLREG);
1680     // LWL/LWR need a temporary register for the old value
1681     if(opcode[i]==0x22||opcode[i]==0x26)
1682     {
1683       alloc_reg(current,i,FTEMP);
1684       alloc_reg_temp(current,i,-1);
1685       minimum_free_regs[i]=1;
1686     }
1687   }
1688   else
1689   {
1690     // Load to r0 or unneeded register (dummy load)
1691     // but we still need a register to calculate the address
1692     if(opcode[i]==0x22||opcode[i]==0x26)
1693     {
1694       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1695     }
1696     // If using TLB, need a register for pointer to the mapping table
1697     if(using_tlb) alloc_reg(current,i,TLREG);
1698     alloc_reg_temp(current,i,-1);
1699     minimum_free_regs[i]=1;
1700     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1701     {
1702       alloc_all(current,i);
1703       alloc_reg64(current,i,FTEMP);
1704       minimum_free_regs[i]=HOST_REGS;
1705     }
1706   }
1707 }
1708
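// Register allocation for stores: the source data register (64-bit for SDL/SDR/SD),
// the TLB map pointer or invalid_code pointer, FTEMP for the unaligned cases, and
// an address-generation temporary.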
1709 void store_alloc(struct regstat *current,int i)
1710 {
1711   clear_const(current,rs2[i]);
1712   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1713   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1714   alloc_reg(current,i,rs2[i]);
1715   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1716     alloc_reg64(current,i,rs2[i]);
1717     if(rs2[i]) alloc_reg(current,i,FTEMP);
1718   }
1719   // If using TLB, need a register for pointer to the mapping table
1720   if(using_tlb) alloc_reg(current,i,TLREG);
1721   #if defined(HOST_IMM8)
1722   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1723   else alloc_reg(current,i,INVCP);
1724   #endif
1725   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1726     alloc_reg(current,i,FTEMP);
1727   }
1728   // We need a temporary register for address generation
1729   alloc_reg_temp(current,i,-1);
1730   minimum_free_regs[i]=1;
1731 }
1732
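// Register allocation for COP1 loads/stores (LWC1/SWC1/LDC1/SDC1): the COP1 status
// register, FTEMP (64-bit for doubleword transfers) and an address temporary.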
1733 void c1ls_alloc(struct regstat *current,int i)
1734 {
1735   //clear_const(current,rs1[i]); // FIXME
1736   clear_const(current,rt1[i]);
1737   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1738   alloc_reg(current,i,CSREG); // Status
1739   alloc_reg(current,i,FTEMP);
1740   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1741     alloc_reg64(current,i,FTEMP);
1742   }
1743   // If using TLB, need a register for pointer to the mapping table
1744   if(using_tlb) alloc_reg(current,i,TLREG);
1745   #if defined(HOST_IMM8)
1746   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1747   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1748     alloc_reg(current,i,INVCP);
1749   #endif
1750   // We need a temporary register for address generation
1751   alloc_reg_temp(current,i,-1);
1752 }
1753
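// Register allocation for GTE loads/stores (LWC2/SWC2): FTEMP plus an
// address-generation temporary.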
1754 void c2ls_alloc(struct regstat *current,int i)
1755 {
1756   clear_const(current,rt1[i]);
1757   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1758   alloc_reg(current,i,FTEMP);
1759   // If using TLB, need a register for pointer to the mapping table
1760   if(using_tlb) alloc_reg(current,i,TLREG);
1761   #if defined(HOST_IMM8)
1762   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1763   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1764     alloc_reg(current,i,INVCP);
1765   #endif
1766   // We need a temporary register for address generation
1767   alloc_reg_temp(current,i,-1);
1768   minimum_free_regs[i]=1;
1769 }
1770
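// Register allocation for multiply/divide.  The 32-bit forms keep HI/LO allocated;
// the 64-bit forms flush everything with alloc_all.  With a zero operand only HI/LO
// need to be allocated, since the result is simply zero.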
1771 #ifndef multdiv_alloc
1772 void multdiv_alloc(struct regstat *current,int i)
1773 {
1774   //  case 0x18: MULT
1775   //  case 0x19: MULTU
1776   //  case 0x1A: DIV
1777   //  case 0x1B: DIVU
1778   //  case 0x1C: DMULT
1779   //  case 0x1D: DMULTU
1780   //  case 0x1E: DDIV
1781   //  case 0x1F: DDIVU
1782   clear_const(current,rs1[i]);
1783   clear_const(current,rs2[i]);
1784   if(rs1[i]&&rs2[i])
1785   {
1786     if((opcode2[i]&4)==0) // 32-bit
1787     {
1788       current->u&=~(1LL<<HIREG);
1789       current->u&=~(1LL<<LOREG);
1790       alloc_reg(current,i,HIREG);
1791       alloc_reg(current,i,LOREG);
1792       alloc_reg(current,i,rs1[i]);
1793       alloc_reg(current,i,rs2[i]);
1794       current->is32|=1LL<<HIREG;
1795       current->is32|=1LL<<LOREG;
1796       dirty_reg(current,HIREG);
1797       dirty_reg(current,LOREG);
1798     }
1799     else // 64-bit
1800     {
1801       current->u&=~(1LL<<HIREG);
1802       current->u&=~(1LL<<LOREG);
1803       current->uu&=~(1LL<<HIREG);
1804       current->uu&=~(1LL<<LOREG);
1805       alloc_reg64(current,i,HIREG);
1806       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1807       alloc_reg64(current,i,rs1[i]);
1808       alloc_reg64(current,i,rs2[i]);
1809       alloc_all(current,i);
1810       current->is32&=~(1LL<<HIREG);
1811       current->is32&=~(1LL<<LOREG);
1812       dirty_reg(current,HIREG);
1813       dirty_reg(current,LOREG);
1814       minimum_free_regs[i]=HOST_REGS;
1815     }
1816   }
1817   else
1818   {
1819     // Multiply by zero is zero.
1820     // MIPS does not have a divide by zero exception.
1821     // The result is undefined, so we return zero.
1822     alloc_reg(current,i,HIREG);
1823     alloc_reg(current,i,LOREG);
1824     current->is32|=1LL<<HIREG;
1825     current->is32|=1LL<<LOREG;
1826     dirty_reg(current,HIREG);
1827     dirty_reg(current,LOREG);
1828   }
1829 }
1830 #endif
1831
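// Register allocation for COP0: MFC0/MTC0 and the TLBR/TLBWI/TLBWR/TLBP/ERET group.
// Any of these may change processor state, so all host registers are flushed.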
1832 void cop0_alloc(struct regstat *current,int i)
1833 {
1834   if(opcode2[i]==0) // MFC0
1835   {
1836     if(rt1[i]) {
1837       clear_const(current,rt1[i]);
1838       alloc_all(current,i);
1839       alloc_reg(current,i,rt1[i]);
1840       current->is32|=1LL<<rt1[i];
1841       dirty_reg(current,rt1[i]);
1842     }
1843   }
1844   else if(opcode2[i]==4) // MTC0
1845   {
1846     if(rs1[i]){
1847       clear_const(current,rs1[i]);
1848       alloc_reg(current,i,rs1[i]);
1849       alloc_all(current,i);
1850     }
1851     else {
1852       alloc_all(current,i); // FIXME: Keep r0
1853       current->u&=~1LL;
1854       alloc_reg(current,i,0);
1855     }
1856   }
1857   else
1858   {
1859     // TLBR/TLBWI/TLBWR/TLBP/ERET
1860     assert(opcode2[i]==0x10);
1861     alloc_all(current,i);
1862   }
1863   minimum_free_regs[i]=HOST_REGS;
1864 }
1865
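// Register allocation for COP1/COP2 register moves: loads the status register and
// handles MFC1/DMFC1/CFC1 and MTC1/DMTC1/CTC1 (and their COP2 equivalents).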
1866 void cop1_alloc(struct regstat *current,int i)
1867 {
1868   alloc_reg(current,i,CSREG); // Load status
1869   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1870   {
1871     if(rt1[i]){
1872       clear_const(current,rt1[i]);
1873       if(opcode2[i]==1) {
1874         alloc_reg64(current,i,rt1[i]); // DMFC1
1875         current->is32&=~(1LL<<rt1[i]);
1876       }else{
1877         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1878         current->is32|=1LL<<rt1[i];
1879       }
1880       dirty_reg(current,rt1[i]);
1881     }
1882     alloc_reg_temp(current,i,-1);
1883   }
1884   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1885   {
1886     if(rs1[i]){
1887       clear_const(current,rs1[i]);
1888       if(opcode2[i]==5)
1889         alloc_reg64(current,i,rs1[i]); // DMTC1
1890       else
1891         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1892       alloc_reg_temp(current,i,-1);
1893     }
1894     else {
1895       current->u&=~1LL;
1896       alloc_reg(current,i,0);
1897       alloc_reg_temp(current,i,-1);
1898     }
1899   }
1900   minimum_free_regs[i]=1;
1901 }
1902 void fconv_alloc(struct regstat *current,int i)
1903 {
1904   alloc_reg(current,i,CSREG); // Load status
1905   alloc_reg_temp(current,i,-1);
1906   minimum_free_regs[i]=1;
1907 }
1908 void float_alloc(struct regstat *current,int i)
1909 {
1910   alloc_reg(current,i,CSREG); // Load status
1911   alloc_reg_temp(current,i,-1);
1912   minimum_free_regs[i]=1;
1913 }
1914 void c2op_alloc(struct regstat *current,int i)
1915 {
1916   alloc_reg_temp(current,i,-1);
1917 }
1918 void fcomp_alloc(struct regstat *current,int i)
1919 {
1920   alloc_reg(current,i,CSREG); // Load status
1921   alloc_reg(current,i,FSREG); // Load flags
1922   dirty_reg(current,FSREG); // Flag will be modified
1923   alloc_reg_temp(current,i,-1);
1924   minimum_free_regs[i]=1;
1925 }
1926
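// SYSCALL: allocate the cycle counter and flush all host registers.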
1927 void syscall_alloc(struct regstat *current,int i)
1928 {
1929   alloc_cc(current,i);
1930   dirty_reg(current,CCREG);
1931   alloc_all(current,i);
1932   minimum_free_regs[i]=HOST_REGS;
1933   current->isconst=0;
1934 }
1935
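// Allocate registers for the instruction in a branch delay slot, dispatching on
// instruction type.  A jump in the delay slot is not supported and disables
// speculative precompilation.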
1936 void delayslot_alloc(struct regstat *current,int i)
1937 {
1938   switch(itype[i]) {
1939     case UJUMP:
1940     case CJUMP:
1941     case SJUMP:
1942     case RJUMP:
1943     case FJUMP:
1944     case SYSCALL:
1945     case HLECALL:
1946     case SPAN:
1947       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1948       printf("Disabled speculative precompilation\n");
1949       stop_after_jal=1;
1950       break;
1951     case IMM16:
1952       imm16_alloc(current,i);
1953       break;
1954     case LOAD:
1955     case LOADLR:
1956       load_alloc(current,i);
1957       break;
1958     case STORE:
1959     case STORELR:
1960       store_alloc(current,i);
1961       break;
1962     case ALU:
1963       alu_alloc(current,i);
1964       break;
1965     case SHIFT:
1966       shift_alloc(current,i);
1967       break;
1968     case MULTDIV:
1969       multdiv_alloc(current,i);
1970       break;
1971     case SHIFTIMM:
1972       shiftimm_alloc(current,i);
1973       break;
1974     case MOV:
1975       mov_alloc(current,i);
1976       break;
1977     case COP0:
1978       cop0_alloc(current,i);
1979       break;
1980     case COP1:
1981     case COP2:
1982       cop1_alloc(current,i);
1983       break;
1984     case C1LS:
1985       c1ls_alloc(current,i);
1986       break;
1987     case C2LS:
1988       c2ls_alloc(current,i);
1989       break;
1990     case FCONV:
1991       fconv_alloc(current,i);
1992       break;
1993     case FLOAT:
1994       float_alloc(current,i);
1995       break;
1996     case FCOMP:
1997       fcomp_alloc(current,i);
1998       break;
1999     case C2OP:
2000       c2op_alloc(current,i);
2001       break;
2002   }
2003 }
2004
2005 // Special case where a branch and delay slot span two pages in virtual memory
2006 static void pagespan_alloc(struct regstat *current,int i)
2007 {
2008   current->isconst=0;
2009   current->wasconst=0;
2010   regs[i].wasconst=0;
2011   minimum_free_regs[i]=HOST_REGS;
2012   alloc_all(current,i);
2013   alloc_cc(current,i);
2014   dirty_reg(current,CCREG);
2015   if(opcode[i]==3) // JAL
2016   {
2017     alloc_reg(current,i,31);
2018     dirty_reg(current,31);
2019   }
2020   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
2021   {
2022     alloc_reg(current,i,rs1[i]);
2023     if (rt1[i]!=0) {
2024       alloc_reg(current,i,rt1[i]);
2025       dirty_reg(current,rt1[i]);
2026     }
2027   }
2028   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
2029   {
2030     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2031     if(rs2[i]) alloc_reg(current,i,rs2[i]);
2032     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
2033     {
2034       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2035       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
2036     }
2037   }
2038   else
2039   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
2040   {
2041     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2042     if(!((current->is32>>rs1[i])&1))
2043     {
2044       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2045     }
2046   }
2047   else
2048   if(opcode[i]==0x11) // BC1
2049   {
2050     alloc_reg(current,i,FSREG);
2051     alloc_reg(current,i,CSREG);
2052   }
2053   //else ...
2054 }
2055
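// Queue a stub (out-of-line slow path) to be emitted after the main block.
// The meaning of fields a..e depends on the stub type.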
2056 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
2057 {
2058   stubs[stubcount][0]=type;
2059   stubs[stubcount][1]=addr;
2060   stubs[stubcount][2]=retaddr;
2061   stubs[stubcount][3]=a;
2062   stubs[stubcount][4]=b;
2063   stubs[stubcount][5]=c;
2064   stubs[stubcount][6]=d;
2065   stubs[stubcount][7]=e;
2066   stubcount++;
2067 }
2068
2069 // Write out a single register
2070 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2071 {
2072   int hr;
2073   for(hr=0;hr<HOST_REGS;hr++) {
2074     if(hr!=EXCLUDE_REG) {
2075       if((regmap[hr]&63)==r) {
2076         if((dirty>>hr)&1) {
2077           if(regmap[hr]<64) {
2078             emit_storereg(r,hr);
2079 #ifndef FORCE32
2080             if((is32>>regmap[hr])&1) {
2081               emit_sarimm(hr,31,hr);
2082               emit_storereg(r|64,hr);
2083             }
2084 #endif
2085           }else{
2086             emit_storereg(r|64,hr);
2087           }
2088         }
2089       }
2090     }
2091   }
2092 }
2093
2094 int mchecksum()
2095 {
2096   //if(!tracedebug) return 0;
2097   int i;
2098   int sum=0;
2099   for(i=0;i<2097152;i++) {
2100     unsigned int temp=sum;
2101     sum<<=1;
2102     sum|=(~temp)>>31;
2103     sum^=((u_int *)rdram)[i];
2104   }
2105   return sum;
2106 }
2107 int rchecksum()
2108 {
2109   int i;
2110   int sum=0;
2111   for(i=0;i<64;i++)
2112     sum^=((u_int *)reg)[i];
2113   return sum;
2114 }
2115 void rlist()
2116 {
2117   int i;
2118   printf("TRACE: ");
2119   for(i=0;i<32;i++)
2120     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2121   printf("\n");
2122 #ifndef DISABLE_COP1
2123   printf("TRACE: ");
2124   for(i=0;i<32;i++)
2125     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2126   printf("\n");
2127 #endif
2128 }
2129
2130 void enabletrace()
2131 {
2132   tracedebug=1;
2133 }
2134
2135 void memdebug(int i)
2136 {
2137   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2138   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2139   //rlist();
2140   //if(tracedebug) {
2141   //if(Count>=-2084597794) {
2142   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2143   //if(0) {
2144     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2145     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2146     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2147     rlist();
2148     #ifdef __i386__
2149     printf("TRACE: %x\n",(&i)[-1]);
2150     #endif
2151     #ifdef __arm__
2152     int j;
2153     printf("TRACE: %x \n",(&j)[10]);
2154     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2155     #endif
2156     //fflush(stdout);
2157   }
2158   //printf("TRACE: %x\n",(&i)[-1]);
2159 }
2160
2161 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2162 {
2163   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2164 }
2165
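// Emit host code for register-register ALU operations: ADD/ADDU/SUB/SUBU, the
// 64-bit DADD/DSUB family, SLT/SLTU, and AND/OR/XOR/NOR.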
2166 void alu_assemble(int i,struct regstat *i_regs)
2167 {
2168   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2169     if(rt1[i]) {
2170       signed char s1,s2,t;
2171       t=get_reg(i_regs->regmap,rt1[i]);
2172       if(t>=0) {
2173         s1=get_reg(i_regs->regmap,rs1[i]);
2174         s2=get_reg(i_regs->regmap,rs2[i]);
2175         if(rs1[i]&&rs2[i]) {
2176           assert(s1>=0);
2177           assert(s2>=0);
2178           if(opcode2[i]&2) emit_sub(s1,s2,t);
2179           else emit_add(s1,s2,t);
2180         }
2181         else if(rs1[i]) {
2182           if(s1>=0) emit_mov(s1,t);
2183           else emit_loadreg(rs1[i],t);
2184         }
2185         else if(rs2[i]) {
2186           if(s2>=0) {
2187             if(opcode2[i]&2) emit_neg(s2,t);
2188             else emit_mov(s2,t);
2189           }
2190           else {
2191             emit_loadreg(rs2[i],t);
2192             if(opcode2[i]&2) emit_neg(t,t);
2193           }
2194         }
2195         else emit_zeroreg(t);
2196       }
2197     }
2198   }
2199   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2200     if(rt1[i]) {
2201       signed char s1l,s2l,s1h,s2h,tl,th;
2202       tl=get_reg(i_regs->regmap,rt1[i]);
2203       th=get_reg(i_regs->regmap,rt1[i]|64);
2204       if(tl>=0) {
2205         s1l=get_reg(i_regs->regmap,rs1[i]);
2206         s2l=get_reg(i_regs->regmap,rs2[i]);
2207         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2208         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2209         if(rs1[i]&&rs2[i]) {
2210           assert(s1l>=0);
2211           assert(s2l>=0);
2212           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2213           else emit_adds(s1l,s2l,tl);
2214           if(th>=0) {
2215             #ifdef INVERTED_CARRY
2216             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2217             #else
2218             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2219             #endif
2220             else emit_add(s1h,s2h,th);
2221           }
2222         }
2223         else if(rs1[i]) {
2224           if(s1l>=0) emit_mov(s1l,tl);
2225           else emit_loadreg(rs1[i],tl);
2226           if(th>=0) {
2227             if(s1h>=0) emit_mov(s1h,th);
2228             else emit_loadreg(rs1[i]|64,th);
2229           }
2230         }
2231         else if(rs2[i]) {
2232           if(s2l>=0) {
2233             if(opcode2[i]&2) emit_negs(s2l,tl);
2234             else emit_mov(s2l,tl);
2235           }
2236           else {
2237             emit_loadreg(rs2[i],tl);
2238             if(opcode2[i]&2) emit_negs(tl,tl);
2239           }
2240           if(th>=0) {
2241             #ifdef INVERTED_CARRY
2242             if(s2h>=0) emit_mov(s2h,th);
2243             else emit_loadreg(rs2[i]|64,th);
2244             if(opcode2[i]&2) {
2245               emit_adcimm(-1,th); // x86 has inverted carry flag
2246               emit_not(th,th);
2247             }
2248             #else
2249             if(opcode2[i]&2) {
2250               if(s2h>=0) emit_rscimm(s2h,0,th);
2251               else {
2252                 emit_loadreg(rs2[i]|64,th);
2253                 emit_rscimm(th,0,th);
2254               }
2255             }else{
2256               if(s2h>=0) emit_mov(s2h,th);
2257               else emit_loadreg(rs2[i]|64,th);
2258             }
2259             #endif
2260           }
2261         }
2262         else {
2263           emit_zeroreg(tl);
2264           if(th>=0) emit_zeroreg(th);
2265         }
2266       }
2267     }
2268   }
2269   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2270     if(rt1[i]) {
2271       signed char s1l,s1h,s2l,s2h,t;
2272       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2273       {
2274         t=get_reg(i_regs->regmap,rt1[i]);
2275         //assert(t>=0);
2276         if(t>=0) {
2277           s1l=get_reg(i_regs->regmap,rs1[i]);
2278           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2279           s2l=get_reg(i_regs->regmap,rs2[i]);
2280           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2281           if(rs2[i]==0) // rx<r0
2282           {
2283             assert(s1h>=0);
2284             if(opcode2[i]==0x2a) // SLT
2285               emit_shrimm(s1h,31,t);
2286             else // SLTU (unsigned can not be less than zero)
2287               emit_zeroreg(t);
2288           }
2289           else if(rs1[i]==0) // r0<rx
2290           {
2291             assert(s2h>=0);
2292             if(opcode2[i]==0x2a) // SLT
2293               emit_set_gz64_32(s2h,s2l,t);
2294             else // SLTU (set if not zero)
2295               emit_set_nz64_32(s2h,s2l,t);
2296           }
2297           else {
2298             assert(s1l>=0);assert(s1h>=0);
2299             assert(s2l>=0);assert(s2h>=0);
2300             if(opcode2[i]==0x2a) // SLT
2301               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2302             else // SLTU
2303               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2304           }
2305         }
2306       } else {
2307         t=get_reg(i_regs->regmap,rt1[i]);
2308         //assert(t>=0);
2309         if(t>=0) {
2310           s1l=get_reg(i_regs->regmap,rs1[i]);
2311           s2l=get_reg(i_regs->regmap,rs2[i]);
2312           if(rs2[i]==0) // rx<r0
2313           {
2314             assert(s1l>=0);
2315             if(opcode2[i]==0x2a) // SLT
2316               emit_shrimm(s1l,31,t);
2317             else // SLTU (unsigned can not be less than zero)
2318               emit_zeroreg(t);
2319           }
2320           else if(rs1[i]==0) // r0<rx
2321           {
2322             assert(s2l>=0);
2323             if(opcode2[i]==0x2a) // SLT
2324               emit_set_gz32(s2l,t);
2325             else // SLTU (set if not zero)
2326               emit_set_nz32(s2l,t);
2327           }
2328           else{
2329             assert(s1l>=0);assert(s2l>=0);
2330             if(opcode2[i]==0x2a) // SLT
2331               emit_set_if_less32(s1l,s2l,t);
2332             else // SLTU
2333               emit_set_if_carry32(s1l,s2l,t);
2334           }
2335         }
2336       }
2337     }
2338   }
2339   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2340     if(rt1[i]) {
2341       signed char s1l,s1h,s2l,s2h,th,tl;
2342       tl=get_reg(i_regs->regmap,rt1[i]);
2343       th=get_reg(i_regs->regmap,rt1[i]|64);
2344       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2345       {
2346         assert(tl>=0);
2347         if(tl>=0) {
2348           s1l=get_reg(i_regs->regmap,rs1[i]);
2349           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2350           s2l=get_reg(i_regs->regmap,rs2[i]);
2351           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2352           if(rs1[i]&&rs2[i]) {
2353             assert(s1l>=0);assert(s1h>=0);
2354             assert(s2l>=0);assert(s2h>=0);
2355             if(opcode2[i]==0x24) { // AND
2356               emit_and(s1l,s2l,tl);
2357               emit_and(s1h,s2h,th);
2358             } else
2359             if(opcode2[i]==0x25) { // OR
2360               emit_or(s1l,s2l,tl);
2361               emit_or(s1h,s2h,th);
2362             } else
2363             if(opcode2[i]==0x26) { // XOR
2364               emit_xor(s1l,s2l,tl);
2365               emit_xor(s1h,s2h,th);
2366             } else
2367             if(opcode2[i]==0x27) { // NOR
2368               emit_or(s1l,s2l,tl);
2369               emit_or(s1h,s2h,th);
2370               emit_not(tl,tl);
2371               emit_not(th,th);
2372             }
2373           }
2374           else
2375           {
2376             if(opcode2[i]==0x24) { // AND
2377               emit_zeroreg(tl);
2378               emit_zeroreg(th);
2379             } else
2380             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2381               if(rs1[i]){
2382                 if(s1l>=0) emit_mov(s1l,tl);
2383                 else emit_loadreg(rs1[i],tl);
2384                 if(s1h>=0) emit_mov(s1h,th);
2385                 else emit_loadreg(rs1[i]|64,th);
2386               }
2387               else
2388               if(rs2[i]){
2389                 if(s2l>=0) emit_mov(s2l,tl);
2390                 else emit_loadreg(rs2[i],tl);
2391                 if(s2h>=0) emit_mov(s2h,th);
2392                 else emit_loadreg(rs2[i]|64,th);
2393               }
2394               else{
2395                 emit_zeroreg(tl);
2396                 emit_zeroreg(th);
2397               }
2398             } else
2399             if(opcode2[i]==0x27) { // NOR
2400               if(rs1[i]){
2401                 if(s1l>=0) emit_not(s1l,tl);
2402                 else{
2403                   emit_loadreg(rs1[i],tl);
2404                   emit_not(tl,tl);
2405                 }
2406                 if(s1h>=0) emit_not(s1h,th);
2407                 else{
2408                   emit_loadreg(rs1[i]|64,th);
2409                   emit_not(th,th);
2410                 }
2411               }
2412               else
2413               if(rs2[i]){
2414                 if(s2l>=0) emit_not(s2l,tl);
2415                 else{
2416                   emit_loadreg(rs2[i],tl);
2417                   emit_not(tl,tl);
2418                 }
2419                 if(s2h>=0) emit_not(s2h,th);
2420                 else{
2421                   emit_loadreg(rs2[i]|64,th);
2422                   emit_not(th,th);
2423                 }
2424               }
2425               else {
2426                 emit_movimm(-1,tl);
2427                 emit_movimm(-1,th);
2428               }
2429             }
2430           }
2431         }
2432       }
2433       else
2434       {
2435         // 32 bit
2436         if(tl>=0) {
2437           s1l=get_reg(i_regs->regmap,rs1[i]);
2438           s2l=get_reg(i_regs->regmap,rs2[i]);
2439           if(rs1[i]&&rs2[i]) {
2440             assert(s1l>=0);
2441             assert(s2l>=0);
2442             if(opcode2[i]==0x24) { // AND
2443               emit_and(s1l,s2l,tl);
2444             } else
2445             if(opcode2[i]==0x25) { // OR
2446               emit_or(s1l,s2l,tl);
2447             } else
2448             if(opcode2[i]==0x26) { // XOR
2449               emit_xor(s1l,s2l,tl);
2450             } else
2451             if(opcode2[i]==0x27) { // NOR
2452               emit_or(s1l,s2l,tl);
2453               emit_not(tl,tl);
2454             }
2455           }
2456           else
2457           {
2458             if(opcode2[i]==0x24) { // AND
2459               emit_zeroreg(tl);
2460             } else
2461             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2462               if(rs1[i]){
2463                 if(s1l>=0) emit_mov(s1l,tl);
2464                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2465               }
2466               else
2467               if(rs2[i]){
2468                 if(s2l>=0) emit_mov(s2l,tl);
2469                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2470               }
2471               else emit_zeroreg(tl);
2472             } else
2473             if(opcode2[i]==0x27) { // NOR
2474               if(rs1[i]){
2475                 if(s1l>=0) emit_not(s1l,tl);
2476                 else {
2477                   emit_loadreg(rs1[i],tl);
2478                   emit_not(tl,tl);
2479                 }
2480               }
2481               else
2482               if(rs2[i]){
2483                 if(s2l>=0) emit_not(s2l,tl);
2484                 else {
2485                   emit_loadreg(rs2[i],tl);
2486                   emit_not(tl,tl);
2487                 }
2488               }
2489               else emit_movimm(-1,tl);
2490             }
2491           }
2492         }
2493       }
2494     }
2495   }
2496 }
2497
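// Emit host code for immediate-operand instructions (LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU, ANDI/ORI/XORI), folding constants where the source was known.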
2498 void imm16_assemble(int i,struct regstat *i_regs)
2499 {
2500   if (opcode[i]==0x0f) { // LUI
2501     if(rt1[i]) {
2502       signed char t;
2503       t=get_reg(i_regs->regmap,rt1[i]);
2504       //assert(t>=0);
2505       if(t>=0) {
2506         if(!((i_regs->isconst>>t)&1))
2507           emit_movimm(imm[i]<<16,t);
2508       }
2509     }
2510   }
2511   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2512     if(rt1[i]) {
2513       signed char s,t;
2514       t=get_reg(i_regs->regmap,rt1[i]);
2515       s=get_reg(i_regs->regmap,rs1[i]);
2516       if(rs1[i]) {
2517         //assert(t>=0);
2518         //assert(s>=0);
2519         if(t>=0) {
2520           if(!((i_regs->isconst>>t)&1)) {
2521             if(s<0) {
2522               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2523               emit_addimm(t,imm[i],t);
2524             }else{
2525               if(!((i_regs->wasconst>>s)&1))
2526                 emit_addimm(s,imm[i],t);
2527               else
2528                 emit_movimm(constmap[i][s]+imm[i],t);
2529             }
2530           }
2531         }
2532       } else {
2533         if(t>=0) {
2534           if(!((i_regs->isconst>>t)&1))
2535             emit_movimm(imm[i],t);
2536         }
2537       }
2538     }
2539   }
2540   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2541     if(rt1[i]) {
2542       signed char sh,sl,th,tl;
2543       th=get_reg(i_regs->regmap,rt1[i]|64);
2544       tl=get_reg(i_regs->regmap,rt1[i]);
2545       sh=get_reg(i_regs->regmap,rs1[i]|64);
2546       sl=get_reg(i_regs->regmap,rs1[i]);
2547       if(tl>=0) {
2548         if(rs1[i]) {
2549           assert(sh>=0);
2550           assert(sl>=0);
2551           if(th>=0) {
2552             emit_addimm64_32(sh,sl,imm[i],th,tl);
2553           }
2554           else {
2555             emit_addimm(sl,imm[i],tl);
2556           }
2557         } else {
2558           emit_movimm(imm[i],tl);
2559           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2560         }
2561       }
2562     }
2563   }
2564   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2565     if(rt1[i]) {
2566       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2567       signed char sh,sl,t;
2568       t=get_reg(i_regs->regmap,rt1[i]);
2569       sh=get_reg(i_regs->regmap,rs1[i]|64);
2570       sl=get_reg(i_regs->regmap,rs1[i]);
2571       //assert(t>=0);
2572       if(t>=0) {
2573         if(rs1[i]>0) {
2574           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2575           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2576             if(opcode[i]==0x0a) { // SLTI
2577               if(sl<0) {
2578                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2579                 emit_slti32(t,imm[i],t);
2580               }else{
2581                 emit_slti32(sl,imm[i],t);
2582               }
2583             }
2584             else { // SLTIU
2585               if(sl<0) {
2586                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2587                 emit_sltiu32(t,imm[i],t);
2588               }else{
2589                 emit_sltiu32(sl,imm[i],t);
2590               }
2591             }
2592           }else{ // 64-bit
2593             assert(sl>=0);
2594             if(opcode[i]==0x0a) // SLTI
2595               emit_slti64_32(sh,sl,imm[i],t);
2596             else // SLTIU
2597               emit_sltiu64_32(sh,sl,imm[i],t);
2598           }
2599         }else{
2600           // SLTI(U) with r0 is just stupid,
2601           // nonetheless examples can be found
2602           if(opcode[i]==0x0a) { // SLTI
2603             if(0<imm[i]) emit_movimm(1,t);
2604             else emit_zeroreg(t);
2605           } else // SLTIU
2606           {
2607             if(imm[i]) emit_movimm(1,t);
2608             else emit_zeroreg(t);
2609           }
2610         }
2611       }
2612     }
2613   }
2614   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2615     if(rt1[i]) {
2616       signed char sh,sl,th,tl;
2617       th=get_reg(i_regs->regmap,rt1[i]|64);
2618       tl=get_reg(i_regs->regmap,rt1[i]);
2619       sh=get_reg(i_regs->regmap,rs1[i]|64);
2620       sl=get_reg(i_regs->regmap,rs1[i]);
2621       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2622         if(opcode[i]==0x0c) //ANDI
2623         {
2624           if(rs1[i]) {
2625             if(sl<0) {
2626               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2627               emit_andimm(tl,imm[i],tl);
2628             }else{
2629               if(!((i_regs->wasconst>>sl)&1))
2630                 emit_andimm(sl,imm[i],tl);
2631               else
2632                 emit_movimm(constmap[i][sl]&imm[i],tl);
2633             }
2634           }
2635           else
2636             emit_zeroreg(tl);
2637           if(th>=0) emit_zeroreg(th);
2638         }
2639         else
2640         {
2641           if(rs1[i]) {
2642             if(sl<0) {
2643               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2644             }
2645             if(th>=0) {
2646               if(sh<0) {
2647                 emit_loadreg(rs1[i]|64,th);
2648               }else{
2649                 emit_mov(sh,th);
2650               }
2651             }
2652             if(opcode[i]==0x0d) //ORI
2653             if(sl<0) {
2654               emit_orimm(tl,imm[i],tl);
2655             }else{
2656               if(!((i_regs->wasconst>>sl)&1))
2657                 emit_orimm(sl,imm[i],tl);
2658               else
2659                 emit_movimm(constmap[i][sl]|imm[i],tl);
2660             }
2661             if(opcode[i]==0x0e) //XORI
2662             if(sl<0) {
2663               emit_xorimm(tl,imm[i],tl);
2664             }else{
2665               if(!((i_regs->wasconst>>sl)&1))
2666                 emit_xorimm(sl,imm[i],tl);
2667               else
2668                 emit_movimm(constmap[i][sl]^imm[i],tl);
2669             }
2670           }
2671           else {
2672             emit_movimm(imm[i],tl);
2673             if(th>=0) emit_zeroreg(th);
2674           }
2675         }
2676       }
2677     }
2678   }
2679 }
2680
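// Emit host code for shift-by-immediate: SLL/SRL/SRA and the 64-bit
// DSLL/DSRL/DSRA variants, including the "+32" forms.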
2681 void shiftimm_assemble(int i,struct regstat *i_regs)
2682 {
2683   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2684   {
2685     if(rt1[i]) {
2686       signed char s,t;
2687       t=get_reg(i_regs->regmap,rt1[i]);
2688       s=get_reg(i_regs->regmap,rs1[i]);
2689       //assert(t>=0);
2690       if(t>=0){
2691         if(rs1[i]==0)
2692         {
2693           emit_zeroreg(t);
2694         }
2695         else
2696         {
2697           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2698           if(imm[i]) {
2699             if(opcode2[i]==0) // SLL
2700             {
2701               emit_shlimm(s<0?t:s,imm[i],t);
2702             }
2703             if(opcode2[i]==2) // SRL
2704             {
2705               emit_shrimm(s<0?t:s,imm[i],t);
2706             }
2707             if(opcode2[i]==3) // SRA
2708             {
2709               emit_sarimm(s<0?t:s,imm[i],t);
2710             }
2711           }else{
2712             // Shift by zero
2713             if(s>=0 && s!=t) emit_mov(s,t);
2714           }
2715         }
2716       }
2717       //emit_storereg(rt1[i],t); //DEBUG
2718     }
2719   }
2720   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2721   {
2722     if(rt1[i]) {
2723       signed char sh,sl,th,tl;
2724       th=get_reg(i_regs->regmap,rt1[i]|64);
2725       tl=get_reg(i_regs->regmap,rt1[i]);
2726       sh=get_reg(i_regs->regmap,rs1[i]|64);
2727       sl=get_reg(i_regs->regmap,rs1[i]);
2728       if(tl>=0) {
2729         if(rs1[i]==0)
2730         {
2731           emit_zeroreg(tl);
2732           if(th>=0) emit_zeroreg(th);
2733         }
2734         else
2735         {
2736           assert(sl>=0);
2737           assert(sh>=0);
2738           if(imm[i]) {
2739             if(opcode2[i]==0x38) // DSLL
2740             {
2741               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2742               emit_shlimm(sl,imm[i],tl);
2743             }
2744             if(opcode2[i]==0x3a) // DSRL
2745             {
2746               emit_shrdimm(sl,sh,imm[i],tl);
2747               if(th>=0) emit_shrimm(sh,imm[i],th);
2748             }
2749             if(opcode2[i]==0x3b) // DSRA
2750             {
2751               emit_shrdimm(sl,sh,imm[i],tl);
2752               if(th>=0) emit_sarimm(sh,imm[i],th);
2753             }
2754           }else{
2755             // Shift by zero
2756             if(sl!=tl) emit_mov(sl,tl);
2757             if(th>=0&&sh!=th) emit_mov(sh,th);
2758           }
2759         }
2760       }
2761     }
2762   }
2763   if(opcode2[i]==0x3c) // DSLL32
2764   {
2765     if(rt1[i]) {
2766       signed char sl,tl,th;
2767       tl=get_reg(i_regs->regmap,rt1[i]);
2768       th=get_reg(i_regs->regmap,rt1[i]|64);
2769       sl=get_reg(i_regs->regmap,rs1[i]);
2770       if(th>=0||tl>=0){
2771         assert(tl>=0);
2772         assert(th>=0);
2773         assert(sl>=0);
2774         emit_mov(sl,th);
2775         emit_zeroreg(tl);
2776         if(imm[i]>32)
2777         {
2778           emit_shlimm(th,imm[i]&31,th);
2779         }
2780       }
2781     }
2782   }
2783   if(opcode2[i]==0x3e) // DSRL32
2784   {
2785     if(rt1[i]) {
2786       signed char sh,tl,th;
2787       tl=get_reg(i_regs->regmap,rt1[i]);
2788       th=get_reg(i_regs->regmap,rt1[i]|64);
2789       sh=get_reg(i_regs->regmap,rs1[i]|64);
2790       if(tl>=0){
2791         assert(sh>=0);
2792         emit_mov(sh,tl);
2793         if(th>=0) emit_zeroreg(th);
2794         if(imm[i]>32)
2795         {
2796           emit_shrimm(tl,imm[i]&31,tl);
2797         }
2798       }
2799     }
2800   }
2801   if(opcode2[i]==0x3f) // DSRA32
2802   {
2803     if(rt1[i]) {
2804       signed char sh,tl;
2805       tl=get_reg(i_regs->regmap,rt1[i]);
2806       sh=get_reg(i_regs->regmap,rs1[i]|64);
2807       if(tl>=0){
2808         assert(sh>=0);
2809         emit_mov(sh,tl);
2810         if(imm[i]>32)
2811         {
2812           emit_sarimm(tl,imm[i]&31,tl);
2813         }
2814       }
2815     }
2816   }
2817 }
2818
2819 #ifndef shift_assemble
2820 void shift_assemble(int i,struct regstat *i_regs)
2821 {
2822   printf("Need shift_assemble for this architecture.\n");
2823   exit(1);
2824 }
2825 #endif
2826
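// Emit host code for aligned loads (LB/LBU/LH/LHU/LW/LWU/LD).  The fast path
// range-checks the address against RAM_SIZE (or translates through the TLB map)
// and reads rdram directly; misses branch to a stub, and constant out-of-range
// addresses use an inline read handler.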
2827 void load_assemble(int i,struct regstat *i_regs)
2828 {
2829   int s,th,tl,addr,map=-1;
2830   int offset;
2831   int jaddr=0;
2832   int memtarget=0,c=0;
2833   int fastload_reg_override=0;
2834   u_int hr,reglist=0;
2835   th=get_reg(i_regs->regmap,rt1[i]|64);
2836   tl=get_reg(i_regs->regmap,rt1[i]);
2837   s=get_reg(i_regs->regmap,rs1[i]);
2838   offset=imm[i];
2839   for(hr=0;hr<HOST_REGS;hr++) {
2840     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2841   }
2842   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2843   if(s>=0) {
2844     c=(i_regs->wasconst>>s)&1;
2845     if (c) {
2846       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2847       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2848     }
2849   }
2850   //printf("load_assemble: c=%d\n",c);
2851   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2852   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2853 #ifdef PCSX
2854   if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2855     ||rt1[i]==0) {
2856       // could be FIFO, must perform the read
2857       // ||dummy read
2858       assem_debug("(forced read)\n");
2859       tl=get_reg(i_regs->regmap,-1);
2860       assert(tl>=0);
2861   }
2862 #endif
2863   if(offset||s<0||c) addr=tl;
2864   else addr=s;
2865   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2866  if(tl>=0) {
2867   //printf("load_assemble: c=%d\n",c);
2868   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2869   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2870   reglist&=~(1<<tl);
2871   if(th>=0) reglist&=~(1<<th);
2872   if(!using_tlb) {
2873     if(!c) {
2874       #ifdef RAM_OFFSET
2875       map=get_reg(i_regs->regmap,ROREG);
2876       if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2877       #endif
2878 //#define R29_HACK 1
2879       #ifdef R29_HACK
2880       // Strmnnrmn's speed hack
2881       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2882       #endif
2883       {
2884         #ifdef PCSX
2885         if(sp_in_mirror&&rs1[i]==29) {
2886           emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2887           emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
2888           fastload_reg_override=HOST_TEMPREG;
2889         }
2890         else
2891         #endif
2892         emit_cmpimm(addr,RAM_SIZE);
2893         jaddr=(int)out;
2894         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2895         // Hint to branch predictor that the branch is unlikely to be taken
2896         if(rs1[i]>=28)
2897           emit_jno_unlikely(0);
2898         else
2899         #endif
2900         emit_jno(0);
2901       }
2902     }
2903   }else{ // using tlb
2904     int x=0;
2905     if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2906     if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2907     map=get_reg(i_regs->regmap,TLREG);
2908     assert(map>=0);
2909     reglist&=~(1<<map);
2910     map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2911     do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2912   }
2913   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2914   if (opcode[i]==0x20) { // LB
2915     if(!c||memtarget) {
2916       if(!dummy) {
2917         #ifdef HOST_IMM_ADDR32
2918         if(c)
2919           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2920         else
2921         #endif
2922         {
2923           //emit_xorimm(addr,3,tl);
2924           //gen_tlb_addr_r(tl,map);
2925           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2926           int x=0,a=tl;
2927 #ifdef BIG_ENDIAN_MIPS
2928           if(!c) emit_xorimm(addr,3,tl);
2929           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2930 #else
2931           if(!c) a=addr;
2932 #endif
2933           if(fastload_reg_override) a=fastload_reg_override;
2934
2935           emit_movsbl_indexed_tlb(x,a,map,tl);
2936         }
2937       }
2938       if(jaddr)
2939         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2940     }
2941     else
2942       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2943   }
2944   if (opcode[i]==0x21) { // LH
2945     if(!c||memtarget) {
2946       if(!dummy) {
2947         #ifdef HOST_IMM_ADDR32
2948         if(c)
2949           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2950         else
2951         #endif
2952         {
2953           int x=0,a=tl;
2954 #ifdef BIG_ENDIAN_MIPS
2955           if(!c) emit_xorimm(addr,2,tl);
2956           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2957 #else
2958           if(!c) a=addr;
2959 #endif
2960           if(fastload_reg_override) a=fastload_reg_override;
2961           //#ifdef
2962           //emit_movswl_indexed_tlb(x,tl,map,tl);
2963           //else
2964           if(map>=0) {
2965             gen_tlb_addr_r(a,map);
2966             emit_movswl_indexed(x,a,tl);
2967           }else{
2968             #ifdef RAM_OFFSET
2969             emit_movswl_indexed(x,a,tl);
2970             #else
2971             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2972             #endif
2973           }
2974         }
2975       }
2976       if(jaddr)
2977         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2978     }
2979     else
2980       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2981   }
2982   if (opcode[i]==0x23) { // LW
2983     if(!c||memtarget) {
2984       if(!dummy) {
2985         int a=addr;
2986         if(fastload_reg_override) a=fastload_reg_override;
2987         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2988         #ifdef HOST_IMM_ADDR32
2989         if(c)
2990           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2991         else
2992         #endif
2993         emit_readword_indexed_tlb(0,a,map,tl);
2994       }
2995       if(jaddr)
2996         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2997     }
2998     else
2999       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3000   }
3001   if (opcode[i]==0x24) { // LBU
3002     if(!c||memtarget) {
3003       if(!dummy) {
3004         #ifdef HOST_IMM_ADDR32
3005         if(c)
3006           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
3007         else
3008         #endif
3009         {
3010           //emit_xorimm(addr,3,tl);
3011           //gen_tlb_addr_r(tl,map);
3012           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
3013           int x=0,a=tl;
3014 #ifdef BIG_ENDIAN_MIPS
3015           if(!c) emit_xorimm(addr,3,tl);
3016           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3017 #else
3018           if(!c) a=addr;
3019 #endif
3020           if(fastload_reg_override) a=fastload_reg_override;
3021
3022           emit_movzbl_indexed_tlb(x,a,map,tl);
3023         }
3024       }
3025       if(jaddr)
3026         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3027     }
3028     else
3029       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3030   }
3031   if (opcode[i]==0x25) { // LHU
3032     if(!c||memtarget) {
3033       if(!dummy) {
3034         #ifdef HOST_IMM_ADDR32
3035         if(c)
3036           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
3037         else
3038         #endif
3039         {
3040           int x=0,a=tl;
3041 #ifdef BIG_ENDIAN_MIPS
3042           if(!c) emit_xorimm(addr,2,tl);
3043           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3044 #else
3045           if(!c) a=addr;
3046 #endif
3047           if(fastload_reg_override) a=fastload_reg_override;
3048           //#ifdef
3049           //emit_movzwl_indexed_tlb(x,tl,map,tl);
3050           //#else
3051           if(map>=0) {
3052             gen_tlb_addr_r(a,map);
3053             emit_movzwl_indexed(x,a,tl);
3054           }else{
3055             #ifdef RAM_OFFSET
3056             emit_movzwl_indexed(x,a,tl);
3057             #else
3058             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
3059             #endif
3060           }
3061         }
3062       }
3063       if(jaddr)
3064         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3065     }
3066     else
3067       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3068   }
3069   if (opcode[i]==0x27) { // LWU
3070     assert(th>=0);
3071     if(!c||memtarget) {
3072       if(!dummy) {
3073         int a=addr;
3074         if(fastload_reg_override) a=fastload_reg_override;
3075         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3076         #ifdef HOST_IMM_ADDR32
3077         if(c)
3078           emit_readword_tlb(constmap[i][s]+offset,map,tl);
3079         else
3080         #endif
3081         emit_readword_indexed_tlb(0,a,map,tl);
3082       }
3083       if(jaddr)
3084         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3085     }
3086     else {
3087       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3088     }
3089     emit_zeroreg(th);
3090   }
3091   if (opcode[i]==0x37) { // LD
3092     if(!c||memtarget) {
3093       if(!dummy) {
3094         int a=addr;
3095         if(fastload_reg_override) a=fastload_reg_override;
3096         //gen_tlb_addr_r(tl,map);
3097         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3098         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3099         #ifdef HOST_IMM_ADDR32
3100         if(c)
3101           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3102         else
3103         #endif
3104         emit_readdword_indexed_tlb(0,a,map,th,tl);
3105       }
3106       if(jaddr)
3107         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3108     }
3109     else
3110       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3111   }
3112  }
3113   //emit_storereg(rt1[i],tl); // DEBUG
3114   //if(opcode[i]==0x23)
3115   //if(opcode[i]==0x24)
3116   //if(opcode[i]==0x23||opcode[i]==0x24)
3117   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3118   {
3119     //emit_pusha();
3120     save_regs(0x100f);
3121         emit_readword((int)&last_count,ECX);
3122         #ifdef __i386__
3123         if(get_reg(i_regs->regmap,CCREG)<0)
3124           emit_loadreg(CCREG,HOST_CCREG);
3125         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3126         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3127         emit_writeword(HOST_CCREG,(int)&Count);
3128         #endif
3129         #ifdef __arm__
3130         if(get_reg(i_regs->regmap,CCREG)<0)
3131           emit_loadreg(CCREG,0);
3132         else
3133           emit_mov(HOST_CCREG,0);
3134         emit_add(0,ECX,0);
3135         emit_addimm(0,2*ccadj[i],0);
3136         emit_writeword(0,(int)&Count);
3137         #endif
3138     emit_call((int)memdebug);
3139     //emit_popa();
3140     restore_regs(0x100f);
3141   }/**/
3142 }
3143
3144 #ifndef loadlr_assemble
3145 void loadlr_assemble(int i,struct regstat *i_regs)
3146 {
3147   printf("Need loadlr_assemble for this architecture.\n");
3148   exit(1);
3149 }
3150 #endif
3151
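// Emit host code for aligned stores (SB/SH/SW/SD): the same fast/slow path split
// as loads, plus an invalid_code check so writes over compiled code invalidate
// the affected blocks.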
3152 void store_assemble(int i,struct regstat *i_regs)
3153 {
3154   int s,th,tl,map=-1;
3155   int addr,temp;
3156   int offset;
3157   int jaddr=0,jaddr2,type;
3158   int memtarget=0,c=0;
3159   int agr=AGEN1+(i&1);
3160   int faststore_reg_override=0;
3161   u_int hr,reglist=0;
3162   th=get_reg(i_regs->regmap,rs2[i]|64);
3163   tl=get_reg(i_regs->regmap,rs2[i]);
3164   s=get_reg(i_regs->regmap,rs1[i]);
3165   temp=get_reg(i_regs->regmap,agr);
3166   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3167   offset=imm[i];
3168   if(s>=0) {
3169     c=(i_regs->wasconst>>s)&1;
3170     if(c) {
3171       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3172       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3173     }
3174   }
3175   assert(tl>=0);
3176   assert(temp>=0);
3177   for(hr=0;hr<HOST_REGS;hr++) {
3178     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3179   }
3180   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3181   if(offset||s<0||c) addr=temp;
3182   else addr=s;
3183   if(!using_tlb) {
3184     if(!c) {
3185       #ifdef PCSX
3186       if(sp_in_mirror&&rs1[i]==29) {
3187         emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
3188         emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
3189         faststore_reg_override=HOST_TEMPREG;
3190       }
3191       else
3192       #endif
3193       #ifdef R29_HACK
3194       // Strmnnrmn's speed hack
3195       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3196       #endif
3197       emit_cmpimm(addr,RAM_SIZE);
3198       #ifdef DESTRUCTIVE_SHIFT
3199       if(s==addr) emit_mov(s,temp);
3200       #endif
3201       #ifdef R29_HACK
3202       memtarget=1;
3203       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3204       #endif
3205       {
3206         jaddr=(int)out;
3207         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3208         // Hint to branch predictor that the branch is unlikely to be taken
3209         if(rs1[i]>=28)
3210           emit_jno_unlikely(0);
3211         else
3212         #endif
3213         emit_jno(0);
3214       }
3215     }
3216   }else{ // using tlb
3217     int x=0;
3218     if (opcode[i]==0x28) x=3; // SB
3219     if (opcode[i]==0x29) x=2; // SH
3220     map=get_reg(i_regs->regmap,TLREG);
3221     assert(map>=0);
3222     reglist&=~(1<<map);
3223     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3224     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3225   }
3226
3227   if (opcode[i]==0x28) { // SB
3228     if(!c||memtarget) {
3229       int x=0,a=temp;
3230 #ifdef BIG_ENDIAN_MIPS
3231       if(!c) emit_xorimm(addr,3,temp);
3232       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3233 #else
3234       if(!c) a=addr;
3235 #endif
3236       if(faststore_reg_override) a=faststore_reg_override;
3237       //gen_tlb_addr_w(temp,map);
3238       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3239       emit_writebyte_indexed_tlb(tl,x,a,map,a);
3240     }
3241     type=STOREB_STUB;
3242   }
3243   if (opcode[i]==0x29) { // SH
3244     if(!c||memtarget) {
3245       int x=0,a=temp;
3246 #ifdef BIG_ENDIAN_MIPS
3247       if(!c) emit_xorimm(addr,2,temp);
3248       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3249 #else
3250       if(!c) a=addr;
3251 #endif
3252       if(faststore_reg_override) a=faststore_reg_override;
3253       //#ifdef
3254       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3255       //#else
3256       if(map>=0) {
3257         gen_tlb_addr_w(a,map);
3258         emit_writehword_indexed(tl,x,a);
3259       }else
3260         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
3261     }
3262     type=STOREH_STUB;
3263   }
3264   if (opcode[i]==0x2B) { // SW
3265     if(!c||memtarget) {
3266       int a=addr;
3267       if(faststore_reg_override) a=faststore_reg_override;
3268       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3269       emit_writeword_indexed_tlb(tl,0,a,map,temp);
3270     }
3271     type=STOREW_STUB;
3272   }
3273   if (opcode[i]==0x3F) { // SD
3274     if(!c||memtarget) {
3275       int a=addr;
3276       if(faststore_reg_override) a=faststore_reg_override;
3277       if(rs2[i]) {
3278         assert(th>=0);
3279         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3280         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3281         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
3282       }else{
3283         // Store zero
3284         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3285         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3286         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
3287       }
3288     }
3289     type=STORED_STUB;
3290   }
3291 #ifdef PCSX
3292   if(jaddr) {
3293     // PCSX store handlers don't check invcode again
3294     reglist|=1<<addr;
3295     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3296     jaddr=0;
3297   }
3298 #endif
3299   if(!using_tlb) {
3300     if(!c||memtarget) {
3301       #ifdef DESTRUCTIVE_SHIFT
3302       // The x86 shift operation is 'destructive'; it overwrites the
3303       // source register, so we need to make a copy first and use that.
3304       addr=temp;
3305       #endif
3306       #if defined(HOST_IMM8)
3307       int ir=get_reg(i_regs->regmap,INVCP);
3308       assert(ir>=0);
3309       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3310       #else
3311       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3312       #endif
3313       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3314       emit_callne(invalidate_addr_reg[addr]);
3315       #else
3316       jaddr2=(int)out;
3317       emit_jne(0);
3318       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3319       #endif
3320     }
3321   }
3322   if(jaddr) {
3323     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3324   } else if(c&&!memtarget) {
3325     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3326   }
3327   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3328   //if(opcode[i]==0x2B || opcode[i]==0x28)
3329   //if(opcode[i]==0x2B || opcode[i]==0x29)
3330   //if(opcode[i]==0x2B)
3331   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3332   {
3333     #ifdef __i386__
3334     emit_pusha();
3335     #endif
3336     #ifdef __arm__
3337     save_regs(0x100f);
3338     #endif
3339         emit_readword((int)&last_count,ECX);
3340         #ifdef __i386__
3341         if(get_reg(i_regs->regmap,CCREG)<0)
3342           emit_loadreg(CCREG,HOST_CCREG);
3343         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3344         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3345         emit_writeword(HOST_CCREG,(int)&Count);
3346         #endif
3347         #ifdef __arm__
3348         if(get_reg(i_regs->regmap,CCREG)<0)
3349           emit_loadreg(CCREG,0);
3350         else
3351           emit_mov(HOST_CCREG,0);
3352         emit_add(0,ECX,0);
3353         emit_addimm(0,2*ccadj[i],0);
3354         emit_writeword(0,(int)&Count);
3355         #endif
3356     emit_call((int)memdebug);
3357     #ifdef __i386__
3358     emit_popa();
3359     #endif
3360     #ifdef __arm__
3361     restore_regs(0x100f);
3362     #endif
3363   }/**/
3364 }
3365
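// Emit host code for unaligned stores (SWL/SWR/SDL/SDR) by branching on the low
// address bits and writing the appropriate partial words and bytes.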
3366 void storelr_assemble(int i,struct regstat *i_regs)
3367 {
3368   int s,th,tl;
3369   int temp;
3370   int temp2;
3371   int offset;
3372   int jaddr=0,jaddr2;
3373   int case1,case2,case3;
3374   int done0,done1,done2;
3375   int memtarget=0,c=0;
3376   int agr=AGEN1+(i&1);
3377   u_int hr,reglist=0;
3378   th=get_reg(i_regs->regmap,rs2[i]|64);
3379   tl=get_reg(i_regs->regmap,rs2[i]);
3380   s=get_reg(i_regs->regmap,rs1[i]);
3381   temp=get_reg(i_regs->regmap,agr);
3382   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3383   offset=imm[i];
3384   if(s>=0) {
3385     c=(i_regs->isconst>>s)&1;
3386     if(c) {
3387       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3388       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3389     }
3390   }
3391   assert(tl>=0);
3392   for(hr=0;hr<HOST_REGS;hr++) {
3393     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3394   }
3395   assert(temp>=0);
3396   if(!using_tlb) {
3397     if(!c) {
3398       emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3399       if(!offset&&s!=temp) emit_mov(s,temp);
3400       jaddr=(int)out;
3401       emit_jno(0);
3402     }
3403     else
3404     {
3405       if(!memtarget||!rs1[i]) {
3406         jaddr=(int)out;
3407         emit_jmp(0);
3408       }
3409     }
3410     #ifdef RAM_OFFSET
3411     int map=get_reg(i_regs->regmap,ROREG);
3412     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3413     gen_tlb_addr_w(temp,map);
3414     #else
3415     if((u_int)rdram!=0x80000000) 
3416       emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3417     #endif
3418   }else{ // using tlb
3419     int map=get_reg(i_regs->regmap,TLREG);
3420     assert(map>=0);
3421     reglist&=~(1<<map);
3422     map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3423     if(!c&&!offset&&s>=0) emit_mov(s,temp);
3424     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3425     if(!jaddr&&!memtarget) {
3426       jaddr=(int)out;
3427       emit_jmp(0);
3428     }
3429     gen_tlb_addr_w(temp,map);
3430   }
3431
3432   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3433     temp2=get_reg(i_regs->regmap,FTEMP);
3434     if(!rs2[i]) temp2=th=tl;
3435   }
3436
3437 #ifndef BIG_ENDIAN_MIPS
3438     emit_xorimm(temp,3,temp);
3439 #endif
3440   emit_testimm(temp,2);
3441   case2=(int)out;
3442   emit_jne(0);
3443   emit_testimm(temp,1);
3444   case1=(int)out;
3445   emit_jne(0);
3446   // 0
3447   if (opcode[i]==0x2A) { // SWL
3448     emit_writeword_indexed(tl,0,temp);
3449   }
3450   if (opcode[i]==0x2E) { // SWR
3451     emit_writebyte_indexed(tl,3,temp);
3452   }
3453   if (opcode[i]==0x2C) { // SDL
3454     emit_writeword_indexed(th,0,temp);
3455     if(rs2[i]) emit_mov(tl,temp2);
3456   }
3457   if (opcode[i]==0x2D) { // SDR
3458     emit_writebyte_indexed(tl,3,temp);
3459     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3460   }
3461   done0=(int)out;
3462   emit_jmp(0);
3463   // 1
3464   set_jump_target(case1,(int)out);
3465   if (opcode[i]==0x2A) { // SWL
3466     // Write 3 msb into three least significant bytes
3467     if(rs2[i]) emit_rorimm(tl,8,tl);
3468     emit_writehword_indexed(tl,-1,temp);
3469     if(rs2[i]) emit_rorimm(tl,16,tl);
3470     emit_writebyte_indexed(tl,1,temp);
3471     if(rs2[i]) emit_rorimm(tl,8,tl);
3472   }
3473   if (opcode[i]==0x2E) { // SWR
3474     // Write two lsb into two most significant bytes
3475     emit_writehword_indexed(tl,1,temp);
3476   }
3477   if (opcode[i]==0x2C) { // SDL
3478     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3479     // Write 3 msb into three least significant bytes
3480     if(rs2[i]) emit_rorimm(th,8,th);
3481     emit_writehword_indexed(th,-1,temp);
3482     if(rs2[i]) emit_rorimm(th,16,th);
3483     emit_writebyte_indexed(th,1,temp);
3484     if(rs2[i]) emit_rorimm(th,8,th);
3485   }
3486   if (opcode[i]==0x2D) { // SDR
3487     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3488     // Write two lsb into two most significant bytes
3489     emit_writehword_indexed(tl,1,temp);
3490   }
3491   done1=(int)out;
3492   emit_jmp(0);
3493   // 2
3494   set_jump_target(case2,(int)out);
3495   emit_testimm(temp,1);
3496   case3=(int)out;
3497   emit_jne(0);
3498   if (opcode[i]==0x2A) { // SWL
3499     // Write two msb into two least significant bytes
3500     if(rs2[i]) emit_rorimm(tl,16,tl);
3501     emit_writehword_indexed(tl,-2,temp);
3502     if(rs2[i]) emit_rorimm(tl,16,tl);
3503   }
3504   if (opcode[i]==0x2E) { // SWR
3505     // Write 3 lsb into three most significant bytes
3506     emit_writebyte_indexed(tl,-1,temp);
3507     if(rs2[i]) emit_rorimm(tl,8,tl);
3508     emit_writehword_indexed(tl,0,temp);
3509     if(rs2[i]) emit_rorimm(tl,24,tl);
3510   }
3511   if (opcode[i]==0x2C) { // SDL
3512     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3513     // Write two msb into two least significant bytes
3514     if(rs2[i]) emit_rorimm(th,16,th);
3515     emit_writehword_indexed(th,-2,temp);
3516     if(rs2[i]) emit_rorimm(th,16,th);
3517   }
3518   if (opcode[i]==0x2D) { // SDR
3519     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3520     // Write 3 lsb into three most significant bytes
3521     emit_writebyte_indexed(tl,-1,temp);
3522     if(rs2[i]) emit_rorimm(tl,8,tl);
3523     emit_writehword_indexed(tl,0,temp);
3524     if(rs2[i]) emit_rorimm(tl,24,tl);
3525   }
3526   done2=(int)out;
3527   emit_jmp(0);
3528   // 3
3529   set_jump_target(case3,(int)out);
3530   if (opcode[i]==0x2A) { // SWL
3531     // Write msb into least significant byte
3532     if(rs2[i]) emit_rorimm(tl,24,tl);
3533     emit_writebyte_indexed(tl,-3,temp);
3534     if(rs2[i]) emit_rorimm(tl,8,tl);
3535   }
3536   if (opcode[i]==0x2E) { // SWR
3537     // Write entire word
3538     emit_writeword_indexed(tl,-3,temp);
3539   }
3540   if (opcode[i]==0x2C) { // SDL
3541     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3542     // Write msb into least significant byte
3543     if(rs2[i]) emit_rorimm(th,24,th);
3544     emit_writebyte_indexed(th,-3,temp);
3545     if(rs2[i]) emit_rorimm(th,8,th);
3546   }
3547   if (opcode[i]==0x2D) { // SDR
3548     if(rs2[i]) emit_mov(th,temp2);
3549     // Write entire word
3550     emit_writeword_indexed(tl,-3,temp);
3551   }
3552   set_jump_target(done0,(int)out);
3553   set_jump_target(done1,(int)out);
3554   set_jump_target(done2,(int)out);
3555   if (opcode[i]==0x2C) { // SDL
3556     emit_testimm(temp,4);
3557     done0=(int)out;
3558     emit_jne(0);
3559     emit_andimm(temp,~3,temp);
3560     emit_writeword_indexed(temp2,4,temp);
3561     set_jump_target(done0,(int)out);
3562   }
3563   if (opcode[i]==0x2D) { // SDR
3564     emit_testimm(temp,4);
3565     done0=(int)out;
3566     emit_jeq(0);
3567     emit_andimm(temp,~3,temp);
3568     emit_writeword_indexed(temp2,-4,temp);
3569     set_jump_target(done0,(int)out);
3570   }
3571   if(!c||!memtarget)
3572     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3573   if(!using_tlb) {
3574     #ifdef RAM_OFFSET
3575     int map=get_reg(i_regs->regmap,ROREG);
3576     if(map<0) map=HOST_TEMPREG;
3577     gen_orig_addr_w(temp,map);
3578     #else
3579     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3580     #endif
3581     #if defined(HOST_IMM8)
3582     int ir=get_reg(i_regs->regmap,INVCP);
3583     assert(ir>=0);
3584     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3585     #else
3586     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3587     #endif
3588     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3589     emit_callne(invalidate_addr_reg[temp]);
3590     #else
3591     jaddr2=(int)out;
3592     emit_jne(0);
3593     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3594     #endif
3595   }
3596   /*
3597     emit_pusha();
3598     //save_regs(0x100f);
3599         emit_readword((int)&last_count,ECX);
3600         if(get_reg(i_regs->regmap,CCREG)<0)
3601           emit_loadreg(CCREG,HOST_CCREG);
3602         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3603         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3604         emit_writeword(HOST_CCREG,(int)&Count);
3605     emit_call((int)memdebug);
3606     emit_popa();
3607     //restore_regs(0x100f);
3608   /**/
3609 }
3610
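// c1ls_assemble: COP1 (FPU) loads and stores.  LWC1/LDC1 read a float or
// double from memory into the coprocessor register file, SWC1/SDC1 write
// one out.  The CU1 bit of the status register is tested first and an
// FP_STUB is added to raise the coprocessor-unusable exception if it is
// clear.  When DISABLE_COP1 is defined (PCSX builds) the body is compiled
// out and cop1_unusable() is called instead.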
3611 void c1ls_assemble(int i,struct regstat *i_regs)
3612 {
3613 #ifndef DISABLE_COP1
3614   int s,th,tl;
3615   int temp,ar;
3616   int map=-1;
3617   int offset;
3618   int c=0;
3619   int jaddr,jaddr2=0,jaddr3,type;
3620   int agr=AGEN1+(i&1);
3621   u_int hr,reglist=0;
3622   th=get_reg(i_regs->regmap,FTEMP|64);
3623   tl=get_reg(i_regs->regmap,FTEMP);
3624   s=get_reg(i_regs->regmap,rs1[i]);
3625   temp=get_reg(i_regs->regmap,agr);
3626   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3627   offset=imm[i];
3628   assert(tl>=0);
3629   assert(rs1[i]>0);
3630   assert(temp>=0);
3631   for(hr=0;hr<HOST_REGS;hr++) {
3632     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3633   }
3634   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3635   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3636   {
3637     // Loads use a temporary register which we need to save
3638     reglist|=1<<temp;
3639   }
3640   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3641     ar=temp;
3642   else // LWC1/LDC1
3643     ar=tl;
3644   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3645   //else c=(i_regs->wasconst>>s)&1;
3646   if(s>=0) c=(i_regs->wasconst>>s)&1;
3647   // Check cop1 unusable
3648   if(!cop1_usable) {
3649     signed char rs=get_reg(i_regs->regmap,CSREG);
3650     assert(rs>=0);
3651     emit_testimm(rs,0x20000000);
3652     jaddr=(int)out;
3653     emit_jeq(0);
3654     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3655     cop1_usable=1;
3656   }
3657   if (opcode[i]==0x39) { // SWC1 (get float address)
3658     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3659   }
3660   if (opcode[i]==0x3D) { // SDC1 (get double address)
3661     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3662   }
3663   // Generate address + offset
3664   if(!using_tlb) {
3665     if(!c)
3666       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3667   }
3668   else
3669   {
3670     map=get_reg(i_regs->regmap,TLREG);
3671     assert(map>=0);
3672     reglist&=~(1<<map);
3673     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3674       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3675     }
3676     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3677       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3678     }
3679   }
3680   if (opcode[i]==0x39) { // SWC1 (read float)
3681     emit_readword_indexed(0,tl,tl);
3682   }
3683   if (opcode[i]==0x3D) { // SDC1 (read double)
3684     emit_readword_indexed(4,tl,th);
3685     emit_readword_indexed(0,tl,tl);
3686   }
3687   if (opcode[i]==0x31) { // LWC1 (get target address)
3688     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3689   }
3690   if (opcode[i]==0x35) { // LDC1 (get target address)
3691     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3692   }
3693   if(!using_tlb) {
3694     if(!c) {
3695       jaddr2=(int)out;
3696       emit_jno(0);
3697     }
3698     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3699       jaddr2=(int)out;
3700       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3701     }
3702     #ifdef DESTRUCTIVE_SHIFT
3703     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3704       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3705     }
3706     #endif
3707   }else{
3708     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3709       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3710     }
3711     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3712       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3713     }
3714   }
3715   if (opcode[i]==0x31) { // LWC1
3716     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3717     //gen_tlb_addr_r(ar,map);
3718     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3719     #ifdef HOST_IMM_ADDR32
3720     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3721     else
3722     #endif
3723     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3724     type=LOADW_STUB;
3725   }
3726   if (opcode[i]==0x35) { // LDC1
3727     assert(th>=0);
3728     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3729     //gen_tlb_addr_r(ar,map);
3730     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3731     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3732     #ifdef HOST_IMM_ADDR32
3733     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3734     else
3735     #endif
3736     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3737     type=LOADD_STUB;
3738   }
3739   if (opcode[i]==0x39) { // SWC1
3740     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3741     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3742     type=STOREW_STUB;
3743   }
3744   if (opcode[i]==0x3D) { // SDC1
3745     assert(th>=0);
3746     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3747     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3748     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3749     type=STORED_STUB;
3750   }
3751   if(!using_tlb) {
3752     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3753       #ifndef DESTRUCTIVE_SHIFT
3754       temp=offset||c||s<0?ar:s;
3755       #endif
3756       #if defined(HOST_IMM8)
3757       int ir=get_reg(i_regs->regmap,INVCP);
3758       assert(ir>=0);
3759       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3760       #else
3761       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3762       #endif
3763       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3764       emit_callne(invalidate_addr_reg[temp]);
3765       #else
3766       jaddr3=(int)out;
3767       emit_jne(0);
3768       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3769       #endif
3770     }
3771   }
3772   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3773   if (opcode[i]==0x31) { // LWC1 (write float)
3774     emit_writeword_indexed(tl,0,temp);
3775   }
3776   if (opcode[i]==0x35) { // LDC1 (write double)
3777     emit_writeword_indexed(th,4,temp);
3778     emit_writeword_indexed(tl,0,temp);
3779   }
3780   //if(opcode[i]==0x39)
3781   /*if(opcode[i]==0x39||opcode[i]==0x31)
3782   {
3783     emit_pusha();
3784         emit_readword((int)&last_count,ECX);
3785         if(get_reg(i_regs->regmap,CCREG)<0)
3786           emit_loadreg(CCREG,HOST_CCREG);
3787         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3788         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3789         emit_writeword(HOST_CCREG,(int)&Count);
3790     emit_call((int)memdebug);
3791     emit_popa();
3792   }/**/
3793 #else
3794   cop1_unusable(i, i_regs);
3795 #endif
3796 }
3797
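// c2ls_assemble: GTE (COP2) loads and stores for the PSX.  LWC2 reads a
// word from memory into a GTE data register, SWC2 writes one out.  Known
// RAM addresses are accessed directly; anything else goes through a
// LOADW/STOREW stub, and SWC2 additionally checks invalid_code to catch
// stores into already-translated code.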
3798 void c2ls_assemble(int i,struct regstat *i_regs)
3799 {
3800   int s,tl;
3801   int ar;
3802   int offset;
3803   int memtarget=0,c=0;
3804   int jaddr2=0,jaddr3,type;
3805   int agr=AGEN1+(i&1);
3806   u_int hr,reglist=0;
3807   u_int copr=(source[i]>>16)&0x1f;
3808   s=get_reg(i_regs->regmap,rs1[i]);
3809   tl=get_reg(i_regs->regmap,FTEMP);
3810   offset=imm[i];
3811   assert(rs1[i]>0);
3812   assert(tl>=0);
3813   assert(!using_tlb);
3814
3815   for(hr=0;hr<HOST_REGS;hr++) {
3816     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3817   }
3818   if(i_regs->regmap[HOST_CCREG]==CCREG)
3819     reglist&=~(1<<HOST_CCREG);
3820
3821   // get the address
3822   if (opcode[i]==0x3a) { // SWC2
3823     ar=get_reg(i_regs->regmap,agr);
3824     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3825     reglist|=1<<ar;
3826   } else { // LWC2
3827     ar=tl;
3828   }
3829   if(s>=0) c=(i_regs->wasconst>>s)&1;
3830   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3831   if (!offset&&!c&&s>=0) ar=s;
3832   assert(ar>=0);
3833
3834   if (opcode[i]==0x3a) { // SWC2
3835     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3836     type=STOREW_STUB;
3837   }
3838   else
3839     type=LOADW_STUB;
3840
3841   if(c&&!memtarget) {
3842     jaddr2=(int)out;
3843     emit_jmp(0); // inline_readstub/inline_writestub?
3844   }
3845   else {
3846     if(!c) {
3847       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3848       jaddr2=(int)out;
3849       emit_jno(0);
3850     }
3851     if (opcode[i]==0x32) { // LWC2
3852       #ifdef HOST_IMM_ADDR32
3853       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3854       else
3855       #endif
3856       emit_readword_indexed(0,ar,tl);
3857     }
3858     if (opcode[i]==0x3a) { // SWC2
3859       #ifdef DESTRUCTIVE_SHIFT
3860       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3861       #endif
3862       emit_writeword_indexed(tl,0,ar);
3863     }
3864   }
3865   if(jaddr2)
3866     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3867   if (opcode[i]==0x3a) { // SWC2
3868 #if defined(HOST_IMM8)
3869     int ir=get_reg(i_regs->regmap,INVCP);
3870     assert(ir>=0);
3871     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3872 #else
3873     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3874 #endif
3875     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3876     emit_callne(invalidate_addr_reg[ar]);
3877     #else
3878     jaddr3=(int)out;
3879     emit_jne(0);
3880     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3881     #endif
3882   }
3883   if (opcode[i]==0x32) { // LWC2
3884     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3885   }
3886 }
3887
3888 #ifndef multdiv_assemble
3889 void multdiv_assemble(int i,struct regstat *i_regs)
3890 {
3891   printf("Need multdiv_assemble for this architecture.\n");
3892   exit(1);
3893 }
3894 #endif
3895
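// mov_assemble: MFHI/MFLO/MTHI/MTLO-style register moves.  Copies the
// source register (and its upper half, when one is allocated) into the
// destination.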
3896 void mov_assemble(int i,struct regstat *i_regs)
3897 {
3898   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3899   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3900   if(rt1[i]) {
3901     signed char sh,sl,th,tl;
3902     th=get_reg(i_regs->regmap,rt1[i]|64);
3903     tl=get_reg(i_regs->regmap,rt1[i]);
3904     //assert(tl>=0);
3905     if(tl>=0) {
3906       sh=get_reg(i_regs->regmap,rs1[i]|64);
3907       sl=get_reg(i_regs->regmap,rs1[i]);
3908       if(sl>=0) emit_mov(sl,tl);
3909       else emit_loadreg(rs1[i],tl);
3910       if(th>=0) {
3911         if(sh>=0) emit_mov(sh,th);
3912         else emit_loadreg(rs1[i]|64,th);
3913       }
3914     }
3915   }
3916 }
3917
3918 #ifndef fconv_assemble
3919 void fconv_assemble(int i,struct regstat *i_regs)
3920 {
3921   printf("Need fconv_assemble for this architecture.\n");
3922   exit(1);
3923 }
3924 #endif
3925
3926 #if 0
3927 void float_assemble(int i,struct regstat *i_regs)
3928 {
3929   printf("Need float_assemble for this architecture.\n");
3930   exit(1);
3931 }
3932 #endif
3933
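// syscall_assemble / hlecall_assemble / intcall_assemble: these all leave
// the recompiled code.  The current PC is placed in a known register, the
// cycle count is brought up to date, and control jumps to the matching
// handler (jump_syscall_hle, jump_hlecall, jump_intcall).  None of these
// are valid in a branch delay slot.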
3934 void syscall_assemble(int i,struct regstat *i_regs)
3935 {
3936   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3937   assert(ccreg==HOST_CCREG);
3938   assert(!is_delayslot);
3939   emit_movimm(start+i*4,EAX); // Get PC
3940   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3941   emit_jmp((int)jump_syscall_hle); // XXX
3942 }
3943
3944 void hlecall_assemble(int i,struct regstat *i_regs)
3945 {
3946   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3947   assert(ccreg==HOST_CCREG);
3948   assert(!is_delayslot);
3949   emit_movimm(start+i*4+4,0); // Get PC
3950   emit_movimm((int)psxHLEt[source[i]&7],1);
3951   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3952   emit_jmp((int)jump_hlecall);
3953 }
3954
3955 void intcall_assemble(int i,struct regstat *i_regs)
3956 {
3957   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3958   assert(ccreg==HOST_CCREG);
3959   assert(!is_delayslot);
3960   emit_movimm(start+i*4,0); // Get PC
3961   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3962   emit_jmp((int)jump_intcall);
3963 }
3964
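// ds_assemble: assemble the instruction sitting in a branch delay slot.
// is_delayslot is set around the dispatch so the per-type assemblers can
// emit delay-slot-safe code; a jump type here means a branch inside a
// delay slot, which is not supported.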
3965 void ds_assemble(int i,struct regstat *i_regs)
3966 {
3967   is_delayslot=1;
3968   switch(itype[i]) {
3969     case ALU:
3970       alu_assemble(i,i_regs);break;
3971     case IMM16:
3972       imm16_assemble(i,i_regs);break;
3973     case SHIFT:
3974       shift_assemble(i,i_regs);break;
3975     case SHIFTIMM:
3976       shiftimm_assemble(i,i_regs);break;
3977     case LOAD:
3978       load_assemble(i,i_regs);break;
3979     case LOADLR:
3980       loadlr_assemble(i,i_regs);break;
3981     case STORE:
3982       store_assemble(i,i_regs);break;
3983     case STORELR:
3984       storelr_assemble(i,i_regs);break;
3985     case COP0:
3986       cop0_assemble(i,i_regs);break;
3987     case COP1:
3988       cop1_assemble(i,i_regs);break;
3989     case C1LS:
3990       c1ls_assemble(i,i_regs);break;
3991     case COP2:
3992       cop2_assemble(i,i_regs);break;
3993     case C2LS:
3994       c2ls_assemble(i,i_regs);break;
3995     case C2OP:
3996       c2op_assemble(i,i_regs);break;
3997     case FCONV:
3998       fconv_assemble(i,i_regs);break;
3999     case FLOAT:
4000       float_assemble(i,i_regs);break;
4001     case FCOMP:
4002       fcomp_assemble(i,i_regs);break;
4003     case MULTDIV:
4004       multdiv_assemble(i,i_regs);break;
4005     case MOV:
4006       mov_assemble(i,i_regs);break;
4007     case SYSCALL:
4008     case HLECALL:
4009     case INTCALL:
4010     case SPAN:
4011     case UJUMP:
4012     case RJUMP:
4013     case CJUMP:
4014     case SJUMP:
4015     case FJUMP:
4016       printf("Jump in the delay slot.  This is probably a bug.\n");
4017   }
4018   is_delayslot=0;
4019 }
4020
4021 // Is the branch target a valid internal jump?
4022 int internal_branch(uint64_t i_is32,int addr)
4023 {
4024   if(addr&1) return 0; // Indirect (register) jump
4025   if(addr>=start && addr<start+slen*4-4)
4026   {
4027     int t=(addr-start)>>2;
4028     // Delay slots are not valid branch targets
4029     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4030     // 64 -> 32 bit transition requires a recompile
4031     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
4032     {
4033       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
4034       else printf("optimizable: yes\n");
4035     }*/
4036     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4037 #ifndef FORCE32
4038     if(requires_32bit[t]&~i_is32) return 0;
4039     else
4040 #endif
4041       return 1;
4042   }
4043   return 0;
4044 }
4045
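// wb_invalidate: called when the register mapping changes between two
// points.  Any host register that held a dirty guest register which is no
// longer mapped is written back (unless the value is unneeded), and values
// that merely moved to a different host register are copied without a
// writeback.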
4046 #ifndef wb_invalidate
4047 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
4048   uint64_t u,uint64_t uu)
4049 {
4050   int hr;
4051   for(hr=0;hr<HOST_REGS;hr++) {
4052     if(hr!=EXCLUDE_REG) {
4053       if(pre[hr]!=entry[hr]) {
4054         if(pre[hr]>=0) {
4055           if((dirty>>hr)&1) {
4056             if(get_reg(entry,pre[hr])<0) {
4057               if(pre[hr]<64) {
4058                 if(!((u>>pre[hr])&1)) {
4059                   emit_storereg(pre[hr],hr);
4060                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
4061                     emit_sarimm(hr,31,hr);
4062                     emit_storereg(pre[hr]|64,hr);
4063                   }
4064                 }
4065               }else{
4066                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
4067                   emit_storereg(pre[hr],hr);
4068                 }
4069               }
4070             }
4071           }
4072         }
4073       }
4074     }
4075   }
4076   // Move from one register to another (no writeback)
4077   for(hr=0;hr<HOST_REGS;hr++) {
4078     if(hr!=EXCLUDE_REG) {
4079       if(pre[hr]!=entry[hr]) {
4080         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
4081           int nr;
4082           if((nr=get_reg(entry,pre[hr]))>=0) {
4083             emit_mov(hr,nr);
4084           }
4085         }
4086       }
4087     }
4088   }
4089 }
4090 #endif
4091
4092 // Load the specified registers
4093 // This only loads the registers given as arguments because
4094 // we don't want to load things that will be overwritten
4095 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
4096 {
4097   int hr;
4098   // Load 32-bit regs
4099   for(hr=0;hr<HOST_REGS;hr++) {
4100     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4101       if(entry[hr]!=regmap[hr]) {
4102         if(regmap[hr]==rs1||regmap[hr]==rs2)
4103         {
4104           if(regmap[hr]==0) {
4105             emit_zeroreg(hr);
4106           }
4107           else
4108           {
4109             emit_loadreg(regmap[hr],hr);
4110           }
4111         }
4112       }
4113     }
4114   }
4115   // Load 64-bit regs
4116   for(hr=0;hr<HOST_REGS;hr++) {
4117     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4118       if(entry[hr]!=regmap[hr]) {
4119         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
4120         {
4121           assert(regmap[hr]!=64);
4122           if((is32>>(regmap[hr]&63))&1) {
4123             int lr=get_reg(regmap,regmap[hr]-64);
4124             if(lr>=0)
4125               emit_sarimm(lr,31,hr);
4126             else
4127               emit_loadreg(regmap[hr],hr);
4128           }
4129           else
4130           {
4131             emit_loadreg(regmap[hr],hr);
4132           }
4133         }
4134       }
4135     }
4136   }
4137 }
4138
4139 // Load registers prior to the start of a loop
4140 // so that they are not loaded within the loop
4141 static void loop_preload(signed char pre[],signed char entry[])
4142 {
4143   int hr;
4144   for(hr=0;hr<HOST_REGS;hr++) {
4145     if(hr!=EXCLUDE_REG) {
4146       if(pre[hr]!=entry[hr]) {
4147         if(entry[hr]>=0) {
4148           if(get_reg(pre,entry[hr])<0) {
4149             assem_debug("loop preload:\n");
4150             //printf("loop preload: %d\n",hr);
4151             if(entry[hr]==0) {
4152               emit_zeroreg(hr);
4153             }
4154             else if(entry[hr]<TEMPREG)
4155             {
4156               emit_loadreg(entry[hr],hr);
4157             }
4158             else if(entry[hr]-64<TEMPREG)
4159             {
4160               emit_loadreg(entry[hr],hr);
4161             }
4162           }
4163         }
4164       }
4165     }
4166   }
4167 }
4168
4169 // Generate address for load/store instruction
4170 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
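// Two temporaries alternate between even and odd instructions
// (AGEN1+(i&1), MGEN1+(i&1)) so the address for instruction i+1 can be
// precomputed while instruction i still needs its own.  Known-constant
// addresses are materialized directly with emit_movimm; otherwise the
// base register plus offset is added at the end.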
4171 void address_generation(int i,struct regstat *i_regs,signed char entry[])
4172 {
4173   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
4174     int ra=-1;
4175     int agr=AGEN1+(i&1);
4176     int mgr=MGEN1+(i&1);
4177     if(itype[i]==LOAD) {
4178       ra=get_reg(i_regs->regmap,rt1[i]);
4179       if(ra<0) ra=get_reg(i_regs->regmap,-1); 
4180       assert(ra>=0);
4181     }
4182     if(itype[i]==LOADLR) {
4183       ra=get_reg(i_regs->regmap,FTEMP);
4184     }
4185     if(itype[i]==STORE||itype[i]==STORELR) {
4186       ra=get_reg(i_regs->regmap,agr);
4187       if(ra<0) ra=get_reg(i_regs->regmap,-1);
4188     }
4189     if(itype[i]==C1LS||itype[i]==C2LS) {
4190       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
4191         ra=get_reg(i_regs->regmap,FTEMP);
4192       else { // SWC1/SDC1/SWC2/SDC2
4193         ra=get_reg(i_regs->regmap,agr);
4194         if(ra<0) ra=get_reg(i_regs->regmap,-1);
4195       }
4196     }
4197     int rs=get_reg(i_regs->regmap,rs1[i]);
4198     int rm=get_reg(i_regs->regmap,TLREG);
4199     if(ra>=0) {
4200       int offset=imm[i];
4201       int c=(i_regs->wasconst>>rs)&1;
4202       if(rs1[i]==0) {
4203         // Using r0 as a base address
4204         /*if(rm>=0) {
4205           if(!entry||entry[rm]!=mgr) {
4206             generate_map_const(offset,rm);
4207           } // else did it in the previous cycle
4208         }*/
4209         if(!entry||entry[ra]!=agr) {
4210           if (opcode[i]==0x22||opcode[i]==0x26) {
4211             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4212           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4213             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4214           }else{
4215             emit_movimm(offset,ra);
4216           }
4217         } // else did it in the previous cycle
4218       }
4219       else if(rs<0) {
4220         if(!entry||entry[ra]!=rs1[i])
4221           emit_loadreg(rs1[i],ra);
4222         //if(!entry||entry[ra]!=rs1[i])
4223         //  printf("poor load scheduling!\n");
4224       }
4225       else if(c) {
4226         if(rm>=0) {
4227           if(!entry||entry[rm]!=mgr) {
4228             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4229               // Stores to memory go through the mapper to detect self-modifying
4230               // code, loads don't.
4231               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4232                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4233                 generate_map_const(constmap[i][rs]+offset,rm);
4234             }else{
4235               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4236                 generate_map_const(constmap[i][rs]+offset,rm);
4237             }
4238           }
4239         }
4240         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4241           if(!entry||entry[ra]!=agr) {
4242             if (opcode[i]==0x22||opcode[i]==0x26) {
4243               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4244             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4245               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4246             }else{
4247               #ifdef HOST_IMM_ADDR32
4248               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4249                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4250               #endif
4251               emit_movimm(constmap[i][rs]+offset,ra);
4252             }
4253           } // else did it in the previous cycle
4254         } // else load_consts already did it
4255       }
4256       if(offset&&!c&&rs1[i]) {
4257         if(rs>=0) {
4258           emit_addimm(rs,offset,ra);
4259         }else{
4260           emit_addimm(ra,offset,ra);
4261         }
4262       }
4263     }
4264   }
4265   // Preload constants for next instruction
4266   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4267     int agr,ra;
4268     #ifndef HOST_IMM_ADDR32
4269     // Mapper entry
4270     agr=MGEN1+((i+1)&1);
4271     ra=get_reg(i_regs->regmap,agr);
4272     if(ra>=0) {
4273       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4274       int offset=imm[i+1];
4275       int c=(regs[i+1].wasconst>>rs)&1;
4276       if(c) {
4277         if(itype[i+1]==STORE||itype[i+1]==STORELR
4278            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4279           // Stores to memory go through the mapper to detect self-modifying
4280           // code, loads don't.
4281           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4282              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4283             generate_map_const(constmap[i+1][rs]+offset,ra);
4284         }else{
4285           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4286             generate_map_const(constmap[i+1][rs]+offset,ra);
4287         }
4288       }
4289       /*else if(rs1[i]==0) {
4290         generate_map_const(offset,ra);
4291       }*/
4292     }
4293     #endif
4294     // Actual address
4295     agr=AGEN1+((i+1)&1);
4296     ra=get_reg(i_regs->regmap,agr);
4297     if(ra>=0) {
4298       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4299       int offset=imm[i+1];
4300       int c=(regs[i+1].wasconst>>rs)&1;
4301       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4302         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4303           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4304         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4305           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4306         }else{
4307           #ifdef HOST_IMM_ADDR32
4308           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4309              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4310           #endif
4311           emit_movimm(constmap[i+1][rs]+offset,ra);
4312         }
4313       }
4314       else if(rs1[i+1]==0) {
4315         // Using r0 as a base address
4316         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4317           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4318         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4319           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4320         }else{
4321           emit_movimm(offset,ra);
4322         }
4323       }
4324     }
4325   }
4326 }
4327
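// get_final_value: follow a constant forward through consecutive
// instructions that keep the same register mapped in hr and return the
// last value it takes, so a single emit_movimm can cover all of them.
// A load through the same register immediately after (possibly in a
// branch delay slot) gets its address precomputed instead.  Returns 0
// when loading the constant can be skipped.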
4328 int get_final_value(int hr, int i, int *value)
4329 {
4330   int reg=regs[i].regmap[hr];
4331   while(i<slen-1) {
4332     if(regs[i+1].regmap[hr]!=reg) break;
4333     if(!((regs[i+1].isconst>>hr)&1)) break;
4334     if(bt[i+1]) break;
4335     i++;
4336   }
4337   if(i<slen-1) {
4338     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4339       *value=constmap[i][hr];
4340       return 1;
4341     }
4342     if(!bt[i+1]) {
4343       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4344         // Load in delay slot, out-of-order execution
4345         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4346         {
4347           #ifdef HOST_IMM_ADDR32
4348           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4349           #endif
4350           // Precompute load address
4351           *value=constmap[i][hr]+imm[i+2];
4352           return 1;
4353         }
4354       }
4355       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4356       {
4357         #ifdef HOST_IMM_ADDR32
4358         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4359         #endif
4360         // Precompute load address
4361         *value=constmap[i][hr]+imm[i+1];
4362         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4363         return 1;
4364       }
4365     }
4366   }
4367   *value=constmap[i][hr];
4368   //printf("c=%x\n",(int)constmap[i][hr]);
4369   if(i==slen-1) return 1;
4370   if(reg<64) {
4371     return !((unneeded_reg[i+1]>>reg)&1);
4372   }else{
4373     return !((unneeded_reg_upper[i+1]>>reg)&1);
4374   }
4375 }
4376
4377 // Load registers with known constants
4378 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4379 {
4380   int hr;
4381   // Load 32-bit regs
4382   for(hr=0;hr<HOST_REGS;hr++) {
4383     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4384       //if(entry[hr]!=regmap[hr]) {
4385       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4386         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4387           int value;
4388           if(get_final_value(hr,i,&value)) {
4389             if(value==0) {
4390               emit_zeroreg(hr);
4391             }
4392             else {
4393               emit_movimm(value,hr);
4394             }
4395           }
4396         }
4397       }
4398     }
4399   }
4400   // Load 64-bit regs
4401   for(hr=0;hr<HOST_REGS;hr++) {
4402     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4403       //if(entry[hr]!=regmap[hr]) {
4404       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4405         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4406           if((is32>>(regmap[hr]&63))&1) {
4407             int lr=get_reg(regmap,regmap[hr]-64);
4408             assert(lr>=0);
4409             emit_sarimm(lr,31,hr);
4410           }
4411           else
4412           {
4413             int value;
4414             if(get_final_value(hr,i,&value)) {
4415               if(value==0) {
4416                 emit_zeroreg(hr);
4417               }
4418               else {
4419                 emit_movimm(value,hr);
4420               }
4421             }
4422           }
4423         }
4424       }
4425     }
4426   }
4427 }
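// load_all_consts: load every host register that is both dirty and holds a
// known constant at instruction i.  Unlike load_consts, no check is made
// for whether the previous instruction already had the constant loaded.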
4428 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4429 {
4430   int hr;
4431   // Load 32-bit regs
4432   for(hr=0;hr<HOST_REGS;hr++) {
4433     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4434       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4435         int value=constmap[i][hr];
4436         if(value==0) {
4437           emit_zeroreg(hr);
4438         }
4439         else {
4440           emit_movimm(value,hr);
4441         }
4442       }
4443     }
4444   }
4445   // Load 64-bit regs
4446   for(hr=0;hr<HOST_REGS;hr++) {
4447     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4448       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4449         if((is32>>(regmap[hr]&63))&1) {
4450           int lr=get_reg(regmap,regmap[hr]-64);
4451           assert(lr>=0);
4452           emit_sarimm(lr,31,hr);
4453         }
4454         else
4455         {
4456           int value=constmap[i][hr];
4457           if(value==0) {
4458             emit_zeroreg(hr);
4459           }
4460           else {
4461             emit_movimm(value,hr);
4462           }
4463         }
4464       }
4465     }
4466   }
4467 }
4468
4469 // Write out all dirty registers (except cycle count)
4470 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4471 {
4472   int hr;
4473   for(hr=0;hr<HOST_REGS;hr++) {
4474     if(hr!=EXCLUDE_REG) {
4475       if(i_regmap[hr]>0) {
4476         if(i_regmap[hr]!=CCREG) {
4477           if((i_dirty>>hr)&1) {
4478             if(i_regmap[hr]<64) {
4479               emit_storereg(i_regmap[hr],hr);
4480 #ifndef FORCE32
4481               if( ((i_is32>>i_regmap[hr])&1) ) {
4482                 #ifdef DESTRUCTIVE_WRITEBACK
4483                 emit_sarimm(hr,31,hr);
4484                 emit_storereg(i_regmap[hr]|64,hr);
4485                 #else
4486                 emit_sarimm(hr,31,HOST_TEMPREG);
4487                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4488                 #endif
4489               }
4490 #endif
4491             }else{
4492               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4493                 emit_storereg(i_regmap[hr],hr);
4494               }
4495             }
4496           }
4497         }
4498       }
4499     }
4500   }
4501 }
4502 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4503 // This writes the registers not written by store_regs_bt
4504 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4505 {
4506   int hr;
4507   int t=(addr-start)>>2;
4508   for(hr=0;hr<HOST_REGS;hr++) {
4509     if(hr!=EXCLUDE_REG) {
4510       if(i_regmap[hr]>0) {
4511         if(i_regmap[hr]!=CCREG) {
4512           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4513             if((i_dirty>>hr)&1) {
4514               if(i_regmap[hr]<64) {
4515                 emit_storereg(i_regmap[hr],hr);
4516 #ifndef FORCE32
4517                 if( ((i_is32>>i_regmap[hr])&1) ) {
4518                   #ifdef DESTRUCTIVE_WRITEBACK
4519                   emit_sarimm(hr,31,hr);
4520                   emit_storereg(i_regmap[hr]|64,hr);
4521                   #else
4522                   emit_sarimm(hr,31,HOST_TEMPREG);
4523                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4524                   #endif
4525                 }
4526 #endif
4527               }else{
4528                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4529                   emit_storereg(i_regmap[hr],hr);
4530                 }
4531               }
4532             }
4533           }
4534         }
4535       }
4536     }
4537   }
4538 }
4539
4540 // Load all registers (except cycle count)
4541 void load_all_regs(signed char i_regmap[])
4542 {
4543   int hr;
4544   for(hr=0;hr<HOST_REGS;hr++) {
4545     if(hr!=EXCLUDE_REG) {
4546       if(i_regmap[hr]==0) {
4547         emit_zeroreg(hr);
4548       }
4549       else
4550       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4551       {
4552         emit_loadreg(i_regmap[hr],hr);
4553       }
4554     }
4555   }
4556 }
4557
4558 // Load all current registers also needed by next instruction
4559 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4560 {
4561   int hr;
4562   for(hr=0;hr<HOST_REGS;hr++) {
4563     if(hr!=EXCLUDE_REG) {
4564       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4565         if(i_regmap[hr]==0) {
4566           emit_zeroreg(hr);
4567         }
4568         else
4569         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4570         {
4571           emit_loadreg(i_regmap[hr],hr);
4572         }
4573       }
4574     }
4575   }
4576 }
4577
4578 // Load all regs, storing cycle count if necessary
4579 void load_regs_entry(int t)
4580 {
4581   int hr;
4582   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4583   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4584   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4585     emit_storereg(CCREG,HOST_CCREG);
4586   }
4587   // Load 32-bit regs
4588   for(hr=0;hr<HOST_REGS;hr++) {
4589     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4590       if(regs[t].regmap_entry[hr]==0) {
4591         emit_zeroreg(hr);
4592       }
4593       else if(regs[t].regmap_entry[hr]!=CCREG)
4594       {
4595         emit_loadreg(regs[t].regmap_entry[hr],hr);
4596       }
4597     }
4598   }
4599   // Load 64-bit regs
4600   for(hr=0;hr<HOST_REGS;hr++) {
4601     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4602       assert(regs[t].regmap_entry[hr]!=64);
4603       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4604         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4605         if(lr<0) {
4606           emit_loadreg(regs[t].regmap_entry[hr],hr);
4607         }
4608         else
4609         {
4610           emit_sarimm(lr,31,hr);
4611         }
4612       }
4613       else
4614       {
4615         emit_loadreg(regs[t].regmap_entry[hr],hr);
4616       }
4617     }
4618   }
4619 }
4620
4621 // Store dirty registers prior to branch
4622 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4623 {
4624   if(internal_branch(i_is32,addr))
4625   {
4626     int t=(addr-start)>>2;
4627     int hr;
4628     for(hr=0;hr<HOST_REGS;hr++) {
4629       if(hr!=EXCLUDE_REG) {
4630         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4631           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4632             if((i_dirty>>hr)&1) {
4633               if(i_regmap[hr]<64) {
4634                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4635                   emit_storereg(i_regmap[hr],hr);
4636                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4637                     #ifdef DESTRUCTIVE_WRITEBACK
4638                     emit_sarimm(hr,31,hr);
4639                     emit_storereg(i_regmap[hr]|64,hr);
4640                     #else
4641                     emit_sarimm(hr,31,HOST_TEMPREG);
4642                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4643                     #endif
4644                   }
4645                 }
4646               }else{
4647                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4648                   emit_storereg(i_regmap[hr],hr);
4649                 }
4650               }
4651             }
4652           }
4653         }
4654       }
4655     }
4656   }
4657   else
4658   {
4659     // Branch out of this block, write out all dirty regs
4660     wb_dirtys(i_regmap,i_is32,i_dirty);
4661   }
4662 }
4663
4664 // Load all needed registers for branch target
4665 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4666 {
4667   //if(addr>=start && addr<(start+slen*4))
4668   if(internal_branch(i_is32,addr))
4669   {
4670     int t=(addr-start)>>2;
4671     int hr;
4672     // Store the cycle count before loading something else
4673     if(i_regmap[HOST_CCREG]!=CCREG) {
4674       assert(i_regmap[HOST_CCREG]==-1);
4675     }
4676     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4677       emit_storereg(CCREG,HOST_CCREG);
4678     }
4679     // Load 32-bit regs
4680     for(hr=0;hr<HOST_REGS;hr++) {
4681       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4682         #ifdef DESTRUCTIVE_WRITEBACK
4683         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4684         #else
4685         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4686         #endif
4687           if(regs[t].regmap_entry[hr]==0) {
4688             emit_zeroreg(hr);
4689           }
4690           else if(regs[t].regmap_entry[hr]!=CCREG)
4691           {
4692             emit_loadreg(regs[t].regmap_entry[hr],hr);
4693           }
4694         }
4695       }
4696     }
4697     // Load 64-bit regs
4698     for(hr=0;hr<HOST_REGS;hr++) {
4699       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4700         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4701           assert(regs[t].regmap_entry[hr]!=64);
4702           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4703             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4704             if(lr<0) {
4705               emit_loadreg(regs[t].regmap_entry[hr],hr);
4706             }
4707             else
4708             {
4709               emit_sarimm(lr,31,hr);
4710             }
4711           }
4712           else
4713           {
4714             emit_loadreg(regs[t].regmap_entry[hr],hr);
4715           }
4716         }
4717         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4718           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4719           assert(lr>=0);
4720           emit_sarimm(lr,31,hr);
4721         }
4722       }
4723     }
4724   }
4725 }
4726
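// match_bt: decide whether the current register state (mapping, dirtiness,
// 32/64-bit status) is compatible with the recorded entry state of the
// branch target, so the branch can jump straight there without writeback
// or reload code.  Targets outside the block match only if nothing except
// the cycle count is dirty.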
4727 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4728 {
4729   if(addr>=start && addr<start+slen*4-4)
4730   {
4731     int t=(addr-start)>>2;
4732     int hr;
4733     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4734     for(hr=0;hr<HOST_REGS;hr++)
4735     {
4736       if(hr!=EXCLUDE_REG)
4737       {
4738         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4739         {
4740           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4741           {
4742             return 0;
4743           }
4744           else 
4745           if((i_dirty>>hr)&1)
4746           {
4747             if(i_regmap[hr]<TEMPREG)
4748             {
4749               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4750                 return 0;
4751             }
4752             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4753             {
4754               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4755                 return 0;
4756             }
4757           }
4758         }
4759         else // Same register but is it 32-bit or dirty?
4760         if(i_regmap[hr]>=0)
4761         {
4762           if(!((regs[t].dirty>>hr)&1))
4763           {
4764             if((i_dirty>>hr)&1)
4765             {
4766               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4767               {
4768                 //printf("%x: dirty no match\n",addr);
4769                 return 0;
4770               }
4771             }
4772           }
4773           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4774           {
4775             //printf("%x: is32 no match\n",addr);
4776             return 0;
4777           }
4778         }
4779       }
4780     }
4781     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4782 #ifndef FORCE32
4783     if(requires_32bit[t]&~i_is32) return 0;
4784 #endif
4785     // Delay slots are not valid branch targets
4786     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4787     // Delay slots require additional processing, so do not match
4788     if(is_ds[t]) return 0;
4789   }
4790   else
4791   {
4792     int hr;
4793     for(hr=0;hr<HOST_REGS;hr++)
4794     {
4795       if(hr!=EXCLUDE_REG)
4796       {
4797         if(i_regmap[hr]>=0)
4798         {
4799           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4800           {
4801             if((i_dirty>>hr)&1)
4802             {
4803               return 0;
4804             }
4805           }
4806         }
4807       }
4808     }
4809   }
4810   return 1;
4811 }
4812
4813 // Used when a branch jumps into the delay slot of another branch
4814 void ds_assemble_entry(int i)
4815 {
4816   int t=(ba[i]-start)>>2;
4817   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4818   assem_debug("Assemble delay slot at %x\n",ba[i]);
4819   assem_debug("<->\n");
4820   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4821     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4822   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4823   address_generation(t,&regs[t],regs[t].regmap_entry);
4824   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4825     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4826   cop1_usable=0;
4827   is_delayslot=0;
4828   switch(itype[t]) {
4829     case ALU:
4830       alu_assemble(t,&regs[t]);break;
4831     case IMM16:
4832       imm16_assemble(t,&regs[t]);break;
4833     case SHIFT:
4834       shift_assemble(t,&regs[t]);break;
4835     case SHIFTIMM:
4836       shiftimm_assemble(t,&regs[t]);break;
4837     case LOAD:
4838       load_assemble(t,&regs[t]);break;
4839     case LOADLR:
4840       loadlr_assemble(t,&regs[t]);break;
4841     case STORE:
4842       store_assemble(t,&regs[t]);break;
4843     case STORELR:
4844       storelr_assemble(t,&regs[t]);break;
4845     case COP0:
4846       cop0_assemble(t,&regs[t]);break;
4847     case COP1:
4848       cop1_assemble(t,&regs[t]);break;
4849     case C1LS:
4850       c1ls_assemble(t,&regs[t]);break;
4851     case COP2:
4852       cop2_assemble(t,&regs[t]);break;
4853     case C2LS:
4854       c2ls_assemble(t,&regs[t]);break;
4855     case C2OP:
4856       c2op_assemble(t,&regs[t]);break;
4857     case FCONV:
4858       fconv_assemble(t,&regs[t]);break;
4859     case FLOAT:
4860       float_assemble(t,&regs[t]);break;
4861     case FCOMP:
4862       fcomp_assemble(t,&regs[t]);break;
4863     case MULTDIV:
4864       multdiv_assemble(t,&regs[t]);break;
4865     case MOV:
4866       mov_assemble(t,&regs[t]);break;
4867     case SYSCALL:
4868     case HLECALL:
4869     case INTCALL:
4870     case SPAN:
4871     case UJUMP:
4872     case RJUMP:
4873     case CJUMP:
4874     case SJUMP:
4875     case FJUMP:
4876       printf("Jump in the delay slot.  This is probably a bug.\n");
4877   }
4878   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4879   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4880   if(internal_branch(regs[t].is32,ba[i]+4))
4881     assem_debug("branch: internal\n");
4882   else
4883     assem_debug("branch: external\n");
4884   assert(internal_branch(regs[t].is32,ba[i]+4));
4885   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4886   emit_jmp(0);
4887 }
4888
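// do_cc: emit the cycle-count adjustment and the event check before a
// branch; when the count has run out, a CC_STUB is taken to deliver
// pending events.  A branch to itself with a NOP in the delay slot is
// treated as an idle loop: the cycle counter is masked so control returns
// to the event handler almost immediately instead of spinning.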
4889 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4890 {
4891   int count;
4892   int jaddr;
4893   int idle=0;
4894   if(itype[i]==RJUMP)
4895   {
4896     *adj=0;
4897   }
4898   //if(ba[i]>=start && ba[i]<(start+slen*4))
4899   if(internal_branch(branch_regs[i].is32,ba[i]))
4900   {
4901     int t=(ba[i]-start)>>2;
4902     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4903     else *adj=ccadj[t];
4904   }
4905   else
4906   {
4907     *adj=0;
4908   }
4909   count=ccadj[i];
4910   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4911     // Idle loop
4912     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4913     idle=(int)out;
4914     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4915     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4916     jaddr=(int)out;
4917     emit_jmp(0);
4918   }
4919   else if(*adj==0||invert) {
4920     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4921     jaddr=(int)out;
4922     emit_jns(0);
4923   }
4924   else
4925   {
4926     emit_cmpimm(HOST_CCREG,-CLOCK_DIVIDER*(count+2));
4927     jaddr=(int)out;
4928     emit_jns(0);
4929   }
4930   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4931 }
4932
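// do_ccstub: out-of-line continuation of the cycle-count check emitted by
// do_cc.  Writes back the register state of whichever path was interrupted
// (nullified delay slot, not-taken path, or taken path), then stores the
// resume address into pcaddr - re-evaluating the branch condition with
// conditional-move sequences when the return address depends on which way
// the branch goes.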
4933 void do_ccstub(int n)
4934 {
4935   literal_pool(256);
4936   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4937   set_jump_target(stubs[n][1],(int)out);
4938   int i=stubs[n][4];
4939   if(stubs[n][6]==NULLDS) {
4940     // Delay slot instruction is nullified ("likely" branch)
4941     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4942   }
4943   else if(stubs[n][6]!=TAKEN) {
4944     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4945   }
4946   else {
4947     if(internal_branch(branch_regs[i].is32,ba[i]))
4948       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4949   }
4950   if(stubs[n][5]!=-1)
4951   {
4952     // Save PC as return address
4953     emit_movimm(stubs[n][5],EAX);
4954     emit_writeword(EAX,(int)&pcaddr);
4955   }
4956   else
4957   {
4958     // Return address depends on which way the branch goes
4959     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4960     {
4961       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4962       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4963       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4964       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4965       if(rs1[i]==0)
4966       {
4967         s1l=s2l;s1h=s2h;
4968         s2l=s2h=-1;
4969       }
4970       else if(rs2[i]==0)
4971       {
4972         s2l=s2h=-1;
4973       }
4974       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4975         s1h=s2h=-1;
4976       }
4977       assert(s1l>=0);
4978       #ifdef DESTRUCTIVE_WRITEBACK
4979       if(rs1[i]) {
4980         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4981           emit_loadreg(rs1[i],s1l);
4982       } 
4983       else {
4984         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4985           emit_loadreg(rs2[i],s1l);
4986       }
4987       if(s2l>=0)
4988         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4989           emit_loadreg(rs2[i],s2l);
4990       #endif
4991       int hr=0;
4992       int addr=-1,alt=-1,ntaddr=-1;
4993       while(hr<HOST_REGS)
4994       {
4995         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4996            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4997            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4998         {
4999           addr=hr++;break;
5000         }
5001         hr++;
5002       }
5003       while(hr<HOST_REGS)
5004       {
5005         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5006            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5007            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5008         {
5009           alt=hr++;break;
5010         }
5011         hr++;
5012       }
5013       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5014       {
5015         while(hr<HOST_REGS)
5016         {
5017           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5018              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5019              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5020           {
5021             ntaddr=hr;break;
5022           }
5023           hr++;
5024         }
5025         assert(hr<HOST_REGS);
5026       }
5027       if((opcode[i]&0x2f)==4) // BEQ
5028       {
5029         #ifdef HAVE_CMOV_IMM
5030         if(s1h<0) {
5031           if(s2l>=0) emit_cmp(s1l,s2l);
5032           else emit_test(s1l,s1l);
5033           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5034         }
5035         else
5036         #endif
5037         {
5038           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5039           if(s1h>=0) {
5040             if(s2h>=0) emit_cmp(s1h,s2h);
5041             else emit_test(s1h,s1h);
5042             emit_cmovne_reg(alt,addr);
5043           }
5044           if(s2l>=0) emit_cmp(s1l,s2l);
5045           else emit_test(s1l,s1l);
5046           emit_cmovne_reg(alt,addr);
5047         }
5048       }
5049       if((opcode[i]&0x2f)==5) // BNE
5050       {
5051         #ifdef HAVE_CMOV_IMM
5052         if(s1h<0) {
5053           if(s2l>=0) emit_cmp(s1l,s2l);
5054           else emit_test(s1l,s1l);
5055           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5056         }
5057         else
5058         #endif
5059         {
5060           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5061           if(s1h>=0) {
5062             if(s2h>=0) emit_cmp(s1h,s2h);
5063             else emit_test(s1h,s1h);
5064             emit_cmovne_reg(alt,addr);
5065           }
5066           if(s2l>=0) emit_cmp(s1l,s2l);
5067           else emit_test(s1l,s1l);
5068           emit_cmovne_reg(alt,addr);
5069         }
5070       }
5071       if((opcode[i]&0x2f)==6) // BLEZ
5072       {
5073         //emit_movimm(ba[i],alt);
5074         //emit_movimm(start+i*4+8,addr);
5075         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5076         emit_cmpimm(s1l,1);
5077         if(s1h>=0) emit_mov(addr,ntaddr);
5078         emit_cmovl_reg(alt,addr);
5079         if(s1h>=0) {
5080           emit_test(s1h,s1h);
5081           emit_cmovne_reg(ntaddr,addr);
5082           emit_cmovs_reg(alt,addr);
5083         }
5084       }
5085       if((opcode[i]&0x2f)==7) // BGTZ
5086       {
5087         //emit_movimm(ba[i],addr);
5088         //emit_movimm(start+i*4+8,ntaddr);
5089         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5090         emit_cmpimm(s1l,1);
5091         if(s1h>=0) emit_mov(addr,alt);
5092         emit_cmovl_reg(ntaddr,addr);
5093         if(s1h>=0) {
5094           emit_test(s1h,s1h);
5095           emit_cmovne_reg(alt,addr);
5096           emit_cmovs_reg(ntaddr,addr);
5097         }
5098       }
5099       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
5100       {
5101         //emit_movimm(ba[i],alt);
5102         //emit_movimm(start+i*4+8,addr);
5103         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5104         if(s1h>=0) emit_test(s1h,s1h);
5105         else emit_test(s1l,s1l);
5106         emit_cmovs_reg(alt,addr);
5107       }
5108       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
5109       {
5110         //emit_movimm(ba[i],addr);
5111         //emit_movimm(start+i*4+8,alt);
5112         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5113         if(s1h>=0) emit_test(s1h,s1h);
5114         else emit_test(s1l,s1l);
5115         emit_cmovs_reg(alt,addr);
5116       }
5117       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5118         if(source[i]&0x10000) // BC1T
5119         {
5120           //emit_movimm(ba[i],alt);
5121           //emit_movimm(start+i*4+8,addr);
5122           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5123           emit_testimm(s1l,0x800000);
5124           emit_cmovne_reg(alt,addr);
5125         }
5126         else // BC1F
5127         {
5128           //emit_movimm(ba[i],addr);
5129           //emit_movimm(start+i*4+8,alt);
5130           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5131           emit_testimm(s1l,0x800000);
5132           emit_cmovne_reg(alt,addr);
5133         }
5134       }
5135       emit_writeword(addr,(int)&pcaddr);
5136     }
5137     else
5138     if(itype[i]==RJUMP)
5139     {
5140       int r=get_reg(branch_regs[i].regmap,rs1[i]);
5141       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5142         r=get_reg(branch_regs[i].regmap,RTEMP);
5143       }
5144       emit_writeword(r,(int)&pcaddr);
5145     }
5146     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
5147   }
5148   // Update cycle count
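  // stubs[n][3] is the pending cycle adjustment for this stub; it is applied
  // only around the cc_interrupt() call below and undone again before
  // jumping back, since the compiled code being returned to does its own
  // cycle accounting.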
5149   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5150   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5151   emit_call((int)cc_interrupt);
5152   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5153   if(stubs[n][6]==TAKEN) {
5154     if(internal_branch(branch_regs[i].is32,ba[i]))
5155       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5156     else if(itype[i]==RJUMP) {
5157       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5158         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5159       else
5160         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5161     }
5162   }else if(stubs[n][6]==NOTTAKEN) {
5163     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5164     else load_all_regs(branch_regs[i].regmap);
5165   }else if(stubs[n][6]==NULLDS) {
5166     // Delay slot instruction is nullified ("likely" branch)
5167     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5168     else load_all_regs(regs[i].regmap);
5169   }else{
5170     load_all_regs(branch_regs[i].regmap);
5171   }
5172   emit_jmp(stubs[n][2]); // return address
5173   
5174   /* This works but uses a lot of memory...
5175   emit_readword((int)&last_count,ECX);
5176   emit_add(HOST_CCREG,ECX,EAX);
5177   emit_writeword(EAX,(int)&Count);
5178   emit_call((int)gen_interupt);
5179   emit_readword((int)&Count,HOST_CCREG);
5180   emit_readword((int)&next_interupt,EAX);
5181   emit_readword((int)&pending_exception,EBX);
5182   emit_writeword(EAX,(int)&last_count);
5183   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5184   emit_test(EBX,EBX);
5185   int jne_instr=(int)out;
5186   emit_jne(0);
5187   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5188   load_all_regs(branch_regs[i].regmap);
5189   emit_jmp(stubs[n][2]); // return address
5190   set_jump_target(jne_instr,(int)out);
5191   emit_readword((int)&pcaddr,EAX);
5192   // Call get_addr_ht instead of doing the hash table here.
5193   // This code is executed infrequently and takes up a lot of space
5194   // so smaller is better.
5195   emit_storereg(CCREG,HOST_CCREG);
5196   emit_pushreg(EAX);
5197   emit_call((int)get_addr_ht);
5198   emit_loadreg(CCREG,HOST_CCREG);
5199   emit_addimm(ESP,4,ESP);
5200   emit_jmpreg(EAX);*/
5201 }
5202
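// Record a jump that still has to be pointed at its final destination.
// 'addr' is the address of the emitted jump, 'target' the virtual PC it
// should reach; the third argument (despite its name) carries the
// internal_branch() result: non-zero means the target lies inside the block
// being compiled and can be patched directly, zero means an external jump
// stub is required.  Entries are resolved in the linker pass after the
// whole block has been assembled.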
5203 void add_to_linker(int addr,int target,int ext)
5204 {
5205   link_addr[linkcount][0]=addr;
5206   link_addr[linkcount][1]=target;
5207   link_addr[linkcount][2]=ext;  
5208   linkcount++;
5209 }
5210
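// Write the return address (the instruction after the delay slot,
// start+i*4+8) into the host register holding $ra.  With USE_MINI_HT the
// address may also be inserted into the mini hash table for the matching
// JR $ra.  Note: the REG_PREFETCH block below still references 'temp' and
// 'i_regmap', which are not declared in this helper, so it would not build
// if that option were enabled.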
5211 static void ujump_assemble_write_ra(int i)
5212 {
5213   int rt;
5214   unsigned int return_address;
5215   rt=get_reg(branch_regs[i].regmap,31);
5216   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5217   //assert(rt>=0);
5218   return_address=start+i*4+8;
5219   if(rt>=0) {
5220     #ifdef USE_MINI_HT
5221     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
5222       int temp=-1; // note: must be ds-safe
5223       #ifdef HOST_TEMPREG
5224       temp=HOST_TEMPREG;
5225       #endif
5226       if(temp>=0) do_miniht_insert(return_address,rt,temp);
5227       else emit_movimm(return_address,rt);
5228     }
5229     else
5230     #endif
5231     {
5232       #ifdef REG_PREFETCH
5233       if(temp>=0) 
5234       {
5235         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5236       }
5237       #endif
5238       emit_movimm(return_address,rt); // PC into link register
5239       #ifdef IMM_PREFETCH
5240       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5241       #endif
5242     }
5243   }
5244 }
5245
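// Assemble J/JAL.  The delay slot is emitted first, then the link register
// is written (unless that was already done above because the delay slot
// reads $ra), the cycle counter is checked via do_cc(), and the block
// either falls into the compiled delay-slot entry of an internal target or
// emits a jump that is fixed up later via add_to_linker().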
5246 void ujump_assemble(int i,struct regstat *i_regs)
5247 {
5248   signed char *i_regmap=i_regs->regmap;
5249   int ra_done=0;
5250   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5251   address_generation(i+1,i_regs,regs[i].regmap_entry);
5252   #ifdef REG_PREFETCH
5253   int temp=get_reg(branch_regs[i].regmap,PTEMP);
5254   if(rt1[i]==31&&temp>=0) 
5255   {
5256     int return_address=start+i*4+8;
5257     if(get_reg(branch_regs[i].regmap,31)>0) 
5258     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5259   }
5260   #endif
5261   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5262     ujump_assemble_write_ra(i); // writeback ra for DS
5263     ra_done=1;
5264   }
5265   ds_assemble(i+1,i_regs);
5266   uint64_t bc_unneeded=branch_regs[i].u;
5267   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5268   bc_unneeded|=1|(1LL<<rt1[i]);
5269   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5270   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5271                 bc_unneeded,bc_unneeded_upper);
5272   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5273   if(!ra_done&&rt1[i]==31)
5274     ujump_assemble_write_ra(i);
5275   int cc,adj;
5276   cc=get_reg(branch_regs[i].regmap,CCREG);
5277   assert(cc==HOST_CCREG);
5278   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5279   #ifdef REG_PREFETCH
5280   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5281   #endif
5282   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5283   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5284   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5285   if(internal_branch(branch_regs[i].is32,ba[i]))
5286     assem_debug("branch: internal\n");
5287   else
5288     assem_debug("branch: external\n");
5289   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5290     ds_assemble_entry(i);
5291   }
5292   else {
5293     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5294     emit_jmp(0);
5295   }
5296 }
5297
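// Same idea as ujump_assemble_write_ra, but for JALR: the return address
// goes into the jump's destination register (rt1[i]), which need not be
// $ra.  The REG_PREFETCH block has the same stale 'temp'/'i_regmap'
// references as the ujump variant.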
5298 static void rjump_assemble_write_ra(int i)
5299 {
5300   int rt,return_address;
5301   assert(rt1[i+1]!=rt1[i]);
5302   assert(rt2[i+1]!=rt1[i]);
5303   rt=get_reg(branch_regs[i].regmap,rt1[i]);
5304   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5305   assert(rt>=0);
5306   return_address=start+i*4+8;
5307   #ifdef REG_PREFETCH
5308   if(temp>=0) 
5309   {
5310     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5311   }
5312   #endif
5313   emit_movimm(return_address,rt); // PC into link register
5314   #ifdef IMM_PREFETCH
5315   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5316   #endif
5317 }
5318
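// Assemble JR/JALR.  If the delay slot writes the branch address register,
// the address is copied to RTEMP first.  Dispatch goes either through the
// mini hash table (JR $ra with USE_MINI_HT) or through jump_vaddr_reg[],
// and a CC_STUB is reached via the jns below once the cycle counter runs
// out.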
5319 void rjump_assemble(int i,struct regstat *i_regs)
5320 {
5321   signed char *i_regmap=i_regs->regmap;
5322   int temp;
5323   int rs,cc,adj;
5324   int ra_done=0;
5325   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5326   assert(rs>=0);
5327   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5328     // Delay slot abuse, make a copy of the branch address register
5329     temp=get_reg(branch_regs[i].regmap,RTEMP);
5330     assert(temp>=0);
5331     assert(regs[i].regmap[temp]==RTEMP);
5332     emit_mov(rs,temp);
5333     rs=temp;
5334   }
5335   address_generation(i+1,i_regs,regs[i].regmap_entry);
5336   #ifdef REG_PREFETCH
5337   if(rt1[i]==31) 
5338   {
5339     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5340       int return_address=start+i*4+8;
5341       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5342     }
5343   }
5344   #endif
5345   #ifdef USE_MINI_HT
5346   if(rs1[i]==31) {
5347     int rh=get_reg(regs[i].regmap,RHASH);
5348     if(rh>=0) do_preload_rhash(rh);
5349   }
5350   #endif
5351   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5352     rjump_assemble_write_ra(i);
5353     ra_done=1;
5354   }
5355   ds_assemble(i+1,i_regs);
5356   uint64_t bc_unneeded=branch_regs[i].u;
5357   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5358   bc_unneeded|=1|(1LL<<rt1[i]);
5359   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5360   bc_unneeded&=~(1LL<<rs1[i]);
5361   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5362                 bc_unneeded,bc_unneeded_upper);
5363   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5364   if(!ra_done&&rt1[i]!=0)
5365     rjump_assemble_write_ra(i);
5366   cc=get_reg(branch_regs[i].regmap,CCREG);
5367   assert(cc==HOST_CCREG);
5368   #ifdef USE_MINI_HT
5369   int rh=get_reg(branch_regs[i].regmap,RHASH);
5370   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5371   if(rs1[i]==31) {
5372     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5373     do_preload_rhtbl(ht);
5374     do_rhash(rs,rh);
5375   }
5376   #endif
5377   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5378   #ifdef DESTRUCTIVE_WRITEBACK
5379   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5380     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5381       emit_loadreg(rs1[i],rs);
5382     }
5383   }
5384   #endif
5385   #ifdef REG_PREFETCH
5386   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5387   #endif
5388   #ifdef USE_MINI_HT
5389   if(rs1[i]==31) {
5390     do_miniht_load(ht,rh);
5391   }
5392   #endif
5393   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5394   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5395   //assert(adj==0);
5396   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5397   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5398   emit_jns(0);
5399   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5400   #ifdef USE_MINI_HT
5401   if(rs1[i]==31) {
5402     do_miniht_jump(rs,rh,ht);
5403   }
5404   else
5405   #endif
5406   {
5407     //if(rs!=EAX) emit_mov(rs,EAX);
5408     //emit_jmp((int)jump_vaddr_eax);
5409     emit_jmp(jump_vaddr_reg[rs]);
5410   }
5411   /* Check hash table
5412   temp=!rs;
5413   emit_mov(rs,temp);
5414   emit_shrimm(rs,16,rs);
5415   emit_xor(temp,rs,rs);
5416   emit_movzwl_reg(rs,rs);
5417   emit_shlimm(rs,4,rs);
5418   emit_cmpmem_indexed((int)hash_table,rs,temp);
5419   emit_jne((int)out+14);
5420   emit_readword_indexed((int)hash_table+4,rs,rs);
5421   emit_jmpreg(rs);
5422   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5423   emit_addimm_no_flags(8,rs);
5424   emit_jeq((int)out-17);
5425   // No hit on hash table, call compiler
5426   emit_pushreg(temp);
5427 //DEBUG >
5428 #ifdef DEBUG_CYCLE_COUNT
5429   emit_readword((int)&last_count,ECX);
5430   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5431   emit_readword((int)&next_interupt,ECX);
5432   emit_writeword(HOST_CCREG,(int)&Count);
5433   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5434   emit_writeword(ECX,(int)&last_count);
5435 #endif
5436 //DEBUG <
5437   emit_storereg(CCREG,HOST_CCREG);
5438   emit_call((int)get_addr);
5439   emit_loadreg(CCREG,HOST_CCREG);
5440   emit_addimm(ESP,4,ESP);
5441   emit_jmpreg(EAX);*/
5442   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5443   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5444   #endif
5445 }
5446
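// Assemble BEQ/BNE/BLEZ/BGTZ and their "likely" forms.  Two code paths:
// out-of-order (ooo[i]: the delay slot is emitted before the compare, which
// is possible roughly when the slot does not overwrite the compared
// registers) and in-order (compare first, then the delay slot duplicated on
// the taken and not-taken paths).  'invert' flips the branch sense when the
// register mapping at the target does not match, so the taken path falls
// through into writeback/reload code before jumping.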
5447 void cjump_assemble(int i,struct regstat *i_regs)
5448 {
5449   signed char *i_regmap=i_regs->regmap;
5450   int cc;
5451   int match;
5452   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5453   assem_debug("match=%d\n",match);
5454   int s1h,s1l,s2h,s2l;
5455   int prev_cop1_usable=cop1_usable;
5456   int unconditional=0,nop=0;
5457   int only32=0;
5458   int invert=0;
5459   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5460   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5461   if(!match) invert=1;
5462   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5463   if(i>(ba[i]-start)>>2) invert=1;
5464   #endif
5465   
5466   if(ooo[i]) {
5467     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5468     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5469     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5470     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5471   }
5472   else {
5473     s1l=get_reg(i_regmap,rs1[i]);
5474     s1h=get_reg(i_regmap,rs1[i]|64);
5475     s2l=get_reg(i_regmap,rs2[i]);
5476     s2h=get_reg(i_regmap,rs2[i]|64);
5477   }
5478   if(rs1[i]==0&&rs2[i]==0)
5479   {
5480     if(opcode[i]&1) nop=1;
5481     else unconditional=1;
5482     //assert(opcode[i]!=5);
5483     //assert(opcode[i]!=7);
5484     //assert(opcode[i]!=0x15);
5485     //assert(opcode[i]!=0x17);
5486   }
5487   else if(rs1[i]==0)
5488   {
5489     s1l=s2l;s1h=s2h;
5490     s2l=s2h=-1;
5491     only32=(regs[i].was32>>rs2[i])&1;
5492   }
5493   else if(rs2[i]==0)
5494   {
5495     s2l=s2h=-1;
5496     only32=(regs[i].was32>>rs1[i])&1;
5497   }
5498   else {
5499     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5500   }
5501
5502   if(ooo[i]) {
5503     // Out of order execution (delay slot first)
5504     //printf("OOOE\n");
5505     address_generation(i+1,i_regs,regs[i].regmap_entry);
5506     ds_assemble(i+1,i_regs);
5507     int adj;
5508     uint64_t bc_unneeded=branch_regs[i].u;
5509     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5510     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5511     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5512     bc_unneeded|=1;
5513     bc_unneeded_upper|=1;
5514     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5515                   bc_unneeded,bc_unneeded_upper);
5516     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5517     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5518     cc=get_reg(branch_regs[i].regmap,CCREG);
5519     assert(cc==HOST_CCREG);
5520     if(unconditional) 
5521       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5522     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5523     //assem_debug("cycle count (adj)\n");
5524     if(unconditional) {
5525       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5526       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5527         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5528         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5529         if(internal)
5530           assem_debug("branch: internal\n");
5531         else
5532           assem_debug("branch: external\n");
5533         if(internal&&is_ds[(ba[i]-start)>>2]) {
5534           ds_assemble_entry(i);
5535         }
5536         else {
5537           add_to_linker((int)out,ba[i],internal);
5538           emit_jmp(0);
5539         }
5540         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5541         if(((u_int)out)&7) emit_addnop(0);
5542         #endif
5543       }
5544     }
5545     else if(nop) {
5546       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5547       int jaddr=(int)out;
5548       emit_jns(0);
5549       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5550     }
5551     else {
5552       int taken=0,nottaken=0,nottaken1=0;
5553       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5554       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5555       if(!only32)
5556       {
5557         assert(s1h>=0);
5558         if(opcode[i]==4) // BEQ
5559         {
5560           if(s2h>=0) emit_cmp(s1h,s2h);
5561           else emit_test(s1h,s1h);
5562           nottaken1=(int)out;
5563           emit_jne(1);
5564         }
5565         if(opcode[i]==5) // BNE
5566         {
5567           if(s2h>=0) emit_cmp(s1h,s2h);
5568           else emit_test(s1h,s1h);
5569           if(invert) taken=(int)out;
5570           else add_to_linker((int)out,ba[i],internal);
5571           emit_jne(0);
5572         }
5573         if(opcode[i]==6) // BLEZ
5574         {
5575           emit_test(s1h,s1h);
5576           if(invert) taken=(int)out;
5577           else add_to_linker((int)out,ba[i],internal);
5578           emit_js(0);
5579           nottaken1=(int)out;
5580           emit_jne(1);
5581         }
5582         if(opcode[i]==7) // BGTZ
5583         {
5584           emit_test(s1h,s1h);
5585           nottaken1=(int)out;
5586           emit_js(1);
5587           if(invert) taken=(int)out;
5588           else add_to_linker((int)out,ba[i],internal);
5589           emit_jne(0);
5590         }
5591       } // if(!only32)
5592           
5593       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5594       assert(s1l>=0);
5595       if(opcode[i]==4) // BEQ
5596       {
5597         if(s2l>=0) emit_cmp(s1l,s2l);
5598         else emit_test(s1l,s1l);
5599         if(invert){
5600           nottaken=(int)out;
5601           emit_jne(1);
5602         }else{
5603           add_to_linker((int)out,ba[i],internal);
5604           emit_jeq(0);
5605         }
5606       }
5607       if(opcode[i]==5) // BNE
5608       {
5609         if(s2l>=0) emit_cmp(s1l,s2l);
5610         else emit_test(s1l,s1l);
5611         if(invert){
5612           nottaken=(int)out;
5613           emit_jeq(1);
5614         }else{
5615           add_to_linker((int)out,ba[i],internal);
5616           emit_jne(0);
5617         }
5618       }
5619       if(opcode[i]==6) // BLEZ
5620       {
5621         emit_cmpimm(s1l,1);
5622         if(invert){
5623           nottaken=(int)out;
5624           emit_jge(1);
5625         }else{
5626           add_to_linker((int)out,ba[i],internal);
5627           emit_jl(0);
5628         }
5629       }
5630       if(opcode[i]==7) // BGTZ
5631       {
5632         emit_cmpimm(s1l,1);
5633         if(invert){
5634           nottaken=(int)out;
5635           emit_jl(1);
5636         }else{
5637           add_to_linker((int)out,ba[i],internal);
5638           emit_jge(0);
5639         }
5640       }
5641       if(invert) {
5642         if(taken) set_jump_target(taken,(int)out);
5643         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5644         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5645           if(adj) {
5646             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5647             add_to_linker((int)out,ba[i],internal);
5648           }else{
5649             emit_addnop(13);
5650             add_to_linker((int)out,ba[i],internal*2);
5651           }
5652           emit_jmp(0);
5653         }else
5654         #endif
5655         {
5656           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5657           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5658           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5659           if(internal)
5660             assem_debug("branch: internal\n");
5661           else
5662             assem_debug("branch: external\n");
5663           if(internal&&is_ds[(ba[i]-start)>>2]) {
5664             ds_assemble_entry(i);
5665           }
5666           else {
5667             add_to_linker((int)out,ba[i],internal);
5668             emit_jmp(0);
5669           }
5670         }
5671         set_jump_target(nottaken,(int)out);
5672       }
5673
5674       if(nottaken1) set_jump_target(nottaken1,(int)out);
5675       if(adj) {
5676         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5677       }
5678     } // (!unconditional)
5679   } // if(ooo)
5680   else
5681   {
5682     // In-order execution (branch first)
5683     //if(likely[i]) printf("IOL\n");
5684     //else
5685     //printf("IOE\n");
5686     int taken=0,nottaken=0,nottaken1=0;
5687     if(!unconditional&&!nop) {
5688       if(!only32)
5689       {
5690         assert(s1h>=0);
5691         if((opcode[i]&0x2f)==4) // BEQ
5692         {
5693           if(s2h>=0) emit_cmp(s1h,s2h);
5694           else emit_test(s1h,s1h);
5695           nottaken1=(int)out;
5696           emit_jne(2);
5697         }
5698         if((opcode[i]&0x2f)==5) // BNE
5699         {
5700           if(s2h>=0) emit_cmp(s1h,s2h);
5701           else emit_test(s1h,s1h);
5702           taken=(int)out;
5703           emit_jne(1);
5704         }
5705         if((opcode[i]&0x2f)==6) // BLEZ
5706         {
5707           emit_test(s1h,s1h);
5708           taken=(int)out;
5709           emit_js(1);
5710           nottaken1=(int)out;
5711           emit_jne(2);
5712         }
5713         if((opcode[i]&0x2f)==7) // BGTZ
5714         {
5715           emit_test(s1h,s1h);
5716           nottaken1=(int)out;
5717           emit_js(2);
5718           taken=(int)out;
5719           emit_jne(1);
5720         }
5721       } // if(!only32)
5722           
5723       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5724       assert(s1l>=0);
5725       if((opcode[i]&0x2f)==4) // BEQ
5726       {
5727         if(s2l>=0) emit_cmp(s1l,s2l);
5728         else emit_test(s1l,s1l);
5729         nottaken=(int)out;
5730         emit_jne(2);
5731       }
5732       if((opcode[i]&0x2f)==5) // BNE
5733       {
5734         if(s2l>=0) emit_cmp(s1l,s2l);
5735         else emit_test(s1l,s1l);
5736         nottaken=(int)out;
5737         emit_jeq(2);
5738       }
5739       if((opcode[i]&0x2f)==6) // BLEZ
5740       {
5741         emit_cmpimm(s1l,1);
5742         nottaken=(int)out;
5743         emit_jge(2);
5744       }
5745       if((opcode[i]&0x2f)==7) // BGTZ
5746       {
5747         emit_cmpimm(s1l,1);
5748         nottaken=(int)out;
5749         emit_jl(2);
5750       }
5751     } // if(!unconditional)
5752     int adj;
5753     uint64_t ds_unneeded=branch_regs[i].u;
5754     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5755     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5756     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5757     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5758     ds_unneeded|=1;
5759     ds_unneeded_upper|=1;
5760     // branch taken
5761     if(!nop) {
5762       if(taken) set_jump_target(taken,(int)out);
5763       assem_debug("1:\n");
5764       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5765                     ds_unneeded,ds_unneeded_upper);
5766       // load regs
5767       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5768       address_generation(i+1,&branch_regs[i],0);
5769       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5770       ds_assemble(i+1,&branch_regs[i]);
5771       cc=get_reg(branch_regs[i].regmap,CCREG);
5772       if(cc==-1) {
5773         emit_loadreg(CCREG,cc=HOST_CCREG);
5774         // CHECK: Is the following instruction (fall thru) allocated ok?
5775       }
5776       assert(cc==HOST_CCREG);
5777       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5778       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5779       assem_debug("cycle count (adj)\n");
5780       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5781       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5782       if(internal)
5783         assem_debug("branch: internal\n");
5784       else
5785         assem_debug("branch: external\n");
5786       if(internal&&is_ds[(ba[i]-start)>>2]) {
5787         ds_assemble_entry(i);
5788       }
5789       else {
5790         add_to_linker((int)out,ba[i],internal);
5791         emit_jmp(0);
5792       }
5793     }
5794     // branch not taken
5795     cop1_usable=prev_cop1_usable;
5796     if(!unconditional) {
5797       if(nottaken1) set_jump_target(nottaken1,(int)out);
5798       set_jump_target(nottaken,(int)out);
5799       assem_debug("2:\n");
5800       if(!likely[i]) {
5801         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5802                       ds_unneeded,ds_unneeded_upper);
5803         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5804         address_generation(i+1,&branch_regs[i],0);
5805         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5806         ds_assemble(i+1,&branch_regs[i]);
5807       }
5808       cc=get_reg(branch_regs[i].regmap,CCREG);
5809       if(cc==-1&&!likely[i]) {
5810         // Cycle count isn't in a register, temporarily load it then write it out
5811         emit_loadreg(CCREG,HOST_CCREG);
5812         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5813         int jaddr=(int)out;
5814         emit_jns(0);
5815         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5816         emit_storereg(CCREG,HOST_CCREG);
5817       }
5818       else{
5819         cc=get_reg(i_regmap,CCREG);
5820         assert(cc==HOST_CCREG);
5821         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5822         int jaddr=(int)out;
5823         emit_jns(0);
5824         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5825       }
5826     }
5827   }
5828 }
5829
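// Assemble BLTZ/BGEZ/BLTZAL/BGEZAL and the "likely" variants (opcode 1,
// REGIMM).  Structure mirrors cjump_assemble; the -AL forms also write the
// return address into $31 regardless of whether the branch is taken, as
// MIPS requires.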
5830 void sjump_assemble(int i,struct regstat *i_regs)
5831 {
5832   signed char *i_regmap=i_regs->regmap;
5833   int cc;
5834   int match;
5835   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5836   assem_debug("smatch=%d\n",match);
5837   int s1h,s1l;
5838   int prev_cop1_usable=cop1_usable;
5839   int unconditional=0,nevertaken=0;
5840   int only32=0;
5841   int invert=0;
5842   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5843   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5844   if(!match) invert=1;
5845   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5846   if(i>(ba[i]-start)>>2) invert=1;
5847   #endif
5848
5849   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5850   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5851
5852   if(ooo[i]) {
5853     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5854     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5855   }
5856   else {
5857     s1l=get_reg(i_regmap,rs1[i]);
5858     s1h=get_reg(i_regmap,rs1[i]|64);
5859   }
5860   if(rs1[i]==0)
5861   {
5862     if(opcode2[i]&1) unconditional=1;
5863     else nevertaken=1;
5864     // These are never taken (r0 is never less than zero)
5865     //assert(opcode2[i]!=0);
5866     //assert(opcode2[i]!=2);
5867     //assert(opcode2[i]!=0x10);
5868     //assert(opcode2[i]!=0x12);
5869   }
5870   else {
5871     only32=(regs[i].was32>>rs1[i])&1;
5872   }
5873
5874   if(ooo[i]) {
5875     // Out of order execution (delay slot first)
5876     //printf("OOOE\n");
5877     address_generation(i+1,i_regs,regs[i].regmap_entry);
5878     ds_assemble(i+1,i_regs);
5879     int adj;
5880     uint64_t bc_unneeded=branch_regs[i].u;
5881     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5882     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5883     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5884     bc_unneeded|=1;
5885     bc_unneeded_upper|=1;
5886     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5887                   bc_unneeded,bc_unneeded_upper);
5888     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5889     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5890     if(rt1[i]==31) {
5891       int rt,return_address;
5892       rt=get_reg(branch_regs[i].regmap,31);
5893       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5894       if(rt>=0) {
5895         // Save the PC even if the branch is not taken
5896         return_address=start+i*4+8;
5897         emit_movimm(return_address,rt); // PC into link register
5898         #ifdef IMM_PREFETCH
5899         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5900         #endif
5901       }
5902     }
5903     cc=get_reg(branch_regs[i].regmap,CCREG);
5904     assert(cc==HOST_CCREG);
5905     if(unconditional) 
5906       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5907     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5908     assem_debug("cycle count (adj)\n");
5909     if(unconditional) {
5910       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5911       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5912         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5913         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5914         if(internal)
5915           assem_debug("branch: internal\n");
5916         else
5917           assem_debug("branch: external\n");
5918         if(internal&&is_ds[(ba[i]-start)>>2]) {
5919           ds_assemble_entry(i);
5920         }
5921         else {
5922           add_to_linker((int)out,ba[i],internal);
5923           emit_jmp(0);
5924         }
5925         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5926         if(((u_int)out)&7) emit_addnop(0);
5927         #endif
5928       }
5929     }
5930     else if(nevertaken) {
5931       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5932       int jaddr=(int)out;
5933       emit_jns(0);
5934       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5935     }
5936     else {
5937       int nottaken=0;
5938       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5939       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5940       if(!only32)
5941       {
5942         assert(s1h>=0);
5943         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5944         {
5945           emit_test(s1h,s1h);
5946           if(invert){
5947             nottaken=(int)out;
5948             emit_jns(1);
5949           }else{
5950             add_to_linker((int)out,ba[i],internal);
5951             emit_js(0);
5952           }
5953         }
5954         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5955         {
5956           emit_test(s1h,s1h);
5957           if(invert){
5958             nottaken=(int)out;
5959             emit_js(1);
5960           }else{
5961             add_to_linker((int)out,ba[i],internal);
5962             emit_jns(0);
5963           }
5964         }
5965       } // if(!only32)
5966       else
5967       {
5968         assert(s1l>=0);
5969         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5970         {
5971           emit_test(s1l,s1l);
5972           if(invert){
5973             nottaken=(int)out;
5974             emit_jns(1);
5975           }else{
5976             add_to_linker((int)out,ba[i],internal);
5977             emit_js(0);
5978           }
5979         }
5980         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5981         {
5982           emit_test(s1l,s1l);
5983           if(invert){
5984             nottaken=(int)out;
5985             emit_js(1);
5986           }else{
5987             add_to_linker((int)out,ba[i],internal);
5988             emit_jns(0);
5989           }
5990         }
5991       } // if(!only32)
5992           
5993       if(invert) {
5994         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5995         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5996           if(adj) {
5997             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5998             add_to_linker((int)out,ba[i],internal);
5999           }else{
6000             emit_addnop(13);
6001             add_to_linker((int)out,ba[i],internal*2);
6002           }
6003           emit_jmp(0);
6004         }else
6005         #endif
6006         {
6007           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6008           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6009           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6010           if(internal)
6011             assem_debug("branch: internal\n");
6012           else
6013             assem_debug("branch: external\n");
6014           if(internal&&is_ds[(ba[i]-start)>>2]) {
6015             ds_assemble_entry(i);
6016           }
6017           else {
6018             add_to_linker((int)out,ba[i],internal);
6019             emit_jmp(0);
6020           }
6021         }
6022         set_jump_target(nottaken,(int)out);
6023       }
6024
6025       if(adj) {
6026         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6027       }
6028     } // (!unconditional)
6029   } // if(ooo)
6030   else
6031   {
6032     // In-order execution (branch first)
6033     //printf("IOE\n");
6034     int nottaken=0;
6035     if(rt1[i]==31) {
6036       int rt,return_address;
6037       rt=get_reg(branch_regs[i].regmap,31);
6038       if(rt>=0) {
6039         // Save the PC even if the branch is not taken
6040         return_address=start+i*4+8;
6041         emit_movimm(return_address,rt); // PC into link register
6042         #ifdef IMM_PREFETCH
6043         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
6044         #endif
6045       }
6046     }
6047     if(!unconditional) {
6048       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6049       if(!only32)
6050       {
6051         assert(s1h>=0);
6052         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6053         {
6054           emit_test(s1h,s1h);
6055           nottaken=(int)out;
6056           emit_jns(1);
6057         }
6058         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6059         {
6060           emit_test(s1h,s1h);
6061           nottaken=(int)out;
6062           emit_js(1);
6063         }
6064       } // if(!only32)
6065       else
6066       {
6067         assert(s1l>=0);
6068         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6069         {
6070           emit_test(s1l,s1l);
6071           nottaken=(int)out;
6072           emit_jns(1);
6073         }
6074         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6075         {
6076           emit_test(s1l,s1l);
6077           nottaken=(int)out;
6078           emit_js(1);
6079         }
6080       }
6081     } // if(!unconditional)
6082     int adj;
6083     uint64_t ds_unneeded=branch_regs[i].u;
6084     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6085     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6086     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6087     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6088     ds_unneeded|=1;
6089     ds_unneeded_upper|=1;
6090     // branch taken
6091     if(!nevertaken) {
6092       //assem_debug("1:\n");
6093       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6094                     ds_unneeded,ds_unneeded_upper);
6095       // load regs
6096       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6097       address_generation(i+1,&branch_regs[i],0);
6098       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6099       ds_assemble(i+1,&branch_regs[i]);
6100       cc=get_reg(branch_regs[i].regmap,CCREG);
6101       if(cc==-1) {
6102         emit_loadreg(CCREG,cc=HOST_CCREG);
6103         // CHECK: Is the following instruction (fall thru) allocated ok?
6104       }
6105       assert(cc==HOST_CCREG);
6106       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6107       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6108       assem_debug("cycle count (adj)\n");
6109       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6110       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6111       if(internal)
6112         assem_debug("branch: internal\n");
6113       else
6114         assem_debug("branch: external\n");
6115       if(internal&&is_ds[(ba[i]-start)>>2]) {
6116         ds_assemble_entry(i);
6117       }
6118       else {
6119         add_to_linker((int)out,ba[i],internal);
6120         emit_jmp(0);
6121       }
6122     }
6123     // branch not taken
6124     cop1_usable=prev_cop1_usable;
6125     if(!unconditional) {
6126       set_jump_target(nottaken,(int)out);
6127       assem_debug("1:\n");
6128       if(!likely[i]) {
6129         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6130                       ds_unneeded,ds_unneeded_upper);
6131         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6132         address_generation(i+1,&branch_regs[i],0);
6133         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6134         ds_assemble(i+1,&branch_regs[i]);
6135       }
6136       cc=get_reg(branch_regs[i].regmap,CCREG);
6137       if(cc==-1&&!likely[i]) {
6138         // Cycle count isn't in a register, temporarily load it then write it out
6139         emit_loadreg(CCREG,HOST_CCREG);
6140         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6141         int jaddr=(int)out;
6142         emit_jns(0);
6143         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6144         emit_storereg(CCREG,HOST_CCREG);
6145       }
6146       else{
6147         cc=get_reg(i_regmap,CCREG);
6148         assert(cc==HOST_CCREG);
6149         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6150         int jaddr=(int)out;
6151         emit_jns(0);
6152         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6153       }
6154     }
6155   }
6156 }
6157
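// Assemble BC1F/BC1T and the "likely" forms: branch on the FP condition
// bit (mask 0x800000) held in the FSREG mapping.  A coprocessor-unusable
// check against CSREG is emitted first if COP1 has not yet been proven
// usable in this block.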
6158 void fjump_assemble(int i,struct regstat *i_regs)
6159 {
6160   signed char *i_regmap=i_regs->regmap;
6161   int cc;
6162   int match;
6163   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6164   assem_debug("fmatch=%d\n",match);
6165   int fs,cs;
6166   int eaddr;
6167   int invert=0;
6168   int internal=internal_branch(branch_regs[i].is32,ba[i]);
6169   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
6170   if(!match) invert=1;
6171   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6172   if(i>(ba[i]-start)>>2) invert=1;
6173   #endif
6174
6175   if(ooo[i]) {
6176     fs=get_reg(branch_regs[i].regmap,FSREG);
6177     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6178   }
6179   else {
6180     fs=get_reg(i_regmap,FSREG);
6181   }
6182
6183   // Check cop1 unusable
6184   if(!cop1_usable) {
6185     cs=get_reg(i_regmap,CSREG);
6186     assert(cs>=0);
6187     emit_testimm(cs,0x20000000);
6188     eaddr=(int)out;
6189     emit_jeq(0);
6190     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6191     cop1_usable=1;
6192   }
6193
6194   if(ooo[i]) {
6195     // Out of order execution (delay slot first)
6196     //printf("OOOE\n");
6197     ds_assemble(i+1,i_regs);
6198     int adj;
6199     uint64_t bc_unneeded=branch_regs[i].u;
6200     uint64_t bc_unneeded_upper=branch_regs[i].uu;
6201     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6202     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6203     bc_unneeded|=1;
6204     bc_unneeded_upper|=1;
6205     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6206                   bc_unneeded,bc_unneeded_upper);
6207     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6208     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6209     cc=get_reg(branch_regs[i].regmap,CCREG);
6210     assert(cc==HOST_CCREG);
6211     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6212     assem_debug("cycle count (adj)\n");
6213     if(1) {
6214       int nottaken=0;
6215       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6216       if(1) {
6217         assert(fs>=0);
6218         emit_testimm(fs,0x800000);
6219         if(source[i]&0x10000) // BC1T
6220         {
6221           if(invert){
6222             nottaken=(int)out;
6223             emit_jeq(1);
6224           }else{
6225             add_to_linker((int)out,ba[i],internal);
6226             emit_jne(0);
6227           }
6228         }
6229         else // BC1F
6230         {
6231           if(invert){
6232             nottaken=(int)out;
6233             emit_jne(1);
6234           }else{
6235             add_to_linker((int)out,ba[i],internal);
6236             emit_jeq(0);
6237           }
6238         }
6239       } // if(!only32)
6240           
6241       if(invert) {
6242         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6243         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6244         else if(match) emit_addnop(13);
6245         #endif
6246         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6247         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6248         if(internal)
6249           assem_debug("branch: internal\n");
6250         else
6251           assem_debug("branch: external\n");
6252         if(internal&&is_ds[(ba[i]-start)>>2]) {
6253           ds_assemble_entry(i);
6254         }
6255         else {
6256           add_to_linker((int)out,ba[i],internal);
6257           emit_jmp(0);
6258         }
6259         set_jump_target(nottaken,(int)out);
6260       }
6261
6262       if(adj) {
6263         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6264       }
6265     } // (!unconditional)
6266   } // if(ooo)
6267   else
6268   {
6269     // In-order execution (branch first)
6270     //printf("IOE\n");
6271     int nottaken=0;
6272     if(1) {
6273       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6274       if(1) {
6275         assert(fs>=0);
6276         emit_testimm(fs,0x800000);
6277         if(source[i]&0x10000) // BC1T
6278         {
6279           nottaken=(int)out;
6280           emit_jeq(1);
6281         }
6282         else // BC1F
6283         {
6284           nottaken=(int)out;
6285           emit_jne(1);
6286         }
6287       }
6288     } // if(!unconditional)
6289     int adj;
6290     uint64_t ds_unneeded=branch_regs[i].u;
6291     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6292     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6293     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6294     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6295     ds_unneeded|=1;
6296     ds_unneeded_upper|=1;
6297     // branch taken
6298     //assem_debug("1:\n");
6299     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6300                   ds_unneeded,ds_unneeded_upper);
6301     // load regs
6302     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6303     address_generation(i+1,&branch_regs[i],0);
6304     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6305     ds_assemble(i+1,&branch_regs[i]);
6306     cc=get_reg(branch_regs[i].regmap,CCREG);
6307     if(cc==-1) {
6308       emit_loadreg(CCREG,cc=HOST_CCREG);
6309       // CHECK: Is the following instruction (fall thru) allocated ok?
6310     }
6311     assert(cc==HOST_CCREG);
6312     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6313     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6314     assem_debug("cycle count (adj)\n");
6315     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6316     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6317     if(internal)
6318       assem_debug("branch: internal\n");
6319     else
6320       assem_debug("branch: external\n");
6321     if(internal&&is_ds[(ba[i]-start)>>2]) {
6322       ds_assemble_entry(i);
6323     }
6324     else {
6325       add_to_linker((int)out,ba[i],internal);
6326       emit_jmp(0);
6327     }
6328
6329     // branch not taken
6330     if(1) { // <- FIXME (don't need this)
6331       set_jump_target(nottaken,(int)out);
6332       assem_debug("1:\n");
6333       if(!likely[i]) {
6334         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6335                       ds_unneeded,ds_unneeded_upper);
6336         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6337         address_generation(i+1,&branch_regs[i],0);
6338         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6339         ds_assemble(i+1,&branch_regs[i]);
6340       }
6341       cc=get_reg(branch_regs[i].regmap,CCREG);
6342       if(cc==-1&&!likely[i]) {
6343         // Cycle count isn't in a register, temporarily load it then write it out
6344         emit_loadreg(CCREG,HOST_CCREG);
6345         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6346         int jaddr=(int)out;
6347         emit_jns(0);
6348         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6349         emit_storereg(CCREG,HOST_CCREG);
6350       }
6351       else{
6352         cc=get_reg(i_regmap,CCREG);
6353         assert(cc==HOST_CCREG);
6354         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6355         int jaddr=(int)out;
6356         emit_jns(0);
6357         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6358       }
6359     }
6360   }
6361 }
6362
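// Assemble a branch that ends a virtual page, i.e. whose delay slot lies on
// the next page.  The branch target is left in HOST_BTREG and control
// leaves through an external jump to the delay-slot address, which is
// compiled as the entry point of the next block (see pagespan_ds below).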
6363 static void pagespan_assemble(int i,struct regstat *i_regs)
6364 {
6365   int s1l=get_reg(i_regs->regmap,rs1[i]);
6366   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6367   int s2l=get_reg(i_regs->regmap,rs2[i]);
6368   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6369   void *nt_branch=NULL;
6370   int taken=0;
6371   int nottaken=0;
6372   int unconditional=0;
6373   if(rs1[i]==0)
6374   {
6375     s1l=s2l;s1h=s2h;
6376     s2l=s2h=-1;
6377   }
6378   else if(rs2[i]==0)
6379   {
6380     s2l=s2h=-1;
6381   }
6382   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6383     s1h=s2h=-1;
6384   }
6385   int hr=0;
6386   int addr,alt,ntaddr;
6387   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6388   else {
6389     while(hr<HOST_REGS)
6390     {
6391       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6392          (i_regs->regmap[hr]&63)!=rs1[i] &&
6393          (i_regs->regmap[hr]&63)!=rs2[i] )
6394       {
6395         addr=hr++;break;
6396       }
6397       hr++;
6398     }
6399   }
6400   while(hr<HOST_REGS)
6401   {
6402     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6403        (i_regs->regmap[hr]&63)!=rs1[i] &&
6404        (i_regs->regmap[hr]&63)!=rs2[i] )
6405     {
6406       alt=hr++;break;
6407     }
6408     hr++;
6409   }
6410   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6411   {
6412     while(hr<HOST_REGS)
6413     {
6414       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6415          (i_regs->regmap[hr]&63)!=rs1[i] &&
6416          (i_regs->regmap[hr]&63)!=rs2[i] )
6417       {
6418         ntaddr=hr;break;
6419       }
6420       hr++;
6421     }
6422   }
6423   assert(hr<HOST_REGS);
6424   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6425     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6426   }
6427   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6428   if(opcode[i]==2) // J
6429   {
6430     unconditional=1;
6431   }
6432   if(opcode[i]==3) // JAL
6433   {
6434     // TODO: mini_ht
6435     int rt=get_reg(i_regs->regmap,31);
6436     emit_movimm(start+i*4+8,rt);
6437     unconditional=1;
6438   }
6439   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6440   {
6441     emit_mov(s1l,addr);
6442     if(opcode2[i]==9) // JALR
6443     {
6444       int rt=get_reg(i_regs->regmap,rt1[i]);
6445       emit_movimm(start+i*4+8,rt);
6446     }
6447   }
6448   if((opcode[i]&0x3f)==4) // BEQ
6449   {
6450     if(rs1[i]==rs2[i])
6451     {
6452       unconditional=1;
6453     }
6454     else
6455     #ifdef HAVE_CMOV_IMM
6456     if(s1h<0) {
6457       if(s2l>=0) emit_cmp(s1l,s2l);
6458       else emit_test(s1l,s1l);
6459       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6460     }
6461     else
6462     #endif
6463     {
6464       assert(s1l>=0);
6465       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6466       if(s1h>=0) {
6467         if(s2h>=0) emit_cmp(s1h,s2h);
6468         else emit_test(s1h,s1h);
6469         emit_cmovne_reg(alt,addr);
6470       }
6471       if(s2l>=0) emit_cmp(s1l,s2l);
6472       else emit_test(s1l,s1l);
6473       emit_cmovne_reg(alt,addr);
6474     }
6475   }
6476   if((opcode[i]&0x3f)==5) // BNE
6477   {
6478     #ifdef HAVE_CMOV_IMM
6479     if(s1h<0) {
6480       if(s2l>=0) emit_cmp(s1l,s2l);
6481       else emit_test(s1l,s1l);
6482       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6483     }
6484     else
6485     #endif
6486     {
6487       assert(s1l>=0);
6488       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6489       if(s1h>=0) {
6490         if(s2h>=0) emit_cmp(s1h,s2h);
6491         else emit_test(s1h,s1h);
6492         emit_cmovne_reg(alt,addr);
6493       }
6494       if(s2l>=0) emit_cmp(s1l,s2l);
6495       else emit_test(s1l,s1l);
6496       emit_cmovne_reg(alt,addr);
6497     }
6498   }
6499   if((opcode[i]&0x3f)==0x14) // BEQL
6500   {
6501     if(s1h>=0) {
6502       if(s2h>=0) emit_cmp(s1h,s2h);
6503       else emit_test(s1h,s1h);
6504       nottaken=(int)out;
6505       emit_jne(0);
6506     }
6507     if(s2l>=0) emit_cmp(s1l,s2l);
6508     else emit_test(s1l,s1l);
6509     if(nottaken) set_jump_target(nottaken,(int)out);
6510     nottaken=(int)out;
6511     emit_jne(0);
6512   }
6513   if((opcode[i]&0x3f)==0x15) // BNEL
6514   {
6515     if(s1h>=0) {
6516       if(s2h>=0) emit_cmp(s1h,s2h);
6517       else emit_test(s1h,s1h);
6518       taken=(int)out;
6519       emit_jne(0);
6520     }
6521     if(s2l>=0) emit_cmp(s1l,s2l);
6522     else emit_test(s1l,s1l);
6523     nottaken=(int)out;
6524     emit_jeq(0);
6525     if(taken) set_jump_target(taken,(int)out);
6526   }
6527   if((opcode[i]&0x3f)==6) // BLEZ
6528   {
6529     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6530     emit_cmpimm(s1l,1);
6531     if(s1h>=0) emit_mov(addr,ntaddr);
6532     emit_cmovl_reg(alt,addr);
6533     if(s1h>=0) {
6534       emit_test(s1h,s1h);
6535       emit_cmovne_reg(ntaddr,addr);
6536       emit_cmovs_reg(alt,addr);
6537     }
6538   }
6539   if((opcode[i]&0x3f)==7) // BGTZ
6540   {
6541     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6542     emit_cmpimm(s1l,1);
6543     if(s1h>=0) emit_mov(addr,alt);
6544     emit_cmovl_reg(ntaddr,addr);
6545     if(s1h>=0) {
6546       emit_test(s1h,s1h);
6547       emit_cmovne_reg(alt,addr);
6548       emit_cmovs_reg(ntaddr,addr);
6549     }
6550   }
6551   if((opcode[i]&0x3f)==0x16) // BLEZL
6552   {
6553     assert((opcode[i]&0x3f)!=0x16);
6554   }
6555   if((opcode[i]&0x3f)==0x17) // BGTZL
6556   {
6557     assert((opcode[i]&0x3f)!=0x17);
6558   }
6559   assert(opcode[i]!=1); // BLTZ/BGEZ
6560
6561   //FIXME: Check CSREG
6562   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6563     if((source[i]&0x30000)==0) // BC1F
6564     {
6565       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6566       emit_testimm(s1l,0x800000);
6567       emit_cmovne_reg(alt,addr);
6568     }
6569     if((source[i]&0x30000)==0x10000) // BC1T
6570     {
6571       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6572       emit_testimm(s1l,0x800000);
6573       emit_cmovne_reg(alt,addr);
6574     }
6575     if((source[i]&0x30000)==0x20000) // BC1FL
6576     {
6577       emit_testimm(s1l,0x800000);
6578       nottaken=(int)out;
6579       emit_jne(0);
6580     }
6581     if((source[i]&0x30000)==0x30000) // BC1TL
6582     {
6583       emit_testimm(s1l,0x800000);
6584       nottaken=(int)out;
6585       emit_jeq(0);
6586     }
6587   }
6588
6589   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6590   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6591   if(likely[i]||unconditional)
6592   {
6593     emit_movimm(ba[i],HOST_BTREG);
6594   }
6595   else if(addr!=HOST_BTREG)
6596   {
6597     emit_mov(addr,HOST_BTREG);
6598   }
6599   void *branch_addr=out;
6600   emit_jmp(0);
6601   int target_addr=start+i*4+5;
6602   void *stub=out;
6603   void *compiled_target_addr=check_addr(target_addr);
6604   emit_extjump_ds((int)branch_addr,target_addr);
6605   if(compiled_target_addr) {
6606     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6607     add_link(target_addr,stub);
6608   }
6609   else set_jump_target((int)branch_addr,(int)stub);
6610   if(likely[i]) {
6611     // Not-taken path
6612     set_jump_target((int)nottaken,(int)out);
6613     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6614     void *branch_addr=out;
6615     emit_jmp(0);
6616     int target_addr=start+i*4+8;
6617     void *stub=out;
6618     void *compiled_target_addr=check_addr(target_addr);
6619     emit_extjump_ds((int)branch_addr,target_addr);
6620     if(compiled_target_addr) {
6621       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6622       add_link(target_addr,stub);
6623     }
6624     else set_jump_target((int)branch_addr,(int)stub);
6625   }
6626 }
6627
6628 // Assemble the delay slot for the above
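// (This appears to handle the case where a branch is the last instruction
// before a page boundary, so its delay slot belongs to the next page; the
// entry is registered at start+1 so these delay-slot entry points stay
// distinct from normal block entries.)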
6629 static void pagespan_ds()
6630 {
6631   assem_debug("initial delay slot:\n");
6632   u_int vaddr=start+1;
6633   u_int page=get_page(vaddr);
6634   u_int vpage=get_vpage(vaddr);
6635   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6636   do_dirty_stub_ds();
6637   ll_add(jump_in+page,vaddr,(void *)out);
6638   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6639   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6640     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6641   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6642     emit_writeword(HOST_BTREG,(int)&branch_target);
6643   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6644   address_generation(0,&regs[0],regs[0].regmap_entry);
6645   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6646     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6647   cop1_usable=0;
6648   is_delayslot=0;
6649   switch(itype[0]) {
6650     case ALU:
6651       alu_assemble(0,&regs[0]);break;
6652     case IMM16:
6653       imm16_assemble(0,&regs[0]);break;
6654     case SHIFT:
6655       shift_assemble(0,&regs[0]);break;
6656     case SHIFTIMM:
6657       shiftimm_assemble(0,&regs[0]);break;
6658     case LOAD:
6659       load_assemble(0,&regs[0]);break;
6660     case LOADLR:
6661       loadlr_assemble(0,&regs[0]);break;
6662     case STORE:
6663       store_assemble(0,&regs[0]);break;
6664     case STORELR:
6665       storelr_assemble(0,&regs[0]);break;
6666     case COP0:
6667       cop0_assemble(0,&regs[0]);break;
6668     case COP1:
6669       cop1_assemble(0,&regs[0]);break;
6670     case C1LS:
6671       c1ls_assemble(0,&regs[0]);break;
6672     case COP2:
6673       cop2_assemble(0,&regs[0]);break;
6674     case C2LS:
6675       c2ls_assemble(0,&regs[0]);break;
6676     case C2OP:
6677       c2op_assemble(0,&regs[0]);break;
6678     case FCONV:
6679       fconv_assemble(0,&regs[0]);break;
6680     case FLOAT:
6681       float_assemble(0,&regs[0]);break;
6682     case FCOMP:
6683       fcomp_assemble(0,&regs[0]);break;
6684     case MULTDIV:
6685       multdiv_assemble(0,&regs[0]);break;
6686     case MOV:
6687       mov_assemble(0,&regs[0]);break;
6688     case SYSCALL:
6689     case HLECALL:
6690     case INTCALL:
6691     case SPAN:
6692     case UJUMP:
6693     case RJUMP:
6694     case CJUMP:
6695     case SJUMP:
6696     case FJUMP:
6697       printf("Jump in the delay slot.  This is probably a bug.\n");
6698   }
6699   int btaddr=get_reg(regs[0].regmap,BTREG);
6700   if(btaddr<0) {
6701     btaddr=get_reg(regs[0].regmap,-1);
6702     emit_readword((int)&branch_target,btaddr);
6703   }
6704   assert(btaddr!=HOST_CCREG);
6705   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6706 #ifdef HOST_IMM8
6707   emit_movimm(start+4,HOST_TEMPREG);
6708   emit_cmp(btaddr,HOST_TEMPREG);
6709 #else
6710   emit_cmpimm(btaddr,start+4);
6711 #endif
6712   int branch=(int)out;
6713   emit_jeq(0);
6714   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6715   emit_jmp(jump_vaddr_reg[btaddr]);
6716   set_jump_target(branch,(int)out);
6717   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6718   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6719 }
6720
6721 // Basic liveness analysis for MIPS registers
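// unneeded_reg[]/unneeded_reg_upper[] are bitmaps indexed by MIPS register
// number: bit r set means the lower/upper 32 bits of register r are dead at
// that point and need not be preserved.  The pass walks backwards, marking
// written registers unneeded and read registers needed; bit 0 ($zero) is
// always unneeded.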
6722 void unneeded_registers(int istart,int iend,int r)
6723 {
6724   int i;
6725   uint64_t u,uu,gte_u,b,bu,gte_bu;
6726   uint64_t temp_u,temp_uu,temp_gte_u;
6727   uint64_t tdep;
6728   if(iend==slen-1) {
6729     u=1;uu=1;
6730   }else{
6731     u=unneeded_reg[iend+1];
6732     uu=unneeded_reg_upper[iend+1];
6733     u=1;uu=1;
6734   }
6735   gte_u=temp_gte_u=0;
6736
6737   for (i=iend;i>=istart;i--)
6738   {
6739     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6740     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6741     {
6742       // If subroutine call, flag return address as a possible branch target
6743       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6744       
6745       if(ba[i]<start || ba[i]>=(start+slen*4))
6746       {
6747         // Branch out of this block, flush all regs
6748         u=1;
6749         uu=1;
6750         gte_u=0;
6751         /* Hexagon hack 
6752         if(itype[i]==UJUMP&&rt1[i]==31)
6753         {
6754           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6755         }
6756         if(itype[i]==RJUMP&&rs1[i]==31)
6757         {
6758           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6759         }
6760         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6761           if(itype[i]==UJUMP&&rt1[i]==31)
6762           {
6763             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6764             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6765           }
6766           if(itype[i]==RJUMP&&rs1[i]==31)
6767           {
6768             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6769             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6770           }
6771         }*/
6772         branch_unneeded_reg[i]=u;
6773         branch_unneeded_reg_upper[i]=uu;
6774         // Merge in delay slot
6775         tdep=(~uu>>rt1[i+1])&1;
6776         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6777         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6778         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6779         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6780         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6781         u|=1;uu|=1;
6782         gte_u|=gte_rt[i+1];
6783         gte_u&=~gte_rs[i+1];
6784         // If branch is "likely" (and conditional)
6785         // then we skip the delay slot on the fall-thru path
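        // ("Likely" branches nullify the delay slot when not taken, so
        // fall-through liveness comes from the instruction after it.)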
6786         if(likely[i]) {
6787           if(i<slen-1) {
6788             u&=unneeded_reg[i+2];
6789             uu&=unneeded_reg_upper[i+2];
6790             gte_u&=gte_unneeded[i+2];
6791           }
6792           else
6793           {
6794             u=1;
6795             uu=1;
6796             gte_u=0;
6797           }
6798         }
6799       }
6800       else
6801       {
6802         // Internal branch, flag target
6803         bt[(ba[i]-start)>>2]=1;
6804         if(ba[i]<=start+i*4) {
6805           // Backward branch
6806           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6807           {
6808             // Unconditional branch
6809             temp_u=1;temp_uu=1;
6810             temp_gte_u=0;
6811           } else {
6812             // Conditional branch (not taken case)
6813             temp_u=unneeded_reg[i+2];
6814             temp_uu=unneeded_reg_upper[i+2];
6815             temp_gte_u&=gte_unneeded[i+2];
6816           }
6817           // Merge in delay slot
6818           tdep=(~temp_uu>>rt1[i+1])&1;
6819           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6820           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6821           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6822           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6823           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6824           temp_u|=1;temp_uu|=1;
6825           temp_gte_u|=gte_rt[i+1];
6826           temp_gte_u&=~gte_rs[i+1];
6827           // If branch is "likely" (and conditional)
6828           // then we skip the delay slot on the fall-thru path
6829           if(likely[i]) {
6830             if(i<slen-1) {
6831               temp_u&=unneeded_reg[i+2];
6832               temp_uu&=unneeded_reg_upper[i+2];
6833               temp_gte_u&=gte_unneeded[i+2];
6834             }
6835             else
6836             {
6837               temp_u=1;
6838               temp_uu=1;
6839               temp_gte_u=0;
6840             }
6841           }
6842           tdep=(~temp_uu>>rt1[i])&1;
6843           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6844           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6845           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6846           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6847           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6848           temp_u|=1;temp_uu|=1;
6849           temp_gte_u|=gte_rt[i];
6850           temp_gte_u&=~gte_rs[i];
6851           unneeded_reg[i]=temp_u;
6852           unneeded_reg_upper[i]=temp_uu;
6853           gte_unneeded[i]=temp_gte_u;
6854           // Only go three levels deep.  This recursion can take an
6855           // excessive amount of time if there are a lot of nested loops.
6856           if(r<2) {
6857             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6858           }else{
6859             unneeded_reg[(ba[i]-start)>>2]=1;
6860             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6861             gte_unneeded[(ba[i]-start)>>2]=0;
6862           }
6863         } /*else*/ if(1) {
6864           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6865           {
6866             // Unconditional branch
6867             u=unneeded_reg[(ba[i]-start)>>2];
6868             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6869             gte_u=gte_unneeded[(ba[i]-start)>>2];
6870             branch_unneeded_reg[i]=u;
6871             branch_unneeded_reg_upper[i]=uu;
6872         //u=1;
6873         //uu=1;
6874         //branch_unneeded_reg[i]=u;
6875         //branch_unneeded_reg_upper[i]=uu;
6876             // Merge in delay slot
6877             tdep=(~uu>>rt1[i+1])&1;
6878             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6879             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6880             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6881             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6882             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6883             u|=1;uu|=1;
6884             gte_u|=gte_rt[i+1];
6885             gte_u&=~gte_rs[i+1];
6886           } else {
6887             // Conditional branch
6888             b=unneeded_reg[(ba[i]-start)>>2];
6889             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6890             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6891             branch_unneeded_reg[i]=b;
6892             branch_unneeded_reg_upper[i]=bu;
6893         //b=1;
6894         //bu=1;
6895         //branch_unneeded_reg[i]=b;
6896         //branch_unneeded_reg_upper[i]=bu;
6897             // Branch delay slot
6898             tdep=(~uu>>rt1[i+1])&1;
6899             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6900             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6901             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6902             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6903             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6904             b|=1;bu|=1;
6905             gte_bu|=gte_rt[i+1];
6906             gte_bu&=~gte_rs[i+1];
6907             // If branch is "likely" then we skip the
6908             // delay slot on the fall-thru path
6909             if(likely[i]) {
6910               u=b;
6911               uu=bu;
6912               gte_u=gte_bu;
6913               if(i<slen-1) {
6914                 u&=unneeded_reg[i+2];
6915                 uu&=unneeded_reg_upper[i+2];
6916                 gte_u&=gte_unneeded[i+2];
6917         //u=1;
6918         //uu=1;
6919               }
6920             } else {
6921               u&=b;
6922               uu&=bu;
6923               gte_u&=gte_bu;
6924         //u=1;
6925         //uu=1;
6926             }
6927             if(i<slen-1) {
6928               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6929               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6930         //branch_unneeded_reg[i]=1;
6931         //branch_unneeded_reg_upper[i]=1;
6932             } else {
6933               branch_unneeded_reg[i]=1;
6934               branch_unneeded_reg_upper[i]=1;
6935             }
6936           }
6937         }
6938       }
6939     }
6940     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6941     {
6942       // SYSCALL instruction (software interrupt)
6943       u=1;
6944       uu=1;
6945     }
6946     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6947     {
6948       // ERET instruction (return from interrupt)
6949       u=1;
6950       uu=1;
6951     }
6952     //u=uu=1; // DEBUG
6953     tdep=(~uu>>rt1[i])&1;
6954     // Written registers are unneeded
6955     u|=1LL<<rt1[i];
6956     u|=1LL<<rt2[i];
6957     uu|=1LL<<rt1[i];
6958     uu|=1LL<<rt2[i];
6959     gte_u|=gte_rt[i];
6960     // Accessed registers are needed
6961     u&=~(1LL<<rs1[i]);
6962     u&=~(1LL<<rs2[i]);
6963     uu&=~(1LL<<us1[i]);
6964     uu&=~(1LL<<us2[i]);
6965     gte_u&=~gte_rs[i];
6966     // Source-target dependencies
6967     uu&=~(tdep<<dep1[i]);
6968     uu&=~(tdep<<dep2[i]);
6969     // R0 is always unneeded
6970     u|=1;uu|=1;
6971     // Save it
6972     unneeded_reg[i]=u;
6973     unneeded_reg_upper[i]=uu;
6974     gte_unneeded[i]=gte_u;
6975     /*
6976     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6977     printf("U:");
6978     int r;
6979     for(r=1;r<=CCREG;r++) {
6980       if((unneeded_reg[i]>>r)&1) {
6981         if(r==HIREG) printf(" HI");
6982         else if(r==LOREG) printf(" LO");
6983         else printf(" r%d",r);
6984       }
6985     }
6986     printf(" UU:");
6987     for(r=1;r<=CCREG;r++) {
6988       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6989         if(r==HIREG) printf(" HI");
6990         else if(r==LOREG) printf(" LO");
6991         else printf(" r%d",r);
6992       }
6993     }
6994     printf("\n");*/
6995   }
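  // 32-bit-only targets (FORCE32, e.g. the PSX core) never have live data
  // in the upper register halves, so mark them all as unneeded.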
6996 #ifdef FORCE32
6997   for (i=iend;i>=istart;i--)
6998   {
6999     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
7000   }
7001 #endif
7002 }
7003
7004 // Identify registers which are likely to contain 32-bit values
7005 // This is used to predict whether any branches will jump to a
7006 // location with 64-bit values in registers.
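// is32 is a bitmap over MIPS registers: bit r set means register r is
// predicted to hold a sign-extended 32-bit value at this point.  p32[i]
// records the prediction after instruction i; bit 0 ($zero) is always set.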
7007 static void provisional_32bit()
7008 {
7009   int i,j;
7010   uint64_t is32=1;
7011   uint64_t lastbranch=1;
7012   
7013   for(i=0;i<slen;i++)
7014   {
7015     if(i>0) {
7016       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
7017         if(i>1) is32=lastbranch;
7018         else is32=1;
7019       }
7020     }
7021     if(i>1)
7022     {
7023       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
7024         if(likely[i-2]) {
7025           if(i>2) is32=lastbranch;
7026           else is32=1;
7027         }
7028       }
7029       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7030       {
7031         if(rs1[i-2]==0||rs2[i-2]==0)
7032         {
7033           if(rs1[i-2]) {
7034             is32|=1LL<<rs1[i-2];
7035           }
7036           if(rs2[i-2]) {
7037             is32|=1LL<<rs2[i-2];
7038           }
7039         }
7040       }
7041     }
7042     // If something jumps here with 64-bit values
7043     // then promote those registers to 64 bits
7044     if(bt[i])
7045     {
7046       uint64_t temp_is32=is32;
7047       for(j=i-1;j>=0;j--)
7048       {
7049         if(ba[j]==start+i*4) 
7050           //temp_is32&=branch_regs[j].is32;
7051           temp_is32&=p32[j];
7052       }
7053       for(j=i;j<slen;j++)
7054       {
7055         if(ba[j]==start+i*4) 
7056           temp_is32=1;
7057       }
7058       is32=temp_is32;
7059     }
7060     int type=itype[i];
7061     int op=opcode[i];
7062     int op2=opcode2[i];
7063     int rt=rt1[i];
7064     int s1=rs1[i];
7065     int s2=rs2[i];
7066     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7067       // Branches don't write registers, consider the delay slot instead.
7068       type=itype[i+1];
7069       op=opcode[i+1];
7070       op2=opcode2[i+1];
7071       rt=rt1[i+1];
7072       s1=rs1[i+1];
7073       s2=rs2[i+1];
7074       lastbranch=is32;
7075     }
7076     switch(type) {
7077       case LOAD:
7078         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
7079            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
7080           is32&=~(1LL<<rt);
7081         else
7082           is32|=1LL<<rt;
7083         break;
7084       case STORE:
7085       case STORELR:
7086         break;
7087       case LOADLR:
7088         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDL/LDR
7089         if(op==0x22) is32|=1LL<<rt; // LWL
7090         break;
7091       case IMM16:
7092         if (op==0x08||op==0x09|| // ADDI/ADDIU
7093             op==0x0a||op==0x0b|| // SLTI/SLTIU
7094             op==0x0c|| // ANDI
7095             op==0x0f)  // LUI
7096         {
7097           is32|=1LL<<rt;
7098         }
7099         if(op==0x18||op==0x19) { // DADDI/DADDIU
7100           is32&=~(1LL<<rt);
7101           //if(imm[i]==0)
7102           //  is32|=((is32>>s1)&1LL)<<rt;
7103         }
7104         if(op==0x0d||op==0x0e) { // ORI/XORI
7105           uint64_t sr=((is32>>s1)&1LL);
7106           is32&=~(1LL<<rt);
7107           is32|=sr<<rt;
7108         }
7109         break;
7110       case UJUMP:
7111         break;
7112       case RJUMP:
7113         break;
7114       case CJUMP:
7115         break;
7116       case SJUMP:
7117         break;
7118       case FJUMP:
7119         break;
7120       case ALU:
7121         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
7122           is32|=1LL<<rt;
7123         }
7124         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7125           is32|=1LL<<rt;
7126         }
7127         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7128           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
7129           is32&=~(1LL<<rt);
7130           is32|=sr<<rt;
7131         }
7132         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
7133           if(s1==0&&s2==0) {
7134             is32|=1LL<<rt;
7135           }
7136           else if(s2==0) {
7137             uint64_t sr=((is32>>s1)&1LL);
7138             is32&=~(1LL<<rt);
7139             is32|=sr<<rt;
7140           }
7141           else if(s1==0) {
7142             uint64_t sr=((is32>>s2)&1LL);
7143             is32&=~(1LL<<rt);
7144             is32|=sr<<rt;
7145           }
7146           else {
7147             is32&=~(1LL<<rt);
7148           }
7149         }
7150         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
7151           if(s1==0&&s2==0) {
7152             is32|=1LL<<rt;
7153           }
7154           else if(s2==0) {
7155             uint64_t sr=((is32>>s1)&1LL);
7156             is32&=~(1LL<<rt);
7157             is32|=sr<<rt;
7158           }
7159           else {
7160             is32&=~(1LL<<rt);
7161           }
7162         }
7163         break;
7164       case MULTDIV:
7165         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7166           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7167         }
7168         else {
7169           is32|=(1LL<<HIREG)|(1LL<<LOREG);
7170         }
7171         break;
7172       case MOV:
7173         {
7174           uint64_t sr=((is32>>s1)&1LL);
7175           is32&=~(1LL<<rt);
7176           is32|=sr<<rt;
7177         }
7178         break;
7179       case SHIFT:
7180         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7181         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7182         break;
7183       case SHIFTIMM:
7184         is32|=1LL<<rt;
7185         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7186         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7187         break;
7188       case COP0:
7189         if(op2==0) is32|=1LL<<rt; // MFC0
7190         break;
7191       case COP1:
7192       case COP2:
7193         if(op2==0) is32|=1LL<<rt; // MFC1
7194         if(op2==1) is32&=~(1LL<<rt); // DMFC1
7195         if(op2==2) is32|=1LL<<rt; // CFC1
7196         break;
7197       case C1LS:
7198       case C2LS:
7199         break;
7200       case FLOAT:
7201       case FCONV:
7202         break;
7203       case FCOMP:
7204         break;
7205       case C2OP:
7206       case SYSCALL:
7207       case HLECALL:
7208         break;
7209       default:
7210         break;
7211     }
7212     is32|=1;
7213     p32[i]=is32;
7214
7215     if(i>0)
7216     {
7217       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7218       {
7219         if(rt1[i-1]==31) // JAL/JALR
7220         {
7221           // Subroutine call will return here, don't alloc any registers
7222           is32=1;
7223         }
7224         else if(i+1<slen)
7225         {
7226           // Internal branch will jump here, match registers to caller
7227           is32=0x3FFFFFFFFLL;
7228         }
7229       }
7230     }
7231   }
7232 }
7233
7234 // Identify registers which may be assumed to contain 32-bit values
7235 // and where optimizations will rely on this.
7236 // This is used to determine whether backward branches can safely
7237 // jump to a location with 64-bit values in registers.
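// pr32[] (replacing the commented-out requires_32bit[]) is a bitmap of
// registers that the following code relies on holding sign-extended 32-bit
// values; the scan runs backwards so each instruction accumulates the
// requirements of everything after it.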
7238 static void provisional_r32()
7239 {
7240   u_int r32=0;
7241   int i;
7242   
7243   for (i=slen-1;i>=0;i--)
7244   {
7245     int hr;
7246     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7247     {
7248       if(ba[i]<start || ba[i]>=(start+slen*4))
7249       {
7250         // Branch out of this block, don't need anything
7251         r32=0;
7252       }
7253       else
7254       {
7255         // Internal branch
7256         // Need whatever matches the target
7257         // (and doesn't get overwritten by the delay slot instruction)
7258         r32=0;
7259         int t=(ba[i]-start)>>2;
7260         if(ba[i]>start+i*4) {
7261           // Forward branch
7262           //if(!(requires_32bit[t]&~regs[i].was32))
7263           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7264           if(!(pr32[t]&~regs[i].was32))
7265             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7266         }else{
7267           // Backward branch
7268           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7269             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7270         }
7271       }
7272       // Conditional branch may need registers for following instructions
7273       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7274       {
7275         if(i<slen-2) {
7276           //r32|=requires_32bit[i+2];
7277           r32|=pr32[i+2];
7278           r32&=regs[i].was32;
7279           // Mark this address as a branch target since it may be called
7280           // upon return from interrupt
7281           //bt[i+2]=1;
7282         }
7283       }
7284       // Merge in delay slot
7285       if(!likely[i]) {
7286         // These are overwritten unless the branch is "likely"
7287         // and the delay slot is nullified if not taken
7288         r32&=~(1LL<<rt1[i+1]);
7289         r32&=~(1LL<<rt2[i+1]);
7290       }
7291       // Assume these are needed (delay slot)
7292       if(us1[i+1]>0)
7293       {
7294         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7295       }
7296       if(us2[i+1]>0)
7297       {
7298         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7299       }
7300       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7301       {
7302         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7303       }
7304       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7305       {
7306         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7307       }
7308     }
7309     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7310     {
7311       // SYSCALL instruction (software interrupt)
7312       r32=0;
7313     }
7314     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7315     {
7316       // ERET instruction (return from interrupt)
7317       r32=0;
7318     }
7319     // Check 32 bits
7320     r32&=~(1LL<<rt1[i]);
7321     r32&=~(1LL<<rt2[i]);
7322     if(us1[i]>0)
7323     {
7324       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7325     }
7326     if(us2[i]>0)
7327     {
7328       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7329     }
7330     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7331     {
7332       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7333     }
7334     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7335     {
7336       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7337     }
7338     //requires_32bit[i]=r32;
7339     pr32[i]=r32;
7340     
7341     // Dirty registers which are 32-bit require 32-bit input,
7342     // as they will be written back as 32-bit values
7343     for(hr=0;hr<HOST_REGS;hr++)
7344     {
7345       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7346         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7347           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7348           pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7349           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7350         }
7351       }
7352     }
7353   }
7354 }
7355
7356 // Write back dirty registers as soon as we will no longer modify them,
7357 // so that we don't end up with lots of writes at the branches.
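// will_dirty[]/wont_dirty[] are bitmaps over host registers (bit r = host
// register r), propagated backwards from the end of the block.  The
// top-level call (wr!=0) uses them to update regs[].dirty/wasdirty;
// backward branches trigger an analysis-only pass (wr==0), which does not
// itself recurse further, to keep nested loops cheap.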
7358 void clean_registers(int istart,int iend,int wr)
7359 {
7360   int i;
7361   int r;
7362   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7363   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7364   if(iend==slen-1) {
7365     will_dirty_i=will_dirty_next=0;
7366     wont_dirty_i=wont_dirty_next=0;
7367   }else{
7368     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7369     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7370   }
7371   for (i=iend;i>=istart;i--)
7372   {
7373     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7374     {
7375       if(ba[i]<start || ba[i]>=(start+slen*4))
7376       {
7377         // Branch out of this block, flush all regs
7378         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7379         {
7380           // Unconditional branch
7381           will_dirty_i=0;
7382           wont_dirty_i=0;
7383           // Merge in delay slot (will dirty)
7384           for(r=0;r<HOST_REGS;r++) {
7385             if(r!=EXCLUDE_REG) {
7386               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7387               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7388               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7389               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7390               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7391               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7392               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7393               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7394               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7395               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7396               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7397               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7398               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7399               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7400             }
7401           }
7402         }
7403         else
7404         {
7405           // Conditional branch
7406           will_dirty_i=0;
7407           wont_dirty_i=wont_dirty_next;
7408           // Merge in delay slot (will dirty)
7409           for(r=0;r<HOST_REGS;r++) {
7410             if(r!=EXCLUDE_REG) {
7411               if(!likely[i]) {
7412                 // Might not dirty if likely branch is not taken
7413                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7414                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7415                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7416                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7417                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7418                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7419                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7420                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7421                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7422                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7423                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7424                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7425                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7426                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7427               }
7428             }
7429           }
7430         }
7431         // Merge in delay slot (won't dirty)
7432         for(r=0;r<HOST_REGS;r++) {
7433           if(r!=EXCLUDE_REG) {
7434             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7435             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7436             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7437             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7438             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7439             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7440             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7441             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7442             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7443             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7444           }
7445         }
7446         if(wr) {
7447           #ifndef DESTRUCTIVE_WRITEBACK
7448           branch_regs[i].dirty&=wont_dirty_i;
7449           #endif
7450           branch_regs[i].dirty|=will_dirty_i;
7451         }
7452       }
7453       else
7454       {
7455         // Internal branch
7456         if(ba[i]<=start+i*4) {
7457           // Backward branch
7458           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7459           {
7460             // Unconditional branch
7461             temp_will_dirty=0;
7462             temp_wont_dirty=0;
7463             // Merge in delay slot (will dirty)
7464             for(r=0;r<HOST_REGS;r++) {
7465               if(r!=EXCLUDE_REG) {
7466                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7467                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7468                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7469                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7470                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7471                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7472                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7473                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7474                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7475                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7476                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7477                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7478                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7479                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7480               }
7481             }
7482           } else {
7483             // Conditional branch (not taken case)
7484             temp_will_dirty=will_dirty_next;
7485             temp_wont_dirty=wont_dirty_next;
7486             // Merge in delay slot (will dirty)
7487             for(r=0;r<HOST_REGS;r++) {
7488               if(r!=EXCLUDE_REG) {
7489                 if(!likely[i]) {
7490                   // Will not dirty if likely branch is not taken
7491                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7492                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7493                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7494                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7495                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7496                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7497                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7498                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7499                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7500                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7501                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7502                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7503                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7504                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7505                 }
7506               }
7507             }
7508           }
7509           // Merge in delay slot (won't dirty)
7510           for(r=0;r<HOST_REGS;r++) {
7511             if(r!=EXCLUDE_REG) {
7512               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7513               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7514               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7515               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7516               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7517               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7518               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7519               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7520               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7521               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7522             }
7523           }
7524           // Deal with changed mappings
7525           if(i<iend) {
7526             for(r=0;r<HOST_REGS;r++) {
7527               if(r!=EXCLUDE_REG) {
7528                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7529                   temp_will_dirty&=~(1<<r);
7530                   temp_wont_dirty&=~(1<<r);
7531                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7532                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7533                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7534                   } else {
7535                     temp_will_dirty|=1<<r;
7536                     temp_wont_dirty|=1<<r;
7537                   }
7538                 }
7539               }
7540             }
7541           }
7542           if(wr) {
7543             will_dirty[i]=temp_will_dirty;
7544             wont_dirty[i]=temp_wont_dirty;
7545             clean_registers((ba[i]-start)>>2,i-1,0);
7546           }else{
7547             // Limit recursion.  It can take an excessive amount
7548             // of time if there are a lot of nested loops.
7549             will_dirty[(ba[i]-start)>>2]=0;
7550             wont_dirty[(ba[i]-start)>>2]=-1;
7551           }
7552         }
7553         /*else*/ if(1)
7554         {
7555           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7556           {
7557             // Unconditional branch
7558             will_dirty_i=0;
7559             wont_dirty_i=0;
7560           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7561             for(r=0;r<HOST_REGS;r++) {
7562               if(r!=EXCLUDE_REG) {
7563                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7564                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7565                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7566                 }
7567                 if(branch_regs[i].regmap[r]>=0) {
7568                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7569                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7570                 }
7571               }
7572             }
7573           //}
7574             // Merge in delay slot
7575             for(r=0;r<HOST_REGS;r++) {
7576               if(r!=EXCLUDE_REG) {
7577                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7578                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7579                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7580                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7581                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7582                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7583                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7584                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7585                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7586                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7587                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7588                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7589                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7590                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7591               }
7592             }
7593           } else {
7594             // Conditional branch
7595             will_dirty_i=will_dirty_next;
7596             wont_dirty_i=wont_dirty_next;
7597           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7598             for(r=0;r<HOST_REGS;r++) {
7599               if(r!=EXCLUDE_REG) {
7600                 signed char target_reg=branch_regs[i].regmap[r];
7601                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7602                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7603                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7604                 }
7605                 else if(target_reg>=0) {
7606                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7607                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7608                 }
7609                 // Treat delay slot as part of branch too
7610                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7611                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7612                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7613                 }
7614                 else
7615                 {
7616                   will_dirty[i+1]&=~(1<<r);
7617                 }*/
7618               }
7619             }
7620           //}
7621             // Merge in delay slot
7622             for(r=0;r<HOST_REGS;r++) {
7623               if(r!=EXCLUDE_REG) {
7624                 if(!likely[i]) {
7625                   // Might not dirty if likely branch is not taken
7626                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7627                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7628                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7629                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7630                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7631                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7632                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7633                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7634                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7635                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7636                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7637                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7638                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7639                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7640                 }
7641               }
7642             }
7643           }
7644           // Merge in delay slot (won't dirty)
7645           for(r=0;r<HOST_REGS;r++) {
7646             if(r!=EXCLUDE_REG) {
7647               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7648               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7649               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7650               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7651               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7652               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7653               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7654               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7655               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7656               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7657             }
7658           }
7659           if(wr) {
7660             #ifndef DESTRUCTIVE_WRITEBACK
7661             branch_regs[i].dirty&=wont_dirty_i;
7662             #endif
7663             branch_regs[i].dirty|=will_dirty_i;
7664           }
7665         }
7666       }
7667     }
7668     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7669     {
7670       // SYSCALL instruction (software interrupt)
7671       will_dirty_i=0;
7672       wont_dirty_i=0;
7673     }
7674     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7675     {
7676       // ERET instruction (return from interrupt)
7677       will_dirty_i=0;
7678       wont_dirty_i=0;
7679     }
7680     will_dirty_next=will_dirty_i;
7681     wont_dirty_next=wont_dirty_i;
7682     for(r=0;r<HOST_REGS;r++) {
7683       if(r!=EXCLUDE_REG) {
7684         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7685         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7686         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7687         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7688         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7689         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7690         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7691         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7692         if(i>istart) {
7693           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7694           {
7695             // Don't store a register immediately after writing it,
7696             // as that may prevent dual-issue.
7697             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7698             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7699           }
7700         }
7701       }
7702     }
7703     // Save it
7704     will_dirty[i]=will_dirty_i;
7705     wont_dirty[i]=wont_dirty_i;
7706     // Mark registers that won't be dirtied as not dirty
7707     if(wr) {
7708       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7709       for(r=0;r<HOST_REGS;r++) {
7710         if((will_dirty_i>>r)&1) {
7711           printf(" r%d",r);
7712         }
7713       }
7714       printf("\n");*/
7715
7716       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7717         regs[i].dirty|=will_dirty_i;
7718         #ifndef DESTRUCTIVE_WRITEBACK
7719         regs[i].dirty&=wont_dirty_i;
7720         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7721         {
7722           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7723             for(r=0;r<HOST_REGS;r++) {
7724               if(r!=EXCLUDE_REG) {
7725                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7726                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7727               }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r); assert(!((wont_dirty_i>>r)&1));*/}
7728               }
7729             }
7730           }
7731         }
7732         else
7733         {
7734           if(i<iend) {
7735             for(r=0;r<HOST_REGS;r++) {
7736               if(r!=EXCLUDE_REG) {
7737                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7738                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7739               }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r); assert(!((wont_dirty_i>>r)&1));*/}
7740               }
7741             }
7742           }
7743         }
7744         #endif
7745       //}
7746     }
7747     // Deal with changed mappings
7748     temp_will_dirty=will_dirty_i;
7749     temp_wont_dirty=wont_dirty_i;
7750     for(r=0;r<HOST_REGS;r++) {
7751       if(r!=EXCLUDE_REG) {
7752         int nr;
7753         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7754           if(wr) {
7755             #ifndef DESTRUCTIVE_WRITEBACK
7756             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7757             #endif
7758             regs[i].wasdirty|=will_dirty_i&(1<<r);
7759           }
7760         }
7761         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7762           // Register moved to a different register
7763           will_dirty_i&=~(1<<r);
7764           wont_dirty_i&=~(1<<r);
7765           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7766           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7767           if(wr) {
7768             #ifndef DESTRUCTIVE_WRITEBACK
7769             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7770             #endif
7771             regs[i].wasdirty|=will_dirty_i&(1<<r);
7772           }
7773         }
7774         else {
7775           will_dirty_i&=~(1<<r);
7776           wont_dirty_i&=~(1<<r);
7777           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7778             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7779             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7780           } else {
7781             wont_dirty_i|=1<<r;
7782             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r); assert(!((will_dirty>>r)&1));*/
7783           }
7784         }
7785       }
7786     }
7787   }
7788 }
7789
7790   /* disassembly */
7791 void disassemble_inst(int i)
7792 {
7793     if (bt[i]) printf("*"); else printf(" ");
7794     switch(itype[i]) {
7795       case UJUMP:
7796         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7797       case CJUMP:
7798         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7799       case SJUMP:
7800         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7801       case FJUMP:
7802         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7803       case RJUMP:
7804         if (opcode[i]==0x9&&rt1[i]!=31)
7805           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7806         else
7807           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7808         break;
7809       case SPAN:
7810         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7811       case IMM16:
7812         if(opcode[i]==0xf) //LUI
7813           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7814         else
7815           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7816         break;
7817       case LOAD:
7818       case LOADLR:
7819         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7820         break;
7821       case STORE:
7822       case STORELR:
7823         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7824         break;
7825       case ALU:
7826       case SHIFT:
7827         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7828         break;
7829       case MULTDIV:
7830         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7831         break;
7832       case SHIFTIMM:
7833         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7834         break;
7835       case MOV:
7836         if((opcode2[i]&0x1d)==0x10)
7837           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7838         else if((opcode2[i]&0x1d)==0x11)
7839           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7840         else
7841           printf (" %x: %s\n",start+i*4,insn[i]);
7842         break;
7843       case COP0:
7844         if(opcode2[i]==0)
7845           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7846         else if(opcode2[i]==4)
7847           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7848         else printf (" %x: %s\n",start+i*4,insn[i]);
7849         break;
7850       case COP1:
7851         if(opcode2[i]<3)
7852           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7853         else if(opcode2[i]>3)
7854           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7855         else printf (" %x: %s\n",start+i*4,insn[i]);
7856         break;
7857       case COP2:
7858         if(opcode2[i]<3)
7859           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7860         else if(opcode2[i]>3)
7861           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7862         else printf (" %x: %s\n",start+i*4,insn[i]);
7863         break;
7864       case C1LS:
7865         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7866         break;
7867       case C2LS:
7868         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7869         break;
7870       case INTCALL:
7871         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7872         break;
7873       default:
7874         //printf (" %s %8x\n",insn[i],source[i]);
7875         printf (" %x: %s\n",start+i*4,insn[i]);
7876     }
7877 }
7878
7879 // clear the state completely, instead of just marking
7880 // things invalid like invalidate_all_pages() does
7881 void new_dynarec_clear_full()
7882 {
7883   int n;
7884   out=(u_char *)BASE_ADDR;
7885   memset(invalid_code,1,sizeof(invalid_code));
7886   memset(hash_table,0xff,sizeof(hash_table));
7887   memset(mini_ht,-1,sizeof(mini_ht));
7888   memset(restore_candidate,0,sizeof(restore_candidate));
7889   memset(shadow,0,sizeof(shadow));
7890   copy=shadow;
7891   expirep=16384; // Expiry pointer, +2 blocks
7892   pending_exception=0;
7893   literalcount=0;
7894   stop_after_jal=0;
7895   inv_code_start=inv_code_end=~0;
7896   gte_reads_flags=0;
7897   // TLB
7898 #ifndef DISABLE_TLB
7899   using_tlb=0;
7900 #endif
7901   sp_in_mirror=0;
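  // memory_map[] has one entry per 4KB guest page; the stored value is
  // (host_base - guest_base) >> 2, so host = guest + (memory_map[g>>12] << 2),
  // and -1 marks an unmapped page.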
7902   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7903     memory_map[n]=-1;
7904   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7905     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7906   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7907     memory_map[n]=-1;
7908   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7909   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7910   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7911 }
7912
7913 void new_dynarec_init()
7914 {
7915   printf("Init new dynarec\n");
7916   out=(u_char *)BASE_ADDR;
7917   if (mmap (out, 1<<TARGET_SIZE_2,
7918             PROT_READ | PROT_WRITE | PROT_EXEC,
7919             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7920             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7921 #ifdef MUPEN64
7922   rdword=&readmem_dword;
7923   fake_pc.f.r.rs=&readmem_dword;
7924   fake_pc.f.r.rt=&readmem_dword;
7925   fake_pc.f.r.rd=&readmem_dword;
7926 #endif
7927   int n;
7928   new_dynarec_clear_full();
7929 #ifdef HOST_IMM8
7930   // Copy this into local area so we don't have to put it in every literal pool
7931   invc_ptr=invalid_code;
7932 #endif
7933 #ifdef MUPEN64
7934   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7935     writemem[n] = write_nomem_new;
7936     writememb[n] = write_nomemb_new;
7937     writememh[n] = write_nomemh_new;
7938 #ifndef FORCE32
7939     writememd[n] = write_nomemd_new;
7940 #endif
7941     readmem[n] = read_nomem_new;
7942     readmemb[n] = read_nomemb_new;
7943     readmemh[n] = read_nomemh_new;
7944 #ifndef FORCE32
7945     readmemd[n] = read_nomemd_new;
7946 #endif
7947   }
7948   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7949     writemem[n] = write_rdram_new;
7950     writememb[n] = write_rdramb_new;
7951     writememh[n] = write_rdramh_new;
7952 #ifndef FORCE32
7953     writememd[n] = write_rdramd_new;
7954 #endif
7955   }
7956   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7957     writemem[n] = write_nomem_new;
7958     writememb[n] = write_nomemb_new;
7959     writememh[n] = write_nomemh_new;
7960 #ifndef FORCE32
7961     writememd[n] = write_nomemd_new;
7962 #endif
7963     readmem[n] = read_nomem_new;
7964     readmemb[n] = read_nomemb_new;
7965     readmemh[n] = read_nomemh_new;
7966 #ifndef FORCE32
7967     readmemd[n] = read_nomemd_new;
7968 #endif
7969   }
7970 #endif
7971   tlb_hacks();
7972   arch_init();
7973 }
7974
7975 void new_dynarec_cleanup()
7976 {
7977   int n;
7978   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7979   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7980   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7981   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7982   #ifdef ROM_COPY
7983   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7984   #endif
7985 }
7986
7987 int new_recompile_block(int addr)
7988 {
7989 /*
7990   if(addr==0x800cd050) {
7991     int block;
7992     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7993     int n;
7994     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7995   }
7996 */
7997   //if(Count==365117028) tracedebug=1;
7998   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7999   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8000   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
8001   //if(debug) 
8002   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
8003   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
8004   /*if(Count>=312978186) {
8005     rlist();
8006   }*/
8007   //rlist();
8008   start = (u_int)addr&~3;
8009   //assert(((u_int)addr&1)==0);
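  // Select the host-side backing for this guest address: 'source' points at
  // the host memory holding the guest code, and 'pagelimit' bounds how far
  // the later passes may read.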
8010 #ifdef PCSX
8011   if(!sp_in_mirror&&(signed int)(psxRegs.GPR.n.sp&0xffe00000)>0x80200000&&
8012      0x10000<=psxRegs.GPR.n.sp&&(psxRegs.GPR.n.sp&~0xe0e00000)<RAM_SIZE) {
8013     printf("SP hack enabled (%08x), @%08x\n", psxRegs.GPR.n.sp, psxRegs.pc);
8014     sp_in_mirror=1;
8015   }
8016   if (Config.HLE && start == 0x80001000) // hlecall
8017   {
8018     // XXX: is this enough? Maybe check hleSoftCall?
8019     u_int beginning=(u_int)out;
8020     u_int page=get_page(start);
8021     invalid_code[start>>12]=0;
8022     emit_movimm(start,0);
8023     emit_writeword(0,(int)&pcaddr);
8024     emit_jmp((int)new_dyna_leave);
8025     literal_pool(0);
8026 #ifdef __arm__
8027     __clear_cache((void *)beginning,out);
8028 #endif
8029     ll_add(jump_in+page,start,(void *)beginning);
8030     return 0;
8031   }
8032   else if ((u_int)addr < 0x00200000 ||
8033     (0xa0000000 <= addr && addr < 0xa0200000)) {
8034     // used for BIOS calls mostly?
8035     source = (u_int *)((u_int)rdram+(start&0x1fffff));
8036     pagelimit = (addr&0xa0000000)|0x00200000;
8037   }
8038   else if (!Config.HLE && (
8039 /*    (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
8040     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
8041     // BIOS
8042     source = (u_int *)((u_int)psxR+(start&0x7ffff));
8043     pagelimit = (addr&0xfff00000)|0x80000;
8044   }
8045   else
8046 #endif
8047 #ifdef MUPEN64
8048   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
8049     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
8050     pagelimit = 0xa4001000;
8051   }
8052   else
8053 #endif
8054   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
8055     source = (u_int *)((u_int)rdram+start-0x80000000);
8056     pagelimit = 0x80000000+RAM_SIZE;
8057   }
8058 #ifndef DISABLE_TLB
8059   else if ((signed int)addr >= (signed int)0xC0000000) {
8060     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
8061     //if(tlb_LUT_r[start>>12])
8062       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
8063     if((signed int)memory_map[start>>12]>=0) {
8064       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
8065       pagelimit=(start+4096)&0xFFFFF000;
8066       int map=memory_map[start>>12];
8067       int i;
8068       for(i=0;i<5;i++) {
8069         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
8070         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
8071       }
8072       assem_debug("pagelimit=%x\n",pagelimit);
8073       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
8074     }
8075     else {
8076       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
8077       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
8078       return -1; // Caller will invoke exception handler
8079     }
8080     //printf("source= %x\n",(int)source);
8081   }
8082 #endif
8083   else {
8084     printf("Compile at bogus memory address: %x \n", (int)addr);
8085     exit(1);
8086   }
8087
8088   /* Pass 1: disassemble */
8089   /* Pass 2: register dependencies, branch targets */
8090   /* Pass 3: register allocation */
8091   /* Pass 4: branch dependencies */
8092   /* Pass 5: pre-alloc */
8093   /* Pass 6: optimize clean/dirty state */
8094   /* Pass 7: flag 32-bit registers */
8095   /* Pass 8: assembly */
8096   /* Pass 9: linker */
8097   /* Pass 10: garbage collection / free memory */
8098
8099   int i,j;
8100   int done=0;
8101   unsigned int type,op,op2;
8102
8103   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
8104   
8105   /* Pass 1 disassembly */
8106
8107   for(i=0;!done;i++) {
8108     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
8109     minimum_free_regs[i]=0;
8110     opcode[i]=op=source[i]>>26;
8111     switch(op)
8112     {
8113       case 0x00: strcpy(insn[i],"special"); type=NI;
8114         op2=source[i]&0x3f;
8115         switch(op2)
8116         {
8117           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
8118           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
8119           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
8120           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
8121           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
8122           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
8123           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
8124           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
8125           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
8126           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
8127           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
8128           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
8129           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
8130           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
8131           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
8132           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
8133           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
8134           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
8135           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
8136           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
8137           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
8138           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
8139           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
8140           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
8141           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
8142           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
8143           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
8144           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
8145           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
8146           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
8147           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
8148           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
8149           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
8150           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
8151           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
8152 #ifndef FORCE32
8153           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
8154           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
8155           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
8156           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
8157           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
8158           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
8159           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
8160           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
8161           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
8162           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
8163           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
8164           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
8165           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
8166           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
8167           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
8168           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
8169           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
8170 #endif
8171         }
8172         break;
8173       case 0x01: strcpy(insn[i],"regimm"); type=NI;
8174         op2=(source[i]>>16)&0x1f;
8175         switch(op2)
8176         {
8177           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
8178           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8179           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8180           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8181           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8182           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8183           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8184           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8185           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8186           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8187           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8188           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8189           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8190           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8191         }
8192         break;
8193       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8194       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8195       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8196       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8197       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8198       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8199       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8200       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8201       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8202       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8203       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8204       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8205       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8206       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8207       case 0x10: strcpy(insn[i],"cop0"); type=NI;
8208         op2=(source[i]>>21)&0x1f;
8209         switch(op2)
8210         {
8211           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8212           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8213           case 0x10: strcpy(insn[i],"tlb"); type=NI;
8214           switch(source[i]&0x3f)
8215           {
8216             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8217             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8218             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8219             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
8220 #ifdef PCSX
8221             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8222 #else
8223             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
8224 #endif
8225           }
8226         }
8227         break;
8228       case 0x11: strcpy(insn[i],"cop1"); type=NI;
8229         op2=(source[i]>>21)&0x1f;
8230         switch(op2)
8231         {
8232           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8233           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8234           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8235           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8236           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8237           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8238           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8239           switch((source[i]>>16)&0x3)
8240           {
8241             case 0x00: strcpy(insn[i],"BC1F"); break;
8242             case 0x01: strcpy(insn[i],"BC1T"); break;
8243             case 0x02: strcpy(insn[i],"BC1FL"); break;
8244             case 0x03: strcpy(insn[i],"BC1TL"); break;
8245           }
8246           break;
8247           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8248           switch(source[i]&0x3f)
8249           {
8250             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8251             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8252             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8253             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8254             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8255             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8256             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8257             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8258             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8259             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8260             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8261             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8262             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8263             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8264             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8265             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8266             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8267             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8268             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8269             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8270             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8271             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8272             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8273             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8274             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8275             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8276             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8277             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8278             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8279             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8280             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8281             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8282             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8283             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8284             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8285           }
8286           break;
8287           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8288           switch(source[i]&0x3f)
8289           {
8290             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8291             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8292             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8293             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8294             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8295             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8296             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8297             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8298             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8299             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8300             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8301             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8302             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8303             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8304             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8305             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8306             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8307             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8308             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8309             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8310             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8311             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8312             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8313             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8314             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8315             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8316             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8317             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8318             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8319             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8320             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8321             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8322             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8323             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8324             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8325           }
8326           break;
8327           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8328           switch(source[i]&0x3f)
8329           {
8330             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8331             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8332           }
8333           break;
8334           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8335           switch(source[i]&0x3f)
8336           {
8337             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8338             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8339           }
8340           break;
8341         }
8342         break;
8343 #ifndef FORCE32
8344       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8345       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8346       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8347       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8348       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8349       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8350       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8351       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8352 #endif
8353       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8354       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8355       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8356       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8357       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8358       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8359       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8360 #ifndef FORCE32
8361       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8362 #endif
8363       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8364       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8365       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8366       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8367 #ifndef FORCE32
8368       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8369       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8370 #endif
8371       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8372       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8373       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8374       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8375 #ifndef FORCE32
8376       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8377       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8378       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8379 #endif
8380       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8381       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8382 #ifndef FORCE32
8383       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8384       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8385       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8386 #endif
8387 #ifdef PCSX
8388       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8389         op2=(source[i]>>21)&0x1f;
8390         //if (op2 & 0x10) {
8391         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
8392           if (gte_handlers[source[i]&0x3f]!=NULL) {
8393             if (gte_regnames[source[i]&0x3f]!=NULL)
8394               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
8395             else
8396               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8397             type=C2OP;
8398           }
8399         }
8400         else switch(op2)
8401         {
8402           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8403           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8404           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8405           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8406         }
8407         break;
8408       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8409       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8410       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8411 #endif
8412       default: strcpy(insn[i],"???"); type=NI;
8413         printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8414         break;
8415     }
8416     itype[i]=type;
8417     opcode2[i]=op2;
8418     /* Get registers/immediates */
8419     lt1[i]=0;
8420     us1[i]=0;
8421     us2[i]=0;
8422     dep1[i]=0;
8423     dep2[i]=0;
8424     gte_rs[i]=gte_rt[i]=0;
8425     switch(type) {
8426       case LOAD:
8427         rs1[i]=(source[i]>>21)&0x1f;
8428         rs2[i]=0;
8429         rt1[i]=(source[i]>>16)&0x1f;
8430         rt2[i]=0;
8431         imm[i]=(short)source[i];
8432         break;
8433       case STORE:
8434       case STORELR:
8435         rs1[i]=(source[i]>>21)&0x1f;
8436         rs2[i]=(source[i]>>16)&0x1f;
8437         rt1[i]=0;
8438         rt2[i]=0;
8439         imm[i]=(short)source[i];
8440         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8441         break;
8442       case LOADLR:
8443         // LWL/LWR only load part of the register,
8444         // therefore the target register must be treated as a source too
8445         rs1[i]=(source[i]>>21)&0x1f;
8446         rs2[i]=(source[i]>>16)&0x1f;
8447         rt1[i]=(source[i]>>16)&0x1f;
8448         rt2[i]=0;
8449         imm[i]=(short)source[i];
8450         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // 64-bit LDL/LDR
8451         if(op==0x26) dep1[i]=rt1[i]; // LWR
8452         break;
8453       case IMM16:
8454         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8455         else rs1[i]=(source[i]>>21)&0x1f;
8456         rs2[i]=0;
8457         rt1[i]=(source[i]>>16)&0x1f;
8458         rt2[i]=0;
8459         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8460           imm[i]=(unsigned short)source[i];
8461         }else{
8462           imm[i]=(short)source[i];
8463         }
8464         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8465         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8466         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8467         break;
8468       case UJUMP:
8469         rs1[i]=0;
8470         rs2[i]=0;
8471         rt1[i]=0;
8472         rt2[i]=0;
8473         // The JAL instruction writes to r31.
8474         if (op&1) {
8475           rt1[i]=31;
8476         }
8477         rs2[i]=CCREG;
8478         break;
8479       case RJUMP:
8480         rs1[i]=(source[i]>>21)&0x1f;
8481         rs2[i]=0;
8482         rt1[i]=0;
8483         rt2[i]=0;
8484         // The JALR instruction writes to rd.
8485         if (op2&1) {
8486           rt1[i]=(source[i]>>11)&0x1f;
8487         }
8488         rs2[i]=CCREG;
8489         break;
8490       case CJUMP:
8491         rs1[i]=(source[i]>>21)&0x1f;
8492         rs2[i]=(source[i]>>16)&0x1f;
8493         rt1[i]=0;
8494         rt2[i]=0;
8495         if(op&2) { // BGTZ/BLEZ
8496           rs2[i]=0;
8497         }
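             // the comparison reads full (possibly 64-bit) values, so both
             // sources count as 64-bit uses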
8498         us1[i]=rs1[i];
8499         us2[i]=rs2[i];
8500         likely[i]=op>>4;
8501         break;
8502       case SJUMP:
8503         rs1[i]=(source[i]>>21)&0x1f;
8504         rs2[i]=CCREG;
8505         rt1[i]=0;
8506         rt2[i]=0;
8507         us1[i]=rs1[i];
8508         if(op2&0x10) { // BxxAL
8509           rt1[i]=31;
8510           // NOTE: If the branch is not taken, r31 is still overwritten
8511         }
8512         likely[i]=(op2&2)>>1;
8513         break;
8514       case FJUMP:
8515         rs1[i]=FSREG;
8516         rs2[i]=CSREG;
8517         rt1[i]=0;
8518         rt2[i]=0;
8519         likely[i]=((source[i])>>17)&1;
8520         break;
8521       case ALU:
8522         rs1[i]=(source[i]>>21)&0x1f; // source
8523         rs2[i]=(source[i]>>16)&0x1f; // second source operand
8524         rt1[i]=(source[i]>>11)&0x1f; // destination
8525         rt2[i]=0;
8526         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8527           us1[i]=rs1[i];us2[i]=rs2[i];
8528         }
8529         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8530           dep1[i]=rs1[i];dep2[i]=rs2[i];
8531         }
8532         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8533           dep1[i]=rs1[i];dep2[i]=rs2[i];
8534         }
8535         break;
8536       case MULTDIV:
8537         rs1[i]=(source[i]>>21)&0x1f; // source
8538         rs2[i]=(source[i]>>16)&0x1f; // second operand (multiplier/divisor)
8539         rt1[i]=HIREG;
8540         rt2[i]=LOREG;
8541         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8542           us1[i]=rs1[i];us2[i]=rs2[i];
8543         }
8544         break;
8545       case MOV:
8546         rs1[i]=0;
8547         rs2[i]=0;
8548         rt1[i]=0;
8549         rt2[i]=0;
8550         if(op2==0x10) rs1[i]=HIREG; // MFHI
8551         if(op2==0x11) rt1[i]=HIREG; // MTHI
8552         if(op2==0x12) rs1[i]=LOREG; // MFLO
8553         if(op2==0x13) rt1[i]=LOREG; // MTLO
8554         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8555         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8556         dep1[i]=rs1[i];
8557         break;
8558       case SHIFT:
8559         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8560         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8561         rt1[i]=(source[i]>>11)&0x1f; // destination
8562         rt2[i]=0;
8563         // DSLLV/DSRLV/DSRAV are 64-bit
8564         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8565         break;
8566       case SHIFTIMM:
8567         rs1[i]=(source[i]>>16)&0x1f;
8568         rs2[i]=0;
8569         rt1[i]=(source[i]>>11)&0x1f;
8570         rt2[i]=0;
8571         imm[i]=(source[i]>>6)&0x1f;
8572         // DSxx32 instructions
8573         if(op2>=0x3c) imm[i]|=0x20;
8574         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8575         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8576         break;
8577       case COP0:
8578         rs1[i]=0;
8579         rs2[i]=0;
8580         rt1[i]=0;
8581         rt2[i]=0;
8582         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8583         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8584         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8585         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8586         break;
8587       case COP1:
8588         rs1[i]=0;
8589         rs2[i]=0;
8590         rt1[i]=0;
8591         rt2[i]=0;
8592         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8593         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8594         if(op2==5) us1[i]=rs1[i]; // DMTC1
8595         rs2[i]=CSREG;
8596         break;
8597       case COP2:
8598         rs1[i]=0;
8599         rs2[i]=0;
8600         rt1[i]=0;
8601         rt2[i]=0;
8602         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
8603         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
8604         rs2[i]=CSREG;
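             // GTE register usage bitmaps: bits 0-31 = data regs, bits 32-63 = control regs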
8605         int gr=(source[i]>>11)&0x1F;
8606         switch(op2)
8607         {
8608           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
8609           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
8610           case 0x02: gte_rs[i]=1ll<<(gr+32); // CFC2
8611             if(gr==31&&!gte_reads_flags) {
8612               assem_debug("gte flag read encountered @%08x\n",addr + i*4);
8613               gte_reads_flags=1;
8614             }
8615             break;
8616           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
8617         }
8618         break;
8619       case C1LS:
8620         rs1[i]=(source[i]>>21)&0x1F;
8621         rs2[i]=CSREG;
8622         rt1[i]=0;
8623         rt2[i]=0;
8624         imm[i]=(short)source[i];
8625         break;
8626       case C2LS:
8627         rs1[i]=(source[i]>>21)&0x1F;
8628         rs2[i]=0;
8629         rt1[i]=0;
8630         rt2[i]=0;
8631         imm[i]=(short)source[i];
8632         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
8633         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
8634         break;
8635       case C2OP:
8636         rs1[i]=0;
8637         rs2[i]=0;
8638         rt1[i]=0;
8639         rt2[i]=0;
8640         gte_rt[i]=1ll<<63; // every op changes flags
8641         // TODO: other regs?
8642         break;
8643       case FLOAT:
8644       case FCONV:
8645         rs1[i]=0;
8646         rs2[i]=CSREG;
8647         rt1[i]=0;
8648         rt2[i]=0;
8649         break;
8650       case FCOMP:
8651         rs1[i]=FSREG;
8652         rs2[i]=CSREG;
8653         rt1[i]=FSREG;
8654         rt2[i]=0;
8655         break;
8656       case SYSCALL:
8657       case HLECALL:
8658       case INTCALL:
8659         rs1[i]=CCREG;
8660         rs2[i]=0;
8661         rt1[i]=0;
8662         rt2[i]=0;
8663         break;
8664       default:
8665         rs1[i]=0;
8666         rs2[i]=0;
8667         rt1[i]=0;
8668         rt2[i]=0;
8669     }
8670     /* Calculate branch target addresses */
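         // J/JAL: target = top 4 bits of PC+4 | (26-bit instruction index << 2)
         // conditional branches: target = PC+4 + (sign-extended 16-bit offset << 2)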
8671     if(type==UJUMP)
8672       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8673     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8674       ba[i]=start+i*4+8; // Ignore never taken branch
8675     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8676       ba[i]=start+i*4+8; // Ignore never taken branch
8677     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8678       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8679     else ba[i]=-1;
8680 #ifdef PCSX
8681     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
8682       int do_in_intrp=0;
8683       // branch in delay slot?
8684       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8685         // don't compile the first branch; fall back to the interpreter when it's hit
8686         printf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
8687         do_in_intrp=1;
8688       }
8689       // basic load delay detection
8690       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
8691         int t=(ba[i-1]-start)/4;
8692         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
8693           // jump target wants DS result - potential load delay effect
8694           printf("load delay @%08x (%08x)\n", addr + i*4, addr);
8695           do_in_intrp=1;
8696           bt[t+1]=1; // expected return from interpreter
8697         }
8698         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
8699               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
8700           // v0 overwrite like this is a sign of trouble, bail out
8701           printf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
8702           do_in_intrp=1;
8703         }
8704       }
8705       if(do_in_intrp) {
8706         rs1[i-1]=CCREG;
8707         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
8708         ba[i-1]=-1;
8709         itype[i-1]=INTCALL;
8710         done=2;
8711         i--; // don't compile the DS
8712       }
8713     }
8714 #endif
8715     /* Is this the end of the block? */
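         // (source[i-1]>>16)==0x1000 matches BEQ $zero,$zero, i.e. an unconditional branch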
8716     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8717       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8718         done=2;
8719       }
8720       else {
8721         if(stop_after_jal) done=1;
8722         // Stop on BREAK
8723         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8724       }
8725       // Don't recompile stuff that's already compiled
8726       if(check_addr(start+i*4+4)) done=1;
8727       // Don't get too close to the limit
8728       if(i>MAXBLOCK/2) done=1;
8729     }
8730     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8731     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8732     if(done==2) {
8733       // Does the block continue due to a branch?
8734       for(j=i-1;j>=0;j--)
8735       {
8736         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
8737         if(ba[j]==start+i*4+4) done=j=0;
8738         if(ba[j]==start+i*4+8) done=j=0;
8739       }
8740     }
8741     //assert(i<MAXBLOCK-1);
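         // stop before running past the end of the mapped page/region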
8742     if(start+i*4==pagelimit-4) done=1;
8743     assert(start+i*4<pagelimit);
8744     if (i==MAXBLOCK-1) done=1;
8745     // Stop if we're compiling junk
8746     if(itype[i]==NI&&opcode[i]==0x11) {
8747       done=stop_after_jal=1;
8748       printf("Disabled speculative precompilation\n");
8749     }
8750   }
8751   slen=i;
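       // a branch at the very end of the page has its delay slot on the next page;
       // mark it SPAN so it takes the special pagespan allocation/assembly path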
8752   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8753     if(start+i*4==pagelimit) {
8754       itype[i-1]=SPAN;
8755     }
8756   }
8757   assert(slen>0);
8758
8759   /* Pass 2 - Register dependencies and branch targets */
8760
8761   unneeded_registers(0,slen-1,0);
8762   
8763   /* Pass 3 - Register allocation */
8764
8765   struct regstat current; // Current register allocations/status
8766   current.is32=1;
8767   current.dirty=0;
8768   current.u=unneeded_reg[0];
8769   current.uu=unneeded_reg_upper[0];
8770   clear_all_regs(current.regmap);
8771   alloc_reg(&current,0,CCREG);
8772   dirty_reg(&current,CCREG);
8773   current.isconst=0;
8774   current.wasconst=0;
8775   int ds=0;
8776   int cc=0;
8777   int hr=-1;
8778
8779 #ifndef FORCE32
8780   provisional_32bit();
8781 #endif
8782   if((u_int)addr&1) {
8783     // First instruction is delay slot
8784     cc=-1;
8785     bt[1]=1;
8786     ds=1;
8787     unneeded_reg[0]=1;
8788     unneeded_reg_upper[0]=1;
8789     current.regmap[HOST_BTREG]=BTREG;
8790   }
8791   
8792   for(i=0;i<slen;i++)
8793   {
8794     if(bt[i])
8795     {
8796       int hr;
8797       for(hr=0;hr<HOST_REGS;hr++)
8798       {
8799         // Is this really necessary?
8800         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8801       }
8802       current.isconst=0;
8803     }
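         // On the fall-through path of a BNE/BNEL against $zero the other operand
         // must have been zero, so it can safely be treated as a 32-bit value.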
8804     if(i>1)
8805     {
8806       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8807       {
8808         if(rs1[i-2]==0||rs2[i-2]==0)
8809         {
8810           if(rs1[i-2]) {
8811             current.is32|=1LL<<rs1[i-2];
8812             int hr=get_reg(current.regmap,rs1[i-2]|64);
8813             if(hr>=0) current.regmap[hr]=-1;
8814           }
8815           if(rs2[i-2]) {
8816             current.is32|=1LL<<rs2[i-2];
8817             int hr=get_reg(current.regmap,rs2[i-2]|64);
8818             if(hr>=0) current.regmap[hr]=-1;
8819           }
8820         }
8821       }
8822     }
8823 #ifndef FORCE32
8824     // If something jumps here with 64-bit values
8825     // then promote those registers to 64 bits
8826     if(bt[i])
8827     {
8828       uint64_t temp_is32=current.is32;
8829       for(j=i-1;j>=0;j--)
8830       {
8831         if(ba[j]==start+i*4) 
8832           temp_is32&=branch_regs[j].is32;
8833       }
8834       for(j=i;j<slen;j++)
8835       {
8836         if(ba[j]==start+i*4) 
8837           //temp_is32=1;
8838           temp_is32&=p32[j];
8839       }
8840       if(temp_is32!=current.is32) {
8841         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8842         #ifndef DESTRUCTIVE_WRITEBACK
8843         if(ds)
8844         #endif
8845         for(hr=0;hr<HOST_REGS;hr++)
8846         {
8847           int r=current.regmap[hr];
8848           if(r>0&&r<64)
8849           {
8850             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8851               temp_is32|=1LL<<r;
8852               //printf("restore %d\n",r);
8853             }
8854           }
8855         }
8856         current.is32=temp_is32;
8857       }
8858     }
8859 #else
8860     current.is32=-1LL;
8861 #endif
8862
8863     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8864     regs[i].wasconst=current.isconst;
8865     regs[i].was32=current.is32;
8866     regs[i].wasdirty=current.dirty;
8867     #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
8868     // To change a dirty register from 32 to 64 bits, we must write
8869     // it out during the previous cycle (for branches, 2 cycles)
8870     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8871     {
8872       uint64_t temp_is32=current.is32;
8873       for(j=i-1;j>=0;j--)
8874       {
8875         if(ba[j]==start+i*4+4) 
8876           temp_is32&=branch_regs[j].is32;
8877       }
8878       for(j=i;j<slen;j++)
8879       {
8880         if(ba[j]==start+i*4+4) 
8881           //temp_is32=1;
8882           temp_is32&=p32[j];
8883       }
8884       if(temp_is32!=current.is32) {
8885         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8886         for(hr=0;hr<HOST_REGS;hr++)
8887         {
8888           int r=current.regmap[hr];
8889           if(r>0)
8890           {
8891             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8892               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8893               {
8894                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8895                 {
8896                   //printf("dump %d/r%d\n",hr,r);
8897                   current.regmap[hr]=-1;
8898                   if(get_reg(current.regmap,r|64)>=0) 
8899                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8900                 }
8901               }
8902             }
8903           }
8904         }
8905       }
8906     }
8907     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8908     {
8909       uint64_t temp_is32=current.is32;
8910       for(j=i-1;j>=0;j--)
8911       {
8912         if(ba[j]==start+i*4+8) 
8913           temp_is32&=branch_regs[j].is32;
8914       }
8915       for(j=i;j<slen;j++)
8916       {
8917         if(ba[j]==start+i*4+8) 
8918           //temp_is32=1;
8919           temp_is32&=p32[j];
8920       }
8921       if(temp_is32!=current.is32) {
8922         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8923         for(hr=0;hr<HOST_REGS;hr++)
8924         {
8925           int r=current.regmap[hr];
8926           if(r>0)
8927           {
8928             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8929               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8930               {
8931                 //printf("dump %d/r%d\n",hr,r);
8932                 current.regmap[hr]=-1;
8933                 if(get_reg(current.regmap,r|64)>=0) 
8934                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8935               }
8936             }
8937           }
8938         }
8939       }
8940     }
8941     #endif
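         // Propagate unneeded-register sets: registers read by this instruction
         // (or by the branch and its delay slot) become needed again, and if a
         // result's upper half is needed, the upper halves of its dependencies are too.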
8942     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8943       if(i+1<slen) {
8944         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8945         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8946         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8947         current.u|=1;
8948         current.uu|=1;
8949       } else {
8950         current.u=1;
8951         current.uu=1;
8952       }
8953     } else {
8954       if(i+1<slen) {
8955         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8956         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8957         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8958         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8959         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8960         current.u|=1;
8961         current.uu|=1;
8962       } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8963     }
8964     is_ds[i]=ds;
8965     if(ds) {
8966       ds=0; // Skip delay slot, already allocated as part of branch
8967       // ...but we need to alloc it in case something jumps here
8968       if(i+1<slen) {
8969         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8970         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8971       }else{
8972         current.u=branch_unneeded_reg[i-1];
8973         current.uu=branch_unneeded_reg_upper[i-1];
8974       }
8975       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8976       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8977       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8978       current.u|=1;
8979       current.uu|=1;
8980       struct regstat temp;
8981       memcpy(&temp,&current,sizeof(current));
8982       temp.wasdirty=temp.dirty;
8983       temp.was32=temp.is32;
8984       // TODO: Take into account unconditional branches, as below
8985       delayslot_alloc(&temp,i);
8986       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8987       regs[i].wasdirty=temp.wasdirty;
8988       regs[i].was32=temp.was32;
8989       regs[i].dirty=temp.dirty;
8990       regs[i].is32=temp.is32;
8991       regs[i].isconst=0;
8992       regs[i].wasconst=0;
8993       current.isconst=0;
8994       // Create entry (branch target) regmap
8995       for(hr=0;hr<HOST_REGS;hr++)
8996       {
8997         int r=temp.regmap[hr];
8998         if(r>=0) {
8999           if(r!=regmap_pre[i][hr]) {
9000             regs[i].regmap_entry[hr]=-1;
9001           }
9002           else
9003           {
9004             if(r<64){
9005               if((current.u>>r)&1) {
9006                 regs[i].regmap_entry[hr]=-1;
9007                 regs[i].regmap[hr]=-1;
9008                 //Don't clear regs in the delay slot as the branch might need them
9009                 //current.regmap[hr]=-1;
9010               }else
9011                 regs[i].regmap_entry[hr]=r;
9012             }
9013             else {
9014               if((current.uu>>(r&63))&1) {
9015                 regs[i].regmap_entry[hr]=-1;
9016                 regs[i].regmap[hr]=-1;
9017                 //Don't clear regs in the delay slot as the branch might need them
9018                 //current.regmap[hr]=-1;
9019               }else
9020                 regs[i].regmap_entry[hr]=r;
9021             }
9022           }
9023         } else {
9024           // First instruction expects CCREG to be allocated
9025           if(i==0&&hr==HOST_CCREG) 
9026             regs[i].regmap_entry[hr]=CCREG;
9027           else
9028             regs[i].regmap_entry[hr]=-1;
9029         }
9030       }
9031     }
9032     else { // Not delay slot
9033       switch(itype[i]) {
9034         case UJUMP:
9035           //current.isconst=0; // DEBUG
9036           //current.wasconst=0; // DEBUG
9037           //regs[i].wasconst=0; // DEBUG
9038           clear_const(&current,rt1[i]);
9039           alloc_cc(&current,i);
9040           dirty_reg(&current,CCREG);
9041           if (rt1[i]==31) {
9042             alloc_reg(&current,i,31);
9043             dirty_reg(&current,31);
9044             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
9045             //assert(rt1[i+1]!=rt1[i]);
9046             #ifdef REG_PREFETCH
9047             alloc_reg(&current,i,PTEMP);
9048             #endif
9049             //current.is32|=1LL<<rt1[i];
9050           }
9051           ooo[i]=1;
9052           delayslot_alloc(&current,i+1);
9053           //current.isconst=0; // DEBUG
9054           ds=1;
9055           //printf("i=%d, isconst=%x\n",i,current.isconst);
9056           break;
9057         case RJUMP:
9058           //current.isconst=0;
9059           //current.wasconst=0;
9060           //regs[i].wasconst=0;
9061           clear_const(&current,rs1[i]);
9062           clear_const(&current,rt1[i]);
9063           alloc_cc(&current,i);
9064           dirty_reg(&current,CCREG);
9065           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
9066             alloc_reg(&current,i,rs1[i]);
9067             if (rt1[i]!=0) {
9068               alloc_reg(&current,i,rt1[i]);
9069               dirty_reg(&current,rt1[i]);
9070               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
9071               assert(rt1[i+1]!=rt1[i]);
9072               #ifdef REG_PREFETCH
9073               alloc_reg(&current,i,PTEMP);
9074               #endif
9075             }
9076             #ifdef USE_MINI_HT
9077             if(rs1[i]==31) { // JALR
9078               alloc_reg(&current,i,RHASH);
9079               #ifndef HOST_IMM_ADDR32
9080               alloc_reg(&current,i,RHTBL);
9081               #endif
9082             }
9083             #endif
9084             delayslot_alloc(&current,i+1);
9085           } else {
9086             // The delay slot overwrites our source register,
9087             // so allocate a temporary register to hold the old value.
9088             current.isconst=0;
9089             current.wasconst=0;
9090             regs[i].wasconst=0;
9091             delayslot_alloc(&current,i+1);
9092             current.isconst=0;
9093             alloc_reg(&current,i,RTEMP);
9094           }
9095           //current.isconst=0; // DEBUG
9096           ooo[i]=1;
9097           ds=1;
9098           break;
9099         case CJUMP:
9100           //current.isconst=0;
9101           //current.wasconst=0;
9102           //regs[i].wasconst=0;
9103           clear_const(&current,rs1[i]);
9104           clear_const(&current,rs2[i]);
9105           if((opcode[i]&0x3E)==4) // BEQ/BNE
9106           {
9107             alloc_cc(&current,i);
9108             dirty_reg(&current,CCREG);
9109             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9110             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
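                     // if either operand may hold a 64-bit value, allocate the upper halves too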
9111             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9112             {
9113               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9114               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9115             }
9116             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
9117                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
9118               // The delay slot overwrites one of our conditions.
9119               // Allocate the branch condition registers instead.
9120               current.isconst=0;
9121               current.wasconst=0;
9122               regs[i].wasconst=0;
9123               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9124               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9125               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9126               {
9127                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9128                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9129               }
9130             }
9131             else
9132             {
9133               ooo[i]=1;
9134               delayslot_alloc(&current,i+1);
9135             }
9136           }
9137           else
9138           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
9139           {
9140             alloc_cc(&current,i);
9141             dirty_reg(&current,CCREG);
9142             alloc_reg(&current,i,rs1[i]);
9143             if(!(current.is32>>rs1[i]&1))
9144             {
9145               alloc_reg64(&current,i,rs1[i]);
9146             }
9147             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
9148               // The delay slot overwrites one of our conditions.
9149               // Allocate the branch condition registers instead.
9150               current.isconst=0;
9151               current.wasconst=0;
9152               regs[i].wasconst=0;
9153               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9154               if(!((current.is32>>rs1[i])&1))
9155               {
9156                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9157               }
9158             }
9159             else
9160             {
9161               ooo[i]=1;
9162               delayslot_alloc(&current,i+1);
9163             }
9164           }
9165           else
9166           // Don't alloc the delay slot yet because we might not execute it
9167           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
9168           {
9169             current.isconst=0;
9170             current.wasconst=0;
9171             regs[i].wasconst=0;
9172             alloc_cc(&current,i);
9173             dirty_reg(&current,CCREG);
9174             alloc_reg(&current,i,rs1[i]);
9175             alloc_reg(&current,i,rs2[i]);
9176             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9177             {
9178               alloc_reg64(&current,i,rs1[i]);
9179               alloc_reg64(&current,i,rs2[i]);
9180             }
9181           }
9182           else
9183           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
9184           {
9185             current.isconst=0;
9186             current.wasconst=0;
9187             regs[i].wasconst=0;
9188             alloc_cc(&current,i);
9189             dirty_reg(&current,CCREG);
9190             alloc_reg(&current,i,rs1[i]);
9191             if(!(current.is32>>rs1[i]&1))
9192             {
9193               alloc_reg64(&current,i,rs1[i]);
9194             }
9195           }
9196           ds=1;
9197           //current.isconst=0;
9198           break;
9199         case SJUMP:
9200           //current.isconst=0;
9201           //current.wasconst=0;
9202           //regs[i].wasconst=0;
9203           clear_const(&current,rs1[i]);
9204           clear_const(&current,rt1[i]);
9205           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
9206           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
9207           {
9208             alloc_cc(&current,i);
9209             dirty_reg(&current,CCREG);
9210             alloc_reg(&current,i,rs1[i]);
9211             if(!(current.is32>>rs1[i]&1))
9212             {
9213               alloc_reg64(&current,i,rs1[i]);
9214             }
9215             if (rt1[i]==31) { // BLTZAL/BGEZAL
9216               alloc_reg(&current,i,31);
9217               dirty_reg(&current,31);
9218               //#ifdef REG_PREFETCH
9219               //alloc_reg(&current,i,PTEMP);
9220               //#endif
9221               //current.is32|=1LL<<rt1[i];
9222             }
9223             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
9224                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
9225               // Allocate the branch condition registers instead.
9226               current.isconst=0;
9227               current.wasconst=0;
9228               regs[i].wasconst=0;
9229               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9230               if(!((current.is32>>rs1[i])&1))
9231               {
9232                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9233               }
9234             }
9235             else
9236             {
9237               ooo[i]=1;
9238               delayslot_alloc(&current,i+1);
9239             }
9240           }
9241           else
9242           // Don't alloc the delay slot yet because we might not execute it
9243           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9244           {
9245             current.isconst=0;
9246             current.wasconst=0;
9247             regs[i].wasconst=0;
9248             alloc_cc(&current,i);
9249             dirty_reg(&current,CCREG);
9250             alloc_reg(&current,i,rs1[i]);
9251             if(!(current.is32>>rs1[i]&1))
9252             {
9253               alloc_reg64(&current,i,rs1[i]);
9254             }
9255           }
9256           ds=1;
9257           //current.isconst=0;
9258           break;
9259         case FJUMP:
9260           current.isconst=0;
9261           current.wasconst=0;
9262           regs[i].wasconst=0;
9263           if(likely[i]==0) // BC1F/BC1T
9264           {
9265             // TODO: Theoretically we can run out of registers here on x86.
9266             // The delay slot can allocate up to six, and we need to check
9267             // CSREG before executing the delay slot.  Possibly we can drop
9268             // the cycle count and then reload it after checking that the
9269             // FPU is in a usable state, or don't do out-of-order execution.
9270             alloc_cc(&current,i);
9271             dirty_reg(&current,CCREG);
9272             alloc_reg(&current,i,FSREG);
9273             alloc_reg(&current,i,CSREG);
9274             if(itype[i+1]==FCOMP) {
9275               // The delay slot overwrites the branch condition.
9276               // Allocate the branch condition registers instead.
9277               alloc_cc(&current,i);
9278               dirty_reg(&current,CCREG);
9279               alloc_reg(&current,i,CSREG);
9280               alloc_reg(&current,i,FSREG);
9281             }
9282             else {
9283               ooo[i]=1;
9284               delayslot_alloc(&current,i+1);
9285               alloc_reg(&current,i+1,CSREG);
9286             }
9287           }
9288           else
9289           // Don't alloc the delay slot yet because we might not execute it
9290           if(likely[i]) // BC1FL/BC1TL
9291           {
9292             alloc_cc(&current,i);
9293             dirty_reg(&current,CCREG);
9294             alloc_reg(&current,i,CSREG);
9295             alloc_reg(&current,i,FSREG);
9296           }
9297           ds=1;
9298           current.isconst=0;
9299           break;
9300         case IMM16:
9301           imm16_alloc(&current,i);
9302           break;
9303         case LOAD:
9304         case LOADLR:
9305           load_alloc(&current,i);
9306           break;
9307         case STORE:
9308         case STORELR:
9309           store_alloc(&current,i);
9310           break;
9311         case ALU:
9312           alu_alloc(&current,i);
9313           break;
9314         case SHIFT:
9315           shift_alloc(&current,i);
9316           break;
9317         case MULTDIV:
9318           multdiv_alloc(&current,i);
9319           break;
9320         case SHIFTIMM:
9321           shiftimm_alloc(&current,i);
9322           break;
9323         case MOV:
9324           mov_alloc(&current,i);
9325           break;
9326         case COP0:
9327           cop0_alloc(&current,i);
9328           break;
9329         case COP1:
9330         case COP2:
9331           cop1_alloc(&current,i);
9332           break;
9333         case C1LS:
9334           c1ls_alloc(&current,i);
9335           break;
9336         case C2LS:
9337           c2ls_alloc(&current,i);
9338           break;
9339         case C2OP:
9340           c2op_alloc(&current,i);
9341           break;
9342         case FCONV:
9343           fconv_alloc(&current,i);
9344           break;
9345         case FLOAT:
9346           float_alloc(&current,i);
9347           break;
9348         case FCOMP:
9349           fcomp_alloc(&current,i);
9350           break;
9351         case SYSCALL:
9352         case HLECALL:
9353         case INTCALL:
9354           syscall_alloc(&current,i);
9355           break;
9356         case SPAN:
9357           pagespan_alloc(&current,i);
9358           break;
9359       }
9360       
9361       // Drop the upper half of registers that have become 32-bit
9362       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9363       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9364         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9365         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9366         current.uu|=1;
9367       } else {
9368         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9369         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9370         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9371         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9372         current.uu|=1;
9373       }
9374
9375       // Create entry (branch target) regmap
9376       for(hr=0;hr<HOST_REGS;hr++)
9377       {
9378         int r,or,er;
9379         r=current.regmap[hr];
9380         if(r>=0) {
9381           if(r!=regmap_pre[i][hr]) {
9382             // TODO: delay slot (?)
9383             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9384             if(or<0||(r&63)>=TEMPREG){
9385               regs[i].regmap_entry[hr]=-1;
9386             }
9387             else
9388             {
9389               // Just move it to a different register
9390               regs[i].regmap_entry[hr]=r;
9391               // If it was dirty before, it's still dirty
9392               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9393             }
9394           }
9395           else
9396           {
9397             // Unneeded
9398             if(r==0){
9399               regs[i].regmap_entry[hr]=0;
9400             }
9401             else
9402             if(r<64){
9403               if((current.u>>r)&1) {
9404                 regs[i].regmap_entry[hr]=-1;
9405                 //regs[i].regmap[hr]=-1;
9406                 current.regmap[hr]=-1;
9407               }else
9408                 regs[i].regmap_entry[hr]=r;
9409             }
9410             else {
9411               if((current.uu>>(r&63))&1) {
9412                 regs[i].regmap_entry[hr]=-1;
9413                 //regs[i].regmap[hr]=-1;
9414                 current.regmap[hr]=-1;
9415               }else
9416                 regs[i].regmap_entry[hr]=r;
9417             }
9418           }
9419         } else {
9420           // Branches expect CCREG to be allocated at the target
9421           if(regmap_pre[i][hr]==CCREG) 
9422             regs[i].regmap_entry[hr]=CCREG;
9423           else
9424             regs[i].regmap_entry[hr]=-1;
9425         }
9426       }
9427       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9428     }
9429     /* Branch post-alloc */
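    // branch_regs[i-1] holds the register state used on the taken path of the
    // branch at i-1; for "likely" branches the delay slot is allocated here as
    // well, since it only executes when the branch is taken.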
9430     if(i>0)
9431     {
9432       current.was32=current.is32;
9433       current.wasdirty=current.dirty;
9434       switch(itype[i-1]) {
9435         case UJUMP:
9436           memcpy(&branch_regs[i-1],&current,sizeof(current));
9437           branch_regs[i-1].isconst=0;
9438           branch_regs[i-1].wasconst=0;
9439           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9440           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9441           alloc_cc(&branch_regs[i-1],i-1);
9442           dirty_reg(&branch_regs[i-1],CCREG);
9443           if(rt1[i-1]==31) { // JAL
9444             alloc_reg(&branch_regs[i-1],i-1,31);
9445             dirty_reg(&branch_regs[i-1],31);
9446             branch_regs[i-1].is32|=1LL<<31;
9447           }
9448           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9449           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9450           break;
9451         case RJUMP:
9452           memcpy(&branch_regs[i-1],&current,sizeof(current));
9453           branch_regs[i-1].isconst=0;
9454           branch_regs[i-1].wasconst=0;
9455           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9456           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9457           alloc_cc(&branch_regs[i-1],i-1);
9458           dirty_reg(&branch_regs[i-1],CCREG);
9459           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9460           if(rt1[i-1]!=0) { // JALR
9461             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9462             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9463             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9464           }
9465           #ifdef USE_MINI_HT
9466           if(rs1[i-1]==31) { // JR $31 (return)
9467             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9468             #ifndef HOST_IMM_ADDR32
9469             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9470             #endif
9471           }
9472           #endif
9473           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9474           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9475           break;
9476         case CJUMP:
9477           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9478           {
9479             alloc_cc(&current,i-1);
9480             dirty_reg(&current,CCREG);
9481             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9482                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9483               // The delay slot overwrote one of our conditions
9484               // Delay slot goes after the test (in order)
9485               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9486               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9487               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9488               current.u|=1;
9489               current.uu|=1;
9490               delayslot_alloc(&current,i);
9491               current.isconst=0;
9492             }
9493             else
9494             {
9495               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9496               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9497               // Alloc the branch condition registers
9498               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9499               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9500               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9501               {
9502                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9503                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9504               }
9505             }
9506             memcpy(&branch_regs[i-1],&current,sizeof(current));
9507             branch_regs[i-1].isconst=0;
9508             branch_regs[i-1].wasconst=0;
9509             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9510             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9511           }
9512           else
9513           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9514           {
9515             alloc_cc(&current,i-1);
9516             dirty_reg(&current,CCREG);
9517             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9518               // The delay slot overwrote the branch condition
9519               // Delay slot goes after the test (in order)
9520               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9521               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9522               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9523               current.u|=1;
9524               current.uu|=1;
9525               delayslot_alloc(&current,i);
9526               current.isconst=0;
9527             }
9528             else
9529             {
9530               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9531               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9532               // Alloc the branch condition register
9533               alloc_reg(&current,i-1,rs1[i-1]);
9534               if(!(current.is32>>rs1[i-1]&1))
9535               {
9536                 alloc_reg64(&current,i-1,rs1[i-1]);
9537               }
9538             }
9539             memcpy(&branch_regs[i-1],&current,sizeof(current));
9540             branch_regs[i-1].isconst=0;
9541             branch_regs[i-1].wasconst=0;
9542             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9543             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9544           }
9545           else
9546           // Alloc the delay slot in case the branch is taken
9547           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9548           {
9549             memcpy(&branch_regs[i-1],&current,sizeof(current));
9550             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9551             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9552             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9553             alloc_cc(&branch_regs[i-1],i);
9554             dirty_reg(&branch_regs[i-1],CCREG);
9555             delayslot_alloc(&branch_regs[i-1],i);
9556             branch_regs[i-1].isconst=0;
9557             alloc_reg(&current,i,CCREG); // Not taken path
9558             dirty_reg(&current,CCREG);
9559             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9560           }
9561           else
9562           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9563           {
9564             memcpy(&branch_regs[i-1],&current,sizeof(current));
9565             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9566             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9567             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9568             alloc_cc(&branch_regs[i-1],i);
9569             dirty_reg(&branch_regs[i-1],CCREG);
9570             delayslot_alloc(&branch_regs[i-1],i);
9571             branch_regs[i-1].isconst=0;
9572             alloc_reg(&current,i,CCREG); // Not taken path
9573             dirty_reg(&current,CCREG);
9574             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9575           }
9576           break;
9577         case SJUMP:
9578           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9579           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9580           {
9581             alloc_cc(&current,i-1);
9582             dirty_reg(&current,CCREG);
9583             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9584               // The delay slot overwrote the branch condition
9585               // Delay slot goes after the test (in order)
9586               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9587               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9588               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9589               current.u|=1;
9590               current.uu|=1;
9591               delayslot_alloc(&current,i);
9592               current.isconst=0;
9593             }
9594             else
9595             {
9596               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9597               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9598               // Alloc the branch condition register
9599               alloc_reg(&current,i-1,rs1[i-1]);
9600               if(!(current.is32>>rs1[i-1]&1))
9601               {
9602                 alloc_reg64(&current,i-1,rs1[i-1]);
9603               }
9604             }
9605             memcpy(&branch_regs[i-1],&current,sizeof(current));
9606             branch_regs[i-1].isconst=0;
9607             branch_regs[i-1].wasconst=0;
9608             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9609             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9610           }
9611           else
9612           // Alloc the delay slot in case the branch is taken
9613           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9614           {
9615             memcpy(&branch_regs[i-1],&current,sizeof(current));
9616             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9617             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9618             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9619             alloc_cc(&branch_regs[i-1],i);
9620             dirty_reg(&branch_regs[i-1],CCREG);
9621             delayslot_alloc(&branch_regs[i-1],i);
9622             branch_regs[i-1].isconst=0;
9623             alloc_reg(&current,i,CCREG); // Not taken path
9624             dirty_reg(&current,CCREG);
9625             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9626           }
9627           // FIXME: BLTZAL/BGEZAL
9628           if(opcode2[i-1]&0x10) { // BxxZAL
9629             alloc_reg(&branch_regs[i-1],i-1,31);
9630             dirty_reg(&branch_regs[i-1],31);
9631             branch_regs[i-1].is32|=1LL<<31;
9632           }
9633           break;
9634         case FJUMP:
9635           if(likely[i-1]==0) // BC1F/BC1T
9636           {
9637             alloc_cc(&current,i-1);
9638             dirty_reg(&current,CCREG);
9639             if(itype[i]==FCOMP) {
9640               // The delay slot overwrote the branch condition
9641               // Delay slot goes after the test (in order)
9642               delayslot_alloc(&current,i);
9643               current.isconst=0;
9644             }
9645             else
9646             {
9647               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9648               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9649               // Alloc the branch condition register
9650               alloc_reg(&current,i-1,FSREG);
9651             }
9652             memcpy(&branch_regs[i-1],&current,sizeof(current));
9653             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9654           }
9655           else // BC1FL/BC1TL
9656           {
9657             // Alloc the delay slot in case the branch is taken
9658             memcpy(&branch_regs[i-1],&current,sizeof(current));
9659             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9660             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9661             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9662             alloc_cc(&branch_regs[i-1],i);
9663             dirty_reg(&branch_regs[i-1],CCREG);
9664             delayslot_alloc(&branch_regs[i-1],i);
9665             branch_regs[i-1].isconst=0;
9666             alloc_reg(&current,i,CCREG); // Not taken path
9667             dirty_reg(&current,CCREG);
9668             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9669           }
9670           break;
9671       }
9672
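      // (source>>16)==0x1000 matches "beq $zero,$zero,offset", an unconditional
      // branch encoded as BEQ, so it is treated like UJUMP/RJUMP here.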
9673       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9674       {
9675         if(rt1[i-1]==31) // JAL/JALR
9676         {
9677           // Subroutine call will return here, don't alloc any registers
9678           current.is32=1;
9679           current.dirty=0;
9680           clear_all_regs(current.regmap);
9681           alloc_reg(&current,i,CCREG);
9682           dirty_reg(&current,CCREG);
9683         }
9684         else if(i+1<slen)
9685         {
9686           // Internal branch will jump here, match registers to the branch source
9687           current.is32=0x3FFFFFFFFLL;
9688           current.dirty=0;
9689           clear_all_regs(current.regmap);
9690           alloc_reg(&current,i,CCREG);
9691           dirty_reg(&current,CCREG);
9692           for(j=i-1;j>=0;j--)
9693           {
9694             if(ba[j]==start+i*4+4) {
9695               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9696               current.is32=branch_regs[j].is32;
9697               current.dirty=branch_regs[j].dirty;
9698               break;
9699             }
9700           }
9701           while(j>=0) {
9702             if(ba[j]==start+i*4+4) {
9703               for(hr=0;hr<HOST_REGS;hr++) {
9704                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9705                   current.regmap[hr]=-1;
9706                 }
9707                 current.is32&=branch_regs[j].is32;
9708                 current.dirty&=branch_regs[j].dirty;
9709               }
9710             }
9711             j--;
9712           }
9713         }
9714       }
9715     }
9716
9717     // Count cycles in between branches
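    // ccadj[i] records the emulated cycles accumulated since the previous
    // branch; cc is reset after branches and at syscalls below, and (under
    // PCSX) stores and coprocessor loads/stores are charged extra cycles.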
9718     ccadj[i]=cc;
9719     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9720     {
9721       cc=0;
9722     }
9723 #ifdef PCSX
9724     else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
9725     {
9726       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9727     }
9728     else if(itype[i]==C2LS)
9729     {
9730       cc+=4;
9731     }
9732 #endif
9733     else
9734     {
9735       cc++;
9736     }
9737
9738     flush_dirty_uppers(&current);
9739     if(!is_ds[i]) {
9740       regs[i].is32=current.is32;
9741       regs[i].dirty=current.dirty;
9742       regs[i].isconst=current.isconst;
9743       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9744     }
9745     for(hr=0;hr<HOST_REGS;hr++) {
9746       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9747         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9748           regs[i].wasconst&=~(1<<hr);
9749         }
9750       }
9751     }
9752     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9753   }
9754   
9755   /* Pass 4 - Cull unused host registers */
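  // This pass walks the block backwards.  'nr' is a bitmap over host registers:
  // a set bit means the value currently mapped to that host register is still
  // read by a later instruction (or by an internal branch target).  Host
  // registers whose bit stays clear have their mappings removed below so that
  // later passes can reuse them.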
9756   
9757   uint64_t nr=0;
9758   
9759   for (i=slen-1;i>=0;i--)
9760   {
9761     int hr;
9762     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9763     {
9764       if(ba[i]<start || ba[i]>=(start+slen*4))
9765       {
9766         // Branch out of this block, don't need anything
9767         nr=0;
9768       }
9769       else
9770       {
9771         // Internal branch
9772         // Need whatever matches the target
9773         nr=0;
9774         int t=(ba[i]-start)>>2;
9775         for(hr=0;hr<HOST_REGS;hr++)
9776         {
9777           if(regs[i].regmap_entry[hr]>=0) {
9778             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9779           }
9780         }
9781       }
9782       // Conditional branch may need registers for following instructions
9783       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9784       {
9785         if(i<slen-2) {
9786           nr|=needed_reg[i+2];
9787           for(hr=0;hr<HOST_REGS;hr++)
9788           {
9789             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9790             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9791           }
9792         }
9793       }
9794       // Don't need stuff which is overwritten
9795       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9796       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9797       // Merge in delay slot
9798       for(hr=0;hr<HOST_REGS;hr++)
9799       {
9800         if(!likely[i]) {
9801           // These are overwritten unless the branch is "likely"
9802           // and the delay slot is nullified if not taken
9803           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9804           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9805         }
9806         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9807         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9808         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9809         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9810         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9811         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9812         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9813         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9814         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9815           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9816           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9817         }
9818         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9819           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9820           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9821         }
9822         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9823           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9824           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9825         }
9826       }
9827     }
9828     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
9829     {
9830       // SYSCALL instruction (software interrupt)
9831       nr=0;
9832     }
9833     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9834     {
9835       // ERET instruction (return from interrupt)
9836       nr=0;
9837     }
9838     else // Non-branch
9839     {
9840       if(i<slen-1) {
9841         for(hr=0;hr<HOST_REGS;hr++) {
9842           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9843           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9844           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9845           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9846         }
9847       }
9848     }
9849     for(hr=0;hr<HOST_REGS;hr++)
9850     {
9851       // Overwritten registers are not needed
9852       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9853       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9854       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9855       // Source registers are needed
9856       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9857       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9858       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9859       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9860       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9861       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9862       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9863       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9864       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9865         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9866         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9867       }
9868       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9869         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9870         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9871       }
9872       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9873         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9874         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9875       }
9876       // Don't store a register immediately after writing it,
9877       // may prevent dual-issue.
9878       // But do so if this is a branch target, otherwise we
9879       // might have to load the register before the branch.
9880       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9881         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9882            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9883           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9884           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9885         }
9886         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9887            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9888           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9889           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9890         }
9891       }
9892     }
9893     // Cycle count is needed at branches.  Assume it is needed at the target too.
9894     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9895       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9896       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9897     }
9898     // Save it
9899     needed_reg[i]=nr;
9900     
9901     // Deallocate unneeded registers
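    // A mapping is only cleared if the host register does not hold one of this
    // instruction's (or, for branches, the delay slot's) sources or
    // destinations, and is not one of the special registers (CCREG, PTEMP,
    // FTEMP, INVCP, RHASH/RHTBL, ...) that the generated code still relies on.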
9902     for(hr=0;hr<HOST_REGS;hr++)
9903     {
9904       if(!((nr>>hr)&1)) {
9905         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9906         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9907            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9908            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9909         {
9910           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9911           {
9912             if(likely[i]) {
9913               regs[i].regmap[hr]=-1;
9914               regs[i].isconst&=~(1<<hr);
9915               if(i<slen-2) {
9916                 regmap_pre[i+2][hr]=-1;
9917                 regs[i+2].wasconst&=~(1<<hr);
9918               }
9919             }
9920           }
9921         }
9922         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9923         {
9924           int d1=0,d2=0,map=0,temp=0;
9925           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9926           {
9927             d1=dep1[i+1];
9928             d2=dep2[i+1];
9929           }
9930           if(using_tlb) {
9931             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9932                itype[i+1]==STORE || itype[i+1]==STORELR ||
9933                itype[i+1]==C1LS || itype[i+1]==C2LS)
9934             map=TLREG;
9935           } else
9936           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9937              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9938             map=INVCP;
9939           }
9940           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9941              itype[i+1]==C1LS || itype[i+1]==C2LS)
9942             temp=FTEMP;
9943           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9944              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9945              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9946              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9947              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9948              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9949              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9950              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9951              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9952              regs[i].regmap[hr]!=map )
9953           {
9954             regs[i].regmap[hr]=-1;
9955             regs[i].isconst&=~(1<<hr);
9956             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9957                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9958                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9959                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9960                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9961                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9962                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9963                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9964                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9965                branch_regs[i].regmap[hr]!=map)
9966             {
9967               branch_regs[i].regmap[hr]=-1;
9968               branch_regs[i].regmap_entry[hr]=-1;
9969               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9970               {
9971                 if(!likely[i]&&i<slen-2) {
9972                   regmap_pre[i+2][hr]=-1;
9973                   regs[i+2].wasconst&=~(1<<hr);
9974                 }
9975               }
9976             }
9977           }
9978         }
9979         else
9980         {
9981           // Non-branch
9982           if(i>0)
9983           {
9984             int d1=0,d2=0,map=-1,temp=-1;
9985             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9986             {
9987               d1=dep1[i];
9988               d2=dep2[i];
9989             }
9990             if(using_tlb) {
9991               if(itype[i]==LOAD || itype[i]==LOADLR ||
9992                  itype[i]==STORE || itype[i]==STORELR ||
9993                  itype[i]==C1LS || itype[i]==C2LS)
9994               map=TLREG;
9995             } else if(itype[i]==STORE || itype[i]==STORELR ||
9996                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9997               map=INVCP;
9998             }
9999             if(itype[i]==LOADLR || itype[i]==STORELR ||
10000                itype[i]==C1LS || itype[i]==C2LS)
10001               temp=FTEMP;
10002             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10003                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
10004                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10005                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
10006                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
10007                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
10008             {
10009               if(i<slen-1&&!is_ds[i]) {
10010                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
10011                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
10012                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
10013                 {
10014                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
10015                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
10016                 }
10017                 regmap_pre[i+1][hr]=-1;
10018                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
10019                 regs[i+1].wasconst&=~(1<<hr);
10020               }
10021               regs[i].regmap[hr]=-1;
10022               regs[i].isconst&=~(1<<hr);
10023             }
10024           }
10025         }
10026       }
10027     }
10028   }
10029   
10030   /* Pass 5 - Pre-allocate registers */
10031   
10032   // If a register is allocated during a loop, try to allocate it for the
10033   // entire loop, if possible.  This avoids loading/storing registers
10034   // inside the loop.
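  // Roughly: when a backward branch at instruction i targets t (a loop head)
  // and host register hr holds the same MIPS register in a compatible way at
  // both ends, the allocation is extended over every instruction in [t, i]
  // (f_regmap[] holds the candidate mapping), so the value stays in a host
  // register for the whole loop body.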
10035   
10036   signed char f_regmap[HOST_REGS];
10037   clear_all_regs(f_regmap);
10038   for(i=0;i<slen-1;i++)
10039   {
10040     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10041     {
10042       if(ba[i]>=start && ba[i]<(start+i*4)) 
10043       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
10044       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
10045       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
10046       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
10047       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
10048       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
10049       {
10050         int t=(ba[i]-start)>>2;
10051         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
10052         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
10053         for(hr=0;hr<HOST_REGS;hr++)
10054         {
10055           if(regs[i].regmap[hr]>64) {
10056             if(!((regs[i].dirty>>hr)&1))
10057               f_regmap[hr]=regs[i].regmap[hr];
10058             else f_regmap[hr]=-1;
10059           }
10060           else if(regs[i].regmap[hr]>=0) {
10061             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10062               // dealloc old register
10063               int n;
10064               for(n=0;n<HOST_REGS;n++)
10065               {
10066                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10067               }
10068               // and alloc new one
10069               f_regmap[hr]=regs[i].regmap[hr];
10070             }
10071           }
10072           if(branch_regs[i].regmap[hr]>64) {
10073             if(!((branch_regs[i].dirty>>hr)&1))
10074               f_regmap[hr]=branch_regs[i].regmap[hr];
10075             else f_regmap[hr]=-1;
10076           }
10077           else if(branch_regs[i].regmap[hr]>=0) {
10078             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
10079               // dealloc old register
10080               int n;
10081               for(n=0;n<HOST_REGS;n++)
10082               {
10083                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
10084               }
10085               // and alloc new one
10086               f_regmap[hr]=branch_regs[i].regmap[hr];
10087             }
10088           }
10089           if(ooo[i]) {
10090             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) 
10091               f_regmap[hr]=branch_regs[i].regmap[hr];
10092           }else{
10093             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) 
10094               f_regmap[hr]=branch_regs[i].regmap[hr];
10095           }
10096           // Avoid dirty->clean transition
10097           #ifdef DESTRUCTIVE_WRITEBACK
10098           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
10099           #endif
10100           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
10101           // case above; however, it's always a good idea.  We can't hoist the
10102           // load if the register was already allocated, so there's no point
10103           // wasting time analyzing most of these cases.  It only "succeeds"
10104           // when the mapping was different and the load can be replaced with
10105           // a mov, which is of negligible benefit.  So such cases are
10106           // skipped below.
10107           if(f_regmap[hr]>0) {
10108             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
10109               int r=f_regmap[hr];
10110               for(j=t;j<=i;j++)
10111               {
10112                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10113                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
10114                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
10115                 if(r>63) {
10116                   // NB This can exclude the case where the upper-half
10117                   // register is lower numbered than the lower-half
10118                   // register.  Not sure if it's worth fixing...
10119                   if(get_reg(regs[j].regmap,r&63)<0) break;
10120                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
10121                   if(regs[j].is32&(1LL<<(r&63))) break;
10122                 }
10123                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
10124                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10125                   int k;
10126                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
10127                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
10128                     if(r>63) {
10129                       if(get_reg(regs[i].regmap,r&63)<0) break;
10130                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
10131                     }
10132                     k=i;
10133                     while(k>1&&regs[k-1].regmap[hr]==-1) {
10134                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10135                         //printf("no free regs for store %x\n",start+(k-1)*4);
10136                         break;
10137                       }
10138                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
10139                         //printf("no-match due to different register\n");
10140                         break;
10141                       }
10142                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
10143                         //printf("no-match due to branch\n");
10144                         break;
10145                       }
10146                       // call/ret fast path assumes no registers allocated
10147                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
10148                         break;
10149                       }
10150                       if(r>63) {
10151                         // NB This can exclude the case where the upper-half
10152                         // register is lower numbered than the lower-half
10153                         // register.  Not sure if it's worth fixing...
10154                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
10155                         if(regs[k-1].is32&(1LL<<(r&63))) break;
10156                       }
10157                       k--;
10158                     }
10159                     if(i<slen-1) {
10160                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
10161                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
10162                         //printf("bad match after branch\n");
10163                         break;
10164                       }
10165                     }
10166                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
10167                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
10168                       while(k<i) {
10169                         regs[k].regmap_entry[hr]=f_regmap[hr];
10170                         regs[k].regmap[hr]=f_regmap[hr];
10171                         regmap_pre[k+1][hr]=f_regmap[hr];
10172                         regs[k].wasdirty&=~(1<<hr);
10173                         regs[k].dirty&=~(1<<hr);
10174                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
10175                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
10176                         regs[k].wasconst&=~(1<<hr);
10177                         regs[k].isconst&=~(1<<hr);
10178                         k++;
10179                       }
10180                     }
10181                     else {
10182                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
10183                       break;
10184                     }
10185                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
10186                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
10187                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
10188                       regs[i].regmap_entry[hr]=f_regmap[hr];
10189                       regs[i].regmap[hr]=f_regmap[hr];
10190                       regs[i].wasdirty&=~(1<<hr);
10191                       regs[i].dirty&=~(1<<hr);
10192                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
10193                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
10194                       regs[i].wasconst&=~(1<<hr);
10195                       regs[i].isconst&=~(1<<hr);
10196                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
10197                       branch_regs[i].wasdirty&=~(1<<hr);
10198                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
10199                       branch_regs[i].regmap[hr]=f_regmap[hr];
10200                       branch_regs[i].dirty&=~(1<<hr);
10201                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
10202                       branch_regs[i].wasconst&=~(1<<hr);
10203                       branch_regs[i].isconst&=~(1<<hr);
10204                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
10205                         regmap_pre[i+2][hr]=f_regmap[hr];
10206                         regs[i+2].wasdirty&=~(1<<hr);
10207                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
10208                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
10209                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
10210                       }
10211                     }
10212                   }
10213                   for(k=t;k<j;k++) {
10214                     // Alloc register clean at beginning of loop,
10215                     // but may dirty it in pass 6
10216                     regs[k].regmap_entry[hr]=f_regmap[hr];
10217                     regs[k].regmap[hr]=f_regmap[hr];
10218                     regs[k].dirty&=~(1<<hr);
10219                     regs[k].wasconst&=~(1<<hr);
10220                     regs[k].isconst&=~(1<<hr);
10221                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
10222                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
10223                       branch_regs[k].regmap[hr]=f_regmap[hr];
10224                       branch_regs[k].dirty&=~(1<<hr);
10225                       branch_regs[k].wasconst&=~(1<<hr);
10226                       branch_regs[k].isconst&=~(1<<hr);
10227                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
10228                         regmap_pre[k+2][hr]=f_regmap[hr];
10229                         regs[k+2].wasdirty&=~(1<<hr);
10230                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
10231                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
10232                       }
10233                     }
10234                     else
10235                     {
10236                       regmap_pre[k+1][hr]=f_regmap[hr];
10237                       regs[k+1].wasdirty&=~(1<<hr);
10238                     }
10239                   }
10240                   if(regs[j].regmap[hr]==f_regmap[hr])
10241                     regs[j].regmap_entry[hr]=f_regmap[hr];
10242                   break;
10243                 }
10244                 if(j==i) break;
10245                 if(regs[j].regmap[hr]>=0)
10246                   break;
10247                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10248                   //printf("no-match due to different register\n");
10249                   break;
10250                 }
10251                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10252                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10253                   break;
10254                 }
10255                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10256                 {
10257                   // Stop on unconditional branch
10258                   break;
10259                 }
10260                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10261                 {
10262                   if(ooo[j]) {
10263                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) 
10264                       break;
10265                   }else{
10266                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) 
10267                       break;
10268                   }
10269                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10270                     //printf("no-match due to different register (branch)\n");
10271                     break;
10272                   }
10273                 }
10274                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10275                   //printf("No free regs for store %x\n",start+j*4);
10276                   break;
10277                 }
10278                 if(f_regmap[hr]>=64) {
10279                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10280                     break;
10281                   }
10282                   else
10283                   {
10284                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10285                       break;
10286                     }
10287                   }
10288                 }
10289               }
10290             }
10291           }
10292         }
10293       }
10294     }else{
10295       // Non branch or undetermined branch target
10296       for(hr=0;hr<HOST_REGS;hr++)
10297       {
10298         if(hr!=EXCLUDE_REG) {
10299           if(regs[i].regmap[hr]>64) {
10300             if(!((regs[i].dirty>>hr)&1))
10301               f_regmap[hr]=regs[i].regmap[hr];
10302           }
10303           else if(regs[i].regmap[hr]>=0) {
10304             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10305               // dealloc old register
10306               int n;
10307               for(n=0;n<HOST_REGS;n++)
10308               {
10309                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10310               }
10311               // and alloc new one
10312               f_regmap[hr]=regs[i].regmap[hr];
10313             }
10314           }
10315         }
10316       }
10317       // Try to restore cycle count at branch targets
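      // If CCREG is found allocated to HOST_CCREG further down the block, keep
      // it allocated from this branch target up to that point so the cycle
      // count does not have to be reloaded on entry.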
10318       if(bt[i]) {
10319         for(j=i;j<slen-1;j++) {
10320           if(regs[j].regmap[HOST_CCREG]!=-1) break;
10321           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10322             //printf("no free regs for store %x\n",start+j*4);
10323             break;
10324           }
10325         }
10326         if(regs[j].regmap[HOST_CCREG]==CCREG) {
10327           int k=i;
10328           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10329           while(k<j) {
10330             regs[k].regmap_entry[HOST_CCREG]=CCREG;
10331             regs[k].regmap[HOST_CCREG]=CCREG;
10332             regmap_pre[k+1][HOST_CCREG]=CCREG;
10333             regs[k+1].wasdirty|=1<<HOST_CCREG;
10334             regs[k].dirty|=1<<HOST_CCREG;
10335             regs[k].wasconst&=~(1<<HOST_CCREG);
10336             regs[k].isconst&=~(1<<HOST_CCREG);
10337             k++;
10338           }
10339           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
10340         }
10341         // Work backwards from the branch target
10342         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10343         {
10344           //printf("Extend backwards\n");
10345           int k;
10346           k=i;
10347           while(regs[k-1].regmap[HOST_CCREG]==-1) {
10348             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10349               //printf("no free regs for store %x\n",start+(k-1)*4);
10350               break;
10351             }
10352             k--;
10353           }
10354           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10355             //printf("Extend CC, %x ->\n",start+k*4);
10356             while(k<=i) {
10357               regs[k].regmap_entry[HOST_CCREG]=CCREG;
10358               regs[k].regmap[HOST_CCREG]=CCREG;
10359               regmap_pre[k+1][HOST_CCREG]=CCREG;
10360               regs[k+1].wasdirty|=1<<HOST_CCREG;
10361               regs[k].dirty|=1<<HOST_CCREG;
10362               regs[k].wasconst&=~(1<<HOST_CCREG);
10363               regs[k].isconst&=~(1<<HOST_CCREG);
10364               k++;
10365             }
10366           }
10367           else {
10368             //printf("Fail Extend CC, %x ->\n",start+k*4);
10369           }
10370         }
10371       }
10372       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10373          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10374          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10375          itype[i]!=FCONV&&itype[i]!=FCOMP)
10376       {
10377         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10378       }
10379     }
10380   }
10381   
10382   // Cache memory offset or tlb map pointer if a register is available
10383   #ifndef HOST_IMM_ADDR32
10384   #ifndef RAM_OFFSET
10385   if(using_tlb)
10386   #endif
10387   {
10388     int earliest_available[HOST_REGS];
10389     int loop_start[HOST_REGS];
10390     int score[HOST_REGS];
10391     int end[HOST_REGS];
10392     int reg=using_tlb?MMREG:ROREG;
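    // Heuristic: for each host register, 'score' counts how many nearby
    // load/store instructions could benefit from keeping the RAM offset
    // (ROREG) or TLB map pointer (MMREG) cached in it, 'earliest_available'
    // is the first instruction at which the host register is free, and
    // 'loop_start'/'end' bound the range over which it would be allocated.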
10393
10394     // Init
10395     for(hr=0;hr<HOST_REGS;hr++) {
10396       score[hr]=0;earliest_available[hr]=0;
10397       loop_start[hr]=MAXBLOCK;
10398     }
10399     for(i=0;i<slen-1;i++)
10400     {
10401       // Can't do anything if no registers are available
10402       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
10403         for(hr=0;hr<HOST_REGS;hr++) {
10404           score[hr]=0;earliest_available[hr]=i+1;
10405           loop_start[hr]=MAXBLOCK;
10406         }
10407       }
10408       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10409         if(!ooo[i]) {
10410           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
10411             for(hr=0;hr<HOST_REGS;hr++) {
10412               score[hr]=0;earliest_available[hr]=i+1;
10413               loop_start[hr]=MAXBLOCK;
10414             }
10415           }
10416         }else{
10417           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
10418             for(hr=0;hr<HOST_REGS;hr++) {
10419               score[hr]=0;earliest_available[hr]=i+1;
10420               loop_start[hr]=MAXBLOCK;
10421             }
10422           }
10423         }
10424       }
10425       // Mark unavailable registers
10426       for(hr=0;hr<HOST_REGS;hr++) {
10427         if(regs[i].regmap[hr]>=0) {
10428           score[hr]=0;earliest_available[hr]=i+1;
10429           loop_start[hr]=MAXBLOCK;
10430         }
10431         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10432           if(branch_regs[i].regmap[hr]>=0) {
10433             score[hr]=0;earliest_available[hr]=i+2;
10434             loop_start[hr]=MAXBLOCK;
10435           }
10436         }
10437       }
10438       // No register allocations after unconditional jumps
10439       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10440       {
10441         for(hr=0;hr<HOST_REGS;hr++) {
10442           score[hr]=0;earliest_available[hr]=i+2;
10443           loop_start[hr]=MAXBLOCK;
10444         }
10445         i++; // Skip delay slot too
10446         //printf("skip delay slot: %x\n",start+i*4);
10447       }
10448       else
10449       // Possible match
10450       if(itype[i]==LOAD||itype[i]==LOADLR||
10451          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
10452         for(hr=0;hr<HOST_REGS;hr++) {
10453           if(hr!=EXCLUDE_REG) {
10454             end[hr]=i-1;
10455             for(j=i;j<slen-1;j++) {
10456               if(regs[j].regmap[hr]>=0) break;
10457               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10458                 if(branch_regs[j].regmap[hr]>=0) break;
10459                 if(ooo[j]) {
10460                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
10461                 }else{
10462                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
10463                 }
10464               }
10465               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
10466               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10467                 int t=(ba[j]-start)>>2;
10468                 if(t<j&&t>=earliest_available[hr]) {
10469                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
10470                     // Score a point for hoisting loop invariant
10471                     if(t<loop_start[hr]) loop_start[hr]=t;
10472                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
10473                     score[hr]++;
10474                     end[hr]=j;
10475                   }
10476                 }
10477                 else if(t<j) {
10478                   if(regs[t].regmap[hr]==reg) {
10479                     // Score a point if the branch target matches this register
10480                     score[hr]++;
10481                     end[hr]=j;
10482                   }
10483                 }
10484                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
10485                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
10486                   score[hr]++;
10487                   end[hr]=j;
10488                 }
10489               }
10490               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10491               {
10492                 // Stop on unconditional branch
10493                 break;
10494               }
10495               else
10496               if(itype[j]==LOAD||itype[j]==LOADLR||
10497                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
10498                 score[hr]++;
10499                 end[hr]=j;
10500               }
10501             }
10502           }
10503         }
10504         // Find highest score and allocate that register
10505         int maxscore=0;
10506         for(hr=0;hr<HOST_REGS;hr++) {
10507           if(hr!=EXCLUDE_REG) {
10508             if(score[hr]>score[maxscore]) {
10509               maxscore=hr;
10510               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
10511             }
10512           }
10513         }
10514         if(score[maxscore]>1)
10515         {
10516           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
10517           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
10518             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
10519             assert(regs[j].regmap[maxscore]<0);
10520             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
10521             regs[j].regmap[maxscore]=reg;
10522             regs[j].dirty&=~(1<<maxscore);
10523             regs[j].wasconst&=~(1<<maxscore);
10524             regs[j].isconst&=~(1<<maxscore);
10525             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10526               branch_regs[j].regmap[maxscore]=reg;
10527               branch_regs[j].wasdirty&=~(1<<maxscore);
10528               branch_regs[j].dirty&=~(1<<maxscore);
10529               branch_regs[j].wasconst&=~(1<<maxscore);
10530               branch_regs[j].isconst&=~(1<<maxscore);
10531               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
10532                 regmap_pre[j+2][maxscore]=reg;
10533                 regs[j+2].wasdirty&=~(1<<maxscore);
10534               }
10535               // loop optimization (loop_preload)
10536               int t=(ba[j]-start)>>2;
10537               if(t==loop_start[maxscore]) {
10538                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
10539                   regs[t].regmap_entry[maxscore]=reg;
10540               }
10541             }
10542             else
10543             {
10544               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
10545                 regmap_pre[j+1][maxscore]=reg;
10546                 regs[j+1].wasdirty&=~(1<<maxscore);
10547               }
10548             }
10549           }
10550           i=j-1;
10551           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
10552           for(hr=0;hr<HOST_REGS;hr++) {
10553             score[hr]=0;earliest_available[hr]=i+i;
10554             loop_start[hr]=MAXBLOCK;
10555           }
10556         }
10557       }
10558     }
10559   }
10560   #endif
10561   
10562   // This allocates registers (if possible) one instruction prior
10563   // to use, which can avoid a load-use penalty on certain CPUs.
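  // For example, if instruction i+1 reads a register that is already assigned
  // a host register there, and that host register is unused at instruction i,
  // the mapping from i+1 is copied back into i so the value is loaded one
  // instruction before it is actually needed.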
10564   for(i=0;i<slen-1;i++)
10565   {
10566     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10567     {
10568       if(!bt[i+1])
10569       {
10570         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10571            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10572         {
10573           if(rs1[i+1]) {
10574             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10575             {
10576               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10577               {
10578                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10579                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10580                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10581                 regs[i].isconst&=~(1<<hr);
10582                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10583                 constmap[i][hr]=constmap[i+1][hr];
10584                 regs[i+1].wasdirty&=~(1<<hr);
10585                 regs[i].dirty&=~(1<<hr);
10586               }
10587             }
10588           }
10589           if(rs2[i+1]) {
10590             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10591             {
10592               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10593               {
10594                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10595                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10596                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10597                 regs[i].isconst&=~(1<<hr);
10598                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10599                 constmap[i][hr]=constmap[i+1][hr];
10600                 regs[i+1].wasdirty&=~(1<<hr);
10601                 regs[i].dirty&=~(1<<hr);
10602               }
10603             }
10604           }
10605           // Preload target address for load instruction (non-constant)
10606           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10607             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10608             {
10609               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10610               {
10611                 regs[i].regmap[hr]=rs1[i+1];
10612                 regmap_pre[i+1][hr]=rs1[i+1];
10613                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10614                 regs[i].isconst&=~(1<<hr);
10615                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10616                 constmap[i][hr]=constmap[i+1][hr];
10617                 regs[i+1].wasdirty&=~(1<<hr);
10618                 regs[i].dirty&=~(1<<hr);
10619               }
10620             }
10621           }
10622           // Load source into target register 
10623           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10624             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10625             {
10626               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10627               {
10628                 regs[i].regmap[hr]=rs1[i+1];
10629                 regmap_pre[i+1][hr]=rs1[i+1];
10630                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10631                 regs[i].isconst&=~(1<<hr);
10632                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10633                 constmap[i][hr]=constmap[i+1][hr];
10634                 regs[i+1].wasdirty&=~(1<<hr);
10635                 regs[i].dirty&=~(1<<hr);
10636               }
10637             }
10638           }
10639           // Preload map address
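          // (TLREG caches the memory_map lookup used to translate the access;
          //  when the address register held a constant, that value can be
          //  generated one slot early into the MGEN1/MGEN2 scratch registers,
          //  as allocated below.)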
10640           #ifndef HOST_IMM_ADDR32
10641           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10642             hr=get_reg(regs[i+1].regmap,TLREG);
10643             if(hr>=0) {
10644               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10645               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10646                 int nr;
10647                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10648                 {
10649                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10650                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10651                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10652                   regs[i].isconst&=~(1<<hr);
10653                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10654                   constmap[i][hr]=constmap[i+1][hr];
10655                   regs[i+1].wasdirty&=~(1<<hr);
10656                   regs[i].dirty&=~(1<<hr);
10657                 }
10658                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10659                 {
10660                   // move it to another register
10661                   regs[i+1].regmap[hr]=-1;
10662                   regmap_pre[i+2][hr]=-1;
10663                   regs[i+1].regmap[nr]=TLREG;
10664                   regmap_pre[i+2][nr]=TLREG;
10665                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10666                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10667                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10668                   regs[i].isconst&=~(1<<nr);
10669                   regs[i+1].isconst&=~(1<<nr);
10670                   regs[i].dirty&=~(1<<nr);
10671                   regs[i+1].wasdirty&=~(1<<nr);
10672                   regs[i+1].dirty&=~(1<<nr);
10673                   regs[i+2].wasdirty&=~(1<<nr);
10674                 }
10675               }
10676             }
10677           }
10678           #endif
10679           // Address for store instruction (non-constant)
10680           if(itype[i+1]==STORE||itype[i+1]==STORELR
10681              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10682             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10683               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10684               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10685               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10686               assert(hr>=0);
10687               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10688               {
10689                 regs[i].regmap[hr]=rs1[i+1];
10690                 regmap_pre[i+1][hr]=rs1[i+1];
10691                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10692                 regs[i].isconst&=~(1<<hr);
10693                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10694                 constmap[i][hr]=constmap[i+1][hr];
10695                 regs[i+1].wasdirty&=~(1<<hr);
10696                 regs[i].dirty&=~(1<<hr);
10697               }
10698             }
10699           }
10700           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10701             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10702               int nr;
10703               hr=get_reg(regs[i+1].regmap,FTEMP);
10704               assert(hr>=0);
10705               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10706               {
10707                 regs[i].regmap[hr]=rs1[i+1];
10708                 regmap_pre[i+1][hr]=rs1[i+1];
10709                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10710                 regs[i].isconst&=~(1<<hr);
10711                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10712                 constmap[i][hr]=constmap[i+1][hr];
10713                 regs[i+1].wasdirty&=~(1<<hr);
10714                 regs[i].dirty&=~(1<<hr);
10715               }
10716               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10717               {
10718                 // move it to another register
10719                 regs[i+1].regmap[hr]=-1;
10720                 regmap_pre[i+2][hr]=-1;
10721                 regs[i+1].regmap[nr]=FTEMP;
10722                 regmap_pre[i+2][nr]=FTEMP;
10723                 regs[i].regmap[nr]=rs1[i+1];
10724                 regmap_pre[i+1][nr]=rs1[i+1];
10725                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10726                 regs[i].isconst&=~(1<<nr);
10727                 regs[i+1].isconst&=~(1<<nr);
10728                 regs[i].dirty&=~(1<<nr);
10729                 regs[i+1].wasdirty&=~(1<<nr);
10730                 regs[i+1].dirty&=~(1<<nr);
10731                 regs[i+2].wasdirty&=~(1<<nr);
10732               }
10733             }
10734           }
10735           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10736             if(itype[i+1]==LOAD) 
10737               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10738             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10739               hr=get_reg(regs[i+1].regmap,FTEMP);
10740             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10741               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10742               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10743             }
10744             if(hr>=0&&regs[i].regmap[hr]<0) {
10745               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10746               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10747                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10748                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10749                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10750                 regs[i].isconst&=~(1<<hr);
10751                 regs[i+1].wasdirty&=~(1<<hr);
10752                 regs[i].dirty&=~(1<<hr);
10753               }
10754             }
10755           }
10756         }
10757       }
10758     }
10759   }
10760   
10761   /* Pass 6 - Optimize clean/dirty state */
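  // clean_registers() works out which cached values genuinely need to be
  // written back at each point, so redundant register writebacks can be
  // dropped when the block is assembled.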
10762   clean_registers(0,slen-1,1);
10763   
10764   /* Pass 7 - Identify 32-bit registers */
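  // On the 64-bit target this backward scan builds requires_32bit[]: roughly,
  // the registers that must hold properly sign-extended 32-bit values on
  // entry to each instruction (used later to restrict block entry points).
  // Under FORCE32 everything is 32-bit, so only branch-target marking remains.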
10765 #ifndef FORCE32
10766   provisional_r32();
10767
10768   u_int r32=0;
10769   
10770   for (i=slen-1;i>=0;i--)
10771   {
10772     int hr;
10773     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10774     {
10775       if(ba[i]<start || ba[i]>=(start+slen*4))
10776       {
10777         // Branch out of this block, don't need anything
10778         r32=0;
10779       }
10780       else
10781       {
10782         // Internal branch
10783         // Need whatever matches the target
10784         // (and doesn't get overwritten by the delay slot instruction)
10785         r32=0;
10786         int t=(ba[i]-start)>>2;
10787         if(ba[i]>start+i*4) {
10788           // Forward branch
10789           if(!(requires_32bit[t]&~regs[i].was32))
10790             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10791         }else{
10792           // Backward branch
10793           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10794           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10795           if(!(pr32[t]&~regs[i].was32))
10796             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10797         }
10798       }
10799       // Conditional branch may need registers for following instructions
10800       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10801       {
10802         if(i<slen-2) {
10803           r32|=requires_32bit[i+2];
10804           r32&=regs[i].was32;
10805           // Mark this address as a branch target since it may be called
10806           // upon return from interrupt
10807           bt[i+2]=1;
10808         }
10809       }
10810       // Merge in delay slot
10811       if(!likely[i]) {
10812         // These are overwritten unless the branch is "likely"
10813         // and the delay slot is nullified if not taken
10814         r32&=~(1LL<<rt1[i+1]);
10815         r32&=~(1LL<<rt2[i+1]);
10816       }
10817       // Assume these are needed (delay slot)
10818       if(us1[i+1]>0)
10819       {
10820         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10821       }
10822       if(us2[i+1]>0)
10823       {
10824         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10825       }
10826       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10827       {
10828         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10829       }
10830       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10831       {
10832         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10833       }
10834     }
10835     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10836     {
10837       // SYSCALL instruction (software interrupt)
10838       r32=0;
10839     }
10840     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10841     {
10842       // ERET instruction (return from interrupt)
10843       r32=0;
10844     }
10845     // Registers written by this instruction no longer need to be 32-bit;
10846     // add this instruction's own 32-bit source requirements
10846     r32&=~(1LL<<rt1[i]);
10847     r32&=~(1LL<<rt2[i]);
10848     if(us1[i]>0)
10849     {
10850       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10851     }
10852     if(us2[i]>0)
10853     {
10854       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10855     }
10856     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10857     {
10858       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10859     }
10860     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10861     {
10862       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10863     }
10864     requires_32bit[i]=r32;
10865     
10866     // Dirty registers which are 32-bit require 32-bit input,
10867     // as they will be written back as 32-bit values
10868     for(hr=0;hr<HOST_REGS;hr++)
10869     {
10870       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10871         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10872           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10873           requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10874         }
10875       }
10876     }
10877     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10878   }
10879 #else
10880   for (i=slen-1;i>=0;i--)
10881   {
10882     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10883     {
10884       // Conditional branch
10885       if((source[i]>>16)!=0x1000&&i<slen-2) {
10886         // Mark this address as a branch target since it may be called
10887         // upon return from interrupt
10888         bt[i+2]=1;
10889       }
10890     }
10891   }
10892 #endif
10893
10894   if(itype[slen-1]==SPAN) {
10895     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10896   }
10897   
10898   /* Debug/disassembly */
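  // Only active when assem_debug is routed to printf: dumps the unneeded
  // register sets, pre/entry/exit register maps, dirty and constant state
  // for every instruction, alongside its disassembly.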
10899   if((void*)assem_debug==(void*)printf) 
10900   for(i=0;i<slen;i++)
10901   {
10902     printf("U:");
10903     int r;
10904     for(r=1;r<=CCREG;r++) {
10905       if((unneeded_reg[i]>>r)&1) {
10906         if(r==HIREG) printf(" HI");
10907         else if(r==LOREG) printf(" LO");
10908         else printf(" r%d",r);
10909       }
10910     }
10911 #ifndef FORCE32
10912     printf(" UU:");
10913     for(r=1;r<=CCREG;r++) {
10914       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10915         if(r==HIREG) printf(" HI");
10916         else if(r==LOREG) printf(" LO");
10917         else printf(" r%d",r);
10918       }
10919     }
10920     printf(" 32:");
10921     for(r=0;r<=CCREG;r++) {
10922       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10923       if((regs[i].was32>>r)&1) {
10924         if(r==CCREG) printf(" CC");
10925         else if(r==HIREG) printf(" HI");
10926         else if(r==LOREG) printf(" LO");
10927         else printf(" r%d",r);
10928       }
10929     }
10930 #endif
10931     printf("\n");
10932     #if defined(__i386__) || defined(__x86_64__)
10933     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10934     #endif
10935     #ifdef __arm__
10936     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10937     #endif
10938     printf("needs: ");
10939     if(needed_reg[i]&1) printf("eax ");
10940     if((needed_reg[i]>>1)&1) printf("ecx ");
10941     if((needed_reg[i]>>2)&1) printf("edx ");
10942     if((needed_reg[i]>>3)&1) printf("ebx ");
10943     if((needed_reg[i]>>5)&1) printf("ebp ");
10944     if((needed_reg[i]>>6)&1) printf("esi ");
10945     if((needed_reg[i]>>7)&1) printf("edi ");
10946     printf("r:");
10947     for(r=0;r<=CCREG;r++) {
10948       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10949       if((requires_32bit[i]>>r)&1) {
10950         if(r==CCREG) printf(" CC");
10951         else if(r==HIREG) printf(" HI");
10952         else if(r==LOREG) printf(" LO");
10953         else printf(" r%d",r);
10954       }
10955     }
10956     printf("\n");
10957     /*printf("pr:");
10958     for(r=0;r<=CCREG;r++) {
10959       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10960       if((pr32[i]>>r)&1) {
10961         if(r==CCREG) printf(" CC");
10962         else if(r==HIREG) printf(" HI");
10963         else if(r==LOREG) printf(" LO");
10964         else printf(" r%d",r);
10965       }
10966     }
10967     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10968     printf("\n");*/
10969     #if defined(__i386__) || defined(__x86_64__)
10970     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10971     printf("dirty: ");
10972     if(regs[i].wasdirty&1) printf("eax ");
10973     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10974     if((regs[i].wasdirty>>2)&1) printf("edx ");
10975     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10976     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10977     if((regs[i].wasdirty>>6)&1) printf("esi ");
10978     if((regs[i].wasdirty>>7)&1) printf("edi ");
10979     #endif
10980     #ifdef __arm__
10981     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10982     printf("dirty: ");
10983     if(regs[i].wasdirty&1) printf("r0 ");
10984     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10985     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10986     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10987     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10988     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10989     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10990     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10991     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10992     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10993     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10994     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10995     #endif
10996     printf("\n");
10997     disassemble_inst(i);
10998     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10999     #if defined(__i386__) || defined(__x86_64__)
11000     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
11001     if(regs[i].dirty&1) printf("eax ");
11002     if((regs[i].dirty>>1)&1) printf("ecx ");
11003     if((regs[i].dirty>>2)&1) printf("edx ");
11004     if((regs[i].dirty>>3)&1) printf("ebx ");
11005     if((regs[i].dirty>>5)&1) printf("ebp ");
11006     if((regs[i].dirty>>6)&1) printf("esi ");
11007     if((regs[i].dirty>>7)&1) printf("edi ");
11008     #endif
11009     #ifdef __arm__
11010     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
11011     if(regs[i].dirty&1) printf("r0 ");
11012     if((regs[i].dirty>>1)&1) printf("r1 ");
11013     if((regs[i].dirty>>2)&1) printf("r2 ");
11014     if((regs[i].dirty>>3)&1) printf("r3 ");
11015     if((regs[i].dirty>>4)&1) printf("r4 ");
11016     if((regs[i].dirty>>5)&1) printf("r5 ");
11017     if((regs[i].dirty>>6)&1) printf("r6 ");
11018     if((regs[i].dirty>>7)&1) printf("r7 ");
11019     if((regs[i].dirty>>8)&1) printf("r8 ");
11020     if((regs[i].dirty>>9)&1) printf("r9 ");
11021     if((regs[i].dirty>>10)&1) printf("r10 ");
11022     if((regs[i].dirty>>12)&1) printf("r12 ");
11023     #endif
11024     printf("\n");
11025     if(regs[i].isconst) {
11026       printf("constants: ");
11027       #if defined(__i386__) || defined(__x86_64__)
11028       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
11029       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
11030       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
11031       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
11032       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
11033       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
11034       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
11035       #endif
11036       #ifdef __arm__
11037       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
11038       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
11039       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
11040       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
11041       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
11042       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
11043       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
11044       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
11045       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
11046       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
11047       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
11048       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
11049       #endif
11050       printf("\n");
11051     }
11052 #ifndef FORCE32
11053     printf(" 32:");
11054     for(r=0;r<=CCREG;r++) {
11055       if((regs[i].is32>>r)&1) {
11056         if(r==CCREG) printf(" CC");
11057         else if(r==HIREG) printf(" HI");
11058         else if(r==LOREG) printf(" LO");
11059         else printf(" r%d",r);
11060       }
11061     }
11062     printf("\n");
11063 #endif
11064     /*printf(" p32:");
11065     for(r=0;r<=CCREG;r++) {
11066       if((p32[i]>>r)&1) {
11067         if(r==CCREG) printf(" CC");
11068         else if(r==HIREG) printf(" HI");
11069         else if(r==LOREG) printf(" LO");
11070         else printf(" r%d",r);
11071       }
11072     }
11073     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
11074     else printf("\n");*/
11075     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
11076       #if defined(__i386__) || defined(__x86_64__)
11077       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
11078       if(branch_regs[i].dirty&1) printf("eax ");
11079       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
11080       if((branch_regs[i].dirty>>2)&1) printf("edx ");
11081       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
11082       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
11083       if((branch_regs[i].dirty>>6)&1) printf("esi ");
11084       if((branch_regs[i].dirty>>7)&1) printf("edi ");
11085       #endif
11086       #ifdef __arm__
11087       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
11088       if(branch_regs[i].dirty&1) printf("r0 ");
11089       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
11090       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
11091       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
11092       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
11093       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
11094       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
11095       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
11096       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
11097       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
11098       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
11099       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
11100       #endif
11101 #ifndef FORCE32
11102       printf(" 32:");
11103       for(r=0;r<=CCREG;r++) {
11104         if((branch_regs[i].is32>>r)&1) {
11105           if(r==CCREG) printf(" CC");
11106           else if(r==HIREG) printf(" HI");
11107           else if(r==LOREG) printf(" LO");
11108           else printf(" r%d",r);
11109         }
11110       }
11111       printf("\n");
11112 #endif
11113     }
11114   }
11115
11116   /* Pass 8 - Assembly */
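  // For each instruction: write back or invalidate registers whose mapping
  // changed, record the branch-target entry point, load the required guest
  // registers and constants, then dispatch to the per-type assembler below.
  // Branch types set ds=1 so the delay slot is consumed with the branch.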
11117   linkcount=0;stubcount=0;
11118   ds=0;is_delayslot=0;
11119   cop1_usable=0;
11120   uint64_t is32_pre=0;
11121   u_int dirty_pre=0;
11122   u_int beginning=(u_int)out;
11123   if((u_int)addr&1) {
11124     ds=1;
11125     pagespan_ds();
11126   }
11127   u_int instr_addr0_override=0;
11128
11129 #ifdef PCSX
11130   if (start == 0x80030000) {
11131     // nasty hack for fastbios thing
11132     // override block entry to this code
11133     instr_addr0_override=(u_int)out;
11134     emit_movimm(start,0);
11135     // abuse io address var as a flag that we
11136     // have already returned here once
11137     emit_readword((int)&address,1);
11138     emit_writeword(0,(int)&pcaddr);
11139     emit_writeword(0,(int)&address);
11140     emit_cmp(0,1);
11141     emit_jne((int)new_dyna_leave);
11142   }
11143 #endif
11144   for(i=0;i<slen;i++)
11145   {
11146     //if(ds) printf("ds: ");
11147     if((void*)assem_debug==(void*)printf) disassemble_inst(i);
11148     if(ds) {
11149       ds=0; // Skip delay slot
11150       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
11151       instr_addr[i]=0;
11152     } else {
11153       #ifndef DESTRUCTIVE_WRITEBACK
11154       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11155       {
11156         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
11157               unneeded_reg[i],unneeded_reg_upper[i]);
11158         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
11159               unneeded_reg[i],unneeded_reg_upper[i]);
11160       }
11161       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
11162         is32_pre=branch_regs[i].is32;
11163         dirty_pre=branch_regs[i].dirty;
11164       }else{
11165         is32_pre=regs[i].is32;
11166         dirty_pre=regs[i].dirty;
11167       }
11168       #endif
11169       // write back
11170       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11171       {
11172         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
11173                       unneeded_reg[i],unneeded_reg_upper[i]);
11174         loop_preload(regmap_pre[i],regs[i].regmap_entry);
11175       }
11176       // branch target entry point
11177       instr_addr[i]=(u_int)out;
11178       assem_debug("<->\n");
11179       // load regs
11180       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
11181         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
11182       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
11183       address_generation(i,&regs[i],regs[i].regmap_entry);
11184       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
11185       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11186       {
11187         // Load the delay slot registers if necessary
11188         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
11189           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11190         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
11191           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11192         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
11193           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11194       }
11195       else if(i+1<slen)
11196       {
11197         // Preload registers for following instruction
11198         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
11199           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
11200             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11201         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
11202           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
11203             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11204       }
11205       // TODO: if(is_ooo(i)) address_generation(i+1);
11206       if(itype[i]==CJUMP||itype[i]==FJUMP)
11207         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
11208       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
11209         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11210       if(bt[i]) cop1_usable=0;
11211       // assemble
11212       switch(itype[i]) {
11213         case ALU:
11214           alu_assemble(i,&regs[i]);break;
11215         case IMM16:
11216           imm16_assemble(i,&regs[i]);break;
11217         case SHIFT:
11218           shift_assemble(i,&regs[i]);break;
11219         case SHIFTIMM:
11220           shiftimm_assemble(i,&regs[i]);break;
11221         case LOAD:
11222           load_assemble(i,&regs[i]);break;
11223         case LOADLR:
11224           loadlr_assemble(i,&regs[i]);break;
11225         case STORE:
11226           store_assemble(i,&regs[i]);break;
11227         case STORELR:
11228           storelr_assemble(i,&regs[i]);break;
11229         case COP0:
11230           cop0_assemble(i,&regs[i]);break;
11231         case COP1:
11232           cop1_assemble(i,&regs[i]);break;
11233         case C1LS:
11234           c1ls_assemble(i,&regs[i]);break;
11235         case COP2:
11236           cop2_assemble(i,&regs[i]);break;
11237         case C2LS:
11238           c2ls_assemble(i,&regs[i]);break;
11239         case C2OP:
11240           c2op_assemble(i,&regs[i]);break;
11241         case FCONV:
11242           fconv_assemble(i,&regs[i]);break;
11243         case FLOAT:
11244           float_assemble(i,&regs[i]);break;
11245         case FCOMP:
11246           fcomp_assemble(i,&regs[i]);break;
11247         case MULTDIV:
11248           multdiv_assemble(i,&regs[i]);break;
11249         case MOV:
11250           mov_assemble(i,&regs[i]);break;
11251         case SYSCALL:
11252           syscall_assemble(i,&regs[i]);break;
11253         case HLECALL:
11254           hlecall_assemble(i,&regs[i]);break;
11255         case INTCALL:
11256           intcall_assemble(i,&regs[i]);break;
11257         case UJUMP:
11258           ujump_assemble(i,&regs[i]);ds=1;break;
11259         case RJUMP:
11260           rjump_assemble(i,&regs[i]);ds=1;break;
11261         case CJUMP:
11262           cjump_assemble(i,&regs[i]);ds=1;break;
11263         case SJUMP:
11264           sjump_assemble(i,&regs[i]);ds=1;break;
11265         case FJUMP:
11266           fjump_assemble(i,&regs[i]);ds=1;break;
11267         case SPAN:
11268           pagespan_assemble(i,&regs[i]);break;
11269       }
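      // Constant/literal pools (mainly relevant for the ARM backend): after
      // an unconditional transfer a large pool can be dumped in place,
      // otherwise a small pool is emitted with a jump over it.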
11270       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
11271         literal_pool(1024);
11272       else
11273         literal_pool_jumpover(256);
11274     }
11275   }
11276   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
11277   // If the block did not end with an unconditional branch,
11278   // add a jump to the next instruction.
11279   if(i>1) {
11280     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
11281       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11282       assert(i==slen);
11283       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
11284         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11285         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11286           emit_loadreg(CCREG,HOST_CCREG);
11287         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11288       }
11289       else if(!likely[i-2])
11290       {
11291         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
11292         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
11293       }
11294       else
11295       {
11296         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
11297         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
11298       }
11299       add_to_linker((int)out,start+i*4,0);
11300       emit_jmp(0);
11301     }
11302   }
11303   else
11304   {
11305     assert(i>0);
11306     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11307     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11308     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11309       emit_loadreg(CCREG,HOST_CCREG);
11310     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11311     add_to_linker((int)out,start+i*4,0);
11312     emit_jmp(0);
11313   }
11314
11315   // TODO: delay slot stubs?
11316   // Stubs
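  // Out-of-line slow paths recorded during assembly: memory access handlers
  // for the various load/store widths, cycle-count/interrupt checks (CC_STUB),
  // self-modified-code checks (INVCODE_STUB), FPU-unusable exceptions
  // (FP_STUB) and unaligned stores, all emitted after the main code.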
11317   for(i=0;i<stubcount;i++)
11318   {
11319     switch(stubs[i][0])
11320     {
11321       case LOADB_STUB:
11322       case LOADH_STUB:
11323       case LOADW_STUB:
11324       case LOADD_STUB:
11325       case LOADBU_STUB:
11326       case LOADHU_STUB:
11327         do_readstub(i);break;
11328       case STOREB_STUB:
11329       case STOREH_STUB:
11330       case STOREW_STUB:
11331       case STORED_STUB:
11332         do_writestub(i);break;
11333       case CC_STUB:
11334         do_ccstub(i);break;
11335       case INVCODE_STUB:
11336         do_invstub(i);break;
11337       case FP_STUB:
11338         do_cop1stub(i);break;
11339       case STORELR_STUB:
11340         do_unalignedwritestub(i);break;
11341     }
11342   }
11343
11344   if (instr_addr0_override)
11345     instr_addr[0] = instr_addr0_override;
11346
11347   /* Pass 9 - Linker */
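  // Patch the branches recorded by add_to_linker(): external targets are
  // pointed directly at the destination block if it already exists (and the
  // link is registered so it can be undone), otherwise at an extjump stub
  // that resolves the target at run time; internal targets use instr_addr[].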
11348   for(i=0;i<linkcount;i++)
11349   {
11350     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
11351     literal_pool(64);
11352     if(!link_addr[i][2])
11353     {
11354       void *stub=out;
11355       void *addr=check_addr(link_addr[i][1]);
11356       emit_extjump(link_addr[i][0],link_addr[i][1]);
11357       if(addr) {
11358         set_jump_target(link_addr[i][0],(int)addr);
11359         add_link(link_addr[i][1],stub);
11360       }
11361       else set_jump_target(link_addr[i][0],(int)stub);
11362     }
11363     else
11364     {
11365       // Internal branch
11366       int target=(link_addr[i][1]-start)>>2;
11367       assert(target>=0&&target<slen);
11368       assert(instr_addr[target]);
11369       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11370       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
11371       //#else
11372       set_jump_target(link_addr[i][0],instr_addr[target]);
11373       //#endif
11374     }
11375   }
11376   // External Branch Targets (jump_in)
11377   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
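  // Every branch target (and the block start) gets an externally reachable
  // entry: a dirty-check stub is emitted, registered in jump_dirty, and its
  // entry point added to jump_in; matching hash table slots are refreshed.
  // The shadow copy taken below provides the reference data for that check.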
11378   for(i=0;i<slen;i++)
11379   {
11380     if(bt[i]||i==0)
11381     {
11382       if(instr_addr[i]) // TODO - delay slots (=null)
11383       {
11384         u_int vaddr=start+i*4;
11385         u_int page=get_page(vaddr);
11386         u_int vpage=get_vpage(vaddr);
11387         literal_pool(256);
11388         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
11389 #ifndef FORCE32
11390         if(!requires_32bit[i])
11391 #else
11392         if(1)
11393 #endif
11394         {
11395           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11396           assem_debug("jump_in: %x\n",start+i*4);
11397           ll_add(jump_dirty+vpage,vaddr,(void *)out);
11398           int entry_point=do_dirty_stub(i);
11399           ll_add(jump_in+page,vaddr,(void *)entry_point);
11400           // If there was an existing entry in the hash table,
11401           // replace it with the new address.
11402           // Don't add new entries.  We'll insert the
11403           // ones that actually get used in check_addr().
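          // (each bin holds two {vaddr, code pointer} pairs:
          //  ht_bin[0]/ht_bin[1] and ht_bin[2]/ht_bin[3])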
11404           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
11405           if(ht_bin[0]==vaddr) {
11406             ht_bin[1]=entry_point;
11407           }
11408           if(ht_bin[2]==vaddr) {
11409             ht_bin[3]=entry_point;
11410           }
11411         }
11412         else
11413         {
11414           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
11415           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11416           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
11417           //int entry_point=(int)out;
11418           ////assem_debug("entry_point: %x\n",entry_point);
11419           //load_regs_entry(i);
11420           //if(entry_point==(int)out)
11421           //  entry_point=instr_addr[i];
11422           //else
11423           //  emit_jmp(instr_addr[i]);
11424           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11425           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
11426           int entry_point=do_dirty_stub(i);
11427           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11428         }
11429       }
11430     }
11431   }
11432   // Write out the literal pool if necessary
11433   literal_pool(0);
11434   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11435   // Align code
11436   if(((u_int)out)&7) emit_addnop(13);
11437   #endif
11438   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
11439   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
11440   memcpy(copy,source,slen*4);
11441   copy+=slen*4;
11442   
11443   #ifdef __arm__
11444   __clear_cache((void *)beginning,out);
11445   #endif
11446   
11447   // If we're within 256K of the end of the buffer,
11448   // start over from the beginning. (Is 256K enough?)
11449   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11450   
11451   // Trap writes to any of the pages we compiled
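  // (a zero in invalid_code[] marks the page as containing compiled code so
  //  stores to it take the invalidation path; on the TLB build the
  //  0x40000000 bit in memory_map flags the same for mapped addresses)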
11452   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11453     invalid_code[i]=0;
11454 #ifndef DISABLE_TLB
11455     memory_map[i]|=0x40000000;
11456     if((signed int)start>=(signed int)0xC0000000) {
11457       assert(using_tlb);
11458       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11459       invalid_code[j]=0;
11460       memory_map[j]|=0x40000000;
11461       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11462     }
11463 #endif
11464   }
11465   inv_code_start=inv_code_end=~0;
11466 #ifdef PCSX
11467   // for PCSX we need to mark all mirrors too
11468   if(get_page(start)<(RAM_SIZE>>12))
11469     for(i=start>>12;i<=(start+slen*4)>>12;i++)
11470       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
11471       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
11472       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
11473 #endif
11474   
11475   /* Pass 10 - Free memory by expiring oldest blocks */
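  // The output buffer is treated as a ring: expirep sweeps a region ahead of
  // the write pointer and, in four interleaved phases, drops jump_in/jump_dirty
  // entries, stale jump_out pointers, hash table slots and jump_out lists
  // whose code lies in the area about to be reused.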
11476   
11477   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
11478   while(expirep!=end)
11479   {
11480     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11481     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11482     inv_debug("EXP: Phase %d\n",expirep);
11483     switch((expirep>>11)&3)
11484     {
11485       case 0:
11486         // Clear jump_in and jump_dirty
11487         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11488         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11489         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11490         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11491         break;
11492       case 1:
11493         // Clear pointers
11494         ll_kill_pointers(jump_out[expirep&2047],base,shift);
11495         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11496         break;
11497       case 2:
11498         // Clear hash table
11499         for(i=0;i<32;i++) {
11500           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11501           if((ht_bin[3]>>shift)==(base>>shift) ||
11502              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11503             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11504             ht_bin[2]=ht_bin[3]=-1;
11505           }
11506           if((ht_bin[1]>>shift)==(base>>shift) ||
11507              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11508             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11509             ht_bin[0]=ht_bin[2];
11510             ht_bin[1]=ht_bin[3];
11511             ht_bin[2]=ht_bin[3]=-1;
11512           }
11513         }
11514         break;
11515       case 3:
11516         // Clear jump_out
11517         #ifdef __arm__
11518         if((expirep&2047)==0) 
11519           do_clear_cache();
11520         #endif
11521         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11522         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11523         break;
11524     }
11525     expirep=(expirep+1)&65535;
11526   }
11527   return 0;
11528 }
11529
11530 // vim:shiftwidth=2:expandtab