1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <sys/mman.h>
25
26 #include "emu_if.h" //emulator interface
27
28 //#define DISASM
29 //#define assem_debug printf
30 //#define inv_debug printf
31 #define assem_debug(...)
32 #define inv_debug(...)
33
34 #ifdef __i386__
35 #include "assem_x86.h"
36 #endif
37 #ifdef __x86_64__
38 #include "assem_x64.h"
39 #endif
40 #ifdef __arm__
41 #include "assem_arm.h"
42 #endif
43
44 #define MAXBLOCK 4096
45 #define MAX_OUTPUT_BLOCK_SIZE 262144
46 #define CLOCK_DIVIDER 2
47
48 struct regstat
49 {
50   signed char regmap_entry[HOST_REGS];
51   signed char regmap[HOST_REGS];
52   uint64_t was32;
53   uint64_t is32;
54   uint64_t wasdirty;
55   uint64_t dirty;
56   uint64_t u;
57   uint64_t uu;
58   u_int wasconst;
59   u_int isconst;
60   uint64_t constmap[HOST_REGS];
61 };
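// Per-instruction allocator state.  regmap[] maps each host register to the
// MIPS register it holds (-1 = free, value|64 = upper half of a 64-bit reg);
// regmap_entry[] is the mapping required on entry to the instruction.
// is32/was32 are per-MIPS-register bitmaps of values known to be 32-bit
// sign-extended, dirty/wasdirty and isconst/wasconst are per-host-register
// bitmaps (value not yet written back / value is the constant in constmap[]),
// and u/uu mark MIPS registers whose lower/upper halves are not needed.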
62
63 struct ll_entry
64 {
65   u_int vaddr;
66   u_int reg32;
67   void *addr;
68   struct ll_entry *next;
69 };
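// Node in the per-page block lists (jump_in/jump_out/jump_dirty): vaddr is
// the MIPS address, addr the corresponding entry point in the translation
// cache.  reg32 carries the block's 32-bit-ness requirements; it stays 0 in
// FORCE32/PCSX builds (see ll_add_32 below).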
70
71   u_int start;
72   u_int *source;
73   u_int pagelimit;
74   char insn[MAXBLOCK][10];
75   u_char itype[MAXBLOCK];
76   u_char opcode[MAXBLOCK];
77   u_char opcode2[MAXBLOCK];
78   u_char bt[MAXBLOCK];
79   u_char rs1[MAXBLOCK];
80   u_char rs2[MAXBLOCK];
81   u_char rt1[MAXBLOCK];
82   u_char rt2[MAXBLOCK];
83   u_char us1[MAXBLOCK];
84   u_char us2[MAXBLOCK];
85   u_char dep1[MAXBLOCK];
86   u_char dep2[MAXBLOCK];
87   u_char lt1[MAXBLOCK];
88   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
89   static uint64_t gte_rt[MAXBLOCK];
90   static uint64_t gte_unneeded[MAXBLOCK];
91   static int gte_reads_flags; // gte flag read encountered
92   static u_int smrv[32]; // speculated MIPS register values
93   static u_int smrv_strong; // mask of regs that are likely to have correct values
94   static u_int smrv_weak; // same, but somewhat less likely
95   static u_int smrv_strong_next; // same, but after current insn executes
96   static u_int smrv_weak_next;
97   int imm[MAXBLOCK];
98   u_int ba[MAXBLOCK];
99   char likely[MAXBLOCK];
100   char is_ds[MAXBLOCK];
101   char ooo[MAXBLOCK];
102   uint64_t unneeded_reg[MAXBLOCK];
103   uint64_t unneeded_reg_upper[MAXBLOCK];
104   uint64_t branch_unneeded_reg[MAXBLOCK];
105   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
106   uint64_t p32[MAXBLOCK];
107   uint64_t pr32[MAXBLOCK];
108   signed char regmap_pre[MAXBLOCK][HOST_REGS];
109   signed char regmap[MAXBLOCK][HOST_REGS];
110   signed char regmap_entry[MAXBLOCK][HOST_REGS];
111   uint64_t constmap[MAXBLOCK][HOST_REGS];
112   struct regstat regs[MAXBLOCK];
113   struct regstat branch_regs[MAXBLOCK];
114   signed char minimum_free_regs[MAXBLOCK];
115   u_int needed_reg[MAXBLOCK];
116   uint64_t requires_32bit[MAXBLOCK];
117   u_int wont_dirty[MAXBLOCK];
118   u_int will_dirty[MAXBLOCK];
119   int ccadj[MAXBLOCK];
120   int slen;
121   u_int instr_addr[MAXBLOCK];
122   u_int link_addr[MAXBLOCK][3];
123   int linkcount;
124   u_int stubs[MAXBLOCK*3][8];
125   int stubcount;
126   u_int literals[1024][2];
127   int literalcount;
128   int is_delayslot;
129   int cop1_usable;
130   u_char *out;
131   struct ll_entry *jump_in[4096];
132   struct ll_entry *jump_out[4096];
133   struct ll_entry *jump_dirty[4096];
134   u_int hash_table[65536][4]  __attribute__((aligned(16)));
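// Each hash_table bin caches the two most recently used blocks that hash to
// it, laid out as {vaddr0, codeptr0, vaddr1, codeptr1}.  A lookup is e.g.:
//   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
//   if(ht_bin[0]==vaddr) ... use ht_bin[1] ...
// with slots 2/3 probed next and misses falling back to the jump_in lists.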
135   char shadow[1048576]  __attribute__((aligned(16)));
136   void *copy;
137   int expirep;
138 #ifndef PCSX
139   u_int using_tlb;
140 #else
141   static const u_int using_tlb=0;
142 #endif
143   int new_dynarec_did_compile;
144   u_int stop_after_jal;
145   extern u_char restore_candidate[512];
146   extern int cycle_count;
147
148   /* registers that may be allocated */
149   /* 1-31 gpr */
150 #define HIREG 32 // hi
151 #define LOREG 33 // lo
152 #define FSREG 34 // FPU status (FCSR)
153 #define CSREG 35 // Coprocessor status
154 #define CCREG 36 // Cycle count
155 #define INVCP 37 // Pointer to invalid_code
156 #define MMREG 38 // Pointer to memory_map
157 #define ROREG 39 // ram offset (if rdram!=0x80000000)
158 #define TEMPREG 40
159 #define FTEMP 40 // FPU temporary register
160 #define PTEMP 41 // Prefetch temporary register
161 #define TLREG 42 // TLB mapping offset
162 #define RHASH 43 // Return address hash
163 #define RHTBL 44 // Return address hash table address
164 #define RTEMP 45 // JR/JALR address register
165 #define MAXREG 45
166 #define AGEN1 46 // Address generation temporary register
167 #define AGEN2 47 // Address generation temporary register
168 #define MGEN1 48 // Maptable address generation temporary register
169 #define MGEN2 49 // Maptable address generation temporary register
170 #define BTREG 50 // Branch target temporary register
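  // Everything from 32 up is an internal pseudo-register (HI/LO, coprocessor
  // status, cycle count, assorted temporaries); the allocator treats these
  // just like the 31 MIPS GPRs above.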
171
172   /* instruction types */
173 #define NOP 0     // No operation
174 #define LOAD 1    // Load
175 #define STORE 2   // Store
176 #define LOADLR 3  // Unaligned load
177 #define STORELR 4 // Unaligned store
178 #define MOV 5     // Move 
179 #define ALU 6     // Arithmetic/logic
180 #define MULTDIV 7 // Multiply/divide
181 #define SHIFT 8   // Shift by register
182 #define SHIFTIMM 9// Shift by immediate
183 #define IMM16 10  // 16-bit immediate
184 #define RJUMP 11  // Unconditional jump to register
185 #define UJUMP 12  // Unconditional jump
186 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
187 #define SJUMP 14  // Conditional branch (regimm format)
188 #define COP0 15   // Coprocessor 0
189 #define COP1 16   // Coprocessor 1
190 #define C1LS 17   // Coprocessor 1 load/store
191 #define FJUMP 18  // Conditional branch (floating point)
192 #define FLOAT 19  // Floating point unit
193 #define FCONV 20  // Convert integer to float
194 #define FCOMP 21  // Floating point compare (sets FSREG)
195 #define SYSCALL 22// SYSCALL
196 #define OTHER 23  // Other
197 #define SPAN 24   // Branch/delay slot spans 2 pages
198 #define NI 25     // Not implemented
199 #define HLECALL 26// PCSX fake opcodes for HLE
200 #define COP2 27   // Coprocessor 2 move
201 #define C2LS 28   // Coprocessor 2 load/store
202 #define C2OP 29   // Coprocessor 2 operation
203 #define INTCALL 30// Call interpreter to handle rare corner cases
204
205   /* stubs */
206 #define CC_STUB 1
207 #define FP_STUB 2
208 #define LOADB_STUB 3
209 #define LOADH_STUB 4
210 #define LOADW_STUB 5
211 #define LOADD_STUB 6
212 #define LOADBU_STUB 7
213 #define LOADHU_STUB 8
214 #define STOREB_STUB 9
215 #define STOREH_STUB 10
216 #define STOREW_STUB 11
217 #define STORED_STUB 12
218 #define STORELR_STUB 13
219 #define INVCODE_STUB 14
220
221   /* branch codes */
222 #define TAKEN 1
223 #define NOTTAKEN 2
224 #define NULLDS 3
225
226 // asm linkage
227 int new_recompile_block(int addr);
228 void *get_addr_ht(u_int vaddr);
229 void invalidate_block(u_int block);
230 void invalidate_addr(u_int addr);
231 void remove_hash(int vaddr);
232 void jump_vaddr();
233 void dyna_linker();
234 void dyna_linker_ds();
235 void verify_code();
236 void verify_code_vm();
237 void verify_code_ds();
238 void cc_interrupt();
239 void fp_exception();
240 void fp_exception_ds();
241 void jump_syscall();
242 void jump_syscall_hle();
243 void jump_eret();
244 void jump_hlecall();
245 void jump_intcall();
246 void new_dyna_leave();
247
248 // TLB
249 void TLBWI_new();
250 void TLBWR_new();
251 void read_nomem_new();
252 void read_nomemb_new();
253 void read_nomemh_new();
254 void read_nomemd_new();
255 void write_nomem_new();
256 void write_nomemb_new();
257 void write_nomemh_new();
258 void write_nomemd_new();
259 void write_rdram_new();
260 void write_rdramb_new();
261 void write_rdramh_new();
262 void write_rdramd_new();
263 extern u_int memory_map[1048576];
264
265 // Needed by assembler
266 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
267 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
268 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
269 void load_all_regs(signed char i_regmap[]);
270 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
271 void load_regs_entry(int t);
272 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
273
274 int tracedebug=0;
275
276 //#define DEBUG_CYCLE_COUNT 1
277
278 static void tlb_hacks()
279 {
280 #ifndef DISABLE_TLB
281   // Goldeneye hack
282   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
283   {
284     u_int addr;
285     int n;
286     switch (ROM_HEADER->Country_code&0xFF) 
287     {
288       case 0x45: // U
289         addr=0x34b30;
290         break;                   
291       case 0x4A: // J 
292         addr=0x34b70;    
293         break;    
294       case 0x50: // E 
295         addr=0x329f0;
296         break;                        
297       default: 
298         // Unknown country code
299         addr=0;
300         break;
301     }
302     u_int rom_addr=(u_int)rom;
303     #ifdef ROM_COPY
304     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
305     // in the lower 4G of memory to use this hack.  Copy it if necessary.
306     if((void *)rom>(void *)0xffffffff) {
307       munmap(ROM_COPY, 67108864);
308       if(mmap(ROM_COPY, 12582912,
309               PROT_READ | PROT_WRITE,
310               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
311               -1, 0) <= 0) {printf("mmap() failed\n");}
312       memcpy(ROM_COPY,rom,12582912);
313       rom_addr=(u_int)ROM_COPY;
314     }
315     #endif
316     if(addr) {
317       for(n=0x7F000;n<0x80000;n++) {
318         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
319       }
320     }
321   }
322 #endif
323 }
324
325 static u_int get_page(u_int vaddr)
326 {
327 #ifndef PCSX
328   u_int page=(vaddr^0x80000000)>>12;
329 #else
330   u_int page=vaddr&~0xe0000000;
331   if (page < 0x1000000)
332     page &= ~0x0e00000; // RAM mirrors
333   page>>=12;
334 #endif
335 #ifndef DISABLE_TLB
336   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
337 #endif
338   if(page>2048) page=2048+(page&2047);
339   return page;
340 }
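// Example of the PCSX mapping above: 0x00031234, 0x80031234 and 0xa0031234
// (KUSEG/KSEG0/KSEG1 views of the same RAM word) all strip to 0x00031234 and
// give page 0x31, and a 2MB RAM mirror such as 0x80231234 folds to the same
// page via the &~0x0e00000 mask.  Addresses outside RAM (e.g. the BIOS at
// 0xbfc00000) land in the 2048+ range because of the final folding step.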
341
342 static u_int get_vpage(u_int vaddr)
343 {
344   u_int vpage=(vaddr^0x80000000)>>12;
345 #ifndef DISABLE_TLB
346   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
347 #endif
348   if(vpage>2048) vpage=2048+(vpage&2047);
349   return vpage;
350 }
351
352 // Get address from virtual address
353 // This is called from the recompiled JR/JALR instructions
354 void *get_addr(u_int vaddr)
355 {
356   u_int page=get_page(vaddr);
357   u_int vpage=get_vpage(vaddr);
358   struct ll_entry *head;
359   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
360   head=jump_in[page];
361   while(head!=NULL) {
362     if(head->vaddr==vaddr&&head->reg32==0) {
363   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
364       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
365       ht_bin[3]=ht_bin[1];
366       ht_bin[2]=ht_bin[0];
367       ht_bin[1]=(int)head->addr;
368       ht_bin[0]=vaddr;
369       return head->addr;
370     }
371     head=head->next;
372   }
373   head=jump_dirty[vpage];
374   while(head!=NULL) {
375     if(head->vaddr==vaddr&&head->reg32==0) {
376       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
377       // Don't restore blocks which are about to expire from the cache
378       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
379       if(verify_dirty(head->addr)) {
380         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
381         invalid_code[vaddr>>12]=0;
382         inv_code_start=inv_code_end=~0;
383         memory_map[vaddr>>12]|=0x40000000;
384         if(vpage<2048) {
385 #ifndef DISABLE_TLB
386           if(tlb_LUT_r[vaddr>>12]) {
387             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
388             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
389           }
390 #endif
391           restore_candidate[vpage>>3]|=1<<(vpage&7);
392         }
393         else restore_candidate[page>>3]|=1<<(page&7);
394         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
395         if(ht_bin[0]==vaddr) {
396           ht_bin[1]=(int)head->addr; // Replace existing entry
397         }
398         else
399         {
400           ht_bin[3]=ht_bin[1];
401           ht_bin[2]=ht_bin[0];
402           ht_bin[1]=(int)head->addr;
403           ht_bin[0]=vaddr;
404         }
405         return head->addr;
406       }
407     }
408     head=head->next;
409   }
410   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
411   int r=new_recompile_block(vaddr);
412   if(r==0) return get_addr(vaddr);
413   // Execute in unmapped page, generate pagefault exception
414   Status|=2;
415   Cause=(vaddr<<31)|0x8;
416   EPC=(vaddr&1)?vaddr-5:vaddr;
417   BadVAddr=(vaddr&~1);
418   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
419   EntryHi=BadVAddr&0xFFFFE000;
420   return get_addr_ht(0x80000000);
421 }
422 // Look up address in hash table first
423 void *get_addr_ht(u_int vaddr)
424 {
425   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
426   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
427   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
428   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
429   return get_addr(vaddr);
430 }
431
432 void *get_addr_32(u_int vaddr,u_int flags)
433 {
434 #ifdef FORCE32
435   return get_addr(vaddr);
436 #else
437   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
438   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
439   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
440   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
441   u_int page=get_page(vaddr);
442   u_int vpage=get_vpage(vaddr);
443   struct ll_entry *head;
444   head=jump_in[page];
445   while(head!=NULL) {
446     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
447       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
448       if(head->reg32==0) {
449         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
450         if(ht_bin[0]==-1) {
451           ht_bin[1]=(int)head->addr;
452           ht_bin[0]=vaddr;
453         }else if(ht_bin[2]==-1) {
454           ht_bin[3]=(int)head->addr;
455           ht_bin[2]=vaddr;
456         }
457         //ht_bin[3]=ht_bin[1];
458         //ht_bin[2]=ht_bin[0];
459         //ht_bin[1]=(int)head->addr;
460         //ht_bin[0]=vaddr;
461       }
462       return head->addr;
463     }
464     head=head->next;
465   }
466   head=jump_dirty[vpage];
467   while(head!=NULL) {
468     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
469       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
470       // Don't restore blocks which are about to expire from the cache
471       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
472       if(verify_dirty(head->addr)) {
473         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
474         invalid_code[vaddr>>12]=0;
475         inv_code_start=inv_code_end=~0;
476         memory_map[vaddr>>12]|=0x40000000;
477         if(vpage<2048) {
478 #ifndef DISABLE_TLB
479           if(tlb_LUT_r[vaddr>>12]) {
480             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
481             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
482           }
483 #endif
484           restore_candidate[vpage>>3]|=1<<(vpage&7);
485         }
486         else restore_candidate[page>>3]|=1<<(page&7);
487         if(head->reg32==0) {
488           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
489           if(ht_bin[0]==-1) {
490             ht_bin[1]=(int)head->addr;
491             ht_bin[0]=vaddr;
492           }else if(ht_bin[2]==-1) {
493             ht_bin[3]=(int)head->addr;
494             ht_bin[2]=vaddr;
495           }
496           //ht_bin[3]=ht_bin[1];
497           //ht_bin[2]=ht_bin[0];
498           //ht_bin[1]=(int)head->addr;
499           //ht_bin[0]=vaddr;
500         }
501         return head->addr;
502       }
503     }
504     head=head->next;
505   }
506   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
507   int r=new_recompile_block(vaddr);
508   if(r==0) return get_addr(vaddr);
509   // Execute in unmapped page, generate pagefault exception
510   Status|=2;
511   Cause=(vaddr<<31)|0x8;
512   EPC=(vaddr&1)?vaddr-5:vaddr;
513   BadVAddr=(vaddr&~1);
514   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
515   EntryHi=BadVAddr&0xFFFFE000;
516   return get_addr_ht(0x80000000);
517 #endif
518 }
519
520 void clear_all_regs(signed char regmap[])
521 {
522   int hr;
523   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
524 }
525
526 signed char get_reg(signed char regmap[],int r)
527 {
528   int hr;
529   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
530   return -1;
531 }
532
533 // Find a register that is available for two consecutive cycles
534 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
535 {
536   int hr;
537   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
538   return -1;
539 }
540
541 int count_free_regs(signed char regmap[])
542 {
543   int count=0;
544   int hr;
545   for(hr=0;hr<HOST_REGS;hr++)
546   {
547     if(hr!=EXCLUDE_REG) {
548       if(regmap[hr]<0) count++;
549     }
550   }
551   return count;
552 }
553
554 void dirty_reg(struct regstat *cur,signed char reg)
555 {
556   int hr;
557   if(!reg) return;
558   for (hr=0;hr<HOST_REGS;hr++) {
559     if((cur->regmap[hr]&63)==reg) {
560       cur->dirty|=1<<hr;
561     }
562   }
563 }
564
565 // If we dirty the lower half of a 64 bit register which is now being
566 // sign-extended, we need to dump the upper half.
567 // Note: Do this only after completion of the instruction, because
568 // some instructions may need to read the full 64-bit value even if
569 // overwriting it (eg SLTI, DSRA32).
570 static void flush_dirty_uppers(struct regstat *cur)
571 {
572   int hr,reg;
573   for (hr=0;hr<HOST_REGS;hr++) {
574     if((cur->dirty>>hr)&1) {
575       reg=cur->regmap[hr];
576       if(reg>=64) 
577         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
578     }
579   }
580 }
581
582 void set_const(struct regstat *cur,signed char reg,uint64_t value)
583 {
584   int hr;
585   if(!reg) return;
586   for (hr=0;hr<HOST_REGS;hr++) {
587     if(cur->regmap[hr]==reg) {
588       cur->isconst|=1<<hr;
589       cur->constmap[hr]=value;
590     }
591     else if((cur->regmap[hr]^64)==reg) {
592       cur->isconst|=1<<hr;
593       cur->constmap[hr]=value>>32;
594     }
595   }
596 }
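// Constants are tracked per host register.  If some host register currently
// holds the upper half of 'reg' (its regmap entry is reg|64), set_const()
// records value>>32 there so a later 64-bit use can recover the full value.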
597
598 void clear_const(struct regstat *cur,signed char reg)
599 {
600   int hr;
601   if(!reg) return;
602   for (hr=0;hr<HOST_REGS;hr++) {
603     if((cur->regmap[hr]&63)==reg) {
604       cur->isconst&=~(1<<hr);
605     }
606   }
607 }
608
609 int is_const(struct regstat *cur,signed char reg)
610 {
611   int hr;
612   if(reg<0) return 0;
613   if(!reg) return 1;
614   for (hr=0;hr<HOST_REGS;hr++) {
615     if((cur->regmap[hr]&63)==reg) {
616       return (cur->isconst>>hr)&1;
617     }
618   }
619   return 0;
620 }
621 uint64_t get_const(struct regstat *cur,signed char reg)
622 {
623   int hr;
624   if(!reg) return 0;
625   for (hr=0;hr<HOST_REGS;hr++) {
626     if(cur->regmap[hr]==reg) {
627       return cur->constmap[hr];
628     }
629   }
630   printf("Unknown constant in r%d\n",reg);
631   exit(1);
632 }
633
634 // Least soon needed registers
635 // Look at the next ten instructions and see which registers
636 // will be used.  Try not to reallocate these.
637 void lsn(u_char hsn[], int i, int *preferred_reg)
638 {
639   int j;
640   int b=-1;
641   for(j=0;j<9;j++)
642   {
643     if(i+j>=slen) {
644       j=slen-i-1;
645       break;
646     }
647     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
648     {
649       // Don't go past an unconditional jump
650       j++;
651       break;
652     }
653   }
654   for(;j>=0;j--)
655   {
656     if(rs1[i+j]) hsn[rs1[i+j]]=j;
657     if(rs2[i+j]) hsn[rs2[i+j]]=j;
658     if(rt1[i+j]) hsn[rt1[i+j]]=j;
659     if(rt2[i+j]) hsn[rt2[i+j]]=j;
660     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
661       // Stores can allocate zero
662       hsn[rs1[i+j]]=j;
663       hsn[rs2[i+j]]=j;
664     }
665     // On some architectures stores need invc_ptr
666     #if defined(HOST_IMM8)
667     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
668       hsn[INVCP]=j;
669     }
670     #endif
671     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
672     {
673       hsn[CCREG]=j;
674       b=j;
675     }
676   }
677   if(b>=0)
678   {
679     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
680     {
681       // Follow first branch
682       int t=(ba[i+b]-start)>>2;
683       j=7-b;if(t+j>=slen) j=slen-t-1;
684       for(;j>=0;j--)
685       {
686         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
687         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
688         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
689         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
690       }
691     }
692     // TODO: preferred register based on backward branch
693   }
694   // Delay slot should preferably not overwrite branch conditions or cycle count
695   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
696     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
697     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
698     hsn[CCREG]=1;
699     // ...or hash tables
700     hsn[RHASH]=1;
701     hsn[RHTBL]=1;
702   }
703   // Coprocessor load/store needs FTEMP, even if not declared
704   if(itype[i]==C1LS||itype[i]==C2LS) {
705     hsn[FTEMP]=0;
706   }
707   // Load L/R also uses FTEMP as a temporary register
708   if(itype[i]==LOADLR) {
709     hsn[FTEMP]=0;
710   }
711   // Also SWL/SWR/SDL/SDR
712   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
713     hsn[FTEMP]=0;
714   }
715   // Don't remove the TLB registers either
716   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
717     hsn[TLREG]=0;
718   }
719   // Don't remove the miniht registers
720   if(itype[i]==UJUMP||itype[i]==RJUMP)
721   {
722     hsn[RHASH]=0;
723     hsn[RHTBL]=0;
724   }
725 }
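// After lsn(), hsn[r] holds (roughly) how many instructions away register r
// is next used; the register allocator presumably initializes the array to a
// large value and prefers evicting registers whose entries stayed large.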
726
727 // We only want to allocate registers if we're going to use them again soon
728 int needed_again(int r, int i)
729 {
730   int j;
731   int b=-1;
732   int rn=10;
733   
734   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
735   {
736     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
737       return 0; // Don't need any registers if exiting the block
738   }
739   for(j=0;j<9;j++)
740   {
741     if(i+j>=slen) {
742       j=slen-i-1;
743       break;
744     }
745     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
746     {
747       // Don't go past an unconditional jump
748       j++;
749       break;
750     }
751     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
752     {
753       break;
754     }
755   }
756   for(;j>=1;j--)
757   {
758     if(rs1[i+j]==r) rn=j;
759     if(rs2[i+j]==r) rn=j;
760     if((unneeded_reg[i+j]>>r)&1) rn=10;
761     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
762     {
763       b=j;
764     }
765   }
766   /*
767   if(b>=0)
768   {
769     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
770     {
771       // Follow first branch
772       int o=rn;
773       int t=(ba[i+b]-start)>>2;
774       j=7-b;if(t+j>=slen) j=slen-t-1;
775       for(;j>=0;j--)
776       {
777         if(!((unneeded_reg[t+j]>>r)&1)) {
778           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
779           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
780         }
781         else rn=o;
782       }
783     }
784   }*/
785   if(rn<10) return 1;
786   return 0;
787 }
788
789 // Try to match register allocations at the end of a loop with those
790 // at the beginning
791 int loop_reg(int i, int r, int hr)
792 {
793   int j,k;
794   for(j=0;j<9;j++)
795   {
796     if(i+j>=slen) {
797       j=slen-i-1;
798       break;
799     }
800     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
801     {
802       // Don't go past an unconditional jump
803       j++;
804       break;
805     }
806   }
807   k=0;
808   if(i>0){
809     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
810       k--;
811   }
812   for(;k<j;k++)
813   {
814     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
815     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
816     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
817     {
818       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
819       {
820         int t=(ba[i+k]-start)>>2;
821         int reg=get_reg(regs[t].regmap_entry,r);
822         if(reg>=0) return reg;
823         //reg=get_reg(regs[t+1].regmap_entry,r);
824         //if(reg>=0) return reg;
825       }
826     }
827   }
828   return hr;
829 }
830
831
832 // Allocate every register, preserving source/target regs
833 void alloc_all(struct regstat *cur,int i)
834 {
835   int hr;
836   
837   for(hr=0;hr<HOST_REGS;hr++) {
838     if(hr!=EXCLUDE_REG) {
839       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
840          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
841       {
842         cur->regmap[hr]=-1;
843         cur->dirty&=~(1<<hr);
844       }
845       // Don't need zeros
846       if((cur->regmap[hr]&63)==0)
847       {
848         cur->regmap[hr]=-1;
849         cur->dirty&=~(1<<hr);
850       }
851     }
852   }
853 }
854
855 #ifndef FORCE32
856 void div64(int64_t dividend,int64_t divisor)
857 {
858   lo=dividend/divisor;
859   hi=dividend%divisor;
860   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
861   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
862 }
863 void divu64(uint64_t dividend,uint64_t divisor)
864 {
865   lo=dividend/divisor;
866   hi=dividend%divisor;
867   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
868   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
869 }
870
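// DMULT/DMULTU via schoolbook 32x32 partial products:
//   (a1*2^32 + a0) * (b1*2^32 + b0)
//     = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
// temp1..temp4 are the partial products with carries propagated across the
// 32-bit boundaries; result1..result4 are the four 32-bit words of the
// 128-bit product, which mult64 finally negates if the operand signs differ.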
871 void mult64(int64_t m1,int64_t m2)
872 {
873    unsigned long long int op1, op2, op3, op4;
874    unsigned long long int result1, result2, result3, result4;
875    unsigned long long int temp1, temp2, temp3, temp4;
876    int sign = 0;
877    
878    if (m1 < 0)
879      {
880     op2 = -m1;
881     sign = 1 - sign;
882      }
883    else op2 = m1;
884    if (m2 < 0)
885      {
886     op4 = -m2;
887     sign = 1 - sign;
888      }
889    else op4 = m2;
890    
891    op1 = op2 & 0xFFFFFFFF;
892    op2 = (op2 >> 32) & 0xFFFFFFFF;
893    op3 = op4 & 0xFFFFFFFF;
894    op4 = (op4 >> 32) & 0xFFFFFFFF;
895    
896    temp1 = op1 * op3;
897    temp2 = (temp1 >> 32) + op1 * op4;
898    temp3 = op2 * op3;
899    temp4 = (temp3 >> 32) + op2 * op4;
900    
901    result1 = temp1 & 0xFFFFFFFF;
902    result2 = temp2 + (temp3 & 0xFFFFFFFF);
903    result3 = (result2 >> 32) + temp4;
904    result4 = (result3 >> 32);
905    
906    lo = result1 | (result2 << 32);
907    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
908    if (sign)
909      {
910     hi = ~hi;
911     if (!lo) hi++;
912     else lo = ~lo + 1;
913      }
914 }
915
916 void multu64(uint64_t m1,uint64_t m2)
917 {
918    unsigned long long int op1, op2, op3, op4;
919    unsigned long long int result1, result2, result3, result4;
920    unsigned long long int temp1, temp2, temp3, temp4;
921    
922    op1 = m1 & 0xFFFFFFFF;
923    op2 = (m1 >> 32) & 0xFFFFFFFF;
924    op3 = m2 & 0xFFFFFFFF;
925    op4 = (m2 >> 32) & 0xFFFFFFFF;
926    
927    temp1 = op1 * op3;
928    temp2 = (temp1 >> 32) + op1 * op4;
929    temp3 = op2 * op3;
930    temp4 = (temp3 >> 32) + op2 * op4;
931    
932    result1 = temp1 & 0xFFFFFFFF;
933    result2 = temp2 + (temp3 & 0xFFFFFFFF);
934    result3 = (result2 >> 32) + temp4;
935    result4 = (result3 >> 32);
936    
937    lo = result1 | (result2 << 32);
938    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
939    
940   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
941   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
942 }
943
944 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
945 {
946   if(bits) {
947     original<<=64-bits;
948     original>>=64-bits;
949     loaded<<=bits;
950     original|=loaded;
951   }
952   else original=loaded;
953   return original;
954 }
955 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
956 {
957   if(bits^56) {
958     original>>=64-(bits^56);
959     original<<=64-(bits^56);
960     loaded>>=bits^56;
961     original|=loaded;
962   }
963   else original=loaded;
964   return original;
965 }
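// ldl_merge keeps the low 'bits' bits of the old register value and ORs in
// the loaded doubleword shifted up by 'bits' (e.g. bits==16: the low 16 bits
// of the old value survive, the upper 48 come from the load).  ldr_merge does
// the opposite, keeping the top (bits^56) bits of the old value; when
// bits==56 the loaded value replaces the register completely.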
966 #endif
967
968 #ifdef __i386__
969 #include "assem_x86.c"
970 #endif
971 #ifdef __x86_64__
972 #include "assem_x64.c"
973 #endif
974 #ifdef __arm__
975 #include "assem_arm.c"
976 #endif
977
978 // Add virtual address mapping to linked list
979 void ll_add(struct ll_entry **head,int vaddr,void *addr)
980 {
981   struct ll_entry *new_entry;
982   new_entry=malloc(sizeof(struct ll_entry));
983   assert(new_entry!=NULL);
984   new_entry->vaddr=vaddr;
985   new_entry->reg32=0;
986   new_entry->addr=addr;
987   new_entry->next=*head;
988   *head=new_entry;
989 }
990
991 // Add virtual address mapping for 32-bit compiled block
992 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
993 {
994   ll_add(head,vaddr,addr);
995 #ifndef FORCE32
996   (*head)->reg32=reg32;
997 #endif
998 }
999
1000 // Check if an address is already compiled
1001 // but don't return addresses which are about to expire from the cache
1002 void *check_addr(u_int vaddr)
1003 {
1004   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
1005   if(ht_bin[0]==vaddr) {
1006     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1007       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
1008   }
1009   if(ht_bin[2]==vaddr) {
1010     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1011       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1012   }
1013   u_int page=get_page(vaddr);
1014   struct ll_entry *head;
1015   head=jump_in[page];
1016   while(head!=NULL) {
1017     if(head->vaddr==vaddr&&head->reg32==0) {
1018       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1019         // Update existing entry with current address
1020         if(ht_bin[0]==vaddr) {
1021           ht_bin[1]=(int)head->addr;
1022           return head->addr;
1023         }
1024         if(ht_bin[2]==vaddr) {
1025           ht_bin[3]=(int)head->addr;
1026           return head->addr;
1027         }
1028         // Insert into hash table with low priority.
1029         // Don't evict existing entries, as they are probably
1030         // addresses that are being accessed frequently.
1031         if(ht_bin[0]==-1) {
1032           ht_bin[1]=(int)head->addr;
1033           ht_bin[0]=vaddr;
1034         }else if(ht_bin[2]==-1) {
1035           ht_bin[3]=(int)head->addr;
1036           ht_bin[2]=vaddr;
1037         }
1038         return head->addr;
1039       }
1040     }
1041     head=head->next;
1042   }
1043   return 0;
1044 }
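// The "about to expire" tests above treat the translation cache as circular:
// shifting the (block - out) distance left by (32-TARGET_SIZE_2) scales the
// cache size up to the full 32-bit range, and the comparison rejects blocks
// lying within roughly the next 3/8 of the cache ahead of the output pointer,
// i.e. the area the expire logic (expirep) reclaims as out advances.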
1045
1046 void remove_hash(int vaddr)
1047 {
1048   //printf("remove hash: %x\n",vaddr);
1049   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1050   if(ht_bin[2]==vaddr) {
1051     ht_bin[2]=ht_bin[3]=-1;
1052   }
1053   if(ht_bin[0]==vaddr) {
1054     ht_bin[0]=ht_bin[2];
1055     ht_bin[1]=ht_bin[3];
1056     ht_bin[2]=ht_bin[3]=-1;
1057   }
1058 }
1059
1060 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1061 {
1062   struct ll_entry *next;
1063   while(*head) {
1064     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1065        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1066     {
1067       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1068       remove_hash((*head)->vaddr);
1069       next=(*head)->next;
1070       free(*head);
1071       *head=next;
1072     }
1073     else
1074     {
1075       head=&((*head)->next);
1076     }
1077   }
1078 }
1079
1080 // Remove all entries from linked list
1081 void ll_clear(struct ll_entry **head)
1082 {
1083   struct ll_entry *cur;
1084   struct ll_entry *next;
1085   if((cur=*head)!=NULL) {
1086     *head=0;
1087     while(cur) {
1088       next=cur->next;
1089       free(cur);
1090       cur=next;
1091     }
1092   }
1093 }
1094
1095 // Dereference the pointers and remove if it matches
1096 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1097 {
1098   while(head) {
1099     int ptr=get_pointer(head->addr);
1100     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1101     if(((ptr>>shift)==(addr>>shift)) ||
1102        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1103     {
1104       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1105       u_int host_addr=(u_int)kill_pointer(head->addr);
1106       #ifdef __arm__
1107         needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1108       #endif
1109     }
1110     head=head->next;
1111   }
1112 }
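// (ARM) needs_clear_cache is a bitmap over the translation cache: each bit
// covers a 4KB region and each 32-bit word 128KB, so do_clear_cache() can
// flush only the areas that were actually patched.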
1113
1114 // This is called when we write to a compiled block (see do_invstub)
1115 void invalidate_page(u_int page)
1116 {
1117   struct ll_entry *head;
1118   struct ll_entry *next;
1119   head=jump_in[page];
1120   jump_in[page]=0;
1121   while(head!=NULL) {
1122     inv_debug("INVALIDATE: %x\n",head->vaddr);
1123     remove_hash(head->vaddr);
1124     next=head->next;
1125     free(head);
1126     head=next;
1127   }
1128   head=jump_out[page];
1129   jump_out[page]=0;
1130   while(head!=NULL) {
1131     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1132     u_int host_addr=(u_int)kill_pointer(head->addr);
1133     #ifdef __arm__
1134       needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1135     #endif
1136     next=head->next;
1137     free(head);
1138     head=next;
1139   }
1140 }
1141
1142 static void invalidate_block_range(u_int block, u_int first, u_int last)
1143 {
1144   u_int page=get_page(block<<12);
1145   //printf("first=%d last=%d\n",first,last);
1146   invalidate_page(page);
1147   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1148   assert(last<page+5);
1149   // Invalidate the adjacent pages if a block crosses a 4K boundary
1150   while(first<page) {
1151     invalidate_page(first);
1152     first++;
1153   }
1154   for(first=page+1;first<last;first++) {
1155     invalidate_page(first);
1156   }
1157   #ifdef __arm__
1158     do_clear_cache();
1159   #endif
1160   
1161   // Don't trap writes
1162   invalid_code[block]=1;
1163 #ifndef DISABLE_TLB
1164   // If there is a valid TLB entry for this page, remove write protect
1165   if(tlb_LUT_w[block]) {
1166     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1167     // CHECK: Is this right?
1168     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1169     u_int real_block=tlb_LUT_w[block]>>12;
1170     invalid_code[real_block]=1;
1171     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1172   }
1173   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1174 #endif
1175
1176   #ifdef USE_MINI_HT
1177   memset(mini_ht,-1,sizeof(mini_ht));
1178   #endif
1179 }
1180
1181 void invalidate_block(u_int block)
1182 {
1183   u_int page=get_page(block<<12);
1184   u_int vpage=get_vpage(block<<12);
1185   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1186   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1187   u_int first,last;
1188   first=last=page;
1189   struct ll_entry *head;
1190   head=jump_dirty[vpage];
1191   //printf("page=%d vpage=%d\n",page,vpage);
1192   while(head!=NULL) {
1193     u_int start,end;
1194     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1195       get_bounds((int)head->addr,&start,&end);
1196       //printf("start: %x end: %x\n",start,end);
1197       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1198         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1199           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1200           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1201         }
1202       }
1203 #ifndef DISABLE_TLB
1204       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1205         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1206           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1207           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1208         }
1209       }
1210 #endif
1211     }
1212     head=head->next;
1213   }
1214   invalidate_block_range(block,first,last);
1215 }
1216
1217 void invalidate_addr(u_int addr)
1218 {
1219 #ifdef PCSX
1220   //static int rhits;
1221   // this check is done by the caller
1222   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1223   u_int page=get_page(addr);
1224   if(page<2048) { // RAM
1225     struct ll_entry *head;
1226     u_int addr_min=~0, addr_max=0;
1227     int mask=RAM_SIZE-1;
1228     int pg1;
1229     inv_code_start=addr&~0xfff;
1230     inv_code_end=addr|0xfff;
1231     pg1=page;
1232     if (pg1>0) {
1233       // must check previous page too because of spans..
1234       pg1--;
1235       inv_code_start-=0x1000;
1236     }
1237     for(;pg1<=page;pg1++) {
1238       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1239         u_int start,end;
1240         get_bounds((int)head->addr,&start,&end);
1241         if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
1242           if(start<addr_min) addr_min=start;
1243           if(end>addr_max) addr_max=end;
1244         }
1245         else if(addr<start) {
1246           if(start<inv_code_end)
1247             inv_code_end=start-1;
1248         }
1249         else {
1250           if(end>inv_code_start)
1251             inv_code_start=end;
1252         }
1253       }
1254     }
1255     if (addr_min!=~0) {
1256       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1257       inv_code_start=inv_code_end=~0;
1258       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1259       return;
1260     }
1261     else {
1262       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);//rhits);
1263     }
1264     //rhits=0;
1265     if(page!=0) // FIXME: don't know what's up with page 0 (Klonoa)
1266       return;
1267   }
1268 #endif
1269   invalidate_block(addr>>12);
1270 }
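// inv_code_start/inv_code_end remember a range known to contain no compiled
// code, so the memory write handlers can skip calling invalidate_addr()
// again for further writes inside that range (see the "this check is done by
// the caller" note above).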
1271
1272 // This is called when loading a save state.
1273 // Anything could have changed, so invalidate everything.
1274 void invalidate_all_pages()
1275 {
1276   u_int page,n;
1277   for(page=0;page<4096;page++)
1278     invalidate_page(page);
1279   for(page=0;page<1048576;page++)
1280     if(!invalid_code[page]) {
1281       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1282       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1283     }
1284   #ifdef __arm__
1285   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1286   #endif
1287   #ifdef USE_MINI_HT
1288   memset(mini_ht,-1,sizeof(mini_ht));
1289   #endif
1290   #ifndef DISABLE_TLB
1291   // TLB
1292   for(page=0;page<0x100000;page++) {
1293     if(tlb_LUT_r[page]) {
1294       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1295       if(!tlb_LUT_w[page]||!invalid_code[page])
1296         memory_map[page]|=0x40000000; // Write protect
1297     }
1298     else memory_map[page]=-1;
1299     if(page==0x80000) page=0xC0000;
1300   }
1301   tlb_hacks();
1302   #endif
1303 }
1304
1305 // Add an entry to jump_out after making a link
1306 void add_link(u_int vaddr,void *src)
1307 {
1308   u_int page=get_page(vaddr);
1309   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1310   int *ptr=(int *)(src+4);
1311   assert((*ptr&0x0fff0000)==0x059f0000);
1312   ll_add(jump_out+page,vaddr,src);
1313   //int ptr=get_pointer(src);
1314   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1315 }
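// The assert checks the word after the branch being patched: masked with
// 0x0fff0000 it must equal 0x059f0000, i.e. an ARM "ldr rX,[pc,#imm]"
// literal-pool load emitted by the jump stub.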
1316
1317 // If a code block was found to be unmodified (bit was set in
1318 // restore_candidate) and it remains unmodified (bit is clear
1319 // in invalid_code) then move the entries for that 4K page from
1320 // the dirty list to the clean list.
1321 void clean_blocks(u_int page)
1322 {
1323   struct ll_entry *head;
1324   inv_debug("INV: clean_blocks page=%d\n",page);
1325   head=jump_dirty[page];
1326   while(head!=NULL) {
1327     if(!invalid_code[head->vaddr>>12]) {
1328       // Don't restore blocks which are about to expire from the cache
1329       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1330         u_int start,end;
1331         if(verify_dirty((int)head->addr)) {
1332           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1333           u_int i;
1334           u_int inv=0;
1335           get_bounds((int)head->addr,&start,&end);
1336           if(start-(u_int)rdram<RAM_SIZE) {
1337             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1338               inv|=invalid_code[i];
1339             }
1340           }
1341           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1342             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1343             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1344             if(addr<start||addr>=end) inv=1;
1345           }
1346           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1347             inv=1;
1348           }
1349           if(!inv) {
1350             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1351             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1352               u_int ppage=page;
1353 #ifndef DISABLE_TLB
1354               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1355 #endif
1356               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1357               //printf("page=%x, addr=%x\n",page,head->vaddr);
1358               //assert(head->vaddr>>12==(page|0x80000));
1359               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1360               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1361               if(!head->reg32) {
1362                 if(ht_bin[0]==head->vaddr) {
1363                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1364                 }
1365                 if(ht_bin[2]==head->vaddr) {
1366                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1367                 }
1368               }
1369             }
1370           }
1371         }
1372       }
1373     }
1374     head=head->next;
1375   }
1376 }
1377
1378
1379 void mov_alloc(struct regstat *current,int i)
1380 {
1381   // Note: Don't need to actually alloc the source registers
1382   if((~current->is32>>rs1[i])&1) {
1383     //alloc_reg64(current,i,rs1[i]);
1384     alloc_reg64(current,i,rt1[i]);
1385     current->is32&=~(1LL<<rt1[i]);
1386   } else {
1387     //alloc_reg(current,i,rs1[i]);
1388     alloc_reg(current,i,rt1[i]);
1389     current->is32|=(1LL<<rt1[i]);
1390   }
1391   clear_const(current,rs1[i]);
1392   clear_const(current,rt1[i]);
1393   dirty_reg(current,rt1[i]);
1394 }
1395
1396 void shiftimm_alloc(struct regstat *current,int i)
1397 {
1398   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1399   {
1400     if(rt1[i]) {
1401       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1402       else lt1[i]=rs1[i];
1403       alloc_reg(current,i,rt1[i]);
1404       current->is32|=1LL<<rt1[i];
1405       dirty_reg(current,rt1[i]);
1406       if(is_const(current,rs1[i])) {
1407         int v=get_const(current,rs1[i]);
1408         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1409         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1410         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1411       }
1412       else clear_const(current,rt1[i]);
1413     }
1414   }
1415   else
1416   {
1417     clear_const(current,rs1[i]);
1418     clear_const(current,rt1[i]);
1419   }
1420
1421   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1422   {
1423     if(rt1[i]) {
1424       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1425       alloc_reg64(current,i,rt1[i]);
1426       current->is32&=~(1LL<<rt1[i]);
1427       dirty_reg(current,rt1[i]);
1428     }
1429   }
1430   if(opcode2[i]==0x3c) // DSLL32
1431   {
1432     if(rt1[i]) {
1433       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1434       alloc_reg64(current,i,rt1[i]);
1435       current->is32&=~(1LL<<rt1[i]);
1436       dirty_reg(current,rt1[i]);
1437     }
1438   }
1439   if(opcode2[i]==0x3e) // DSRL32
1440   {
1441     if(rt1[i]) {
1442       alloc_reg64(current,i,rs1[i]);
1443       if(imm[i]==32) {
1444         alloc_reg64(current,i,rt1[i]);
1445         current->is32&=~(1LL<<rt1[i]);
1446       } else {
1447         alloc_reg(current,i,rt1[i]);
1448         current->is32|=1LL<<rt1[i];
1449       }
1450       dirty_reg(current,rt1[i]);
1451     }
1452   }
1453   if(opcode2[i]==0x3f) // DSRA32
1454   {
1455     if(rt1[i]) {
1456       alloc_reg64(current,i,rs1[i]);
1457       alloc_reg(current,i,rt1[i]);
1458       current->is32|=1LL<<rt1[i];
1459       dirty_reg(current,rt1[i]);
1460     }
1461   }
1462 }
1463
1464 void shift_alloc(struct regstat *current,int i)
1465 {
1466   if(rt1[i]) {
1467     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1468     {
1469       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1470       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1471       alloc_reg(current,i,rt1[i]);
1472       if(rt1[i]==rs2[i]) {
1473         alloc_reg_temp(current,i,-1);
1474         minimum_free_regs[i]=1;
1475       }
1476       current->is32|=1LL<<rt1[i];
1477     } else { // DSLLV/DSRLV/DSRAV
1478       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1479       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1480       alloc_reg64(current,i,rt1[i]);
1481       current->is32&=~(1LL<<rt1[i]);
1482       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1483       {
1484         alloc_reg_temp(current,i,-1);
1485         minimum_free_regs[i]=1;
1486       }
1487     }
1488     clear_const(current,rs1[i]);
1489     clear_const(current,rs2[i]);
1490     clear_const(current,rt1[i]);
1491     dirty_reg(current,rt1[i]);
1492   }
1493 }
1494
1495 void alu_alloc(struct regstat *current,int i)
1496 {
1497   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1498     if(rt1[i]) {
1499       if(rs1[i]&&rs2[i]) {
1500         alloc_reg(current,i,rs1[i]);
1501         alloc_reg(current,i,rs2[i]);
1502       }
1503       else {
1504         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1505         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1506       }
1507       alloc_reg(current,i,rt1[i]);
1508     }
1509     current->is32|=1LL<<rt1[i];
1510   }
1511   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1512     if(rt1[i]) {
1513       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1514       {
1515         alloc_reg64(current,i,rs1[i]);
1516         alloc_reg64(current,i,rs2[i]);
1517         alloc_reg(current,i,rt1[i]);
1518       } else {
1519         alloc_reg(current,i,rs1[i]);
1520         alloc_reg(current,i,rs2[i]);
1521         alloc_reg(current,i,rt1[i]);
1522       }
1523     }
1524     current->is32|=1LL<<rt1[i];
1525   }
1526   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1527     if(rt1[i]) {
1528       if(rs1[i]&&rs2[i]) {
1529         alloc_reg(current,i,rs1[i]);
1530         alloc_reg(current,i,rs2[i]);
1531       }
1532       else
1533       {
1534         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1535         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1536       }
1537       alloc_reg(current,i,rt1[i]);
1538       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1539       {
1540         if(!((current->uu>>rt1[i])&1)) {
1541           alloc_reg64(current,i,rt1[i]);
1542         }
1543         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1544           if(rs1[i]&&rs2[i]) {
1545             alloc_reg64(current,i,rs1[i]);
1546             alloc_reg64(current,i,rs2[i]);
1547           }
1548           else
1549           {
1550             // Is it really worth it to keep 64-bit values in registers?
1551             #ifdef NATIVE_64BIT
1552             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1553             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1554             #endif
1555           }
1556         }
1557         current->is32&=~(1LL<<rt1[i]);
1558       } else {
1559         current->is32|=1LL<<rt1[i];
1560       }
1561     }
1562   }
1563   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1564     if(rt1[i]) {
1565       if(rs1[i]&&rs2[i]) {
1566         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1567           alloc_reg64(current,i,rs1[i]);
1568           alloc_reg64(current,i,rs2[i]);
1569           alloc_reg64(current,i,rt1[i]);
1570         } else {
1571           alloc_reg(current,i,rs1[i]);
1572           alloc_reg(current,i,rs2[i]);
1573           alloc_reg(current,i,rt1[i]);
1574         }
1575       }
1576       else {
1577         alloc_reg(current,i,rt1[i]);
1578         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1579           // DADD used as move, or zeroing
1580           // If we have a 64-bit source, then make the target 64 bits too
1581           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1582             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1583             alloc_reg64(current,i,rt1[i]);
1584           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1585             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1586             alloc_reg64(current,i,rt1[i]);
1587           }
1588           if(opcode2[i]>=0x2e&&rs2[i]) {
1589             // DSUB used as negation - 64-bit result
1590             // If we have a 32-bit register, extend it to 64 bits
1591             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1592             alloc_reg64(current,i,rt1[i]);
1593           }
1594         }
1595       }
1596       if(rs1[i]&&rs2[i]) {
1597         current->is32&=~(1LL<<rt1[i]);
1598       } else if(rs1[i]) {
1599         current->is32&=~(1LL<<rt1[i]);
1600         if((current->is32>>rs1[i])&1)
1601           current->is32|=1LL<<rt1[i];
1602       } else if(rs2[i]) {
1603         current->is32&=~(1LL<<rt1[i]);
1604         if((current->is32>>rs2[i])&1)
1605           current->is32|=1LL<<rt1[i];
1606       } else {
1607         current->is32|=1LL<<rt1[i];
1608       }
1609     }
1610   }
1611   clear_const(current,rs1[i]);
1612   clear_const(current,rs2[i]);
1613   clear_const(current,rt1[i]);
1614   dirty_reg(current,rt1[i]);
1615 }
1616
1617 void imm16_alloc(struct regstat *current,int i)
1618 {
1619   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1620   else lt1[i]=rs1[i];
1621   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1622   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1623     current->is32&=~(1LL<<rt1[i]);
1624     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1625       // TODO: Could preserve the 32-bit flag if the immediate is zero
1626       alloc_reg64(current,i,rt1[i]);
1627       alloc_reg64(current,i,rs1[i]);
1628     }
1629     clear_const(current,rs1[i]);
1630     clear_const(current,rt1[i]);
1631   }
1632   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1633     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1634     current->is32|=1LL<<rt1[i];
1635     clear_const(current,rs1[i]);
1636     clear_const(current,rt1[i]);
1637   }
1638   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1639     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1640       if(rs1[i]!=rt1[i]) {
1641         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1642         alloc_reg64(current,i,rt1[i]);
1643         current->is32&=~(1LL<<rt1[i]);
1644       }
1645     }
1646     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1647     if(is_const(current,rs1[i])) {
1648       int v=get_const(current,rs1[i]);
1649       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1650       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1651       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1652     }
1653     else clear_const(current,rt1[i]);
1654   }
1655   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1656     if(is_const(current,rs1[i])) {
1657       int v=get_const(current,rs1[i]);
1658       set_const(current,rt1[i],v+imm[i]);
1659     }
1660     else clear_const(current,rt1[i]);
1661     current->is32|=1LL<<rt1[i];
1662   }
1663   else {
1664     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1665     current->is32|=1LL<<rt1[i];
1666   }
1667   dirty_reg(current,rt1[i]);
1668 }
1669
1670 void load_alloc(struct regstat *current,int i)
1671 {
1672   clear_const(current,rt1[i]);
1673   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1674   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1675   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1676   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1677     alloc_reg(current,i,rt1[i]);
1678     assert(get_reg(current->regmap,rt1[i])>=0);
1679     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1680     {
1681       current->is32&=~(1LL<<rt1[i]);
1682       alloc_reg64(current,i,rt1[i]);
1683     }
1684     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1685     {
1686       current->is32&=~(1LL<<rt1[i]);
1687       alloc_reg64(current,i,rt1[i]);
1688       alloc_all(current,i);
1689       alloc_reg64(current,i,FTEMP);
1690       minimum_free_regs[i]=HOST_REGS;
1691     }
1692     else current->is32|=1LL<<rt1[i];
1693     dirty_reg(current,rt1[i]);
1694     // If using TLB, need a register for pointer to the mapping table
1695     if(using_tlb) alloc_reg(current,i,TLREG);
1696     // LWL/LWR need a temporary register for the old value
1697     if(opcode[i]==0x22||opcode[i]==0x26)
1698     {
1699       alloc_reg(current,i,FTEMP);
1700       alloc_reg_temp(current,i,-1);
1701       minimum_free_regs[i]=1;
1702     }
1703   }
1704   else
1705   {
1706     // Load to r0 or unneeded register (dummy load)
1707     // but we still need a register to calculate the address
1708     if(opcode[i]==0x22||opcode[i]==0x26)
1709     {
1710       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1711     }
1712     // If using TLB, need a register for pointer to the mapping table
1713     if(using_tlb) alloc_reg(current,i,TLREG);
1714     alloc_reg_temp(current,i,-1);
1715     minimum_free_regs[i]=1;
1716     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1717     {
1718       alloc_all(current,i);
1719       alloc_reg64(current,i,FTEMP);
1720       minimum_free_regs[i]=HOST_REGS;
1721     }
1722   }
1723 }
1724
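// Register allocation for stores: base and source value, a 64-bit source
// for SDL/SDR/SD, FTEMP for the unaligned variants, the TLB or INVCP
// pointer as needed, and a temporary for address generation.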
1725 void store_alloc(struct regstat *current,int i)
1726 {
1727   clear_const(current,rs2[i]);
1728   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1729   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1730   alloc_reg(current,i,rs2[i]);
1731   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1732     alloc_reg64(current,i,rs2[i]);
1733     if(rs2[i]) alloc_reg(current,i,FTEMP);
1734   }
1735   // If using TLB, need a register for pointer to the mapping table
1736   if(using_tlb) alloc_reg(current,i,TLREG);
1737   #if defined(HOST_IMM8)
1738   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1739   else alloc_reg(current,i,INVCP);
1740   #endif
1741   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1742     alloc_reg(current,i,FTEMP);
1743   }
1744   // We need a temporary register for address generation
1745   alloc_reg_temp(current,i,-1);
1746   minimum_free_regs[i]=1;
1747 }
1748
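// Register allocation for COP1 (FPU) loads and stores
// (LWC1/LDC1/SWC1/SDC1).  CSREG holds the coprocessor status.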
1749 void c1ls_alloc(struct regstat *current,int i)
1750 {
1751   //clear_const(current,rs1[i]); // FIXME
1752   clear_const(current,rt1[i]);
1753   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1754   alloc_reg(current,i,CSREG); // Status
1755   alloc_reg(current,i,FTEMP);
1756   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1757     alloc_reg64(current,i,FTEMP);
1758   }
1759   // If using TLB, need a register for pointer to the mapping table
1760   if(using_tlb) alloc_reg(current,i,TLREG);
1761   #if defined(HOST_IMM8)
1762   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1763   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1764     alloc_reg(current,i,INVCP);
1765   #endif
1766   // We need a temporary register for address generation
1767   alloc_reg_temp(current,i,-1);
1768 }
1769
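// Register allocation for COP2 (GTE) loads and stores (LWC2/SWC2).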
1770 void c2ls_alloc(struct regstat *current,int i)
1771 {
1772   clear_const(current,rt1[i]);
1773   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1774   alloc_reg(current,i,FTEMP);
1775   // If using TLB, need a register for pointer to the mapping table
1776   if(using_tlb) alloc_reg(current,i,TLREG);
1777   #if defined(HOST_IMM8)
1778   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1779   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1780     alloc_reg(current,i,INVCP);
1781   #endif
1782   // We need a temporary register for address generation
1783   alloc_reg_temp(current,i,-1);
1784   minimum_free_regs[i]=1;
1785 }
1786
1787 #ifndef multdiv_alloc
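// Register allocation for MULT/MULTU/DIV/DIVU and their 64-bit forms.
// The 32-bit forms allocate HI/LO and both sources; the 64-bit forms
// reserve every host register (alloc_all).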
1788 void multdiv_alloc(struct regstat *current,int i)
1789 {
1790   //  case 0x18: MULT
1791   //  case 0x19: MULTU
1792   //  case 0x1A: DIV
1793   //  case 0x1B: DIVU
1794   //  case 0x1C: DMULT
1795   //  case 0x1D: DMULTU
1796   //  case 0x1E: DDIV
1797   //  case 0x1F: DDIVU
1798   clear_const(current,rs1[i]);
1799   clear_const(current,rs2[i]);
1800   if(rs1[i]&&rs2[i])
1801   {
1802     if((opcode2[i]&4)==0) // 32-bit
1803     {
1804       current->u&=~(1LL<<HIREG);
1805       current->u&=~(1LL<<LOREG);
1806       alloc_reg(current,i,HIREG);
1807       alloc_reg(current,i,LOREG);
1808       alloc_reg(current,i,rs1[i]);
1809       alloc_reg(current,i,rs2[i]);
1810       current->is32|=1LL<<HIREG;
1811       current->is32|=1LL<<LOREG;
1812       dirty_reg(current,HIREG);
1813       dirty_reg(current,LOREG);
1814     }
1815     else // 64-bit
1816     {
1817       current->u&=~(1LL<<HIREG);
1818       current->u&=~(1LL<<LOREG);
1819       current->uu&=~(1LL<<HIREG);
1820       current->uu&=~(1LL<<LOREG);
1821       alloc_reg64(current,i,HIREG);
1822       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1823       alloc_reg64(current,i,rs1[i]);
1824       alloc_reg64(current,i,rs2[i]);
1825       alloc_all(current,i);
1826       current->is32&=~(1LL<<HIREG);
1827       current->is32&=~(1LL<<LOREG);
1828       dirty_reg(current,HIREG);
1829       dirty_reg(current,LOREG);
1830       minimum_free_regs[i]=HOST_REGS;
1831     }
1832   }
1833   else
1834   {
1835     // Multiply by zero is zero.
1836     // MIPS does not have a divide by zero exception.
1837     // The result is undefined, we return zero.
1838     alloc_reg(current,i,HIREG);
1839     alloc_reg(current,i,LOREG);
1840     current->is32|=1LL<<HIREG;
1841     current->is32|=1LL<<LOREG;
1842     dirty_reg(current,HIREG);
1843     dirty_reg(current,LOREG);
1844   }
1845 }
1846 #endif
1847
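// Register allocation for COP0 instructions: MFC0 reads into rt1,
// MTC0 writes rs1, the remaining forms are the TLB ops/ERET.
// All of them reserve every host register.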
1848 void cop0_alloc(struct regstat *current,int i)
1849 {
1850   if(opcode2[i]==0) // MFC0
1851   {
1852     if(rt1[i]) {
1853       clear_const(current,rt1[i]);
1854       alloc_all(current,i);
1855       alloc_reg(current,i,rt1[i]);
1856       current->is32|=1LL<<rt1[i];
1857       dirty_reg(current,rt1[i]);
1858     }
1859   }
1860   else if(opcode2[i]==4) // MTC0
1861   {
1862     if(rs1[i]){
1863       clear_const(current,rs1[i]);
1864       alloc_reg(current,i,rs1[i]);
1865       alloc_all(current,i);
1866     }
1867     else {
1868       alloc_all(current,i); // FIXME: Keep r0
1869       current->u&=~1LL;
1870       alloc_reg(current,i,0);
1871     }
1872   }
1873   else
1874   {
1875     // TLBR/TLBWI/TLBWR/TLBP/ERET
1876     assert(opcode2[i]==0x10);
1877     alloc_all(current,i);
1878   }
1879   minimum_free_regs[i]=HOST_REGS;
1880 }
1881
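// Register allocation for coprocessor move instructions
// (MFC1/DMFC1/CFC1 and MTC1/DMTC1/CTC1); also used for COP2 moves.
// CSREG holds the coprocessor status.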
1882 void cop1_alloc(struct regstat *current,int i)
1883 {
1884   alloc_reg(current,i,CSREG); // Load status
1885   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1886   {
1887     if(rt1[i]){
1888       clear_const(current,rt1[i]);
1889       if(opcode2[i]==1) {
1890         alloc_reg64(current,i,rt1[i]); // DMFC1
1891         current->is32&=~(1LL<<rt1[i]);
1892       }else{
1893         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1894         current->is32|=1LL<<rt1[i];
1895       }
1896       dirty_reg(current,rt1[i]);
1897     }
1898     alloc_reg_temp(current,i,-1);
1899   }
1900   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1901   {
1902     if(rs1[i]){
1903       clear_const(current,rs1[i]);
1904       if(opcode2[i]==5)
1905         alloc_reg64(current,i,rs1[i]); // DMTC1
1906       else
1907         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1908       alloc_reg_temp(current,i,-1);
1909     }
1910     else {
1911       current->u&=~1LL;
1912       alloc_reg(current,i,0);
1913       alloc_reg_temp(current,i,-1);
1914     }
1915   }
1916   minimum_free_regs[i]=1;
1917 }
1918 void fconv_alloc(struct regstat *current,int i)
1919 {
1920   alloc_reg(current,i,CSREG); // Load status
1921   alloc_reg_temp(current,i,-1);
1922   minimum_free_regs[i]=1;
1923 }
1924 void float_alloc(struct regstat *current,int i)
1925 {
1926   alloc_reg(current,i,CSREG); // Load status
1927   alloc_reg_temp(current,i,-1);
1928   minimum_free_regs[i]=1;
1929 }
1930 void c2op_alloc(struct regstat *current,int i)
1931 {
1932   alloc_reg_temp(current,i,-1);
1933 }
1934 void fcomp_alloc(struct regstat *current,int i)
1935 {
1936   alloc_reg(current,i,CSREG); // Load status
1937   alloc_reg(current,i,FSREG); // Load flags
1938   dirty_reg(current,FSREG); // Flag will be modified
1939   alloc_reg_temp(current,i,-1);
1940   minimum_free_regs[i]=1;
1941 }
1942
1943 void syscall_alloc(struct regstat *current,int i)
1944 {
1945   alloc_cc(current,i);
1946   dirty_reg(current,CCREG);
1947   alloc_all(current,i);
1948   minimum_free_regs[i]=HOST_REGS;
1949   current->isconst=0;
1950 }
1951
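// Allocate registers for the instruction in a branch delay slot by
// dispatching on its type.  A branch in the delay slot is not handled;
// it disables speculative precompilation instead.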
1952 void delayslot_alloc(struct regstat *current,int i)
1953 {
1954   switch(itype[i]) {
1955     case UJUMP:
1956     case CJUMP:
1957     case SJUMP:
1958     case RJUMP:
1959     case FJUMP:
1960     case SYSCALL:
1961     case HLECALL:
1962     case SPAN:
1963       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1964       printf("Disabled speculative precompilation\n");
1965       stop_after_jal=1;
1966       break;
1967     case IMM16:
1968       imm16_alloc(current,i);
1969       break;
1970     case LOAD:
1971     case LOADLR:
1972       load_alloc(current,i);
1973       break;
1974     case STORE:
1975     case STORELR:
1976       store_alloc(current,i);
1977       break;
1978     case ALU:
1979       alu_alloc(current,i);
1980       break;
1981     case SHIFT:
1982       shift_alloc(current,i);
1983       break;
1984     case MULTDIV:
1985       multdiv_alloc(current,i);
1986       break;
1987     case SHIFTIMM:
1988       shiftimm_alloc(current,i);
1989       break;
1990     case MOV:
1991       mov_alloc(current,i);
1992       break;
1993     case COP0:
1994       cop0_alloc(current,i);
1995       break;
1996     case COP1:
1997     case COP2:
1998       cop1_alloc(current,i);
1999       break;
2000     case C1LS:
2001       c1ls_alloc(current,i);
2002       break;
2003     case C2LS:
2004       c2ls_alloc(current,i);
2005       break;
2006     case FCONV:
2007       fconv_alloc(current,i);
2008       break;
2009     case FLOAT:
2010       float_alloc(current,i);
2011       break;
2012     case FCOMP:
2013       fcomp_alloc(current,i);
2014       break;
2015     case C2OP:
2016       c2op_alloc(current,i);
2017       break;
2018   }
2019 }
2020
2021 // Special case where a branch and delay slot span two pages in virtual memory
2022 static void pagespan_alloc(struct regstat *current,int i)
2023 {
2024   current->isconst=0;
2025   current->wasconst=0;
2026   regs[i].wasconst=0;
2027   minimum_free_regs[i]=HOST_REGS;
2028   alloc_all(current,i);
2029   alloc_cc(current,i);
2030   dirty_reg(current,CCREG);
2031   if(opcode[i]==3) // JAL
2032   {
2033     alloc_reg(current,i,31);
2034     dirty_reg(current,31);
2035   }
2036   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
2037   {
2038     alloc_reg(current,i,rs1[i]);
2039     if (rt1[i]!=0) {
2040       alloc_reg(current,i,rt1[i]);
2041       dirty_reg(current,rt1[i]);
2042     }
2043   }
2044   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
2045   {
2046     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2047     if(rs2[i]) alloc_reg(current,i,rs2[i]);
2048     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
2049     {
2050       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2051       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
2052     }
2053   }
2054   else
2055   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
2056   {
2057     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2058     if(!((current->is32>>rs1[i])&1))
2059     {
2060       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2061     }
2062   }
2063   else
2064   if(opcode[i]==0x11) // BC1
2065   {
2066     alloc_reg(current,i,FSREG);
2067     alloc_reg(current,i,CSREG);
2068   }
2069   //else ...
2070 }
2071
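// Record an out-of-line stub to be generated later: the stub type,
// the location of the jump to patch, the return address, and five
// stub-specific parameters.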
2072 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
2073 {
2074   stubs[stubcount][0]=type;
2075   stubs[stubcount][1]=addr;
2076   stubs[stubcount][2]=retaddr;
2077   stubs[stubcount][3]=a;
2078   stubs[stubcount][4]=b;
2079   stubs[stubcount][5]=c;
2080   stubs[stubcount][6]=d;
2081   stubs[stubcount][7]=e;
2082   stubcount++;
2083 }
2084
2085 // Write out a single register
2086 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2087 {
2088   int hr;
2089   for(hr=0;hr<HOST_REGS;hr++) {
2090     if(hr!=EXCLUDE_REG) {
2091       if((regmap[hr]&63)==r) {
2092         if((dirty>>hr)&1) {
2093           if(regmap[hr]<64) {
2094             emit_storereg(r,hr);
2095 #ifndef FORCE32
2096             if((is32>>regmap[hr])&1) {
2097               emit_sarimm(hr,31,hr);
2098               emit_storereg(r|64,hr);
2099             }
2100 #endif
2101           }else{
2102             emit_storereg(r|64,hr);
2103           }
2104         }
2105       }
2106     }
2107   }
2108 }
2109
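// Debug/trace helpers: checksum RDRAM and the register file, and dump
// the register state.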
2110 int mchecksum()
2111 {
2112   //if(!tracedebug) return 0;
2113   int i;
2114   int sum=0;
2115   for(i=0;i<2097152;i++) {
2116     unsigned int temp=sum;
2117     sum<<=1;
2118     sum|=(~temp)>>31;
2119     sum^=((u_int *)rdram)[i];
2120   }
2121   return sum;
2122 }
2123 int rchecksum()
2124 {
2125   int i;
2126   int sum=0;
2127   for(i=0;i<64;i++)
2128     sum^=((u_int *)reg)[i];
2129   return sum;
2130 }
2131 void rlist()
2132 {
2133   int i;
2134   printf("TRACE: ");
2135   for(i=0;i<32;i++)
2136     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2137   printf("\n");
2138 #ifndef DISABLE_COP1
2139   printf("TRACE: ");
2140   for(i=0;i<32;i++)
2141     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2142   printf("\n");
2143 #endif
2144 }
2145
2146 void enabletrace()
2147 {
2148   tracedebug=1;
2149 }
2150
2151 void memdebug(int i)
2152 {
2153   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2154   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2155   //rlist();
2156   //if(tracedebug) {
2157   //if(Count>=-2084597794) {
2158   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2159   //if(0) {
2160     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2161     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2162     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2163     rlist();
2164     #ifdef __i386__
2165     printf("TRACE: %x\n",(&i)[-1]);
2166     #endif
2167     #ifdef __arm__
2168     int j;
2169     printf("TRACE: %x \n",(&j)[10]);
2170     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2171     #endif
2172     //fflush(stdout);
2173   }
2174   //printf("TRACE: %x\n",(&i)[-1]);
2175 }
2176
2177 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2178 {
2179   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2180 }
2181
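// Emit code for the R-type ALU operations (ADD/ADDU/SUB/SUBU,
// DADD/DADDU/DSUB/DSUBU, SLT/SLTU, AND/OR/XOR/NOR) using the host
// registers assigned in i_regs.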
2182 void alu_assemble(int i,struct regstat *i_regs)
2183 {
2184   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2185     if(rt1[i]) {
2186       signed char s1,s2,t;
2187       t=get_reg(i_regs->regmap,rt1[i]);
2188       if(t>=0) {
2189         s1=get_reg(i_regs->regmap,rs1[i]);
2190         s2=get_reg(i_regs->regmap,rs2[i]);
2191         if(rs1[i]&&rs2[i]) {
2192           assert(s1>=0);
2193           assert(s2>=0);
2194           if(opcode2[i]&2) emit_sub(s1,s2,t);
2195           else emit_add(s1,s2,t);
2196         }
2197         else if(rs1[i]) {
2198           if(s1>=0) emit_mov(s1,t);
2199           else emit_loadreg(rs1[i],t);
2200         }
2201         else if(rs2[i]) {
2202           if(s2>=0) {
2203             if(opcode2[i]&2) emit_neg(s2,t);
2204             else emit_mov(s2,t);
2205           }
2206           else {
2207             emit_loadreg(rs2[i],t);
2208             if(opcode2[i]&2) emit_neg(t,t);
2209           }
2210         }
2211         else emit_zeroreg(t);
2212       }
2213     }
2214   }
2215   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2216     if(rt1[i]) {
2217       signed char s1l,s2l,s1h,s2h,tl,th;
2218       tl=get_reg(i_regs->regmap,rt1[i]);
2219       th=get_reg(i_regs->regmap,rt1[i]|64);
2220       if(tl>=0) {
2221         s1l=get_reg(i_regs->regmap,rs1[i]);
2222         s2l=get_reg(i_regs->regmap,rs2[i]);
2223         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2224         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2225         if(rs1[i]&&rs2[i]) {
2226           assert(s1l>=0);
2227           assert(s2l>=0);
2228           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2229           else emit_adds(s1l,s2l,tl);
2230           if(th>=0) {
2231             #ifdef INVERTED_CARRY
2232             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2233             #else
2234             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2235             #endif
2236             else emit_add(s1h,s2h,th);
2237           }
2238         }
2239         else if(rs1[i]) {
2240           if(s1l>=0) emit_mov(s1l,tl);
2241           else emit_loadreg(rs1[i],tl);
2242           if(th>=0) {
2243             if(s1h>=0) emit_mov(s1h,th);
2244             else emit_loadreg(rs1[i]|64,th);
2245           }
2246         }
2247         else if(rs2[i]) {
2248           if(s2l>=0) {
2249             if(opcode2[i]&2) emit_negs(s2l,tl);
2250             else emit_mov(s2l,tl);
2251           }
2252           else {
2253             emit_loadreg(rs2[i],tl);
2254             if(opcode2[i]&2) emit_negs(tl,tl);
2255           }
2256           if(th>=0) {
2257             #ifdef INVERTED_CARRY
2258             if(s2h>=0) emit_mov(s2h,th);
2259             else emit_loadreg(rs2[i]|64,th);
2260             if(opcode2[i]&2) {
2261               emit_adcimm(-1,th); // x86 has inverted carry flag
2262               emit_not(th,th);
2263             }
2264             #else
2265             if(opcode2[i]&2) {
2266               if(s2h>=0) emit_rscimm(s2h,0,th);
2267               else {
2268                 emit_loadreg(rs2[i]|64,th);
2269                 emit_rscimm(th,0,th);
2270               }
2271             }else{
2272               if(s2h>=0) emit_mov(s2h,th);
2273               else emit_loadreg(rs2[i]|64,th);
2274             }
2275             #endif
2276           }
2277         }
2278         else {
2279           emit_zeroreg(tl);
2280           if(th>=0) emit_zeroreg(th);
2281         }
2282       }
2283     }
2284   }
2285   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2286     if(rt1[i]) {
2287       signed char s1l,s1h,s2l,s2h,t;
2288       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2289       {
2290         t=get_reg(i_regs->regmap,rt1[i]);
2291         //assert(t>=0);
2292         if(t>=0) {
2293           s1l=get_reg(i_regs->regmap,rs1[i]);
2294           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2295           s2l=get_reg(i_regs->regmap,rs2[i]);
2296           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2297           if(rs2[i]==0) // rx<r0
2298           {
2299             assert(s1h>=0);
2300             if(opcode2[i]==0x2a) // SLT
2301               emit_shrimm(s1h,31,t);
2302             else // SLTU (unsigned cannot be less than zero)
2303               emit_zeroreg(t);
2304           }
2305           else if(rs1[i]==0) // r0<rx
2306           {
2307             assert(s2h>=0);
2308             if(opcode2[i]==0x2a) // SLT
2309               emit_set_gz64_32(s2h,s2l,t);
2310             else // SLTU (set if not zero)
2311               emit_set_nz64_32(s2h,s2l,t);
2312           }
2313           else {
2314             assert(s1l>=0);assert(s1h>=0);
2315             assert(s2l>=0);assert(s2h>=0);
2316             if(opcode2[i]==0x2a) // SLT
2317               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2318             else // SLTU
2319               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2320           }
2321         }
2322       } else {
2323         t=get_reg(i_regs->regmap,rt1[i]);
2324         //assert(t>=0);
2325         if(t>=0) {
2326           s1l=get_reg(i_regs->regmap,rs1[i]);
2327           s2l=get_reg(i_regs->regmap,rs2[i]);
2328           if(rs2[i]==0) // rx<r0
2329           {
2330             assert(s1l>=0);
2331             if(opcode2[i]==0x2a) // SLT
2332               emit_shrimm(s1l,31,t);
2333             else // SLTU (unsigned cannot be less than zero)
2334               emit_zeroreg(t);
2335           }
2336           else if(rs1[i]==0) // r0<rx
2337           {
2338             assert(s2l>=0);
2339             if(opcode2[i]==0x2a) // SLT
2340               emit_set_gz32(s2l,t);
2341             else // SLTU (set if not zero)
2342               emit_set_nz32(s2l,t);
2343           }
2344           else{
2345             assert(s1l>=0);assert(s2l>=0);
2346             if(opcode2[i]==0x2a) // SLT
2347               emit_set_if_less32(s1l,s2l,t);
2348             else // SLTU
2349               emit_set_if_carry32(s1l,s2l,t);
2350           }
2351         }
2352       }
2353     }
2354   }
2355   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2356     if(rt1[i]) {
2357       signed char s1l,s1h,s2l,s2h,th,tl;
2358       tl=get_reg(i_regs->regmap,rt1[i]);
2359       th=get_reg(i_regs->regmap,rt1[i]|64);
2360       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2361       {
2362         assert(tl>=0);
2363         if(tl>=0) {
2364           s1l=get_reg(i_regs->regmap,rs1[i]);
2365           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2366           s2l=get_reg(i_regs->regmap,rs2[i]);
2367           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2368           if(rs1[i]&&rs2[i]) {
2369             assert(s1l>=0);assert(s1h>=0);
2370             assert(s2l>=0);assert(s2h>=0);
2371             if(opcode2[i]==0x24) { // AND
2372               emit_and(s1l,s2l,tl);
2373               emit_and(s1h,s2h,th);
2374             } else
2375             if(opcode2[i]==0x25) { // OR
2376               emit_or(s1l,s2l,tl);
2377               emit_or(s1h,s2h,th);
2378             } else
2379             if(opcode2[i]==0x26) { // XOR
2380               emit_xor(s1l,s2l,tl);
2381               emit_xor(s1h,s2h,th);
2382             } else
2383             if(opcode2[i]==0x27) { // NOR
2384               emit_or(s1l,s2l,tl);
2385               emit_or(s1h,s2h,th);
2386               emit_not(tl,tl);
2387               emit_not(th,th);
2388             }
2389           }
2390           else
2391           {
2392             if(opcode2[i]==0x24) { // AND
2393               emit_zeroreg(tl);
2394               emit_zeroreg(th);
2395             } else
2396             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2397               if(rs1[i]){
2398                 if(s1l>=0) emit_mov(s1l,tl);
2399                 else emit_loadreg(rs1[i],tl);
2400                 if(s1h>=0) emit_mov(s1h,th);
2401                 else emit_loadreg(rs1[i]|64,th);
2402               }
2403               else
2404               if(rs2[i]){
2405                 if(s2l>=0) emit_mov(s2l,tl);
2406                 else emit_loadreg(rs2[i],tl);
2407                 if(s2h>=0) emit_mov(s2h,th);
2408                 else emit_loadreg(rs2[i]|64,th);
2409               }
2410               else{
2411                 emit_zeroreg(tl);
2412                 emit_zeroreg(th);
2413               }
2414             } else
2415             if(opcode2[i]==0x27) { // NOR
2416               if(rs1[i]){
2417                 if(s1l>=0) emit_not(s1l,tl);
2418                 else{
2419                   emit_loadreg(rs1[i],tl);
2420                   emit_not(tl,tl);
2421                 }
2422                 if(s1h>=0) emit_not(s1h,th);
2423                 else{
2424                   emit_loadreg(rs1[i]|64,th);
2425                   emit_not(th,th);
2426                 }
2427               }
2428               else
2429               if(rs2[i]){
2430                 if(s2l>=0) emit_not(s2l,tl);
2431                 else{
2432                   emit_loadreg(rs2[i],tl);
2433                   emit_not(tl,tl);
2434                 }
2435                 if(s2h>=0) emit_not(s2h,th);
2436                 else{
2437                   emit_loadreg(rs2[i]|64,th);
2438                   emit_not(th,th);
2439                 }
2440               }
2441               else {
2442                 emit_movimm(-1,tl);
2443                 emit_movimm(-1,th);
2444               }
2445             }
2446           }
2447         }
2448       }
2449       else
2450       {
2451         // 32 bit
2452         if(tl>=0) {
2453           s1l=get_reg(i_regs->regmap,rs1[i]);
2454           s2l=get_reg(i_regs->regmap,rs2[i]);
2455           if(rs1[i]&&rs2[i]) {
2456             assert(s1l>=0);
2457             assert(s2l>=0);
2458             if(opcode2[i]==0x24) { // AND
2459               emit_and(s1l,s2l,tl);
2460             } else
2461             if(opcode2[i]==0x25) { // OR
2462               emit_or(s1l,s2l,tl);
2463             } else
2464             if(opcode2[i]==0x26) { // XOR
2465               emit_xor(s1l,s2l,tl);
2466             } else
2467             if(opcode2[i]==0x27) { // NOR
2468               emit_or(s1l,s2l,tl);
2469               emit_not(tl,tl);
2470             }
2471           }
2472           else
2473           {
2474             if(opcode2[i]==0x24) { // AND
2475               emit_zeroreg(tl);
2476             } else
2477             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2478               if(rs1[i]){
2479                 if(s1l>=0) emit_mov(s1l,tl);
2480                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2481               }
2482               else
2483               if(rs2[i]){
2484                 if(s2l>=0) emit_mov(s2l,tl);
2485                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2486               }
2487               else emit_zeroreg(tl);
2488             } else
2489             if(opcode2[i]==0x27) { // NOR
2490               if(rs1[i]){
2491                 if(s1l>=0) emit_not(s1l,tl);
2492                 else {
2493                   emit_loadreg(rs1[i],tl);
2494                   emit_not(tl,tl);
2495                 }
2496               }
2497               else
2498               if(rs2[i]){
2499                 if(s2l>=0) emit_not(s2l,tl);
2500                 else {
2501                   emit_loadreg(rs2[i],tl);
2502                   emit_not(tl,tl);
2503                 }
2504               }
2505               else emit_movimm(-1,tl);
2506             }
2507           }
2508         }
2509       }
2510     }
2511   }
2512 }
2513
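// Emit code for the immediate operations (LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU, ANDI/ORI/XORI), folding in propagated constants where the
// source value is known.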
2514 void imm16_assemble(int i,struct regstat *i_regs)
2515 {
2516   if (opcode[i]==0x0f) { // LUI
2517     if(rt1[i]) {
2518       signed char t;
2519       t=get_reg(i_regs->regmap,rt1[i]);
2520       //assert(t>=0);
2521       if(t>=0) {
2522         if(!((i_regs->isconst>>t)&1))
2523           emit_movimm(imm[i]<<16,t);
2524       }
2525     }
2526   }
2527   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2528     if(rt1[i]) {
2529       signed char s,t;
2530       t=get_reg(i_regs->regmap,rt1[i]);
2531       s=get_reg(i_regs->regmap,rs1[i]);
2532       if(rs1[i]) {
2533         //assert(t>=0);
2534         //assert(s>=0);
2535         if(t>=0) {
2536           if(!((i_regs->isconst>>t)&1)) {
2537             if(s<0) {
2538               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2539               emit_addimm(t,imm[i],t);
2540             }else{
2541               if(!((i_regs->wasconst>>s)&1))
2542                 emit_addimm(s,imm[i],t);
2543               else
2544                 emit_movimm(constmap[i][s]+imm[i],t);
2545             }
2546           }
2547         }
2548       } else {
2549         if(t>=0) {
2550           if(!((i_regs->isconst>>t)&1))
2551             emit_movimm(imm[i],t);
2552         }
2553       }
2554     }
2555   }
2556   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2557     if(rt1[i]) {
2558       signed char sh,sl,th,tl;
2559       th=get_reg(i_regs->regmap,rt1[i]|64);
2560       tl=get_reg(i_regs->regmap,rt1[i]);
2561       sh=get_reg(i_regs->regmap,rs1[i]|64);
2562       sl=get_reg(i_regs->regmap,rs1[i]);
2563       if(tl>=0) {
2564         if(rs1[i]) {
2565           assert(sh>=0);
2566           assert(sl>=0);
2567           if(th>=0) {
2568             emit_addimm64_32(sh,sl,imm[i],th,tl);
2569           }
2570           else {
2571             emit_addimm(sl,imm[i],tl);
2572           }
2573         } else {
2574           emit_movimm(imm[i],tl);
2575           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2576         }
2577       }
2578     }
2579   }
2580   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2581     if(rt1[i]) {
2582       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2583       signed char sh,sl,t;
2584       t=get_reg(i_regs->regmap,rt1[i]);
2585       sh=get_reg(i_regs->regmap,rs1[i]|64);
2586       sl=get_reg(i_regs->regmap,rs1[i]);
2587       //assert(t>=0);
2588       if(t>=0) {
2589         if(rs1[i]>0) {
2590           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2591           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2592             if(opcode[i]==0x0a) { // SLTI
2593               if(sl<0) {
2594                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2595                 emit_slti32(t,imm[i],t);
2596               }else{
2597                 emit_slti32(sl,imm[i],t);
2598               }
2599             }
2600             else { // SLTIU
2601               if(sl<0) {
2602                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2603                 emit_sltiu32(t,imm[i],t);
2604               }else{
2605                 emit_sltiu32(sl,imm[i],t);
2606               }
2607             }
2608           }else{ // 64-bit
2609             assert(sl>=0);
2610             if(opcode[i]==0x0a) // SLTI
2611               emit_slti64_32(sh,sl,imm[i],t);
2612             else // SLTIU
2613               emit_sltiu64_32(sh,sl,imm[i],t);
2614           }
2615         }else{
2616           // SLTI(U) with r0 as the source is pointless,
2617           // but it does turn up in real code
2618           if(opcode[i]==0x0a) // SLTI
2619             if(0<imm[i]) emit_movimm(1,t);
2620             else emit_zeroreg(t);
2621           else // SLTIU
2622           {
2623             if(imm[i]) emit_movimm(1,t);
2624             else emit_zeroreg(t);
2625           }
2626         }
2627       }
2628     }
2629   }
2630   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2631     if(rt1[i]) {
2632       signed char sh,sl,th,tl;
2633       th=get_reg(i_regs->regmap,rt1[i]|64);
2634       tl=get_reg(i_regs->regmap,rt1[i]);
2635       sh=get_reg(i_regs->regmap,rs1[i]|64);
2636       sl=get_reg(i_regs->regmap,rs1[i]);
2637       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2638         if(opcode[i]==0x0c) //ANDI
2639         {
2640           if(rs1[i]) {
2641             if(sl<0) {
2642               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2643               emit_andimm(tl,imm[i],tl);
2644             }else{
2645               if(!((i_regs->wasconst>>sl)&1))
2646                 emit_andimm(sl,imm[i],tl);
2647               else
2648                 emit_movimm(constmap[i][sl]&imm[i],tl);
2649             }
2650           }
2651           else
2652             emit_zeroreg(tl);
2653           if(th>=0) emit_zeroreg(th);
2654         }
2655         else
2656         {
2657           if(rs1[i]) {
2658             if(sl<0) {
2659               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2660             }
2661             if(th>=0) {
2662               if(sh<0) {
2663                 emit_loadreg(rs1[i]|64,th);
2664               }else{
2665                 emit_mov(sh,th);
2666               }
2667             }
2668             if(opcode[i]==0x0d) //ORI
2669             if(sl<0) {
2670               emit_orimm(tl,imm[i],tl);
2671             }else{
2672               if(!((i_regs->wasconst>>sl)&1))
2673                 emit_orimm(sl,imm[i],tl);
2674               else
2675                 emit_movimm(constmap[i][sl]|imm[i],tl);
2676             }
2677             if(opcode[i]==0x0e) //XORI
2678             if(sl<0) {
2679               emit_xorimm(tl,imm[i],tl);
2680             }else{
2681               if(!((i_regs->wasconst>>sl)&1))
2682                 emit_xorimm(sl,imm[i],tl);
2683               else
2684                 emit_movimm(constmap[i][sl]^imm[i],tl);
2685             }
2686           }
2687           else {
2688             emit_movimm(imm[i],tl);
2689             if(th>=0) emit_zeroreg(th);
2690           }
2691         }
2692       }
2693     }
2694   }
2695 }
2696
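// Emit code for shift-by-immediate operations (SLL/SRL/SRA and the
// 64-bit DSLL/DSRL/DSRA plus the DSLL32/DSRL32/DSRA32 forms).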
2697 void shiftimm_assemble(int i,struct regstat *i_regs)
2698 {
2699   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2700   {
2701     if(rt1[i]) {
2702       signed char s,t;
2703       t=get_reg(i_regs->regmap,rt1[i]);
2704       s=get_reg(i_regs->regmap,rs1[i]);
2705       //assert(t>=0);
2706       if(t>=0&&!((i_regs->isconst>>t)&1)){
2707         if(rs1[i]==0)
2708         {
2709           emit_zeroreg(t);
2710         }
2711         else
2712         {
2713           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2714           if(imm[i]) {
2715             if(opcode2[i]==0) // SLL
2716             {
2717               emit_shlimm(s<0?t:s,imm[i],t);
2718             }
2719             if(opcode2[i]==2) // SRL
2720             {
2721               emit_shrimm(s<0?t:s,imm[i],t);
2722             }
2723             if(opcode2[i]==3) // SRA
2724             {
2725               emit_sarimm(s<0?t:s,imm[i],t);
2726             }
2727           }else{
2728             // Shift by zero
2729             if(s>=0 && s!=t) emit_mov(s,t);
2730           }
2731         }
2732       }
2733       //emit_storereg(rt1[i],t); //DEBUG
2734     }
2735   }
2736   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2737   {
2738     if(rt1[i]) {
2739       signed char sh,sl,th,tl;
2740       th=get_reg(i_regs->regmap,rt1[i]|64);
2741       tl=get_reg(i_regs->regmap,rt1[i]);
2742       sh=get_reg(i_regs->regmap,rs1[i]|64);
2743       sl=get_reg(i_regs->regmap,rs1[i]);
2744       if(tl>=0) {
2745         if(rs1[i]==0)
2746         {
2747           emit_zeroreg(tl);
2748           if(th>=0) emit_zeroreg(th);
2749         }
2750         else
2751         {
2752           assert(sl>=0);
2753           assert(sh>=0);
2754           if(imm[i]) {
2755             if(opcode2[i]==0x38) // DSLL
2756             {
2757               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2758               emit_shlimm(sl,imm[i],tl);
2759             }
2760             if(opcode2[i]==0x3a) // DSRL
2761             {
2762               emit_shrdimm(sl,sh,imm[i],tl);
2763               if(th>=0) emit_shrimm(sh,imm[i],th);
2764             }
2765             if(opcode2[i]==0x3b) // DSRA
2766             {
2767               emit_shrdimm(sl,sh,imm[i],tl);
2768               if(th>=0) emit_sarimm(sh,imm[i],th);
2769             }
2770           }else{
2771             // Shift by zero
2772             if(sl!=tl) emit_mov(sl,tl);
2773             if(th>=0&&sh!=th) emit_mov(sh,th);
2774           }
2775         }
2776       }
2777     }
2778   }
2779   if(opcode2[i]==0x3c) // DSLL32
2780   {
2781     if(rt1[i]) {
2782       signed char sl,tl,th;
2783       tl=get_reg(i_regs->regmap,rt1[i]);
2784       th=get_reg(i_regs->regmap,rt1[i]|64);
2785       sl=get_reg(i_regs->regmap,rs1[i]);
2786       if(th>=0||tl>=0){
2787         assert(tl>=0);
2788         assert(th>=0);
2789         assert(sl>=0);
2790         emit_mov(sl,th);
2791         emit_zeroreg(tl);
2792         if(imm[i]>32)
2793         {
2794           emit_shlimm(th,imm[i]&31,th);
2795         }
2796       }
2797     }
2798   }
2799   if(opcode2[i]==0x3e) // DSRL32
2800   {
2801     if(rt1[i]) {
2802       signed char sh,tl,th;
2803       tl=get_reg(i_regs->regmap,rt1[i]);
2804       th=get_reg(i_regs->regmap,rt1[i]|64);
2805       sh=get_reg(i_regs->regmap,rs1[i]|64);
2806       if(tl>=0){
2807         assert(sh>=0);
2808         emit_mov(sh,tl);
2809         if(th>=0) emit_zeroreg(th);
2810         if(imm[i]>32)
2811         {
2812           emit_shrimm(tl,imm[i]&31,tl);
2813         }
2814       }
2815     }
2816   }
2817   if(opcode2[i]==0x3f) // DSRA32
2818   {
2819     if(rt1[i]) {
2820       signed char sh,tl;
2821       tl=get_reg(i_regs->regmap,rt1[i]);
2822       sh=get_reg(i_regs->regmap,rs1[i]|64);
2823       if(tl>=0){
2824         assert(sh>=0);
2825         emit_mov(sh,tl);
2826         if(imm[i]>32)
2827         {
2828           emit_sarimm(tl,imm[i]&31,tl);
2829         }
2830       }
2831     }
2832   }
2833 }
2834
2835 #ifndef shift_assemble
2836 void shift_assemble(int i,struct regstat *i_regs)
2837 {
2838   printf("Need shift_assemble for this architecture.\n");
2839   exit(1);
2840 }
2841 #endif
2842
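// Emit code for aligned loads (LB/LBU/LH/LHU/LW/LWU/LD): an inline fast
// path for RAM plus a stub for the slow path (I/O, TLB miss), or an
// inline read stub when a constant address is known to be outside RAM.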
2843 void load_assemble(int i,struct regstat *i_regs)
2844 {
2845   int s,th,tl,addr,map=-1;
2846   int offset;
2847   int jaddr=0;
2848   int memtarget=0,c=0;
2849   int fastload_reg_override=0;
2850   u_int hr,reglist=0;
2851   th=get_reg(i_regs->regmap,rt1[i]|64);
2852   tl=get_reg(i_regs->regmap,rt1[i]);
2853   s=get_reg(i_regs->regmap,rs1[i]);
2854   offset=imm[i];
2855   for(hr=0;hr<HOST_REGS;hr++) {
2856     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2857   }
2858   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2859   if(s>=0) {
2860     c=(i_regs->wasconst>>s)&1;
2861     if (c) {
2862       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2863       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2864     }
2865   }
2866   //printf("load_assemble: c=%d\n",c);
2867   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2868   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2869 #ifdef PCSX
2870   if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2871     ||rt1[i]==0) {
2872       // could be a hardware FIFO (0x1f80xxxx I/O), so the read must still be performed
2873       // (the rt1[i]==0 case is a dummy read)
2874       assem_debug("(forced read)\n");
2875       tl=get_reg(i_regs->regmap,-1);
2876       assert(tl>=0);
2877   }
2878 #endif
2879   if(offset||s<0||c) addr=tl;
2880   else addr=s;
2881   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2882  if(tl>=0) {
2883   //printf("load_assemble: c=%d\n",c);
2884   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2885   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2886   reglist&=~(1<<tl);
2887   if(th>=0) reglist&=~(1<<th);
2888   if(!using_tlb) {
2889     if(!c) {
2890       #ifdef RAM_OFFSET
2891       map=get_reg(i_regs->regmap,ROREG);
2892       if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2893       #endif
2894 //#define R29_HACK 1
2895       #ifdef R29_HACK
2896       // Strmnnrmn's speed hack
2897       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2898       #endif
2899       {
2900         jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2901       }
2902     }
2903   }else{ // using tlb
2904     int x=0;
2905     if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2906     if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2907     map=get_reg(i_regs->regmap,TLREG);
2908     assert(map>=0);
2909     reglist&=~(1<<map);
2910     map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2911     do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2912   }
2913   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2914   if (opcode[i]==0x20) { // LB
2915     if(!c||memtarget) {
2916       if(!dummy) {
2917         #ifdef HOST_IMM_ADDR32
2918         if(c)
2919           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2920         else
2921         #endif
2922         {
2923           //emit_xorimm(addr,3,tl);
2924           //gen_tlb_addr_r(tl,map);
2925           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2926           int x=0,a=tl;
2927 #ifdef BIG_ENDIAN_MIPS
2928           if(!c) emit_xorimm(addr,3,tl);
2929           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2930 #else
2931           if(!c) a=addr;
2932 #endif
2933           if(fastload_reg_override) a=fastload_reg_override;
2934
2935           emit_movsbl_indexed_tlb(x,a,map,tl);
2936         }
2937       }
2938       if(jaddr)
2939         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2940     }
2941     else
2942       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2943   }
2944   if (opcode[i]==0x21) { // LH
2945     if(!c||memtarget) {
2946       if(!dummy) {
2947         #ifdef HOST_IMM_ADDR32
2948         if(c)
2949           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2950         else
2951         #endif
2952         {
2953           int x=0,a=tl;
2954 #ifdef BIG_ENDIAN_MIPS
2955           if(!c) emit_xorimm(addr,2,tl);
2956           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2957 #else
2958           if(!c) a=addr;
2959 #endif
2960           if(fastload_reg_override) a=fastload_reg_override;
2961           //#ifdef
2962           //emit_movswl_indexed_tlb(x,tl,map,tl);
2963           //else
2964           if(map>=0) {
2965             gen_tlb_addr_r(a,map);
2966             emit_movswl_indexed(x,a,tl);
2967           }else{
2968             #ifdef RAM_OFFSET
2969             emit_movswl_indexed(x,a,tl);
2970             #else
2971             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2972             #endif
2973           }
2974         }
2975       }
2976       if(jaddr)
2977         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2978     }
2979     else
2980       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2981   }
2982   if (opcode[i]==0x23) { // LW
2983     if(!c||memtarget) {
2984       if(!dummy) {
2985         int a=addr;
2986         if(fastload_reg_override) a=fastload_reg_override;
2987         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2988         #ifdef HOST_IMM_ADDR32
2989         if(c)
2990           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2991         else
2992         #endif
2993         emit_readword_indexed_tlb(0,a,map,tl);
2994       }
2995       if(jaddr)
2996         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2997     }
2998     else
2999       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3000   }
3001   if (opcode[i]==0x24) { // LBU
3002     if(!c||memtarget) {
3003       if(!dummy) {
3004         #ifdef HOST_IMM_ADDR32
3005         if(c)
3006           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
3007         else
3008         #endif
3009         {
3010           //emit_xorimm(addr,3,tl);
3011           //gen_tlb_addr_r(tl,map);
3012           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
3013           int x=0,a=tl;
3014 #ifdef BIG_ENDIAN_MIPS
3015           if(!c) emit_xorimm(addr,3,tl);
3016           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3017 #else
3018           if(!c) a=addr;
3019 #endif
3020           if(fastload_reg_override) a=fastload_reg_override;
3021
3022           emit_movzbl_indexed_tlb(x,a,map,tl);
3023         }
3024       }
3025       if(jaddr)
3026         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3027     }
3028     else
3029       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3030   }
3031   if (opcode[i]==0x25) { // LHU
3032     if(!c||memtarget) {
3033       if(!dummy) {
3034         #ifdef HOST_IMM_ADDR32
3035         if(c)
3036           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
3037         else
3038         #endif
3039         {
3040           int x=0,a=tl;
3041 #ifdef BIG_ENDIAN_MIPS
3042           if(!c) emit_xorimm(addr,2,tl);
3043           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3044 #else
3045           if(!c) a=addr;
3046 #endif
3047           if(fastload_reg_override) a=fastload_reg_override;
3048           //#ifdef
3049           //emit_movzwl_indexed_tlb(x,tl,map,tl);
3050           //#else
3051           if(map>=0) {
3052             gen_tlb_addr_r(a,map);
3053             emit_movzwl_indexed(x,a,tl);
3054           }else{
3055             #ifdef RAM_OFFSET
3056             emit_movzwl_indexed(x,a,tl);
3057             #else
3058             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
3059             #endif
3060           }
3061         }
3062       }
3063       if(jaddr)
3064         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3065     }
3066     else
3067       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3068   }
3069   if (opcode[i]==0x27) { // LWU
3070     assert(th>=0);
3071     if(!c||memtarget) {
3072       if(!dummy) {
3073         int a=addr;
3074         if(fastload_reg_override) a=fastload_reg_override;
3075         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3076         #ifdef HOST_IMM_ADDR32
3077         if(c)
3078           emit_readword_tlb(constmap[i][s]+offset,map,tl);
3079         else
3080         #endif
3081         emit_readword_indexed_tlb(0,a,map,tl);
3082       }
3083       if(jaddr)
3084         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3085     }
3086     else {
3087       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3088     }
3089     emit_zeroreg(th);
3090   }
3091   if (opcode[i]==0x37) { // LD
3092     if(!c||memtarget) {
3093       if(!dummy) {
3094         int a=addr;
3095         if(fastload_reg_override) a=fastload_reg_override;
3096         //gen_tlb_addr_r(tl,map);
3097         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3098         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3099         #ifdef HOST_IMM_ADDR32
3100         if(c)
3101           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3102         else
3103         #endif
3104         emit_readdword_indexed_tlb(0,a,map,th,tl);
3105       }
3106       if(jaddr)
3107         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3108     }
3109     else
3110       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3111   }
3112  }
3113   //emit_storereg(rt1[i],tl); // DEBUG
3114   //if(opcode[i]==0x23)
3115   //if(opcode[i]==0x24)
3116   //if(opcode[i]==0x23||opcode[i]==0x24)
3117   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3118   {
3119     //emit_pusha();
3120     save_regs(0x100f);
3121         emit_readword((int)&last_count,ECX);
3122         #ifdef __i386__
3123         if(get_reg(i_regs->regmap,CCREG)<0)
3124           emit_loadreg(CCREG,HOST_CCREG);
3125         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3126         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3127         emit_writeword(HOST_CCREG,(int)&Count);
3128         #endif
3129         #ifdef __arm__
3130         if(get_reg(i_regs->regmap,CCREG)<0)
3131           emit_loadreg(CCREG,0);
3132         else
3133           emit_mov(HOST_CCREG,0);
3134         emit_add(0,ECX,0);
3135         emit_addimm(0,2*ccadj[i],0);
3136         emit_writeword(0,(int)&Count);
3137         #endif
3138     emit_call((int)memdebug);
3139     //emit_popa();
3140     restore_regs(0x100f);
3141   }/**/
3142 }
3143
3144 #ifndef loadlr_assemble
3145 void loadlr_assemble(int i,struct regstat *i_regs)
3146 {
3147   printf("Need loadlr_assemble for this architecture.\n");
3148   exit(1);
3149 }
3150 #endif
3151
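// Emit code for aligned stores (SB/SH/SW/SD): an inline fast path for RAM
// with a slow-path stub, plus the invalid_code check that invalidates
// previously compiled blocks when code is overwritten.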
3152 void store_assemble(int i,struct regstat *i_regs)
3153 {
3154   int s,th,tl,map=-1;
3155   int addr,temp;
3156   int offset;
3157   int jaddr=0,jaddr2,type;
3158   int memtarget=0,c=0;
3159   int agr=AGEN1+(i&1);
3160   int faststore_reg_override=0;
3161   u_int hr,reglist=0;
3162   th=get_reg(i_regs->regmap,rs2[i]|64);
3163   tl=get_reg(i_regs->regmap,rs2[i]);
3164   s=get_reg(i_regs->regmap,rs1[i]);
3165   temp=get_reg(i_regs->regmap,agr);
3166   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3167   offset=imm[i];
3168   if(s>=0) {
3169     c=(i_regs->wasconst>>s)&1;
3170     if(c) {
3171       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3172       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3173     }
3174   }
3175   assert(tl>=0);
3176   assert(temp>=0);
3177   for(hr=0;hr<HOST_REGS;hr++) {
3178     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3179   }
3180   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3181   if(offset||s<0||c) addr=temp;
3182   else addr=s;
3183   if(!using_tlb) {
3184     if(!c) {
3185       #ifndef PCSX
3186       #ifdef R29_HACK
3187       // Strmnnrmn's speed hack
3188       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3189       #endif
3190       emit_cmpimm(addr,RAM_SIZE);
3191       #ifdef DESTRUCTIVE_SHIFT
3192       if(s==addr) emit_mov(s,temp);
3193       #endif
3194       #ifdef R29_HACK
3195       memtarget=1;
3196       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3197       #endif
3198       {
3199         jaddr=(int)out;
3200         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3201         // Hint to branch predictor that the branch is unlikely to be taken
3202         if(rs1[i]>=28)
3203           emit_jno_unlikely(0);
3204         else
3205         #endif
3206         emit_jno(0);
3207       }
3208       #else
3209         jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
3210       #endif
3211     }
3212   }else{ // using tlb
3213     int x=0;
3214     if (opcode[i]==0x28) x=3; // SB
3215     if (opcode[i]==0x29) x=2; // SH
3216     map=get_reg(i_regs->regmap,TLREG);
3217     assert(map>=0);
3218     reglist&=~(1<<map);
3219     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3220     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3221   }
3222
3223   if (opcode[i]==0x28) { // SB
3224     if(!c||memtarget) {
3225       int x=0,a=temp;
3226 #ifdef BIG_ENDIAN_MIPS
3227       if(!c) emit_xorimm(addr,3,temp);
3228       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3229 #else
3230       if(!c) a=addr;
3231 #endif
3232       if(faststore_reg_override) a=faststore_reg_override;
3233       //gen_tlb_addr_w(temp,map);
3234       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3235       emit_writebyte_indexed_tlb(tl,x,a,map,a);
3236     }
3237     type=STOREB_STUB;
3238   }
3239   if (opcode[i]==0x29) { // SH
3240     if(!c||memtarget) {
3241       int x=0,a=temp;
3242 #ifdef BIG_ENDIAN_MIPS
3243       if(!c) emit_xorimm(addr,2,temp);
3244       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3245 #else
3246       if(!c) a=addr;
3247 #endif
3248       if(faststore_reg_override) a=faststore_reg_override;
3249       //#ifdef
3250       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3251       //#else
3252       if(map>=0) {
3253         gen_tlb_addr_w(a,map);
3254         emit_writehword_indexed(tl,x,a);
3255       }else
3256         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
3257     }
3258     type=STOREH_STUB;
3259   }
3260   if (opcode[i]==0x2B) { // SW
3261     if(!c||memtarget) {
3262       int a=addr;
3263       if(faststore_reg_override) a=faststore_reg_override;
3264       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3265       emit_writeword_indexed_tlb(tl,0,a,map,temp);
3266     }
3267     type=STOREW_STUB;
3268   }
3269   if (opcode[i]==0x3F) { // SD
3270     if(!c||memtarget) {
3271       int a=addr;
3272       if(faststore_reg_override) a=faststore_reg_override;
3273       if(rs2[i]) {
3274         assert(th>=0);
3275         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3276         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3277         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
3278       }else{
3279         // Store zero
3280         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3281         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3282         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
3283       }
3284     }
3285     type=STORED_STUB;
3286   }
3287 #ifdef PCSX
3288   if(jaddr) {
3289     // PCSX store handlers don't check invcode again
3290     reglist|=1<<addr;
3291     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3292     jaddr=0;
3293   }
3294 #endif
3295   if(!using_tlb) {
3296     if(!c||memtarget) {
3297       #ifdef DESTRUCTIVE_SHIFT
3298       // The x86 shift operation is 'destructive'; it overwrites the
3299       // source register, so we need to make a copy first and use that.
3300       addr=temp;
3301       #endif
3302       #if defined(HOST_IMM8)
3303       int ir=get_reg(i_regs->regmap,INVCP);
3304       assert(ir>=0);
3305       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3306       #else
3307       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3308       #endif
3309       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3310       emit_callne(invalidate_addr_reg[addr]);
3311       #else
3312       jaddr2=(int)out;
3313       emit_jne(0);
3314       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3315       #endif
3316     }
3317   }
3318   if(jaddr) {
3319     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3320   } else if(c&&!memtarget) {
3321     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3322   }
3323   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3324   //if(opcode[i]==0x2B || opcode[i]==0x28)
3325   //if(opcode[i]==0x2B || opcode[i]==0x29)
3326   //if(opcode[i]==0x2B)
3327   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3328   {
3329     #ifdef __i386__
3330     emit_pusha();
3331     #endif
3332     #ifdef __arm__
3333     save_regs(0x100f);
3334     #endif
3335         emit_readword((int)&last_count,ECX);
3336         #ifdef __i386__
3337         if(get_reg(i_regs->regmap,CCREG)<0)
3338           emit_loadreg(CCREG,HOST_CCREG);
3339         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3340         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3341         emit_writeword(HOST_CCREG,(int)&Count);
3342         #endif
3343         #ifdef __arm__
3344         if(get_reg(i_regs->regmap,CCREG)<0)
3345           emit_loadreg(CCREG,0);
3346         else
3347           emit_mov(HOST_CCREG,0);
3348         emit_add(0,ECX,0);
3349         emit_addimm(0,2*ccadj[i],0);
3350         emit_writeword(0,(int)&Count);
3351         #endif
3352     emit_call((int)memdebug);
3353     #ifdef __i386__
3354     emit_popa();
3355     #endif
3356     #ifdef __arm__
3357     restore_regs(0x100f);
3358     #endif
3359   }/**/
3360 }
3361
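// Emit code for the unaligned stores (SWL/SWR/SDL/SDR): branch on the low
// two address bits and handle each of the four alignment cases separately.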
3362 void storelr_assemble(int i,struct regstat *i_regs)
3363 {
3364   int s,th,tl;
3365   int temp;
3366   int temp2;
3367   int offset;
3368   int jaddr=0,jaddr2;
3369   int case1,case2,case3;
3370   int done0,done1,done2;
3371   int memtarget=0,c=0;
3372   int agr=AGEN1+(i&1);
3373   u_int hr,reglist=0;
3374   th=get_reg(i_regs->regmap,rs2[i]|64);
3375   tl=get_reg(i_regs->regmap,rs2[i]);
3376   s=get_reg(i_regs->regmap,rs1[i]);
3377   temp=get_reg(i_regs->regmap,agr);
3378   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3379   offset=imm[i];
3380   if(s>=0) {
3381     c=(i_regs->isconst>>s)&1;
3382     if(c) {
3383       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3384       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3385     }
3386   }
3387   assert(tl>=0);
3388   for(hr=0;hr<HOST_REGS;hr++) {
3389     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3390   }
3391   assert(temp>=0);
3392   if(!using_tlb) {
3393     if(!c) {
3394       emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3395       if(!offset&&s!=temp) emit_mov(s,temp);
3396       jaddr=(int)out;
3397       emit_jno(0);
3398     }
3399     else
3400     {
3401       if(!memtarget||!rs1[i]) {
3402         jaddr=(int)out;
3403         emit_jmp(0);
3404       }
3405     }
3406     #ifdef RAM_OFFSET
3407     int map=get_reg(i_regs->regmap,ROREG);
3408     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3409     gen_tlb_addr_w(temp,map);
3410     #else
3411     if((u_int)rdram!=0x80000000) 
3412       emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3413     #endif
3414   }else{ // using tlb
3415     int map=get_reg(i_regs->regmap,TLREG);
3416     assert(map>=0);
3417     reglist&=~(1<<map);
3418     map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3419     if(!c&&!offset&&s>=0) emit_mov(s,temp);
3420     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3421     if(!jaddr&&!memtarget) {
3422       jaddr=(int)out;
3423       emit_jmp(0);
3424     }
3425     gen_tlb_addr_w(temp,map);
3426   }
3427
3428   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3429     temp2=get_reg(i_regs->regmap,FTEMP);
3430     if(!rs2[i]) temp2=th=tl;
3431   }
3432
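  // Dispatch on the two low address bits (XORed with 3 on little-endian
  // hosts so the byte lanes match big-endian MIPS); cases 0-3 below store
  // only the bytes of the source that fall within the addressed word.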
3433 #ifndef BIG_ENDIAN_MIPS
3434     emit_xorimm(temp,3,temp);
3435 #endif
3436   emit_testimm(temp,2);
3437   case2=(int)out;
3438   emit_jne(0);
3439   emit_testimm(temp,1);
3440   case1=(int)out;
3441   emit_jne(0);
3442   // 0
3443   if (opcode[i]==0x2A) { // SWL
3444     emit_writeword_indexed(tl,0,temp);
3445   }
3446   if (opcode[i]==0x2E) { // SWR
3447     emit_writebyte_indexed(tl,3,temp);
3448   }
3449   if (opcode[i]==0x2C) { // SDL
3450     emit_writeword_indexed(th,0,temp);
3451     if(rs2[i]) emit_mov(tl,temp2);
3452   }
3453   if (opcode[i]==0x2D) { // SDR
3454     emit_writebyte_indexed(tl,3,temp);
3455     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3456   }
3457   done0=(int)out;
3458   emit_jmp(0);
3459   // 1
3460   set_jump_target(case1,(int)out);
3461   if (opcode[i]==0x2A) { // SWL
3462     // Write 3 msb into three least significant bytes
3463     if(rs2[i]) emit_rorimm(tl,8,tl);
3464     emit_writehword_indexed(tl,-1,temp);
3465     if(rs2[i]) emit_rorimm(tl,16,tl);
3466     emit_writebyte_indexed(tl,1,temp);
3467     if(rs2[i]) emit_rorimm(tl,8,tl);
3468   }
3469   if (opcode[i]==0x2E) { // SWR
3470     // Write two lsb into two most significant bytes
3471     emit_writehword_indexed(tl,1,temp);
3472   }
3473   if (opcode[i]==0x2C) { // SDL
3474     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3475     // Write 3 msb into three least significant bytes
3476     if(rs2[i]) emit_rorimm(th,8,th);
3477     emit_writehword_indexed(th,-1,temp);
3478     if(rs2[i]) emit_rorimm(th,16,th);
3479     emit_writebyte_indexed(th,1,temp);
3480     if(rs2[i]) emit_rorimm(th,8,th);
3481   }
3482   if (opcode[i]==0x2D) { // SDR
3483     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3484     // Write two lsb into two most significant bytes
3485     emit_writehword_indexed(tl,1,temp);
3486   }
3487   done1=(int)out;
3488   emit_jmp(0);
3489   // 2
3490   set_jump_target(case2,(int)out);
3491   emit_testimm(temp,1);
3492   case3=(int)out;
3493   emit_jne(0);
3494   if (opcode[i]==0x2A) { // SWL
3495     // Write two msb into two least significant bytes
3496     if(rs2[i]) emit_rorimm(tl,16,tl);
3497     emit_writehword_indexed(tl,-2,temp);
3498     if(rs2[i]) emit_rorimm(tl,16,tl);
3499   }
3500   if (opcode[i]==0x2E) { // SWR
3501     // Write 3 lsb into three most significant bytes
3502     emit_writebyte_indexed(tl,-1,temp);
3503     if(rs2[i]) emit_rorimm(tl,8,tl);
3504     emit_writehword_indexed(tl,0,temp);
3505     if(rs2[i]) emit_rorimm(tl,24,tl);
3506   }
3507   if (opcode[i]==0x2C) { // SDL
3508     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3509     // Write two msb into two least significant bytes
3510     if(rs2[i]) emit_rorimm(th,16,th);
3511     emit_writehword_indexed(th,-2,temp);
3512     if(rs2[i]) emit_rorimm(th,16,th);
3513   }
3514   if (opcode[i]==0x2D) { // SDR
3515     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3516     // Write 3 lsb into three most significant bytes
3517     emit_writebyte_indexed(tl,-1,temp);
3518     if(rs2[i]) emit_rorimm(tl,8,tl);
3519     emit_writehword_indexed(tl,0,temp);
3520     if(rs2[i]) emit_rorimm(tl,24,tl);
3521   }
3522   done2=(int)out;
3523   emit_jmp(0);
3524   // 3
3525   set_jump_target(case3,(int)out);
3526   if (opcode[i]==0x2A) { // SWL
3527     // Write msb into least significant byte
3528     if(rs2[i]) emit_rorimm(tl,24,tl);
3529     emit_writebyte_indexed(tl,-3,temp);
3530     if(rs2[i]) emit_rorimm(tl,8,tl);
3531   }
3532   if (opcode[i]==0x2E) { // SWR
3533     // Write entire word
3534     emit_writeword_indexed(tl,-3,temp);
3535   }
3536   if (opcode[i]==0x2C) { // SDL
3537     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3538     // Write msb into least significant byte
3539     if(rs2[i]) emit_rorimm(th,24,th);
3540     emit_writebyte_indexed(th,-3,temp);
3541     if(rs2[i]) emit_rorimm(th,8,th);
3542   }
3543   if (opcode[i]==0x2D) { // SDR
3544     if(rs2[i]) emit_mov(th,temp2);
3545     // Write entire word
3546     emit_writeword_indexed(tl,-3,temp);
3547   }
3548   set_jump_target(done0,(int)out);
3549   set_jump_target(done1,(int)out);
3550   set_jump_target(done2,(int)out);
3551   if (opcode[i]==0x2C) { // SDL
3552     emit_testimm(temp,4);
3553     done0=(int)out;
3554     emit_jne(0);
3555     emit_andimm(temp,~3,temp);
3556     emit_writeword_indexed(temp2,4,temp);
3557     set_jump_target(done0,(int)out);
3558   }
3559   if (opcode[i]==0x2D) { // SDR
3560     emit_testimm(temp,4);
3561     done0=(int)out;
3562     emit_jeq(0);
3563     emit_andimm(temp,~3,temp);
3564     emit_writeword_indexed(temp2,-4,temp);
3565     set_jump_target(done0,(int)out);
3566   }
3567   if(!c||!memtarget)
3568     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3569   if(!using_tlb) {
3570     #ifdef RAM_OFFSET
3571     int map=get_reg(i_regs->regmap,ROREG);
3572     if(map<0) map=HOST_TEMPREG;
3573     gen_orig_addr_w(temp,map);
3574     #else
3575     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3576     #endif
3577     #if defined(HOST_IMM8)
3578     int ir=get_reg(i_regs->regmap,INVCP);
3579     assert(ir>=0);
3580     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3581     #else
3582     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3583     #endif
3584     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3585     emit_callne(invalidate_addr_reg[temp]);
3586     #else
3587     jaddr2=(int)out;
3588     emit_jne(0);
3589     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3590     #endif
3591   }
3592   /*
3593     emit_pusha();
3594     //save_regs(0x100f);
3595         emit_readword((int)&last_count,ECX);
3596         if(get_reg(i_regs->regmap,CCREG)<0)
3597           emit_loadreg(CCREG,HOST_CCREG);
3598         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3599         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3600         emit_writeword(HOST_CCREG,(int)&Count);
3601     emit_call((int)memdebug);
3602     emit_popa();
3603     //restore_regs(0x100f);
3604   /**/
3605 }
3606
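// Assemble COP1 loads/stores (LWC1/LDC1/SWC1/SDC1).  FPU registers are
// reached indirectly through the reg_cop1_simple/reg_cop1_double pointer
// tables; when DISABLE_COP1 is defined this just calls cop1_unusable().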
3607 void c1ls_assemble(int i,struct regstat *i_regs)
3608 {
3609 #ifndef DISABLE_COP1
3610   int s,th,tl;
3611   int temp,ar;
3612   int map=-1;
3613   int offset;
3614   int c=0;
3615   int jaddr,jaddr2=0,jaddr3,type;
3616   int agr=AGEN1+(i&1);
3617   u_int hr,reglist=0;
3618   th=get_reg(i_regs->regmap,FTEMP|64);
3619   tl=get_reg(i_regs->regmap,FTEMP);
3620   s=get_reg(i_regs->regmap,rs1[i]);
3621   temp=get_reg(i_regs->regmap,agr);
3622   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3623   offset=imm[i];
3624   assert(tl>=0);
3625   assert(rs1[i]>0);
3626   assert(temp>=0);
3627   for(hr=0;hr<HOST_REGS;hr++) {
3628     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3629   }
3630   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3631   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3632   {
3633     // Loads use a temporary register which we need to save
3634     reglist|=1<<temp;
3635   }
3636   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3637     ar=temp;
3638   else // LWC1/LDC1
3639     ar=tl;
3640   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3641   //else c=(i_regs->wasconst>>s)&1;
3642   if(s>=0) c=(i_regs->wasconst>>s)&1;
3643   // Check cop1 unusable
3644   if(!cop1_usable) {
3645     signed char rs=get_reg(i_regs->regmap,CSREG);
3646     assert(rs>=0);
3647     emit_testimm(rs,0x20000000);
3648     jaddr=(int)out;
3649     emit_jeq(0);
3650     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3651     cop1_usable=1;
3652   }
3653   if (opcode[i]==0x39) { // SWC1 (get float address)
3654     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3655   }
3656   if (opcode[i]==0x3D) { // SDC1 (get double address)
3657     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3658   }
3659   // Generate address + offset
3660   if(!using_tlb) {
3661     if(!c)
3662       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3663   }
3664   else
3665   {
3666     map=get_reg(i_regs->regmap,TLREG);
3667     assert(map>=0);
3668     reglist&=~(1<<map);
3669     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3670       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3671     }
3672     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3673       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3674     }
3675   }
3676   if (opcode[i]==0x39) { // SWC1 (read float)
3677     emit_readword_indexed(0,tl,tl);
3678   }
3679   if (opcode[i]==0x3D) { // SDC1 (read double)
3680     emit_readword_indexed(4,tl,th);
3681     emit_readword_indexed(0,tl,tl);
3682   }
3683   if (opcode[i]==0x31) { // LWC1 (get target address)
3684     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3685   }
3686   if (opcode[i]==0x35) { // LDC1 (get target address)
3687     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3688   }
3689   if(!using_tlb) {
3690     if(!c) {
3691       jaddr2=(int)out;
3692       emit_jno(0);
3693     }
3694     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3695       jaddr2=(int)out;
3696       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3697     }
3698     #ifdef DESTRUCTIVE_SHIFT
3699     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3700       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3701     }
3702     #endif
3703   }else{
3704     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3705       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3706     }
3707     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3708       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3709     }
3710   }
3711   if (opcode[i]==0x31) { // LWC1
3712     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3713     //gen_tlb_addr_r(ar,map);
3714     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3715     #ifdef HOST_IMM_ADDR32
3716     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3717     else
3718     #endif
3719     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3720     type=LOADW_STUB;
3721   }
3722   if (opcode[i]==0x35) { // LDC1
3723     assert(th>=0);
3724     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3725     //gen_tlb_addr_r(ar,map);
3726     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3727     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3728     #ifdef HOST_IMM_ADDR32
3729     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3730     else
3731     #endif
3732     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3733     type=LOADD_STUB;
3734   }
3735   if (opcode[i]==0x39) { // SWC1
3736     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3737     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3738     type=STOREW_STUB;
3739   }
3740   if (opcode[i]==0x3D) { // SDC1
3741     assert(th>=0);
3742     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3743     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3744     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3745     type=STORED_STUB;
3746   }
3747   if(!using_tlb) {
3748     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3749       #ifndef DESTRUCTIVE_SHIFT
3750       temp=offset||c||s<0?ar:s;
3751       #endif
3752       #if defined(HOST_IMM8)
3753       int ir=get_reg(i_regs->regmap,INVCP);
3754       assert(ir>=0);
3755       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3756       #else
3757       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3758       #endif
3759       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3760       emit_callne(invalidate_addr_reg[temp]);
3761       #else
3762       jaddr3=(int)out;
3763       emit_jne(0);
3764       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3765       #endif
3766     }
3767   }
3768   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3769   if (opcode[i]==0x31) { // LWC1 (write float)
3770     emit_writeword_indexed(tl,0,temp);
3771   }
3772   if (opcode[i]==0x35) { // LDC1 (write double)
3773     emit_writeword_indexed(th,4,temp);
3774     emit_writeword_indexed(tl,0,temp);
3775   }
3776   //if(opcode[i]==0x39)
3777   /*if(opcode[i]==0x39||opcode[i]==0x31)
3778   {
3779     emit_pusha();
3780         emit_readword((int)&last_count,ECX);
3781         if(get_reg(i_regs->regmap,CCREG)<0)
3782           emit_loadreg(CCREG,HOST_CCREG);
3783         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3784         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3785         emit_writeword(HOST_CCREG,(int)&Count);
3786     emit_call((int)memdebug);
3787     emit_popa();
3788   }/**/
3789 #else
3790   cop1_unusable(i, i_regs);
3791 #endif
3792 }
3793
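// Assemble GTE loads/stores (LWC2/SWC2).  Data moves through FTEMP and
// cop2_get_dreg/cop2_put_dreg; the TLB path is not supported here.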
3794 void c2ls_assemble(int i,struct regstat *i_regs)
3795 {
3796   int s,tl;
3797   int ar;
3798   int offset;
3799   int memtarget=0,c=0;
3800   int jaddr2=0,jaddr3,type;
3801   int agr=AGEN1+(i&1);
3802   int fastio_reg_override=0;
3803   u_int hr,reglist=0;
3804   u_int copr=(source[i]>>16)&0x1f;
3805   s=get_reg(i_regs->regmap,rs1[i]);
3806   tl=get_reg(i_regs->regmap,FTEMP);
3807   offset=imm[i];
3808   assert(rs1[i]>0);
3809   assert(tl>=0);
3810   assert(!using_tlb);
3811
3812   for(hr=0;hr<HOST_REGS;hr++) {
3813     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3814   }
3815   if(i_regs->regmap[HOST_CCREG]==CCREG)
3816     reglist&=~(1<<HOST_CCREG);
3817
3818   // get the address
3819   if (opcode[i]==0x3a) { // SWC2
3820     ar=get_reg(i_regs->regmap,agr);
3821     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3822     reglist|=1<<ar;
3823   } else { // LWC2
3824     ar=tl;
3825   }
3826   if(s>=0) c=(i_regs->wasconst>>s)&1;
3827   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3828   if (!offset&&!c&&s>=0) ar=s;
3829   assert(ar>=0);
3830
3831   if (opcode[i]==0x3a) { // SWC2
3832     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3833     type=STOREW_STUB;
3834   }
3835   else
3836     type=LOADW_STUB;
3837
3838   if(c&&!memtarget) {
3839     jaddr2=(int)out;
3840     emit_jmp(0); // inline_readstub/inline_writestub?
3841   }
3842   else {
3843     if(!c) {
3844       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3845     }
3846     if (opcode[i]==0x32) { // LWC2
3847       #ifdef HOST_IMM_ADDR32
3848       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3849       else
3850       #endif
3851       int a=ar;
3852       if(fastio_reg_override) a=fastio_reg_override;
3853       emit_readword_indexed(0,a,tl);
3854     }
3855     if (opcode[i]==0x3a) { // SWC2
3856       #ifdef DESTRUCTIVE_SHIFT
3857       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3858       #endif
3859       int a=ar;
3860       if(fastio_reg_override) a=fastio_reg_override;
3861       emit_writeword_indexed(tl,0,a);
3862     }
3863   }
3864   if(jaddr2)
3865     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3866   if (opcode[i]==0x3a) { // SWC2
3867 #if defined(HOST_IMM8)
3868     int ir=get_reg(i_regs->regmap,INVCP);
3869     assert(ir>=0);
3870     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3871 #else
3872     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3873 #endif
3874     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3875     emit_callne(invalidate_addr_reg[ar]);
3876     #else
3877     jaddr3=(int)out;
3878     emit_jne(0);
3879     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3880     #endif
3881   }
3882   if (opcode[i]==0x32) { // LWC2
3883     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3884   }
3885 }
3886
3887 #ifndef multdiv_assemble
3888 void multdiv_assemble(int i,struct regstat *i_regs)
3889 {
3890   printf("Need multdiv_assemble for this architecture.\n");
3891   exit(1);
3892 }
3893 #endif
3894
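// Register-to-register moves (MFHI/MFLO/MTHI/MTLO): copy the source to the
// target, using host registers when mapped and memory loads otherwise.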
3895 void mov_assemble(int i,struct regstat *i_regs)
3896 {
3897   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3898   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3899   if(rt1[i]) {
3900     signed char sh,sl,th,tl;
3901     th=get_reg(i_regs->regmap,rt1[i]|64);
3902     tl=get_reg(i_regs->regmap,rt1[i]);
3903     //assert(tl>=0);
3904     if(tl>=0) {
3905       sh=get_reg(i_regs->regmap,rs1[i]|64);
3906       sl=get_reg(i_regs->regmap,rs1[i]);
3907       if(sl>=0) emit_mov(sl,tl);
3908       else emit_loadreg(rs1[i],tl);
3909       if(th>=0) {
3910         if(sh>=0) emit_mov(sh,th);
3911         else emit_loadreg(rs1[i]|64,th);
3912       }
3913     }
3914   }
3915 }
3916
3917 #ifndef fconv_assemble
3918 void fconv_assemble(int i,struct regstat *i_regs)
3919 {
3920   printf("Need fconv_assemble for this architecture.\n");
3921   exit(1);
3922 }
3923 #endif
3924
3925 #if 0
3926 void float_assemble(int i,struct regstat *i_regs)
3927 {
3928   printf("Need float_assemble for this architecture.\n");
3929   exit(1);
3930 }
3931 #endif
3932
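// SYSCALL, HLE calls and interpreter fallbacks: load the PC and the updated
// cycle count into fixed registers, then tail-jump to the out-of-line handler.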
3933 void syscall_assemble(int i,struct regstat *i_regs)
3934 {
3935   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3936   assert(ccreg==HOST_CCREG);
3937   assert(!is_delayslot);
3938   emit_movimm(start+i*4,EAX); // Get PC
3939   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3940   emit_jmp((int)jump_syscall_hle); // XXX
3941 }
3942
3943 void hlecall_assemble(int i,struct regstat *i_regs)
3944 {
3945   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3946   assert(ccreg==HOST_CCREG);
3947   assert(!is_delayslot);
3948   emit_movimm(start+i*4+4,0); // Get PC
3949   emit_movimm((int)psxHLEt[source[i]&7],1);
3950   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3951   emit_jmp((int)jump_hlecall);
3952 }
3953
3954 void intcall_assemble(int i,struct regstat *i_regs)
3955 {
3956   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3957   assert(ccreg==HOST_CCREG);
3958   assert(!is_delayslot);
3959   emit_movimm(start+i*4,0); // Get PC
3960   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3961   emit_jmp((int)jump_intcall);
3962 }
3963
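// Assemble the instruction in a branch delay slot; is_delayslot is set
// around the dispatch so the per-type assemblers can special-case it.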
3964 void ds_assemble(int i,struct regstat *i_regs)
3965 {
3966   speculate_register_values(i);
3967   is_delayslot=1;
3968   switch(itype[i]) {
3969     case ALU:
3970       alu_assemble(i,i_regs);break;
3971     case IMM16:
3972       imm16_assemble(i,i_regs);break;
3973     case SHIFT:
3974       shift_assemble(i,i_regs);break;
3975     case SHIFTIMM:
3976       shiftimm_assemble(i,i_regs);break;
3977     case LOAD:
3978       load_assemble(i,i_regs);break;
3979     case LOADLR:
3980       loadlr_assemble(i,i_regs);break;
3981     case STORE:
3982       store_assemble(i,i_regs);break;
3983     case STORELR:
3984       storelr_assemble(i,i_regs);break;
3985     case COP0:
3986       cop0_assemble(i,i_regs);break;
3987     case COP1:
3988       cop1_assemble(i,i_regs);break;
3989     case C1LS:
3990       c1ls_assemble(i,i_regs);break;
3991     case COP2:
3992       cop2_assemble(i,i_regs);break;
3993     case C2LS:
3994       c2ls_assemble(i,i_regs);break;
3995     case C2OP:
3996       c2op_assemble(i,i_regs);break;
3997     case FCONV:
3998       fconv_assemble(i,i_regs);break;
3999     case FLOAT:
4000       float_assemble(i,i_regs);break;
4001     case FCOMP:
4002       fcomp_assemble(i,i_regs);break;
4003     case MULTDIV:
4004       multdiv_assemble(i,i_regs);break;
4005     case MOV:
4006       mov_assemble(i,i_regs);break;
4007     case SYSCALL:
4008     case HLECALL:
4009     case INTCALL:
4010     case SPAN:
4011     case UJUMP:
4012     case RJUMP:
4013     case CJUMP:
4014     case SJUMP:
4015     case FJUMP:
4016       printf("Jump in the delay slot.  This is probably a bug.\n");
4017   }
4018   is_delayslot=0;
4019 }
4020
4021 // Is the branch target a valid internal jump?
4022 int internal_branch(uint64_t i_is32,int addr)
4023 {
4024   if(addr&1) return 0; // Indirect (register) jump
4025   if(addr>=start && addr<start+slen*4-4)
4026   {
4027     int t=(addr-start)>>2;
4028     // Delay slots are not valid branch targets
4029     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4030     // 64 -> 32 bit transition requires a recompile
4031     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
4032     {
4033       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
4034       else printf("optimizable: yes\n");
4035     }*/
4036     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4037 #ifndef FORCE32
4038     if(requires_32bit[t]&~i_is32) return 0;
4039     else
4040 #endif
4041       return 1;
4042   }
4043   return 0;
4044 }
4045
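// Write back (spill) dirty registers whose old mapping does not survive
// into the new register map, then move any values that are merely
// reassigned to a different host register.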
4046 #ifndef wb_invalidate
4047 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
4048   uint64_t u,uint64_t uu)
4049 {
4050   int hr;
4051   for(hr=0;hr<HOST_REGS;hr++) {
4052     if(hr!=EXCLUDE_REG) {
4053       if(pre[hr]!=entry[hr]) {
4054         if(pre[hr]>=0) {
4055           if((dirty>>hr)&1) {
4056             if(get_reg(entry,pre[hr])<0) {
4057               if(pre[hr]<64) {
4058                 if(!((u>>pre[hr])&1)) {
4059                   emit_storereg(pre[hr],hr);
4060                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
4061                     emit_sarimm(hr,31,hr);
4062                     emit_storereg(pre[hr]|64,hr);
4063                   }
4064                 }
4065               }else{
4066                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
4067                   emit_storereg(pre[hr],hr);
4068                 }
4069               }
4070             }
4071           }
4072         }
4073       }
4074     }
4075   }
4076   // Move from one register to another (no writeback)
4077   for(hr=0;hr<HOST_REGS;hr++) {
4078     if(hr!=EXCLUDE_REG) {
4079       if(pre[hr]!=entry[hr]) {
4080         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
4081           int nr;
4082           if((nr=get_reg(entry,pre[hr]))>=0) {
4083             emit_mov(hr,nr);
4084           }
4085         }
4086       }
4087     }
4088   }
4089 }
4090 #endif
4091
4092 // Load the specified registers
4093 // This only loads the registers given as arguments because
4094 // we don't want to load things that will be overwritten
4095 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
4096 {
4097   int hr;
4098   // Load 32-bit regs
4099   for(hr=0;hr<HOST_REGS;hr++) {
4100     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4101       if(entry[hr]!=regmap[hr]) {
4102         if(regmap[hr]==rs1||regmap[hr]==rs2)
4103         {
4104           if(regmap[hr]==0) {
4105             emit_zeroreg(hr);
4106           }
4107           else
4108           {
4109             emit_loadreg(regmap[hr],hr);
4110           }
4111         }
4112       }
4113     }
4114   }
4115   // Load 64-bit regs
4116   for(hr=0;hr<HOST_REGS;hr++) {
4117     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4118       if(entry[hr]!=regmap[hr]) {
4119         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
4120         {
4121           assert(regmap[hr]!=64);
4122           if((is32>>(regmap[hr]&63))&1) {
4123             int lr=get_reg(regmap,regmap[hr]-64);
4124             if(lr>=0)
4125               emit_sarimm(lr,31,hr);
4126             else
4127               emit_loadreg(regmap[hr],hr);
4128           }
4129           else
4130           {
4131             emit_loadreg(regmap[hr],hr);
4132           }
4133         }
4134       }
4135     }
4136   }
4137 }
4138
4139 // Load registers prior to the start of a loop
4140 // so that they are not loaded within the loop
4141 static void loop_preload(signed char pre[],signed char entry[])
4142 {
4143   int hr;
4144   for(hr=0;hr<HOST_REGS;hr++) {
4145     if(hr!=EXCLUDE_REG) {
4146       if(pre[hr]!=entry[hr]) {
4147         if(entry[hr]>=0) {
4148           if(get_reg(pre,entry[hr])<0) {
4149             assem_debug("loop preload:\n");
4150             //printf("loop preload: %d\n",hr);
4151             if(entry[hr]==0) {
4152               emit_zeroreg(hr);
4153             }
4154             else if(entry[hr]<TEMPREG)
4155             {
4156               emit_loadreg(entry[hr],hr);
4157             }
4158             else if(entry[hr]-64<TEMPREG)
4159             {
4160               emit_loadreg(entry[hr],hr);
4161             }
4162           }
4163         }
4164       }
4165     }
4166   }
4167 }
4168
4169 // Generate address for load/store instruction
4170 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
4171 void address_generation(int i,struct regstat *i_regs,signed char entry[])
4172 {
4173   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
4174     int ra=-1;
4175     int agr=AGEN1+(i&1);
4176     int mgr=MGEN1+(i&1);
4177     if(itype[i]==LOAD) {
4178       ra=get_reg(i_regs->regmap,rt1[i]);
4179       if(ra<0) ra=get_reg(i_regs->regmap,-1); 
4180       assert(ra>=0);
4181     }
4182     if(itype[i]==LOADLR) {
4183       ra=get_reg(i_regs->regmap,FTEMP);
4184     }
4185     if(itype[i]==STORE||itype[i]==STORELR) {
4186       ra=get_reg(i_regs->regmap,agr);
4187       if(ra<0) ra=get_reg(i_regs->regmap,-1);
4188     }
4189     if(itype[i]==C1LS||itype[i]==C2LS) {
4190       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
4191         ra=get_reg(i_regs->regmap,FTEMP);
4192       else { // SWC1/SDC1/SWC2/SDC2
4193         ra=get_reg(i_regs->regmap,agr);
4194         if(ra<0) ra=get_reg(i_regs->regmap,-1);
4195       }
4196     }
4197     int rs=get_reg(i_regs->regmap,rs1[i]);
4198     int rm=get_reg(i_regs->regmap,TLREG);
4199     if(ra>=0) {
4200       int offset=imm[i];
4201       int c=(i_regs->wasconst>>rs)&1;
4202       if(rs1[i]==0) {
4203         // Using r0 as a base address
4204         /*if(rm>=0) {
4205           if(!entry||entry[rm]!=mgr) {
4206             generate_map_const(offset,rm);
4207           } // else did it in the previous cycle
4208         }*/
4209         if(!entry||entry[ra]!=agr) {
4210           if (opcode[i]==0x22||opcode[i]==0x26) {
4211             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4212           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4213             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4214           }else{
4215             emit_movimm(offset,ra);
4216           }
4217         } // else did it in the previous cycle
4218       }
4219       else if(rs<0) {
4220         if(!entry||entry[ra]!=rs1[i])
4221           emit_loadreg(rs1[i],ra);
4222         //if(!entry||entry[ra]!=rs1[i])
4223         //  printf("poor load scheduling!\n");
4224       }
4225       else if(c) {
4226         if(rm>=0) {
4227           if(!entry||entry[rm]!=mgr) {
4228             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4229               // Stores to memory go through the mapper to detect self-modifying
4230               // code; loads don't.
4231               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4232                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4233                 generate_map_const(constmap[i][rs]+offset,rm);
4234             }else{
4235               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4236                 generate_map_const(constmap[i][rs]+offset,rm);
4237             }
4238           }
4239         }
4240         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4241           if(!entry||entry[ra]!=agr) {
4242             if (opcode[i]==0x22||opcode[i]==0x26) {
4243               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4244             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4245               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4246             }else{
4247               #ifdef HOST_IMM_ADDR32
4248               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4249                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4250               #endif
4251               emit_movimm(constmap[i][rs]+offset,ra);
4252             }
4253           } // else did it in the previous cycle
4254         } // else load_consts already did it
4255       }
4256       if(offset&&!c&&rs1[i]) {
4257         if(rs>=0) {
4258           emit_addimm(rs,offset,ra);
4259         }else{
4260           emit_addimm(ra,offset,ra);
4261         }
4262       }
4263     }
4264   }
4265   // Preload constants for next instruction
4266   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4267     int agr,ra;
4268     #ifndef HOST_IMM_ADDR32
4269     // Mapper entry
4270     agr=MGEN1+((i+1)&1);
4271     ra=get_reg(i_regs->regmap,agr);
4272     if(ra>=0) {
4273       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4274       int offset=imm[i+1];
4275       int c=(regs[i+1].wasconst>>rs)&1;
4276       if(c) {
4277         if(itype[i+1]==STORE||itype[i+1]==STORELR
4278            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4279           // Stores to memory go through the mapper to detect self-modifying
4280           // code; loads don't.
4281           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4282              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4283             generate_map_const(constmap[i+1][rs]+offset,ra);
4284         }else{
4285           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4286             generate_map_const(constmap[i+1][rs]+offset,ra);
4287         }
4288       }
4289       /*else if(rs1[i]==0) {
4290         generate_map_const(offset,ra);
4291       }*/
4292     }
4293     #endif
4294     // Actual address
4295     agr=AGEN1+((i+1)&1);
4296     ra=get_reg(i_regs->regmap,agr);
4297     if(ra>=0) {
4298       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4299       int offset=imm[i+1];
4300       int c=(regs[i+1].wasconst>>rs)&1;
4301       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4302         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4303           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4304         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4305           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4306         }else{
4307           #ifdef HOST_IMM_ADDR32
4308           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4309              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4310           #endif
4311           emit_movimm(constmap[i+1][rs]+offset,ra);
4312         }
4313       }
4314       else if(rs1[i+1]==0) {
4315         // Using r0 as a base address
4316         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4317           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4318         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4319           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4320         }else{
4321           emit_movimm(offset,ra);
4322         }
4323       }
4324     }
4325   }
4326 }
4327
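// Follow a constant forward while the same host register keeps it and
// return the final value to load; returns 0 if nothing needs to be loaded
// (the register is unneeded, or the address folds into a later load).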
4328 int get_final_value(int hr, int i, int *value)
4329 {
4330   int reg=regs[i].regmap[hr];
4331   while(i<slen-1) {
4332     if(regs[i+1].regmap[hr]!=reg) break;
4333     if(!((regs[i+1].isconst>>hr)&1)) break;
4334     if(bt[i+1]) break;
4335     i++;
4336   }
4337   if(i<slen-1) {
4338     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4339       *value=constmap[i][hr];
4340       return 1;
4341     }
4342     if(!bt[i+1]) {
4343       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4344         // Load in delay slot, out-of-order execution
4345         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4346         {
4347           #ifdef HOST_IMM_ADDR32
4348           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4349           #endif
4350           // Precompute load address
4351           *value=constmap[i][hr]+imm[i+2];
4352           return 1;
4353         }
4354       }
4355       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4356       {
4357         #ifdef HOST_IMM_ADDR32
4358         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4359         #endif
4360         // Precompute load address
4361         *value=constmap[i][hr]+imm[i+1];
4362         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4363         return 1;
4364       }
4365     }
4366   }
4367   *value=constmap[i][hr];
4368   //printf("c=%x\n",(int)constmap[i][hr]);
4369   if(i==slen-1) return 1;
4370   if(reg<64) {
4371     return !((unneeded_reg[i+1]>>reg)&1);
4372   }else{
4373     return !((unneeded_reg_upper[i+1]>>reg)&1);
4374   }
4375 }
4376
4377 // Load registers with known constants
4378 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4379 {
4380   int hr;
4381   // Load 32-bit regs
4382   for(hr=0;hr<HOST_REGS;hr++) {
4383     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4384       //if(entry[hr]!=regmap[hr]) {
4385       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4386         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4387           int value;
4388           if(get_final_value(hr,i,&value)) {
4389             if(value==0) {
4390               emit_zeroreg(hr);
4391             }
4392             else {
4393               emit_movimm(value,hr);
4394             }
4395           }
4396         }
4397       }
4398     }
4399   }
4400   // Load 64-bit regs
4401   for(hr=0;hr<HOST_REGS;hr++) {
4402     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4403       //if(entry[hr]!=regmap[hr]) {
4404       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4405         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4406           if((is32>>(regmap[hr]&63))&1) {
4407             int lr=get_reg(regmap,regmap[hr]-64);
4408             assert(lr>=0);
4409             emit_sarimm(lr,31,hr);
4410           }
4411           else
4412           {
4413             int value;
4414             if(get_final_value(hr,i,&value)) {
4415               if(value==0) {
4416                 emit_zeroreg(hr);
4417               }
4418               else {
4419                 emit_movimm(value,hr);
4420               }
4421             }
4422           }
4423         }
4424       }
4425     }
4426   }
4427 }
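// Like load_consts above, but loads every constant register marked dirty,
// regardless of what the previous instruction left in the host registers.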
4428 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4429 {
4430   int hr;
4431   // Load 32-bit regs
4432   for(hr=0;hr<HOST_REGS;hr++) {
4433     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4434       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4435         int value=constmap[i][hr];
4436         if(value==0) {
4437           emit_zeroreg(hr);
4438         }
4439         else {
4440           emit_movimm(value,hr);
4441         }
4442       }
4443     }
4444   }
4445   // Load 64-bit regs
4446   for(hr=0;hr<HOST_REGS;hr++) {
4447     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4448       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4449         if((is32>>(regmap[hr]&63))&1) {
4450           int lr=get_reg(regmap,regmap[hr]-64);
4451           assert(lr>=0);
4452           emit_sarimm(lr,31,hr);
4453         }
4454         else
4455         {
4456           int value=constmap[i][hr];
4457           if(value==0) {
4458             emit_zeroreg(hr);
4459           }
4460           else {
4461             emit_movimm(value,hr);
4462           }
4463         }
4464       }
4465     }
4466   }
4467 }
4468
4469 // Write out all dirty registers (except cycle count)
4470 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4471 {
4472   int hr;
4473   for(hr=0;hr<HOST_REGS;hr++) {
4474     if(hr!=EXCLUDE_REG) {
4475       if(i_regmap[hr]>0) {
4476         if(i_regmap[hr]!=CCREG) {
4477           if((i_dirty>>hr)&1) {
4478             if(i_regmap[hr]<64) {
4479               emit_storereg(i_regmap[hr],hr);
4480 #ifndef FORCE32
4481               if( ((i_is32>>i_regmap[hr])&1) ) {
4482                 #ifdef DESTRUCTIVE_WRITEBACK
4483                 emit_sarimm(hr,31,hr);
4484                 emit_storereg(i_regmap[hr]|64,hr);
4485                 #else
4486                 emit_sarimm(hr,31,HOST_TEMPREG);
4487                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4488                 #endif
4489               }
4490 #endif
4491             }else{
4492               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4493                 emit_storereg(i_regmap[hr],hr);
4494               }
4495             }
4496           }
4497         }
4498       }
4499     }
4500   }
4501 }
4502 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4503 // This writes the registers not written by store_regs_bt
4504 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4505 {
4506   int hr;
4507   int t=(addr-start)>>2;
4508   for(hr=0;hr<HOST_REGS;hr++) {
4509     if(hr!=EXCLUDE_REG) {
4510       if(i_regmap[hr]>0) {
4511         if(i_regmap[hr]!=CCREG) {
4512           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4513             if((i_dirty>>hr)&1) {
4514               if(i_regmap[hr]<64) {
4515                 emit_storereg(i_regmap[hr],hr);
4516 #ifndef FORCE32
4517                 if( ((i_is32>>i_regmap[hr])&1) ) {
4518                   #ifdef DESTRUCTIVE_WRITEBACK
4519                   emit_sarimm(hr,31,hr);
4520                   emit_storereg(i_regmap[hr]|64,hr);
4521                   #else
4522                   emit_sarimm(hr,31,HOST_TEMPREG);
4523                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4524                   #endif
4525                 }
4526 #endif
4527               }else{
4528                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4529                   emit_storereg(i_regmap[hr],hr);
4530                 }
4531               }
4532             }
4533           }
4534         }
4535       }
4536     }
4537   }
4538 }
4539
4540 // Load all registers (except cycle count)
4541 void load_all_regs(signed char i_regmap[])
4542 {
4543   int hr;
4544   for(hr=0;hr<HOST_REGS;hr++) {
4545     if(hr!=EXCLUDE_REG) {
4546       if(i_regmap[hr]==0) {
4547         emit_zeroreg(hr);
4548       }
4549       else
4550       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4551       {
4552         emit_loadreg(i_regmap[hr],hr);
4553       }
4554     }
4555   }
4556 }
4557
4558 // Load all current registers also needed by next instruction
4559 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4560 {
4561   int hr;
4562   for(hr=0;hr<HOST_REGS;hr++) {
4563     if(hr!=EXCLUDE_REG) {
4564       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4565         if(i_regmap[hr]==0) {
4566           emit_zeroreg(hr);
4567         }
4568         else
4569         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4570         {
4571           emit_loadreg(i_regmap[hr],hr);
4572         }
4573       }
4574     }
4575   }
4576 }
4577
4578 // Load all regs, storing cycle count if necessary
4579 void load_regs_entry(int t)
4580 {
4581   int hr;
4582   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4583   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4584   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4585     emit_storereg(CCREG,HOST_CCREG);
4586   }
4587   // Load 32-bit regs
4588   for(hr=0;hr<HOST_REGS;hr++) {
4589     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4590       if(regs[t].regmap_entry[hr]==0) {
4591         emit_zeroreg(hr);
4592       }
4593       else if(regs[t].regmap_entry[hr]!=CCREG)
4594       {
4595         emit_loadreg(regs[t].regmap_entry[hr],hr);
4596       }
4597     }
4598   }
4599   // Load 64-bit regs
4600   for(hr=0;hr<HOST_REGS;hr++) {
4601     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4602       assert(regs[t].regmap_entry[hr]!=64);
4603       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4604         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4605         if(lr<0) {
4606           emit_loadreg(regs[t].regmap_entry[hr],hr);
4607         }
4608         else
4609         {
4610           emit_sarimm(lr,31,hr);
4611         }
4612       }
4613       else
4614       {
4615         emit_loadreg(regs[t].regmap_entry[hr],hr);
4616       }
4617     }
4618   }
4619 }
4620
4621 // Store dirty registers prior to branch
4622 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4623 {
4624   if(internal_branch(i_is32,addr))
4625   {
4626     int t=(addr-start)>>2;
4627     int hr;
4628     for(hr=0;hr<HOST_REGS;hr++) {
4629       if(hr!=EXCLUDE_REG) {
4630         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4631           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4632             if((i_dirty>>hr)&1) {
4633               if(i_regmap[hr]<64) {
4634                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4635                   emit_storereg(i_regmap[hr],hr);
4636                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4637                     #ifdef DESTRUCTIVE_WRITEBACK
4638                     emit_sarimm(hr,31,hr);
4639                     emit_storereg(i_regmap[hr]|64,hr);
4640                     #else
4641                     emit_sarimm(hr,31,HOST_TEMPREG);
4642                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4643                     #endif
4644                   }
4645                 }
4646               }else{
4647                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4648                   emit_storereg(i_regmap[hr],hr);
4649                 }
4650               }
4651             }
4652           }
4653         }
4654       }
4655     }
4656   }
4657   else
4658   {
4659     // Branch out of this block, write out all dirty regs
4660     wb_dirtys(i_regmap,i_is32,i_dirty);
4661   }
4662 }
4663
4664 // Load all needed registers for branch target
4665 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4666 {
4667   //if(addr>=start && addr<(start+slen*4))
4668   if(internal_branch(i_is32,addr))
4669   {
4670     int t=(addr-start)>>2;
4671     int hr;
4672     // Store the cycle count before loading something else
4673     if(i_regmap[HOST_CCREG]!=CCREG) {
4674       assert(i_regmap[HOST_CCREG]==-1);
4675     }
4676     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4677       emit_storereg(CCREG,HOST_CCREG);
4678     }
4679     // Load 32-bit regs
4680     for(hr=0;hr<HOST_REGS;hr++) {
4681       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4682         #ifdef DESTRUCTIVE_WRITEBACK
4683         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4684         #else
4685         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4686         #endif
4687           if(regs[t].regmap_entry[hr]==0) {
4688             emit_zeroreg(hr);
4689           }
4690           else if(regs[t].regmap_entry[hr]!=CCREG)
4691           {
4692             emit_loadreg(regs[t].regmap_entry[hr],hr);
4693           }
4694         }
4695       }
4696     }
4697     // Load 64-bit regs
4698     for(hr=0;hr<HOST_REGS;hr++) {
4699       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4700         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4701           assert(regs[t].regmap_entry[hr]!=64);
4702           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4703             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4704             if(lr<0) {
4705               emit_loadreg(regs[t].regmap_entry[hr],hr);
4706             }
4707             else
4708             {
4709               emit_sarimm(lr,31,hr);
4710             }
4711           }
4712           else
4713           {
4714             emit_loadreg(regs[t].regmap_entry[hr],hr);
4715           }
4716         }
4717         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4718           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4719           assert(lr>=0);
4720           emit_sarimm(lr,31,hr);
4721         }
4722       }
4723     }
4724   }
4725 }
4726
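// Check whether the current register state is compatible with the branch
// target's expected entry state, so the branch can be linked directly
// without extra writeback or reload code.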
4727 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4728 {
4729   if(addr>=start && addr<start+slen*4-4)
4730   {
4731     int t=(addr-start)>>2;
4732     int hr;
4733     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4734     for(hr=0;hr<HOST_REGS;hr++)
4735     {
4736       if(hr!=EXCLUDE_REG)
4737       {
4738         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4739         {
4740           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4741           {
4742             return 0;
4743           }
4744           else 
4745           if((i_dirty>>hr)&1)
4746           {
4747             if(i_regmap[hr]<TEMPREG)
4748             {
4749               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4750                 return 0;
4751             }
4752             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4753             {
4754               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4755                 return 0;
4756             }
4757           }
4758         }
4759         else // Same register but is it 32-bit or dirty?
4760         if(i_regmap[hr]>=0)
4761         {
4762           if(!((regs[t].dirty>>hr)&1))
4763           {
4764             if((i_dirty>>hr)&1)
4765             {
4766               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4767               {
4768                 //printf("%x: dirty no match\n",addr);
4769                 return 0;
4770               }
4771             }
4772           }
4773           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4774           {
4775             //printf("%x: is32 no match\n",addr);
4776             return 0;
4777           }
4778         }
4779       }
4780     }
4781     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4782 #ifndef FORCE32
4783     if(requires_32bit[t]&~i_is32) return 0;
4784 #endif
4785     // Delay slots are not valid branch targets
4786     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4787     // Delay slots require additional processing, so do not match
4788     if(is_ds[t]) return 0;
4789   }
4790   else
4791   {
4792     int hr;
4793     for(hr=0;hr<HOST_REGS;hr++)
4794     {
4795       if(hr!=EXCLUDE_REG)
4796       {
4797         if(i_regmap[hr]>=0)
4798         {
4799           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4800           {
4801             if((i_dirty>>hr)&1)
4802             {
4803               return 0;
4804             }
4805           }
4806         }
4807       }
4808     }
4809   }
4810   return 1;
4811 }
4812
4813 // Used when a branch jumps into the delay slot of another branch
4814 void ds_assemble_entry(int i)
4815 {
4816   int t=(ba[i]-start)>>2;
4817   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4818   assem_debug("Assemble delay slot at %x\n",ba[i]);
4819   assem_debug("<->\n");
4820   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4821     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4822   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4823   address_generation(t,&regs[t],regs[t].regmap_entry);
4824   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4825     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4826   cop1_usable=0;
4827   is_delayslot=0;
4828   switch(itype[t]) {
4829     case ALU:
4830       alu_assemble(t,&regs[t]);break;
4831     case IMM16:
4832       imm16_assemble(t,&regs[t]);break;
4833     case SHIFT:
4834       shift_assemble(t,&regs[t]);break;
4835     case SHIFTIMM:
4836       shiftimm_assemble(t,&regs[t]);break;
4837     case LOAD:
4838       load_assemble(t,&regs[t]);break;
4839     case LOADLR:
4840       loadlr_assemble(t,&regs[t]);break;
4841     case STORE:
4842       store_assemble(t,&regs[t]);break;
4843     case STORELR:
4844       storelr_assemble(t,&regs[t]);break;
4845     case COP0:
4846       cop0_assemble(t,&regs[t]);break;
4847     case COP1:
4848       cop1_assemble(t,&regs[t]);break;
4849     case C1LS:
4850       c1ls_assemble(t,&regs[t]);break;
4851     case COP2:
4852       cop2_assemble(t,&regs[t]);break;
4853     case C2LS:
4854       c2ls_assemble(t,&regs[t]);break;
4855     case C2OP:
4856       c2op_assemble(t,&regs[t]);break;
4857     case FCONV:
4858       fconv_assemble(t,&regs[t]);break;
4859     case FLOAT:
4860       float_assemble(t,&regs[t]);break;
4861     case FCOMP:
4862       fcomp_assemble(t,&regs[t]);break;
4863     case MULTDIV:
4864       multdiv_assemble(t,&regs[t]);break;
4865     case MOV:
4866       mov_assemble(t,&regs[t]);break;
4867     case SYSCALL:
4868     case HLECALL:
4869     case INTCALL:
4870     case SPAN:
4871     case UJUMP:
4872     case RJUMP:
4873     case CJUMP:
4874     case SJUMP:
4875     case FJUMP:
4876       printf("Jump in the delay slot.  This is probably a bug.\n");
4877   }
4878   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4879   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4880   if(internal_branch(regs[t].is32,ba[i]+4))
4881     assem_debug("branch: internal\n");
4882   else
4883     assem_debug("branch: external\n");
4884   assert(internal_branch(regs[t].is32,ba[i]+4));
4885   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4886   emit_jmp(0);
4887 }
4888
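// Emit the cycle-count test before a branch: add the accumulated cycles,
// branch out to a CC_STUB when the counter becomes non-negative, and treat
// a branch-to-self with a NOP delay slot as an idle loop.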
4889 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4890 {
4891   int count;
4892   int jaddr;
4893   int idle=0;
4894   if(itype[i]==RJUMP)
4895   {
4896     *adj=0;
4897   }
4898   //if(ba[i]>=start && ba[i]<(start+slen*4))
4899   if(internal_branch(branch_regs[i].is32,ba[i]))
4900   {
4901     int t=(ba[i]-start)>>2;
4902     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4903     else *adj=ccadj[t];
4904   }
4905   else
4906   {
4907     *adj=0;
4908   }
4909   count=ccadj[i];
4910   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4911     // Idle loop
4912     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4913     idle=(int)out;
4914     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4915     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4916     jaddr=(int)out;
4917     emit_jmp(0);
4918   }
4919   else if(*adj==0||invert) {
4920     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4921     jaddr=(int)out;
4922     emit_jns(0);
4923   }
4924   else
4925   {
4926     emit_cmpimm(HOST_CCREG,-CLOCK_DIVIDER*(count+2));
4927     jaddr=(int)out;
4928     emit_jns(0);
4929   }
4930   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4931 }
4932
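// Out-of-line code reached when the cycle count expires at a branch:
// write back dirty registers and store the return PC (computed with
// conditional moves when it depends on the branch outcome) in pcaddr.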
4933 void do_ccstub(int n)
4934 {
4935   literal_pool(256);
4936   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4937   set_jump_target(stubs[n][1],(int)out);
4938   int i=stubs[n][4];
4939   if(stubs[n][6]==NULLDS) {
4940     // Delay slot instruction is nullified ("likely" branch)
4941     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4942   }
4943   else if(stubs[n][6]!=TAKEN) {
4944     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4945   }
4946   else {
4947     if(internal_branch(branch_regs[i].is32,ba[i]))
4948       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4949   }
4950   if(stubs[n][5]!=-1)
4951   {
4952     // Save PC as return address
4953     emit_movimm(stubs[n][5],EAX);
4954     emit_writeword(EAX,(int)&pcaddr);
4955   }
4956   else
4957   {
4958     // Return address depends on which way the branch goes
4959     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4960     {
4961       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4962       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4963       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4964       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4965       if(rs1[i]==0)
4966       {
4967         s1l=s2l;s1h=s2h;
4968         s2l=s2h=-1;
4969       }
4970       else if(rs2[i]==0)
4971       {
4972         s2l=s2h=-1;
4973       }
4974       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4975         s1h=s2h=-1;
4976       }
4977       assert(s1l>=0);
4978       #ifdef DESTRUCTIVE_WRITEBACK
4979       if(rs1[i]) {
4980         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4981           emit_loadreg(rs1[i],s1l);
4982       } 
4983       else {
4984         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4985           emit_loadreg(rs2[i],s1l);
4986       }
4987       if(s2l>=0)
4988         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4989           emit_loadreg(rs2[i],s2l);
4990       #endif
4991       int hr=0;
4992       int addr=-1,alt=-1,ntaddr=-1;
4993       while(hr<HOST_REGS)
4994       {
4995         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4996            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4997            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4998         {
4999           addr=hr++;break;
5000         }
5001         hr++;
5002       }
5003       while(hr<HOST_REGS)
5004       {
5005         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5006            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5007            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5008         {
5009           alt=hr++;break;
5010         }
5011         hr++;
5012       }
5013       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5014       {
5015         while(hr<HOST_REGS)
5016         {
5017           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5018              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5019              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5020           {
5021             ntaddr=hr;break;
5022           }
5023           hr++;
5024         }
5025         assert(hr<HOST_REGS);
5026       }
5027       if((opcode[i]&0x2f)==4) // BEQ
5028       {
5029         #ifdef HAVE_CMOV_IMM
5030         if(s1h<0) {
5031           if(s2l>=0) emit_cmp(s1l,s2l);
5032           else emit_test(s1l,s1l);
5033           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5034         }
5035         else
5036         #endif
5037         {
5038           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5039           if(s1h>=0) {
5040             if(s2h>=0) emit_cmp(s1h,s2h);
5041             else emit_test(s1h,s1h);
5042             emit_cmovne_reg(alt,addr);
5043           }
5044           if(s2l>=0) emit_cmp(s1l,s2l);
5045           else emit_test(s1l,s1l);
5046           emit_cmovne_reg(alt,addr);
5047         }
5048       }
5049       if((opcode[i]&0x2f)==5) // BNE
5050       {
5051         #ifdef HAVE_CMOV_IMM
5052         if(s1h<0) {
5053           if(s2l>=0) emit_cmp(s1l,s2l);
5054           else emit_test(s1l,s1l);
5055           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5056         }
5057         else
5058         #endif
5059         {
5060           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5061           if(s1h>=0) {
5062             if(s2h>=0) emit_cmp(s1h,s2h);
5063             else emit_test(s1h,s1h);
5064             emit_cmovne_reg(alt,addr);
5065           }
5066           if(s2l>=0) emit_cmp(s1l,s2l);
5067           else emit_test(s1l,s1l);
5068           emit_cmovne_reg(alt,addr);
5069         }
5070       }
5071       if((opcode[i]&0x2f)==6) // BLEZ
5072       {
5073         //emit_movimm(ba[i],alt);
5074         //emit_movimm(start+i*4+8,addr);
5075         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5076         emit_cmpimm(s1l,1);
5077         if(s1h>=0) emit_mov(addr,ntaddr);
5078         emit_cmovl_reg(alt,addr);
5079         if(s1h>=0) {
5080           emit_test(s1h,s1h);
5081           emit_cmovne_reg(ntaddr,addr);
5082           emit_cmovs_reg(alt,addr);
5083         }
5084       }
5085       if((opcode[i]&0x2f)==7) // BGTZ
5086       {
5087         //emit_movimm(ba[i],addr);
5088         //emit_movimm(start+i*4+8,ntaddr);
5089         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5090         emit_cmpimm(s1l,1);
5091         if(s1h>=0) emit_mov(addr,alt);
5092         emit_cmovl_reg(ntaddr,addr);
5093         if(s1h>=0) {
5094           emit_test(s1h,s1h);
5095           emit_cmovne_reg(alt,addr);
5096           emit_cmovs_reg(ntaddr,addr);
5097         }
5098       }
5099       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
5100       {
5101         //emit_movimm(ba[i],alt);
5102         //emit_movimm(start+i*4+8,addr);
5103         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5104         if(s1h>=0) emit_test(s1h,s1h);
5105         else emit_test(s1l,s1l);
5106         emit_cmovs_reg(alt,addr);
5107       }
5108       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
5109       {
5110         //emit_movimm(ba[i],addr);
5111         //emit_movimm(start+i*4+8,alt);
5112         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5113         if(s1h>=0) emit_test(s1h,s1h);
5114         else emit_test(s1l,s1l);
5115         emit_cmovs_reg(alt,addr);
5116       }
5117       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5118         if(source[i]&0x10000) // BC1T
5119         {
5120           //emit_movimm(ba[i],alt);
5121           //emit_movimm(start+i*4+8,addr);
5122           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5123           emit_testimm(s1l,0x800000);
5124           emit_cmovne_reg(alt,addr);
5125         }
5126         else // BC1F
5127         {
5128           //emit_movimm(ba[i],addr);
5129           //emit_movimm(start+i*4+8,alt);
5130           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5131           emit_testimm(s1l,0x800000);
5132           emit_cmovne_reg(alt,addr);
5133         }
5134       }
5135       emit_writeword(addr,(int)&pcaddr);
5136     }
5137     else
5138     if(itype[i]==RJUMP)
5139     {
5140       int r=get_reg(branch_regs[i].regmap,rs1[i]);
5141       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5142         r=get_reg(branch_regs[i].regmap,RTEMP);
5143       }
5144       emit_writeword(r,(int)&pcaddr);
5145     }
5146     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
5147   }
5148   // Update cycle count
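       // Temporarily add the stub's cycle adjustment (stubs[n][3]) so
       // cc_interrupt sees the full count, then take it back out before
       // returning to the compiled code.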
5149   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5150   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5151   emit_call((int)cc_interrupt);
5152   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5153   if(stubs[n][6]==TAKEN) {
5154     if(internal_branch(branch_regs[i].is32,ba[i]))
5155       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5156     else if(itype[i]==RJUMP) {
5157       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5158         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5159       else
5160         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5161     }
5162   }else if(stubs[n][6]==NOTTAKEN) {
5163     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5164     else load_all_regs(branch_regs[i].regmap);
5165   }else if(stubs[n][6]==NULLDS) {
5166     // Delay slot instruction is nullified ("likely" branch)
5167     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5168     else load_all_regs(regs[i].regmap);
5169   }else{
5170     load_all_regs(branch_regs[i].regmap);
5171   }
5172   emit_jmp(stubs[n][2]); // return address
5173   
5174   /* This works but uses a lot of memory...
5175   emit_readword((int)&last_count,ECX);
5176   emit_add(HOST_CCREG,ECX,EAX);
5177   emit_writeword(EAX,(int)&Count);
5178   emit_call((int)gen_interupt);
5179   emit_readword((int)&Count,HOST_CCREG);
5180   emit_readword((int)&next_interupt,EAX);
5181   emit_readword((int)&pending_exception,EBX);
5182   emit_writeword(EAX,(int)&last_count);
5183   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5184   emit_test(EBX,EBX);
5185   int jne_instr=(int)out;
5186   emit_jne(0);
5187   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5188   load_all_regs(branch_regs[i].regmap);
5189   emit_jmp(stubs[n][2]); // return address
5190   set_jump_target(jne_instr,(int)out);
5191   emit_readword((int)&pcaddr,EAX);
5192   // Call get_addr_ht instead of doing the hash table here.
5193   // This code is executed infrequently and takes up a lot of space
5194   // so smaller is better.
5195   emit_storereg(CCREG,HOST_CCREG);
5196   emit_pushreg(EAX);
5197   emit_call((int)get_addr_ht);
5198   emit_loadreg(CCREG,HOST_CCREG);
5199   emit_addimm(ESP,4,ESP);
5200   emit_jmpreg(EAX);*/
5201 }
5202
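     // Record a jump emitted at 'addr' whose target (a MIPS virtual address)
     // cannot be resolved yet; callers pass the internal_branch() result as
     // the third argument.  The recorded entries are patched once the whole
     // block has been assembled.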
5203 void add_to_linker(int addr,int target,int ext)
5204 {
5205   link_addr[linkcount][0]=addr;
5206   link_addr[linkcount][1]=target;
5207   link_addr[linkcount][2]=ext;  
5208   linkcount++;
5209 }
5210
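     // Emit the $ra write for J(AL)-type jumps: the link register receives
     // start+i*4+8, the address of the instruction after the delay slot.
     // With USE_MINI_HT the helper may also seed the mini hash table when a
     // delay-slot-safe temporary register is available.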
5211 static void ujump_assemble_write_ra(int i)
5212 {
5213   int rt;
5214   unsigned int return_address;
5215   rt=get_reg(branch_regs[i].regmap,31);
5216   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5217   //assert(rt>=0);
5218   return_address=start+i*4+8;
5219   if(rt>=0) {
5220     #ifdef USE_MINI_HT
5221     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
5222       int temp=-1; // note: must be ds-safe
5223       #ifdef HOST_TEMPREG
5224       temp=HOST_TEMPREG;
5225       #endif
5226       if(temp>=0) do_miniht_insert(return_address,rt,temp);
5227       else emit_movimm(return_address,rt);
5228     }
5229     else
5230     #endif
5231     {
5232       #ifdef REG_PREFETCH
5233       if(temp>=0) 
5234       {
5235         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5236       }
5237       #endif
5238       emit_movimm(return_address,rt); // PC into link register
5239       #ifdef IMM_PREFETCH
5240       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5241       #endif
5242     }
5243   }
5244 }
5245
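     // Assemble J/JAL.  The delay slot is emitted first; $ra is written
     // before the delay slot if the slot reads it (ra_done), otherwise
     // afterwards.  After the cycle-count check (do_cc), control either
     // enters an in-block delay-slot target via ds_assemble_entry(), or the
     // jump is recorded with add_to_linker() for later patching.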
5246 void ujump_assemble(int i,struct regstat *i_regs)
5247 {
5248   signed char *i_regmap=i_regs->regmap;
5249   int ra_done=0;
5250   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5251   address_generation(i+1,i_regs,regs[i].regmap_entry);
5252   #ifdef REG_PREFETCH
5253   int temp=get_reg(branch_regs[i].regmap,PTEMP);
5254   if(rt1[i]==31&&temp>=0) 
5255   {
5256     int return_address=start+i*4+8;
5257     if(get_reg(branch_regs[i].regmap,31)>0) 
5258     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5259   }
5260   #endif
5261   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5262     ujump_assemble_write_ra(i); // writeback ra for DS
5263     ra_done=1;
5264   }
5265   ds_assemble(i+1,i_regs);
5266   uint64_t bc_unneeded=branch_regs[i].u;
5267   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5268   bc_unneeded|=1|(1LL<<rt1[i]);
5269   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5270   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5271                 bc_unneeded,bc_unneeded_upper);
5272   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5273   if(!ra_done&&rt1[i]==31)
5274     ujump_assemble_write_ra(i);
5275   int cc,adj;
5276   cc=get_reg(branch_regs[i].regmap,CCREG);
5277   assert(cc==HOST_CCREG);
5278   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5279   #ifdef REG_PREFETCH
5280   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5281   #endif
5282   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5283   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5284   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5285   if(internal_branch(branch_regs[i].is32,ba[i]))
5286     assem_debug("branch: internal\n");
5287   else
5288     assem_debug("branch: external\n");
5289   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5290     ds_assemble_entry(i);
5291   }
5292   else {
5293     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5294     emit_jmp(0);
5295   }
5296 }
5297
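     // Emit the link-register write for JALR: rt1[i] receives start+i*4+8.
     // The asserts guard against the delay slot overwriting the link
     // register.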
5298 static void rjump_assemble_write_ra(int i)
5299 {
5300   int rt,return_address;
5301   assert(rt1[i+1]!=rt1[i]);
5302   assert(rt2[i+1]!=rt1[i]);
5303   rt=get_reg(branch_regs[i].regmap,rt1[i]);
5304   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5305   assert(rt>=0);
5306   return_address=start+i*4+8;
5307   #ifdef REG_PREFETCH
5308   if(temp>=0) 
5309   {
5310     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5311   }
5312   #endif
5313   emit_movimm(return_address,rt); // PC into link register
5314   #ifdef IMM_PREFETCH
5315   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5316   #endif
5317 }
5318
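     // Assemble JR/JALR.  If the delay slot overwrites the branch register,
     // its value is copied to RTEMP first.  The cycle counter is bumped and
     // a CC_STUB handles the interrupt case; the jump itself goes through
     // jump_vaddr_reg[rs], or through the mini hash table for "jr $ra" when
     // USE_MINI_HT is enabled.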
5319 void rjump_assemble(int i,struct regstat *i_regs)
5320 {
5321   signed char *i_regmap=i_regs->regmap;
5322   int temp;
5323   int rs,cc,adj;
5324   int ra_done=0;
5325   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5326   assert(rs>=0);
5327   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5328     // Delay slot abuse, make a copy of the branch address register
5329     temp=get_reg(branch_regs[i].regmap,RTEMP);
5330     assert(temp>=0);
5331     assert(regs[i].regmap[temp]==RTEMP);
5332     emit_mov(rs,temp);
5333     rs=temp;
5334   }
5335   address_generation(i+1,i_regs,regs[i].regmap_entry);
5336   #ifdef REG_PREFETCH
5337   if(rt1[i]==31) 
5338   {
5339     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5340       int return_address=start+i*4+8;
5341       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5342     }
5343   }
5344   #endif
5345   #ifdef USE_MINI_HT
5346   if(rs1[i]==31) {
5347     int rh=get_reg(regs[i].regmap,RHASH);
5348     if(rh>=0) do_preload_rhash(rh);
5349   }
5350   #endif
5351   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5352     rjump_assemble_write_ra(i);
5353     ra_done=1;
5354   }
5355   ds_assemble(i+1,i_regs);
5356   uint64_t bc_unneeded=branch_regs[i].u;
5357   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5358   bc_unneeded|=1|(1LL<<rt1[i]);
5359   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5360   bc_unneeded&=~(1LL<<rs1[i]);
5361   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5362                 bc_unneeded,bc_unneeded_upper);
5363   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5364   if(!ra_done&&rt1[i]!=0)
5365     rjump_assemble_write_ra(i);
5366   cc=get_reg(branch_regs[i].regmap,CCREG);
5367   assert(cc==HOST_CCREG);
5368   #ifdef USE_MINI_HT
5369   int rh=get_reg(branch_regs[i].regmap,RHASH);
5370   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5371   if(rs1[i]==31) {
5372     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5373     do_preload_rhtbl(ht);
5374     do_rhash(rs,rh);
5375   }
5376   #endif
5377   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5378   #ifdef DESTRUCTIVE_WRITEBACK
5379   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5380     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5381       emit_loadreg(rs1[i],rs);
5382     }
5383   }
5384   #endif
5385   #ifdef REG_PREFETCH
5386   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5387   #endif
5388   #ifdef USE_MINI_HT
5389   if(rs1[i]==31) {
5390     do_miniht_load(ht,rh);
5391   }
5392   #endif
5393   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5394   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5395   //assert(adj==0);
5396   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5397   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5398   emit_jns(0);
5399   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5400   #ifdef USE_MINI_HT
5401   if(rs1[i]==31) {
5402     do_miniht_jump(rs,rh,ht);
5403   }
5404   else
5405   #endif
5406   {
5407     //if(rs!=EAX) emit_mov(rs,EAX);
5408     //emit_jmp((int)jump_vaddr_eax);
5409     emit_jmp(jump_vaddr_reg[rs]);
5410   }
5411   /* Check hash table
5412   temp=!rs;
5413   emit_mov(rs,temp);
5414   emit_shrimm(rs,16,rs);
5415   emit_xor(temp,rs,rs);
5416   emit_movzwl_reg(rs,rs);
5417   emit_shlimm(rs,4,rs);
5418   emit_cmpmem_indexed((int)hash_table,rs,temp);
5419   emit_jne((int)out+14);
5420   emit_readword_indexed((int)hash_table+4,rs,rs);
5421   emit_jmpreg(rs);
5422   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5423   emit_addimm_no_flags(8,rs);
5424   emit_jeq((int)out-17);
5425   // No hit on hash table, call compiler
5426   emit_pushreg(temp);
5427 //DEBUG >
5428 #ifdef DEBUG_CYCLE_COUNT
5429   emit_readword((int)&last_count,ECX);
5430   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5431   emit_readword((int)&next_interupt,ECX);
5432   emit_writeword(HOST_CCREG,(int)&Count);
5433   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5434   emit_writeword(ECX,(int)&last_count);
5435 #endif
5436 //DEBUG <
5437   emit_storereg(CCREG,HOST_CCREG);
5438   emit_call((int)get_addr);
5439   emit_loadreg(CCREG,HOST_CCREG);
5440   emit_addimm(ESP,4,ESP);
5441   emit_jmpreg(EAX);*/
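       // Pad with one no-op register move when the output is not 8-byte
       // aligned, so the code that follows starts on an 8-byte boundary
       // (Cortex-A8 branch-predictor workaround).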
5442   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5443   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5444   #endif
5445 }
5446
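     // Assemble BEQ/BNE/BLEZ/BGTZ and their "likely" forms.  If the register
     // mapping at the target does not match (match_bt), the branch sense is
     // inverted so the taken path can do its writeback/reload out of line.
     // ooo[i] selects delay-slot-first (out-of-order) or branch-first
     // (in-order) assembly; the in-order path also covers the likely forms,
     // whose delay slot is nullified when the branch is not taken (NULLDS).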
5447 void cjump_assemble(int i,struct regstat *i_regs)
5448 {
5449   signed char *i_regmap=i_regs->regmap;
5450   int cc;
5451   int match;
5452   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5453   assem_debug("match=%d\n",match);
5454   int s1h,s1l,s2h,s2l;
5455   int prev_cop1_usable=cop1_usable;
5456   int unconditional=0,nop=0;
5457   int only32=0;
5458   int invert=0;
5459   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5460   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5461   if(!match) invert=1;
5462   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5463   if(i>(ba[i]-start)>>2) invert=1;
5464   #endif
5465   
5466   if(ooo[i]) {
5467     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5468     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5469     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5470     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5471   }
5472   else {
5473     s1l=get_reg(i_regmap,rs1[i]);
5474     s1h=get_reg(i_regmap,rs1[i]|64);
5475     s2l=get_reg(i_regmap,rs2[i]);
5476     s2h=get_reg(i_regmap,rs2[i]|64);
5477   }
5478   if(rs1[i]==0&&rs2[i]==0)
5479   {
5480     if(opcode[i]&1) nop=1;
5481     else unconditional=1;
5482     //assert(opcode[i]!=5);
5483     //assert(opcode[i]!=7);
5484     //assert(opcode[i]!=0x15);
5485     //assert(opcode[i]!=0x17);
5486   }
5487   else if(rs1[i]==0)
5488   {
5489     s1l=s2l;s1h=s2h;
5490     s2l=s2h=-1;
5491     only32=(regs[i].was32>>rs2[i])&1;
5492   }
5493   else if(rs2[i]==0)
5494   {
5495     s2l=s2h=-1;
5496     only32=(regs[i].was32>>rs1[i])&1;
5497   }
5498   else {
5499     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5500   }
5501
5502   if(ooo[i]) {
5503     // Out of order execution (delay slot first)
5504     //printf("OOOE\n");
5505     address_generation(i+1,i_regs,regs[i].regmap_entry);
5506     ds_assemble(i+1,i_regs);
5507     int adj;
5508     uint64_t bc_unneeded=branch_regs[i].u;
5509     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5510     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5511     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5512     bc_unneeded|=1;
5513     bc_unneeded_upper|=1;
5514     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5515                   bc_unneeded,bc_unneeded_upper);
5516     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5517     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5518     cc=get_reg(branch_regs[i].regmap,CCREG);
5519     assert(cc==HOST_CCREG);
5520     if(unconditional) 
5521       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5522     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5523     //assem_debug("cycle count (adj)\n");
5524     if(unconditional) {
5525       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5526       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5527         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5528         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5529         if(internal)
5530           assem_debug("branch: internal\n");
5531         else
5532           assem_debug("branch: external\n");
5533         if(internal&&is_ds[(ba[i]-start)>>2]) {
5534           ds_assemble_entry(i);
5535         }
5536         else {
5537           add_to_linker((int)out,ba[i],internal);
5538           emit_jmp(0);
5539         }
5540         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5541         if(((u_int)out)&7) emit_addnop(0);
5542         #endif
5543       }
5544     }
5545     else if(nop) {
5546       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5547       int jaddr=(int)out;
5548       emit_jns(0);
5549       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5550     }
5551     else {
5552       int taken=0,nottaken=0,nottaken1=0;
5553       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5554       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5555       if(!only32)
5556       {
5557         assert(s1h>=0);
5558         if(opcode[i]==4) // BEQ
5559         {
5560           if(s2h>=0) emit_cmp(s1h,s2h);
5561           else emit_test(s1h,s1h);
5562           nottaken1=(int)out;
5563           emit_jne(1);
5564         }
5565         if(opcode[i]==5) // BNE
5566         {
5567           if(s2h>=0) emit_cmp(s1h,s2h);
5568           else emit_test(s1h,s1h);
5569           if(invert) taken=(int)out;
5570           else add_to_linker((int)out,ba[i],internal);
5571           emit_jne(0);
5572         }
5573         if(opcode[i]==6) // BLEZ
5574         {
5575           emit_test(s1h,s1h);
5576           if(invert) taken=(int)out;
5577           else add_to_linker((int)out,ba[i],internal);
5578           emit_js(0);
5579           nottaken1=(int)out;
5580           emit_jne(1);
5581         }
5582         if(opcode[i]==7) // BGTZ
5583         {
5584           emit_test(s1h,s1h);
5585           nottaken1=(int)out;
5586           emit_js(1);
5587           if(invert) taken=(int)out;
5588           else add_to_linker((int)out,ba[i],internal);
5589           emit_jne(0);
5590         }
5591       } // if(!only32)
5592           
5593       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5594       assert(s1l>=0);
5595       if(opcode[i]==4) // BEQ
5596       {
5597         if(s2l>=0) emit_cmp(s1l,s2l);
5598         else emit_test(s1l,s1l);
5599         if(invert){
5600           nottaken=(int)out;
5601           emit_jne(1);
5602         }else{
5603           add_to_linker((int)out,ba[i],internal);
5604           emit_jeq(0);
5605         }
5606       }
5607       if(opcode[i]==5) // BNE
5608       {
5609         if(s2l>=0) emit_cmp(s1l,s2l);
5610         else emit_test(s1l,s1l);
5611         if(invert){
5612           nottaken=(int)out;
5613           emit_jeq(1);
5614         }else{
5615           add_to_linker((int)out,ba[i],internal);
5616           emit_jne(0);
5617         }
5618       }
5619       if(opcode[i]==6) // BLEZ
5620       {
5621         emit_cmpimm(s1l,1);
5622         if(invert){
5623           nottaken=(int)out;
5624           emit_jge(1);
5625         }else{
5626           add_to_linker((int)out,ba[i],internal);
5627           emit_jl(0);
5628         }
5629       }
5630       if(opcode[i]==7) // BGTZ
5631       {
5632         emit_cmpimm(s1l,1);
5633         if(invert){
5634           nottaken=(int)out;
5635           emit_jl(1);
5636         }else{
5637           add_to_linker((int)out,ba[i],internal);
5638           emit_jge(0);
5639         }
5640       }
5641       if(invert) {
5642         if(taken) set_jump_target(taken,(int)out);
5643         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5644         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5645           if(adj) {
5646             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5647             add_to_linker((int)out,ba[i],internal);
5648           }else{
5649             emit_addnop(13);
5650             add_to_linker((int)out,ba[i],internal*2);
5651           }
5652           emit_jmp(0);
5653         }else
5654         #endif
5655         {
5656           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5657           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5658           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5659           if(internal)
5660             assem_debug("branch: internal\n");
5661           else
5662             assem_debug("branch: external\n");
5663           if(internal&&is_ds[(ba[i]-start)>>2]) {
5664             ds_assemble_entry(i);
5665           }
5666           else {
5667             add_to_linker((int)out,ba[i],internal);
5668             emit_jmp(0);
5669           }
5670         }
5671         set_jump_target(nottaken,(int)out);
5672       }
5673
5674       if(nottaken1) set_jump_target(nottaken1,(int)out);
5675       if(adj) {
5676         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5677       }
5678     } // (!unconditional)
5679   } // if(ooo)
5680   else
5681   {
5682     // In-order execution (branch first)
5683     //if(likely[i]) printf("IOL\n");
5684     //else
5685     //printf("IOE\n");
5686     int taken=0,nottaken=0,nottaken1=0;
5687     if(!unconditional&&!nop) {
5688       if(!only32)
5689       {
5690         assert(s1h>=0);
5691         if((opcode[i]&0x2f)==4) // BEQ
5692         {
5693           if(s2h>=0) emit_cmp(s1h,s2h);
5694           else emit_test(s1h,s1h);
5695           nottaken1=(int)out;
5696           emit_jne(2);
5697         }
5698         if((opcode[i]&0x2f)==5) // BNE
5699         {
5700           if(s2h>=0) emit_cmp(s1h,s2h);
5701           else emit_test(s1h,s1h);
5702           taken=(int)out;
5703           emit_jne(1);
5704         }
5705         if((opcode[i]&0x2f)==6) // BLEZ
5706         {
5707           emit_test(s1h,s1h);
5708           taken=(int)out;
5709           emit_js(1);
5710           nottaken1=(int)out;
5711           emit_jne(2);
5712         }
5713         if((opcode[i]&0x2f)==7) // BGTZ
5714         {
5715           emit_test(s1h,s1h);
5716           nottaken1=(int)out;
5717           emit_js(2);
5718           taken=(int)out;
5719           emit_jne(1);
5720         }
5721       } // if(!only32)
5722           
5723       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5724       assert(s1l>=0);
5725       if((opcode[i]&0x2f)==4) // BEQ
5726       {
5727         if(s2l>=0) emit_cmp(s1l,s2l);
5728         else emit_test(s1l,s1l);
5729         nottaken=(int)out;
5730         emit_jne(2);
5731       }
5732       if((opcode[i]&0x2f)==5) // BNE
5733       {
5734         if(s2l>=0) emit_cmp(s1l,s2l);
5735         else emit_test(s1l,s1l);
5736         nottaken=(int)out;
5737         emit_jeq(2);
5738       }
5739       if((opcode[i]&0x2f)==6) // BLEZ
5740       {
5741         emit_cmpimm(s1l,1);
5742         nottaken=(int)out;
5743         emit_jge(2);
5744       }
5745       if((opcode[i]&0x2f)==7) // BGTZ
5746       {
5747         emit_cmpimm(s1l,1);
5748         nottaken=(int)out;
5749         emit_jl(2);
5750       }
5751     } // if(!unconditional)
5752     int adj;
5753     uint64_t ds_unneeded=branch_regs[i].u;
5754     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5755     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5756     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5757     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5758     ds_unneeded|=1;
5759     ds_unneeded_upper|=1;
5760     // branch taken
5761     if(!nop) {
5762       if(taken) set_jump_target(taken,(int)out);
5763       assem_debug("1:\n");
5764       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5765                     ds_unneeded,ds_unneeded_upper);
5766       // load regs
5767       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5768       address_generation(i+1,&branch_regs[i],0);
5769       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5770       ds_assemble(i+1,&branch_regs[i]);
5771       cc=get_reg(branch_regs[i].regmap,CCREG);
5772       if(cc==-1) {
5773         emit_loadreg(CCREG,cc=HOST_CCREG);
5774         // CHECK: Is the following instruction (fall thru) allocated ok?
5775       }
5776       assert(cc==HOST_CCREG);
5777       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5778       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5779       assem_debug("cycle count (adj)\n");
5780       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5781       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5782       if(internal)
5783         assem_debug("branch: internal\n");
5784       else
5785         assem_debug("branch: external\n");
5786       if(internal&&is_ds[(ba[i]-start)>>2]) {
5787         ds_assemble_entry(i);
5788       }
5789       else {
5790         add_to_linker((int)out,ba[i],internal);
5791         emit_jmp(0);
5792       }
5793     }
5794     // branch not taken
5795     cop1_usable=prev_cop1_usable;
5796     if(!unconditional) {
5797       if(nottaken1) set_jump_target(nottaken1,(int)out);
5798       set_jump_target(nottaken,(int)out);
5799       assem_debug("2:\n");
5800       if(!likely[i]) {
5801         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5802                       ds_unneeded,ds_unneeded_upper);
5803         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5804         address_generation(i+1,&branch_regs[i],0);
5805         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5806         ds_assemble(i+1,&branch_regs[i]);
5807       }
5808       cc=get_reg(branch_regs[i].regmap,CCREG);
5809       if(cc==-1&&!likely[i]) {
5810         // Cycle count isn't in a register, temporarily load it then write it out
5811         emit_loadreg(CCREG,HOST_CCREG);
5812         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5813         int jaddr=(int)out;
5814         emit_jns(0);
5815         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5816         emit_storereg(CCREG,HOST_CCREG);
5817       }
5818       else{
5819         cc=get_reg(i_regmap,CCREG);
5820         assert(cc==HOST_CCREG);
5821         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5822         int jaddr=(int)out;
5823         emit_jns(0);
5824         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5825       }
5826     }
5827   }
5828 }
5829
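     // Assemble the REGIMM branches BLTZ/BGEZ and their AL/likely variants.
     // Only the sign of rs is tested; for the AL forms the return address is
     // written to $ra whether or not the branch is taken.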
5830 void sjump_assemble(int i,struct regstat *i_regs)
5831 {
5832   signed char *i_regmap=i_regs->regmap;
5833   int cc;
5834   int match;
5835   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5836   assem_debug("smatch=%d\n",match);
5837   int s1h,s1l;
5838   int prev_cop1_usable=cop1_usable;
5839   int unconditional=0,nevertaken=0;
5840   int only32=0;
5841   int invert=0;
5842   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5843   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5844   if(!match) invert=1;
5845   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5846   if(i>(ba[i]-start)>>2) invert=1;
5847   #endif
5848
5849   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5850   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5851
5852   if(ooo[i]) {
5853     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5854     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5855   }
5856   else {
5857     s1l=get_reg(i_regmap,rs1[i]);
5858     s1h=get_reg(i_regmap,rs1[i]|64);
5859   }
5860   if(rs1[i]==0)
5861   {
5862     if(opcode2[i]&1) unconditional=1;
5863     else nevertaken=1;
5864     // These are never taken (r0 is never less than zero)
5865     //assert(opcode2[i]!=0);
5866     //assert(opcode2[i]!=2);
5867     //assert(opcode2[i]!=0x10);
5868     //assert(opcode2[i]!=0x12);
5869   }
5870   else {
5871     only32=(regs[i].was32>>rs1[i])&1;
5872   }
5873
5874   if(ooo[i]) {
5875     // Out of order execution (delay slot first)
5876     //printf("OOOE\n");
5877     address_generation(i+1,i_regs,regs[i].regmap_entry);
5878     ds_assemble(i+1,i_regs);
5879     int adj;
5880     uint64_t bc_unneeded=branch_regs[i].u;
5881     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5882     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5883     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5884     bc_unneeded|=1;
5885     bc_unneeded_upper|=1;
5886     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5887                   bc_unneeded,bc_unneeded_upper);
5888     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5889     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5890     if(rt1[i]==31) {
5891       int rt,return_address;
5892       rt=get_reg(branch_regs[i].regmap,31);
5893       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5894       if(rt>=0) {
5895         // Save the PC even if the branch is not taken
5896         return_address=start+i*4+8;
5897         emit_movimm(return_address,rt); // PC into link register
5898         #ifdef IMM_PREFETCH
5899         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5900         #endif
5901       }
5902     }
5903     cc=get_reg(branch_regs[i].regmap,CCREG);
5904     assert(cc==HOST_CCREG);
5905     if(unconditional) 
5906       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5907     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5908     assem_debug("cycle count (adj)\n");
5909     if(unconditional) {
5910       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5911       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5912         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5913         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5914         if(internal)
5915           assem_debug("branch: internal\n");
5916         else
5917           assem_debug("branch: external\n");
5918         if(internal&&is_ds[(ba[i]-start)>>2]) {
5919           ds_assemble_entry(i);
5920         }
5921         else {
5922           add_to_linker((int)out,ba[i],internal);
5923           emit_jmp(0);
5924         }
5925         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5926         if(((u_int)out)&7) emit_addnop(0);
5927         #endif
5928       }
5929     }
5930     else if(nevertaken) {
5931       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5932       int jaddr=(int)out;
5933       emit_jns(0);
5934       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5935     }
5936     else {
5937       int nottaken=0;
5938       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5939       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5940       if(!only32)
5941       {
5942         assert(s1h>=0);
5943         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5944         {
5945           emit_test(s1h,s1h);
5946           if(invert){
5947             nottaken=(int)out;
5948             emit_jns(1);
5949           }else{
5950             add_to_linker((int)out,ba[i],internal);
5951             emit_js(0);
5952           }
5953         }
5954         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5955         {
5956           emit_test(s1h,s1h);
5957           if(invert){
5958             nottaken=(int)out;
5959             emit_js(1);
5960           }else{
5961             add_to_linker((int)out,ba[i],internal);
5962             emit_jns(0);
5963           }
5964         }
5965       } // if(!only32)
5966       else
5967       {
5968         assert(s1l>=0);
5969         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5970         {
5971           emit_test(s1l,s1l);
5972           if(invert){
5973             nottaken=(int)out;
5974             emit_jns(1);
5975           }else{
5976             add_to_linker((int)out,ba[i],internal);
5977             emit_js(0);
5978           }
5979         }
5980         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5981         {
5982           emit_test(s1l,s1l);
5983           if(invert){
5984             nottaken=(int)out;
5985             emit_js(1);
5986           }else{
5987             add_to_linker((int)out,ba[i],internal);
5988             emit_jns(0);
5989           }
5990         }
5991       } // if(!only32)
5992           
5993       if(invert) {
5994         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5995         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5996           if(adj) {
5997             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5998             add_to_linker((int)out,ba[i],internal);
5999           }else{
6000             emit_addnop(13);
6001             add_to_linker((int)out,ba[i],internal*2);
6002           }
6003           emit_jmp(0);
6004         }else
6005         #endif
6006         {
6007           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6008           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6009           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6010           if(internal)
6011             assem_debug("branch: internal\n");
6012           else
6013             assem_debug("branch: external\n");
6014           if(internal&&is_ds[(ba[i]-start)>>2]) {
6015             ds_assemble_entry(i);
6016           }
6017           else {
6018             add_to_linker((int)out,ba[i],internal);
6019             emit_jmp(0);
6020           }
6021         }
6022         set_jump_target(nottaken,(int)out);
6023       }
6024
6025       if(adj) {
6026         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6027       }
6028     } // (!unconditional)
6029   } // if(ooo)
6030   else
6031   {
6032     // In-order execution (branch first)
6033     //printf("IOE\n");
6034     int nottaken=0;
6035     if(rt1[i]==31) {
6036       int rt,return_address;
6037       rt=get_reg(branch_regs[i].regmap,31);
6038       if(rt>=0) {
6039         // Save the PC even if the branch is not taken
6040         return_address=start+i*4+8;
6041         emit_movimm(return_address,rt); // PC into link register
6042         #ifdef IMM_PREFETCH
6043         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
6044         #endif
6045       }
6046     }
6047     if(!unconditional) {
6048       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6049       if(!only32)
6050       {
6051         assert(s1h>=0);
6052         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6053         {
6054           emit_test(s1h,s1h);
6055           nottaken=(int)out;
6056           emit_jns(1);
6057         }
6058         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6059         {
6060           emit_test(s1h,s1h);
6061           nottaken=(int)out;
6062           emit_js(1);
6063         }
6064       } // if(!only32)
6065       else
6066       {
6067         assert(s1l>=0);
6068         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6069         {
6070           emit_test(s1l,s1l);
6071           nottaken=(int)out;
6072           emit_jns(1);
6073         }
6074         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6075         {
6076           emit_test(s1l,s1l);
6077           nottaken=(int)out;
6078           emit_js(1);
6079         }
6080       }
6081     } // if(!unconditional)
6082     int adj;
6083     uint64_t ds_unneeded=branch_regs[i].u;
6084     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6085     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6086     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6087     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6088     ds_unneeded|=1;
6089     ds_unneeded_upper|=1;
6090     // branch taken
6091     if(!nevertaken) {
6092       //assem_debug("1:\n");
6093       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6094                     ds_unneeded,ds_unneeded_upper);
6095       // load regs
6096       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6097       address_generation(i+1,&branch_regs[i],0);
6098       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6099       ds_assemble(i+1,&branch_regs[i]);
6100       cc=get_reg(branch_regs[i].regmap,CCREG);
6101       if(cc==-1) {
6102         emit_loadreg(CCREG,cc=HOST_CCREG);
6103         // CHECK: Is the following instruction (fall thru) allocated ok?
6104       }
6105       assert(cc==HOST_CCREG);
6106       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6107       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6108       assem_debug("cycle count (adj)\n");
6109       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6110       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6111       if(internal)
6112         assem_debug("branch: internal\n");
6113       else
6114         assem_debug("branch: external\n");
6115       if(internal&&is_ds[(ba[i]-start)>>2]) {
6116         ds_assemble_entry(i);
6117       }
6118       else {
6119         add_to_linker((int)out,ba[i],internal);
6120         emit_jmp(0);
6121       }
6122     }
6123     // branch not taken
6124     cop1_usable=prev_cop1_usable;
6125     if(!unconditional) {
6126       set_jump_target(nottaken,(int)out);
6127       assem_debug("1:\n");
6128       if(!likely[i]) {
6129         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6130                       ds_unneeded,ds_unneeded_upper);
6131         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6132         address_generation(i+1,&branch_regs[i],0);
6133         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6134         ds_assemble(i+1,&branch_regs[i]);
6135       }
6136       cc=get_reg(branch_regs[i].regmap,CCREG);
6137       if(cc==-1&&!likely[i]) {
6138         // Cycle count isn't in a register, temporarily load it then write it out
6139         emit_loadreg(CCREG,HOST_CCREG);
6140         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6141         int jaddr=(int)out;
6142         emit_jns(0);
6143         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6144         emit_storereg(CCREG,HOST_CCREG);
6145       }
6146       else{
6147         cc=get_reg(i_regmap,CCREG);
6148         assert(cc==HOST_CCREG);
6149         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6150         int jaddr=(int)out;
6151         emit_jns(0);
6152         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6153       }
6154     }
6155   }
6156 }
6157
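     // Assemble BC1F/BC1T.  The condition is bit 23 of the value cached in
     // FSREG; if COP1 has not yet been checked in this block, an FP_STUB is
     // emitted first to handle the case where the coprocessor is disabled in
     // the Status register.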
6158 void fjump_assemble(int i,struct regstat *i_regs)
6159 {
6160   signed char *i_regmap=i_regs->regmap;
6161   int cc;
6162   int match;
6163   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6164   assem_debug("fmatch=%d\n",match);
6165   int fs,cs;
6166   int eaddr;
6167   int invert=0;
6168   int internal=internal_branch(branch_regs[i].is32,ba[i]);
6169   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
6170   if(!match) invert=1;
6171   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6172   if(i>(ba[i]-start)>>2) invert=1;
6173   #endif
6174
6175   if(ooo[i]) {
6176     fs=get_reg(branch_regs[i].regmap,FSREG);
6177     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6178   }
6179   else {
6180     fs=get_reg(i_regmap,FSREG);
6181   }
6182
6183   // Check cop1 unusable
6184   if(!cop1_usable) {
6185     cs=get_reg(i_regmap,CSREG);
6186     assert(cs>=0);
6187     emit_testimm(cs,0x20000000);
6188     eaddr=(int)out;
6189     emit_jeq(0);
6190     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6191     cop1_usable=1;
6192   }
6193
6194   if(ooo[i]) {
6195     // Out of order execution (delay slot first)
6196     //printf("OOOE\n");
6197     ds_assemble(i+1,i_regs);
6198     int adj;
6199     uint64_t bc_unneeded=branch_regs[i].u;
6200     uint64_t bc_unneeded_upper=branch_regs[i].uu;
6201     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6202     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6203     bc_unneeded|=1;
6204     bc_unneeded_upper|=1;
6205     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6206                   bc_unneeded,bc_unneeded_upper);
6207     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6208     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6209     cc=get_reg(branch_regs[i].regmap,CCREG);
6210     assert(cc==HOST_CCREG);
6211     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6212     assem_debug("cycle count (adj)\n");
6213     if(1) {
6214       int nottaken=0;
6215       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6216       if(1) {
6217         assert(fs>=0);
6218         emit_testimm(fs,0x800000);
6219         if(source[i]&0x10000) // BC1T
6220         {
6221           if(invert){
6222             nottaken=(int)out;
6223             emit_jeq(1);
6224           }else{
6225             add_to_linker((int)out,ba[i],internal);
6226             emit_jne(0);
6227           }
6228         }
6229         else // BC1F
6230         {
6231           if(invert){
6232             nottaken=(int)out;
6233             emit_jne(1);
6234           }else{
6235             add_to_linker((int)out,ba[i],internal);
6236             emit_jeq(0);
6237           }
6238         }
6239       } // if(!only32)
6240           
6241       if(invert) {
6242         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6243         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6244         else if(match) emit_addnop(13);
6245         #endif
6246         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6247         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6248         if(internal)
6249           assem_debug("branch: internal\n");
6250         else
6251           assem_debug("branch: external\n");
6252         if(internal&&is_ds[(ba[i]-start)>>2]) {
6253           ds_assemble_entry(i);
6254         }
6255         else {
6256           add_to_linker((int)out,ba[i],internal);
6257           emit_jmp(0);
6258         }
6259         set_jump_target(nottaken,(int)out);
6260       }
6261
6262       if(adj) {
6263         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6264       }
6265     } // (!unconditional)
6266   } // if(ooo)
6267   else
6268   {
6269     // In-order execution (branch first)
6270     //printf("IOE\n");
6271     int nottaken=0;
6272     if(1) {
6273       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6274       if(1) {
6275         assert(fs>=0);
6276         emit_testimm(fs,0x800000);
6277         if(source[i]&0x10000) // BC1T
6278         {
6279           nottaken=(int)out;
6280           emit_jeq(1);
6281         }
6282         else // BC1F
6283         {
6284           nottaken=(int)out;
6285           emit_jne(1);
6286         }
6287       }
6288     } // if(!unconditional)
6289     int adj;
6290     uint64_t ds_unneeded=branch_regs[i].u;
6291     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6292     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6293     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6294     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6295     ds_unneeded|=1;
6296     ds_unneeded_upper|=1;
6297     // branch taken
6298     //assem_debug("1:\n");
6299     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6300                   ds_unneeded,ds_unneeded_upper);
6301     // load regs
6302     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6303     address_generation(i+1,&branch_regs[i],0);
6304     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6305     ds_assemble(i+1,&branch_regs[i]);
6306     cc=get_reg(branch_regs[i].regmap,CCREG);
6307     if(cc==-1) {
6308       emit_loadreg(CCREG,cc=HOST_CCREG);
6309       // CHECK: Is the following instruction (fall thru) allocated ok?
6310     }
6311     assert(cc==HOST_CCREG);
6312     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6313     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6314     assem_debug("cycle count (adj)\n");
6315     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6316     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6317     if(internal)
6318       assem_debug("branch: internal\n");
6319     else
6320       assem_debug("branch: external\n");
6321     if(internal&&is_ds[(ba[i]-start)>>2]) {
6322       ds_assemble_entry(i);
6323     }
6324     else {
6325       add_to_linker((int)out,ba[i],internal);
6326       emit_jmp(0);
6327     }
6328
6329     // branch not taken
6330     if(1) { // <- FIXME (don't need this)
6331       set_jump_target(nottaken,(int)out);
6332       assem_debug("1:\n");
6333       if(!likely[i]) {
6334         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6335                       ds_unneeded,ds_unneeded_upper);
6336         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6337         address_generation(i+1,&branch_regs[i],0);
6338         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6339         ds_assemble(i+1,&branch_regs[i]);
6340       }
6341       cc=get_reg(branch_regs[i].regmap,CCREG);
6342       if(cc==-1&&!likely[i]) {
6343         // Cycle count isn't in a register, temporarily load it then write it out
6344         emit_loadreg(CCREG,HOST_CCREG);
6345         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6346         int jaddr=(int)out;
6347         emit_jns(0);
6348         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6349         emit_storereg(CCREG,HOST_CCREG);
6350       }
6351       else{
6352         cc=get_reg(i_regmap,CCREG);
6353         assert(cc==HOST_CCREG);
6354         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6355         int jaddr=(int)out;
6356         emit_jns(0);
6357         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6358       }
6359     }
6360   }
6361 }
6362
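     // Assemble a branch whose delay slot falls on the next virtual page.
     // The slot cannot be compiled as part of this block, so the branch
     // target (taken or fall-through) is computed into HOST_BTREG and
     // control leaves through an external jump stub to the delay-slot entry.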
6363 static void pagespan_assemble(int i,struct regstat *i_regs)
6364 {
6365   int s1l=get_reg(i_regs->regmap,rs1[i]);
6366   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6367   int s2l=get_reg(i_regs->regmap,rs2[i]);
6368   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6369   void *nt_branch=NULL;
6370   int taken=0;
6371   int nottaken=0;
6372   int unconditional=0;
6373   if(rs1[i]==0)
6374   {
6375     s1l=s2l;s1h=s2h;
6376     s2l=s2h=-1;
6377   }
6378   else if(rs2[i]==0)
6379   {
6380     s2l=s2h=-1;
6381   }
6382   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6383     s1h=s2h=-1;
6384   }
6385   int hr=0;
6386   int addr,alt,ntaddr;
6387   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6388   else {
6389     while(hr<HOST_REGS)
6390     {
6391       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6392          (i_regs->regmap[hr]&63)!=rs1[i] &&
6393          (i_regs->regmap[hr]&63)!=rs2[i] )
6394       {
6395         addr=hr++;break;
6396       }
6397       hr++;
6398     }
6399   }
6400   while(hr<HOST_REGS)
6401   {
6402     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6403        (i_regs->regmap[hr]&63)!=rs1[i] &&
6404        (i_regs->regmap[hr]&63)!=rs2[i] )
6405     {
6406       alt=hr++;break;
6407     }
6408     hr++;
6409   }
6410   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6411   {
6412     while(hr<HOST_REGS)
6413     {
6414       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6415          (i_regs->regmap[hr]&63)!=rs1[i] &&
6416          (i_regs->regmap[hr]&63)!=rs2[i] )
6417       {
6418         ntaddr=hr;break;
6419       }
6420       hr++;
6421     }
6422   }
6423   assert(hr<HOST_REGS);
6424   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6425     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6426   }
6427   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6428   if(opcode[i]==2) // J
6429   {
6430     unconditional=1;
6431   }
6432   if(opcode[i]==3) // JAL
6433   {
6434     // TODO: mini_ht
6435     int rt=get_reg(i_regs->regmap,31);
6436     emit_movimm(start+i*4+8,rt);
6437     unconditional=1;
6438   }
6439   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6440   {
6441     emit_mov(s1l,addr);
6442     if(opcode2[i]==9) // JALR
6443     {
6444       int rt=get_reg(i_regs->regmap,rt1[i]);
6445       emit_movimm(start+i*4+8,rt);
6446     }
6447   }
6448   if((opcode[i]&0x3f)==4) // BEQ
6449   {
6450     if(rs1[i]==rs2[i])
6451     {
6452       unconditional=1;
6453     }
6454     else
6455     #ifdef HAVE_CMOV_IMM
6456     if(s1h<0) {
6457       if(s2l>=0) emit_cmp(s1l,s2l);
6458       else emit_test(s1l,s1l);
6459       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6460     }
6461     else
6462     #endif
6463     {
6464       assert(s1l>=0);
6465       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6466       if(s1h>=0) {
6467         if(s2h>=0) emit_cmp(s1h,s2h);
6468         else emit_test(s1h,s1h);
6469         emit_cmovne_reg(alt,addr);
6470       }
6471       if(s2l>=0) emit_cmp(s1l,s2l);
6472       else emit_test(s1l,s1l);
6473       emit_cmovne_reg(alt,addr);
6474     }
6475   }
6476   if((opcode[i]&0x3f)==5) // BNE
6477   {
6478     #ifdef HAVE_CMOV_IMM
6479     if(s1h<0) {
6480       if(s2l>=0) emit_cmp(s1l,s2l);
6481       else emit_test(s1l,s1l);
6482       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6483     }
6484     else
6485     #endif
6486     {
6487       assert(s1l>=0);
6488       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6489       if(s1h>=0) {
6490         if(s2h>=0) emit_cmp(s1h,s2h);
6491         else emit_test(s1h,s1h);
6492         emit_cmovne_reg(alt,addr);
6493       }
6494       if(s2l>=0) emit_cmp(s1l,s2l);
6495       else emit_test(s1l,s1l);
6496       emit_cmovne_reg(alt,addr);
6497     }
6498   }
6499   if((opcode[i]&0x3f)==0x14) // BEQL
6500   {
6501     if(s1h>=0) {
6502       if(s2h>=0) emit_cmp(s1h,s2h);
6503       else emit_test(s1h,s1h);
6504       nottaken=(int)out;
6505       emit_jne(0);
6506     }
6507     if(s2l>=0) emit_cmp(s1l,s2l);
6508     else emit_test(s1l,s1l);
6509     if(nottaken) set_jump_target(nottaken,(int)out);
6510     nottaken=(int)out;
6511     emit_jne(0);
6512   }
6513   if((opcode[i]&0x3f)==0x15) // BNEL
6514   {
6515     if(s1h>=0) {
6516       if(s2h>=0) emit_cmp(s1h,s2h);
6517       else emit_test(s1h,s1h);
6518       taken=(int)out;
6519       emit_jne(0);
6520     }
6521     if(s2l>=0) emit_cmp(s1l,s2l);
6522     else emit_test(s1l,s1l);
6523     nottaken=(int)out;
6524     emit_jeq(0);
6525     if(taken) set_jump_target(taken,(int)out);
6526   }
6527   if((opcode[i]&0x3f)==6) // BLEZ
6528   {
6529     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6530     emit_cmpimm(s1l,1);
6531     if(s1h>=0) emit_mov(addr,ntaddr);
6532     emit_cmovl_reg(alt,addr);
6533     if(s1h>=0) {
6534       emit_test(s1h,s1h);
6535       emit_cmovne_reg(ntaddr,addr);
6536       emit_cmovs_reg(alt,addr);
6537     }
6538   }
6539   if((opcode[i]&0x3f)==7) // BGTZ
6540   {
6541     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6542     emit_cmpimm(s1l,1);
6543     if(s1h>=0) emit_mov(addr,alt);
6544     emit_cmovl_reg(ntaddr,addr);
6545     if(s1h>=0) {
6546       emit_test(s1h,s1h);
6547       emit_cmovne_reg(alt,addr);
6548       emit_cmovs_reg(ntaddr,addr);
6549     }
6550   }
6551   if((opcode[i]&0x3f)==0x16) // BLEZL
6552   {
6553     assert((opcode[i]&0x3f)!=0x16);
6554   }
6555   if((opcode[i]&0x3f)==0x17) // BGTZL
6556   {
6557     assert((opcode[i]&0x3f)!=0x17);
6558   }
6559   assert(opcode[i]!=1); // BLTZ/BGEZ
6560
6561   //FIXME: Check CSREG
6562   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6563     if((source[i]&0x30000)==0) // BC1F
6564     {
6565       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6566       emit_testimm(s1l,0x800000);
6567       emit_cmovne_reg(alt,addr);
6568     }
6569     if((source[i]&0x30000)==0x10000) // BC1T
6570     {
6571       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6572       emit_testimm(s1l,0x800000);
6573       emit_cmovne_reg(alt,addr);
6574     }
6575     if((source[i]&0x30000)==0x20000) // BC1FL
6576     {
6577       emit_testimm(s1l,0x800000);
6578       nottaken=(int)out;
6579       emit_jne(0);
6580     }
6581     if((source[i]&0x30000)==0x30000) // BC1TL
6582     {
6583       emit_testimm(s1l,0x800000);
6584       nottaken=(int)out;
6585       emit_jeq(0);
6586     }
6587   }
6588
6589   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6590   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6591   if(likely[i]||unconditional)
6592   {
6593     emit_movimm(ba[i],HOST_BTREG);
6594   }
6595   else if(addr!=HOST_BTREG)
6596   {
6597     emit_mov(addr,HOST_BTREG);
6598   }
6599   void *branch_addr=out;
6600   emit_jmp(0);
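  // Note: the target is the delay slot at the start of the next page; the odd
  // address (+5 rather than +4) appears to tag it as a page-span delay-slot
  // entry, matching the start+1 vaddr used by pagespan_ds() below.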
6601   int target_addr=start+i*4+5;
6602   void *stub=out;
6603   void *compiled_target_addr=check_addr(target_addr);
6604   emit_extjump_ds((int)branch_addr,target_addr);
6605   if(compiled_target_addr) {
6606     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6607     add_link(target_addr,stub);
6608   }
6609   else set_jump_target((int)branch_addr,(int)stub);
6610   if(likely[i]) {
6611     // Not-taken path
6612     set_jump_target((int)nottaken,(int)out);
6613     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6614     void *branch_addr=out;
6615     emit_jmp(0);
6616     int target_addr=start+i*4+8;
6617     void *stub=out;
6618     void *compiled_target_addr=check_addr(target_addr);
6619     emit_extjump_ds((int)branch_addr,target_addr);
6620     if(compiled_target_addr) {
6621       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6622       add_link(target_addr,stub);
6623     }
6624     else set_jump_target((int)branch_addr,(int)stub);
6625   }
6626 }
6627
6628 // Assemble the delay slot for the above
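// When a branch is the last instruction of a page, its delay slot lives on the
// following page and is compiled here as a tiny block of its own: it executes
// the single delay-slot instruction, then dispatches to whatever target the
// page-spanning branch saved in BTREG (or falls through to start+4).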
6629 static void pagespan_ds()
6630 {
6631   assem_debug("initial delay slot:\n");
6632   u_int vaddr=start+1;
6633   u_int page=get_page(vaddr);
6634   u_int vpage=get_vpage(vaddr);
6635   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6636   do_dirty_stub_ds();
6637   ll_add(jump_in+page,vaddr,(void *)out);
6638   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6639   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6640     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6641   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6642     emit_writeword(HOST_BTREG,(int)&branch_target);
6643   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6644   address_generation(0,&regs[0],regs[0].regmap_entry);
6645   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6646     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6647   cop1_usable=0;
6648   is_delayslot=0;
6649   switch(itype[0]) {
6650     case ALU:
6651       alu_assemble(0,&regs[0]);break;
6652     case IMM16:
6653       imm16_assemble(0,&regs[0]);break;
6654     case SHIFT:
6655       shift_assemble(0,&regs[0]);break;
6656     case SHIFTIMM:
6657       shiftimm_assemble(0,&regs[0]);break;
6658     case LOAD:
6659       load_assemble(0,&regs[0]);break;
6660     case LOADLR:
6661       loadlr_assemble(0,&regs[0]);break;
6662     case STORE:
6663       store_assemble(0,&regs[0]);break;
6664     case STORELR:
6665       storelr_assemble(0,&regs[0]);break;
6666     case COP0:
6667       cop0_assemble(0,&regs[0]);break;
6668     case COP1:
6669       cop1_assemble(0,&regs[0]);break;
6670     case C1LS:
6671       c1ls_assemble(0,&regs[0]);break;
6672     case COP2:
6673       cop2_assemble(0,&regs[0]);break;
6674     case C2LS:
6675       c2ls_assemble(0,&regs[0]);break;
6676     case C2OP:
6677       c2op_assemble(0,&regs[0]);break;
6678     case FCONV:
6679       fconv_assemble(0,&regs[0]);break;
6680     case FLOAT:
6681       float_assemble(0,&regs[0]);break;
6682     case FCOMP:
6683       fcomp_assemble(0,&regs[0]);break;
6684     case MULTDIV:
6685       multdiv_assemble(0,&regs[0]);break;
6686     case MOV:
6687       mov_assemble(0,&regs[0]);break;
6688     case SYSCALL:
6689     case HLECALL:
6690     case INTCALL:
6691     case SPAN:
6692     case UJUMP:
6693     case RJUMP:
6694     case CJUMP:
6695     case SJUMP:
6696     case FJUMP:
6697       printf("Jump in the delay slot.  This is probably a bug.\n");
6698   }
6699   int btaddr=get_reg(regs[0].regmap,BTREG);
6700   if(btaddr<0) {
6701     btaddr=get_reg(regs[0].regmap,-1);
6702     emit_readword((int)&branch_target,btaddr);
6703   }
6704   assert(btaddr!=HOST_CCREG);
6705   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6706 #ifdef HOST_IMM8
6707   emit_movimm(start+4,HOST_TEMPREG);
6708   emit_cmp(btaddr,HOST_TEMPREG);
6709 #else
6710   emit_cmpimm(btaddr,start+4);
6711 #endif
6712   int branch=(int)out;
6713   emit_jeq(0);
6714   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6715   emit_jmp(jump_vaddr_reg[btaddr]);
6716   set_jump_target(branch,(int)out);
6717   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6718   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6719 }
6720
6721 // Basic liveness analysis for MIPS registers
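// unneeded_reg[i] / unneeded_reg_upper[i] are bitmasks over MIPS registers:
// bit r set means the value (or its upper 32-bit half, for the _upper mask) in
// register r is dead at instruction i and never has to be written back.
// Working backwards, a write makes a register unneeded and a read makes it
// needed again, e.g.  u|=1LL<<rt1[i];  u&=~(1LL<<rs1[i]);
// Bit 0 stays set because $zero never needs preserving.  The r argument limits
// recursion depth when backward branches are followed.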
6722 void unneeded_registers(int istart,int iend,int r)
6723 {
6724   int i;
6725   uint64_t u,uu,gte_u,b,bu,gte_bu;
6726   uint64_t temp_u,temp_uu,temp_gte_u;
6727   uint64_t tdep;
6728   if(iend==slen-1) {
6729     u=1;uu=1;
6730   }else{
6731     u=unneeded_reg[iend+1];
6732     uu=unneeded_reg_upper[iend+1];
6733     u=1;uu=1;
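    // Note that the values loaded above are immediately overridden here, so the
    // end of the block is always treated conservatively (every register assumed
    // needed); presumably a deliberate simplification for this port.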
6734   }
6735   gte_u=temp_gte_u=0;
6736
6737   for (i=iend;i>=istart;i--)
6738   {
6739     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6740     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6741     {
6742       // If subroutine call, flag return address as a possible branch target
6743       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6744       
6745       if(ba[i]<start || ba[i]>=(start+slen*4))
6746       {
6747         // Branch out of this block, flush all regs
6748         u=1;
6749         uu=1;
6750         gte_u=0;
6751         /* Hexagon hack 
6752         if(itype[i]==UJUMP&&rt1[i]==31)
6753         {
6754           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6755         }
6756         if(itype[i]==RJUMP&&rs1[i]==31)
6757         {
6758           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6759         }
6760         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6761           if(itype[i]==UJUMP&&rt1[i]==31)
6762           {
6763             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6764             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6765           }
6766           if(itype[i]==RJUMP&&rs1[i]==31)
6767           {
6768             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6769             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6770           }
6771         }*/
6772         branch_unneeded_reg[i]=u;
6773         branch_unneeded_reg_upper[i]=uu;
6774         // Merge in delay slot
6775         tdep=(~uu>>rt1[i+1])&1;
6776         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6777         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6778         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6779         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6780         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6781         u|=1;uu|=1;
6782         gte_u|=gte_rt[i+1];
6783         gte_u&=~gte_rs[i+1];
6784         // If branch is "likely" (and conditional)
6785         // then we skip the delay slot on the fall-thru path
6786         if(likely[i]) {
6787           if(i<slen-1) {
6788             u&=unneeded_reg[i+2];
6789             uu&=unneeded_reg_upper[i+2];
6790             gte_u&=gte_unneeded[i+2];
6791           }
6792           else
6793           {
6794             u=1;
6795             uu=1;
6796             gte_u=0;
6797           }
6798         }
6799       }
6800       else
6801       {
6802         // Internal branch, flag target
6803         bt[(ba[i]-start)>>2]=1;
6804         if(ba[i]<=start+i*4) {
6805           // Backward branch
6806           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6807           {
6808             // Unconditional branch
6809             temp_u=1;temp_uu=1;
6810             temp_gte_u=0;
6811           } else {
6812             // Conditional branch (not taken case)
6813             temp_u=unneeded_reg[i+2];
6814             temp_uu=unneeded_reg_upper[i+2];
6815             temp_gte_u&=gte_unneeded[i+2];
6816           }
6817           // Merge in delay slot
6818           tdep=(~temp_uu>>rt1[i+1])&1;
6819           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6820           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6821           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6822           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6823           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6824           temp_u|=1;temp_uu|=1;
6825           temp_gte_u|=gte_rt[i+1];
6826           temp_gte_u&=~gte_rs[i+1];
6827           // If branch is "likely" (and conditional)
6828           // then we skip the delay slot on the fall-thru path
6829           if(likely[i]) {
6830             if(i<slen-1) {
6831               temp_u&=unneeded_reg[i+2];
6832               temp_uu&=unneeded_reg_upper[i+2];
6833               temp_gte_u&=gte_unneeded[i+2];
6834             }
6835             else
6836             {
6837               temp_u=1;
6838               temp_uu=1;
6839               temp_gte_u=0;
6840             }
6841           }
6842           tdep=(~temp_uu>>rt1[i])&1;
6843           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6844           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6845           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6846           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6847           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6848           temp_u|=1;temp_uu|=1;
6849           temp_gte_u|=gte_rt[i];
6850           temp_gte_u&=~gte_rs[i];
6851           unneeded_reg[i]=temp_u;
6852           unneeded_reg_upper[i]=temp_uu;
6853           gte_unneeded[i]=temp_gte_u;
6854           // Only go three levels deep.  This recursion can take an
6855           // excessive amount of time if there are a lot of nested loops.
6856           if(r<2) {
6857             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6858           }else{
6859             unneeded_reg[(ba[i]-start)>>2]=1;
6860             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6861             gte_unneeded[(ba[i]-start)>>2]=0;
6862           }
6863         } /*else*/ if(1) {
6864           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6865           {
6866             // Unconditional branch
6867             u=unneeded_reg[(ba[i]-start)>>2];
6868             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6869             gte_u=gte_unneeded[(ba[i]-start)>>2];
6870             branch_unneeded_reg[i]=u;
6871             branch_unneeded_reg_upper[i]=uu;
6872         //u=1;
6873         //uu=1;
6874         //branch_unneeded_reg[i]=u;
6875         //branch_unneeded_reg_upper[i]=uu;
6876             // Merge in delay slot
6877             tdep=(~uu>>rt1[i+1])&1;
6878             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6879             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6880             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6881             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6882             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6883             u|=1;uu|=1;
6884             gte_u|=gte_rt[i+1];
6885             gte_u&=~gte_rs[i+1];
6886           } else {
6887             // Conditional branch
6888             b=unneeded_reg[(ba[i]-start)>>2];
6889             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6890             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6891             branch_unneeded_reg[i]=b;
6892             branch_unneeded_reg_upper[i]=bu;
6893         //b=1;
6894         //bu=1;
6895         //branch_unneeded_reg[i]=b;
6896         //branch_unneeded_reg_upper[i]=bu;
6897             // Branch delay slot
6898             tdep=(~uu>>rt1[i+1])&1;
6899             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6900             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6901             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6902             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6903             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6904             b|=1;bu|=1;
6905             gte_bu|=gte_rt[i+1];
6906             gte_bu&=~gte_rs[i+1];
6907             // If branch is "likely" then we skip the
6908             // delay slot on the fall-thru path
6909             if(likely[i]) {
6910               u=b;
6911               uu=bu;
6912               gte_u=gte_bu;
6913               if(i<slen-1) {
6914                 u&=unneeded_reg[i+2];
6915                 uu&=unneeded_reg_upper[i+2];
6916                 gte_u&=gte_unneeded[i+2];
6917         //u=1;
6918         //uu=1;
6919               }
6920             } else {
6921               u&=b;
6922               uu&=bu;
6923               gte_u&=gte_bu;
6924         //u=1;
6925         //uu=1;
6926             }
6927             if(i<slen-1) {
6928               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6929               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6930         //branch_unneeded_reg[i]=1;
6931         //branch_unneeded_reg_upper[i]=1;
6932             } else {
6933               branch_unneeded_reg[i]=1;
6934               branch_unneeded_reg_upper[i]=1;
6935             }
6936           }
6937         }
6938       }
6939     }
6940     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6941     {
6942       // SYSCALL instruction (software interrupt)
6943       u=1;
6944       uu=1;
6945     }
6946     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6947     {
6948       // ERET instruction (return from interrupt)
6949       u=1;
6950       uu=1;
6951     }
6952     //u=uu=1; // DEBUG
6953     tdep=(~uu>>rt1[i])&1;
6954     // Written registers are unneeded
6955     u|=1LL<<rt1[i];
6956     u|=1LL<<rt2[i];
6957     uu|=1LL<<rt1[i];
6958     uu|=1LL<<rt2[i];
6959     gte_u|=gte_rt[i];
6960     // Accessed registers are needed
6961     u&=~(1LL<<rs1[i]);
6962     u&=~(1LL<<rs2[i]);
6963     uu&=~(1LL<<us1[i]);
6964     uu&=~(1LL<<us2[i]);
6965     gte_u&=~gte_rs[i];
6966     // Source-target dependencies
6967     uu&=~(tdep<<dep1[i]);
6968     uu&=~(tdep<<dep2[i]);
6969     // R0 is always unneeded
6970     u|=1;uu|=1;
6971     // Save it
6972     unneeded_reg[i]=u;
6973     unneeded_reg_upper[i]=uu;
6974     gte_unneeded[i]=gte_u;
6975     /*
6976     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6977     printf("U:");
6978     int r;
6979     for(r=1;r<=CCREG;r++) {
6980       if((unneeded_reg[i]>>r)&1) {
6981         if(r==HIREG) printf(" HI");
6982         else if(r==LOREG) printf(" LO");
6983         else printf(" r%d",r);
6984       }
6985     }
6986     printf(" UU:");
6987     for(r=1;r<=CCREG;r++) {
6988       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6989         if(r==HIREG) printf(" HI");
6990         else if(r==LOREG) printf(" LO");
6991         else printf(" r%d",r);
6992       }
6993     }
6994     printf("\n");*/
6995   }
6996 #ifdef FORCE32
6997   for (i=iend;i>=istart;i--)
6998   {
6999     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
7000   }
7001 #endif
7002 }
7003
7004 // Identify registers which are likely to contain 32-bit values
7005 // This is used to predict whether any branches will jump to a
7006 // location with 64-bit values in registers.
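// p32[i] is a provisional per-register bitmask: bit r set means register r is
// predicted to hold a (sign-extended) 32-bit value after instruction i; bit 0
// is always set for $zero.  As the name says, this is only a prediction, used
// to guess whether branch targets will see 64-bit values.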
7007 static void provisional_32bit()
7008 {
7009   int i,j;
7010   uint64_t is32=1;
7011   uint64_t lastbranch=1;
7012   
7013   for(i=0;i<slen;i++)
7014   {
7015     if(i>0) {
7016       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
7017         if(i>1) is32=lastbranch;
7018         else is32=1;
7019       }
7020     }
7021     if(i>1)
7022     {
7023       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
7024         if(likely[i-2]) {
7025           if(i>2) is32=lastbranch;
7026           else is32=1;
7027         }
7028       }
7029       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7030       {
7031         if(rs1[i-2]==0||rs2[i-2]==0)
7032         {
7033           if(rs1[i-2]) {
7034             is32|=1LL<<rs1[i-2];
7035           }
7036           if(rs2[i-2]) {
7037             is32|=1LL<<rs2[i-2];
7038           }
7039         }
7040       }
7041     }
7042     // If something jumps here with 64-bit values
7043     // then promote those registers to 64 bits
7044     if(bt[i])
7045     {
7046       uint64_t temp_is32=is32;
7047       for(j=i-1;j>=0;j--)
7048       {
7049         if(ba[j]==start+i*4) 
7050           //temp_is32&=branch_regs[j].is32;
7051           temp_is32&=p32[j];
7052       }
7053       for(j=i;j<slen;j++)
7054       {
7055         if(ba[j]==start+i*4) 
7056           temp_is32=1;
7057       }
7058       is32=temp_is32;
7059     }
7060     int type=itype[i];
7061     int op=opcode[i];
7062     int op2=opcode2[i];
7063     int rt=rt1[i];
7064     int s1=rs1[i];
7065     int s2=rs2[i];
7066     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7067       // Branches don't write registers, consider the delay slot instead.
7068       type=itype[i+1];
7069       op=opcode[i+1];
7070       op2=opcode2[i+1];
7071       rt=rt1[i+1];
7072       s1=rs1[i+1];
7073       s2=rs2[i+1];
7074       lastbranch=is32;
7075     }
7076     switch(type) {
7077       case LOAD:
7078         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
7079            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
7080           is32&=~(1LL<<rt);
7081         else
7082           is32|=1LL<<rt;
7083         break;
7084       case STORE:
7085       case STORELR:
7086         break;
7087       case LOADLR:
7088         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
7089         if(op==0x22) is32|=1LL<<rt; // LWL
7090         break;
7091       case IMM16:
7092         if (op==0x08||op==0x09|| // ADDI/ADDIU
7093             op==0x0a||op==0x0b|| // SLTI/SLTIU
7094             op==0x0c|| // ANDI
7095             op==0x0f)  // LUI
7096         {
7097           is32|=1LL<<rt;
7098         }
7099         if(op==0x18||op==0x19) { // DADDI/DADDIU
7100           is32&=~(1LL<<rt);
7101           //if(imm[i]==0)
7102           //  is32|=((is32>>s1)&1LL)<<rt;
7103         }
7104         if(op==0x0d||op==0x0e) { // ORI/XORI
7105           uint64_t sr=((is32>>s1)&1LL);
7106           is32&=~(1LL<<rt);
7107           is32|=sr<<rt;
7108         }
7109         break;
7110       case UJUMP:
7111         break;
7112       case RJUMP:
7113         break;
7114       case CJUMP:
7115         break;
7116       case SJUMP:
7117         break;
7118       case FJUMP:
7119         break;
7120       case ALU:
7121         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
7122           is32|=1LL<<rt;
7123         }
7124         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7125           is32|=1LL<<rt;
7126         }
7127         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7128           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
7129           is32&=~(1LL<<rt);
7130           is32|=sr<<rt;
7131         }
7132         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
7133           if(s1==0&&s2==0) {
7134             is32|=1LL<<rt;
7135           }
7136           else if(s2==0) {
7137             uint64_t sr=((is32>>s1)&1LL);
7138             is32&=~(1LL<<rt);
7139             is32|=sr<<rt;
7140           }
7141           else if(s1==0) {
7142             uint64_t sr=((is32>>s2)&1LL);
7143             is32&=~(1LL<<rt);
7144             is32|=sr<<rt;
7145           }
7146           else {
7147             is32&=~(1LL<<rt);
7148           }
7149         }
7150         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
7151           if(s1==0&&s2==0) {
7152             is32|=1LL<<rt;
7153           }
7154           else if(s2==0) {
7155             uint64_t sr=((is32>>s1)&1LL);
7156             is32&=~(1LL<<rt);
7157             is32|=sr<<rt;
7158           }
7159           else {
7160             is32&=~(1LL<<rt);
7161           }
7162         }
7163         break;
7164       case MULTDIV:
7165         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7166           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7167         }
7168         else {
7169           is32|=(1LL<<HIREG)|(1LL<<LOREG);
7170         }
7171         break;
7172       case MOV:
7173         {
7174           uint64_t sr=((is32>>s1)&1LL);
7175           is32&=~(1LL<<rt);
7176           is32|=sr<<rt;
7177         }
7178         break;
7179       case SHIFT:
7180         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7181         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7182         break;
7183       case SHIFTIMM:
7184         is32|=1LL<<rt;
7185         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7186         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7187         break;
7188       case COP0:
7189         if(op2==0) is32|=1LL<<rt; // MFC0
7190         break;
7191       case COP1:
7192       case COP2:
7193         if(op2==0) is32|=1LL<<rt; // MFC1
7194         if(op2==1) is32&=~(1LL<<rt); // DMFC1
7195         if(op2==2) is32|=1LL<<rt; // CFC1
7196         break;
7197       case C1LS:
7198       case C2LS:
7199         break;
7200       case FLOAT:
7201       case FCONV:
7202         break;
7203       case FCOMP:
7204         break;
7205       case C2OP:
7206       case SYSCALL:
7207       case HLECALL:
7208         break;
7209       default:
7210         break;
7211     }
7212     is32|=1;
7213     p32[i]=is32;
7214
7215     if(i>0)
7216     {
7217       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7218       {
7219         if(rt1[i-1]==31) // JAL/JALR
7220         {
7221           // Subroutine call will return here, don't alloc any registers
7222           is32=1;
7223         }
7224         else if(i+1<slen)
7225         {
7226           // Internal branch will jump here, match registers to caller
7227           is32=0x3FFFFFFFFLL;
7228         }
7229       }
7230     }
7231   }
7232 }
7233
7234 // Identify registers which may be assumed to contain 32-bit values
7235 // and where optimizations will rely on this.
7236 // This is used to determine whether backward branches can safely
7237 // jump to a location with 64-bit values in registers.
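// r32 (saved per instruction in pr32[], formerly requires_32bit[]) is a bitmask
// of registers whose value is actually required to be 32-bit at that point.
// It is built in a backward pass: reads of registers that were 32-bit set bits,
// writes clear them.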
7238 static void provisional_r32()
7239 {
7240   u_int r32=0;
7241   int i;
7242   
7243   for (i=slen-1;i>=0;i--)
7244   {
7245     int hr;
7246     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7247     {
7248       if(ba[i]<start || ba[i]>=(start+slen*4))
7249       {
7250         // Branch out of this block, don't need anything
7251         r32=0;
7252       }
7253       else
7254       {
7255         // Internal branch
7256         // Need whatever matches the target
7257         // (and doesn't get overwritten by the delay slot instruction)
7258         r32=0;
7259         int t=(ba[i]-start)>>2;
7260         if(ba[i]>start+i*4) {
7261           // Forward branch
7262           //if(!(requires_32bit[t]&~regs[i].was32))
7263           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7264           if(!(pr32[t]&~regs[i].was32))
7265             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7266         }else{
7267           // Backward branch
7268           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7269             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7270         }
7271       }
7272       // Conditional branch may need registers for following instructions
7273       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7274       {
7275         if(i<slen-2) {
7276           //r32|=requires_32bit[i+2];
7277           r32|=pr32[i+2];
7278           r32&=regs[i].was32;
7279           // Mark this address as a branch target since it may be called
7280           // upon return from interrupt
7281           //bt[i+2]=1;
7282         }
7283       }
7284       // Merge in delay slot
7285       if(!likely[i]) {
7286         // These are overwritten unless the branch is "likely"
7287         // and the delay slot is nullified if not taken
7288         r32&=~(1LL<<rt1[i+1]);
7289         r32&=~(1LL<<rt2[i+1]);
7290       }
7291       // Assume these are needed (delay slot)
7292       if(us1[i+1]>0)
7293       {
7294         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7295       }
7296       if(us2[i+1]>0)
7297       {
7298         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7299       }
7300       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7301       {
7302         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7303       }
7304       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7305       {
7306         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7307       }
7308     }
7309     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7310     {
7311       // SYSCALL instruction (software interrupt)
7312       r32=0;
7313     }
7314     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7315     {
7316       // ERET instruction (return from interrupt)
7317       r32=0;
7318     }
7319     // Check 32 bits
7320     r32&=~(1LL<<rt1[i]);
7321     r32&=~(1LL<<rt2[i]);
7322     if(us1[i]>0)
7323     {
7324       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7325     }
7326     if(us2[i]>0)
7327     {
7328       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7329     }
7330     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7331     {
7332       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7333     }
7334     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7335     {
7336       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7337     }
7338     //requires_32bit[i]=r32;
7339     pr32[i]=r32;
7340     
7341     // Dirty registers which are 32-bit, require 32-bit input
7342     // as they will be written as 32-bit values
7343     for(hr=0;hr<HOST_REGS;hr++)
7344     {
7345       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7346         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7347           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7348           pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7349           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7350         }
7351       }
7352     }
7353   }
7354 }
7355
7356 // Write back dirty registers as soon as we will no longer modify them,
7357 // so that we don't end up with lots of writes at the branches.
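// will_dirty[i] / wont_dirty[i] are bitmasks over HOST registers (1<<hr): a set
// bit in will_dirty means the guest register cached in host reg hr will be
// dirtied again before the block ends, so writing it back now would be wasted;
// wont_dirty marks registers that will not be dirtied again and are safe to
// write back early.  The analysis runs backwards; wr selects the final pass
// that actually updates the per-instruction dirty flags (recursive calls pass
// wr=0 to limit the work).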
7358 void clean_registers(int istart,int iend,int wr)
7359 {
7360   int i;
7361   int r;
7362   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7363   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7364   if(iend==slen-1) {
7365     will_dirty_i=will_dirty_next=0;
7366     wont_dirty_i=wont_dirty_next=0;
7367   }else{
7368     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7369     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7370   }
7371   for (i=iend;i>=istart;i--)
7372   {
7373     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7374     {
7375       if(ba[i]<start || ba[i]>=(start+slen*4))
7376       {
7377         // Branch out of this block, flush all regs
7378         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7379         {
7380           // Unconditional branch
7381           will_dirty_i=0;
7382           wont_dirty_i=0;
7383           // Merge in delay slot (will dirty)
7384           for(r=0;r<HOST_REGS;r++) {
7385             if(r!=EXCLUDE_REG) {
7386               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7387               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7388               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7389               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7390               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7391               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7392               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7393               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7394               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7395               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7396               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7397               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7398               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7399               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7400             }
7401           }
7402         }
7403         else
7404         {
7405           // Conditional branch
7406           will_dirty_i=0;
7407           wont_dirty_i=wont_dirty_next;
7408           // Merge in delay slot (will dirty)
7409           for(r=0;r<HOST_REGS;r++) {
7410             if(r!=EXCLUDE_REG) {
7411               if(!likely[i]) {
7412                 // Might not dirty if likely branch is not taken
7413                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7414                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7415                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7416                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7417                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7418                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7419                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7420                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7421                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7422                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7423                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7424                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7425                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7426                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7427               }
7428             }
7429           }
7430         }
7431         // Merge in delay slot (wont dirty)
7432         for(r=0;r<HOST_REGS;r++) {
7433           if(r!=EXCLUDE_REG) {
7434             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7435             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7436             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7437             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7438             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7439             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7440             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7441             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7442             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7443             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7444           }
7445         }
7446         if(wr) {
7447           #ifndef DESTRUCTIVE_WRITEBACK
7448           branch_regs[i].dirty&=wont_dirty_i;
7449           #endif
7450           branch_regs[i].dirty|=will_dirty_i;
7451         }
7452       }
7453       else
7454       {
7455         // Internal branch
7456         if(ba[i]<=start+i*4) {
7457           // Backward branch
7458           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7459           {
7460             // Unconditional branch
7461             temp_will_dirty=0;
7462             temp_wont_dirty=0;
7463             // Merge in delay slot (will dirty)
7464             for(r=0;r<HOST_REGS;r++) {
7465               if(r!=EXCLUDE_REG) {
7466                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7467                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7468                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7469                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7470                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7471                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7472                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7473                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7474                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7475                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7476                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7477                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7478                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7479                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7480               }
7481             }
7482           } else {
7483             // Conditional branch (not taken case)
7484             temp_will_dirty=will_dirty_next;
7485             temp_wont_dirty=wont_dirty_next;
7486             // Merge in delay slot (will dirty)
7487             for(r=0;r<HOST_REGS;r++) {
7488               if(r!=EXCLUDE_REG) {
7489                 if(!likely[i]) {
7490                   // Will not dirty if likely branch is not taken
7491                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7492                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7493                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7494                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7495                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7496                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7497                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7498                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7499                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7500                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7501                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7502                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7503                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7504                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7505                 }
7506               }
7507             }
7508           }
7509           // Merge in delay slot (wont dirty)
7510           for(r=0;r<HOST_REGS;r++) {
7511             if(r!=EXCLUDE_REG) {
7512               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7513               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7514               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7515               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7516               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7517               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7518               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7519               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7520               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7521               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7522             }
7523           }
7524           // Deal with changed mappings
7525           if(i<iend) {
7526             for(r=0;r<HOST_REGS;r++) {
7527               if(r!=EXCLUDE_REG) {
7528                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7529                   temp_will_dirty&=~(1<<r);
7530                   temp_wont_dirty&=~(1<<r);
7531                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7532                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7533                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7534                   } else {
7535                     temp_will_dirty|=1<<r;
7536                     temp_wont_dirty|=1<<r;
7537                   }
7538                 }
7539               }
7540             }
7541           }
7542           if(wr) {
7543             will_dirty[i]=temp_will_dirty;
7544             wont_dirty[i]=temp_wont_dirty;
7545             clean_registers((ba[i]-start)>>2,i-1,0);
7546           }else{
7547             // Limit recursion.  It can take an excessive amount
7548             // of time if there are a lot of nested loops.
7549             will_dirty[(ba[i]-start)>>2]=0;
7550             wont_dirty[(ba[i]-start)>>2]=-1;
7551           }
7552         }
7553         /*else*/ if(1)
7554         {
7555           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7556           {
7557             // Unconditional branch
7558             will_dirty_i=0;
7559             wont_dirty_i=0;
7560           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7561             for(r=0;r<HOST_REGS;r++) {
7562               if(r!=EXCLUDE_REG) {
7563                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7564                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7565                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7566                 }
7567                 if(branch_regs[i].regmap[r]>=0) {
7568                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7569                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7570                 }
7571               }
7572             }
7573           //}
7574             // Merge in delay slot
7575             for(r=0;r<HOST_REGS;r++) {
7576               if(r!=EXCLUDE_REG) {
7577                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7578                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7579                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7580                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7581                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7582                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7583                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7584                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7585                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7586                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7587                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7588                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7589                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7590                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7591               }
7592             }
7593           } else {
7594             // Conditional branch
7595             will_dirty_i=will_dirty_next;
7596             wont_dirty_i=wont_dirty_next;
7597           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7598             for(r=0;r<HOST_REGS;r++) {
7599               if(r!=EXCLUDE_REG) {
7600                 signed char target_reg=branch_regs[i].regmap[r];
7601                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7602                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7603                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7604                 }
7605                 else if(target_reg>=0) {
7606                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7607                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7608                 }
7609                 // Treat delay slot as part of branch too
7610                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7611                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7612                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7613                 }
7614                 else
7615                 {
7616                   will_dirty[i+1]&=~(1<<r);
7617                 }*/
7618               }
7619             }
7620           //}
7621             // Merge in delay slot
7622             for(r=0;r<HOST_REGS;r++) {
7623               if(r!=EXCLUDE_REG) {
7624                 if(!likely[i]) {
7625                   // Might not dirty if likely branch is not taken
7626                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7627                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7628                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7629                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7630                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7631                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7632                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7633                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7634                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7635                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7636                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7637                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7638                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7639                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7640                 }
7641               }
7642             }
7643           }
7644           // Merge in delay slot (won't dirty)
7645           for(r=0;r<HOST_REGS;r++) {
7646             if(r!=EXCLUDE_REG) {
7647               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7648               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7649               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7650               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7651               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7652               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7653               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7654               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7655               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7656               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7657             }
7658           }
7659           if(wr) {
7660             #ifndef DESTRUCTIVE_WRITEBACK
7661             branch_regs[i].dirty&=wont_dirty_i;
7662             #endif
7663             branch_regs[i].dirty|=will_dirty_i;
7664           }
7665         }
7666       }
7667     }
7668     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7669     {
7670       // SYSCALL instruction (software interrupt)
7671       will_dirty_i=0;
7672       wont_dirty_i=0;
7673     }
7674     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7675     {
7676       // ERET instruction (return from interrupt)
7677       will_dirty_i=0;
7678       wont_dirty_i=0;
7679     }
7680     will_dirty_next=will_dirty_i;
7681     wont_dirty_next=wont_dirty_i;
7682     for(r=0;r<HOST_REGS;r++) {
7683       if(r!=EXCLUDE_REG) {
7684         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7685         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7686         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7687         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7688         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7689         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7690         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7691         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7692         if(i>istart) {
7693           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7694           {
7695             // Don't store a register immediately after writing it,
7696             // may prevent dual-issue.
7697             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7698             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7699           }
7700         }
7701       }
7702     }
7703     // Save it
7704     will_dirty[i]=will_dirty_i;
7705     wont_dirty[i]=wont_dirty_i;
7706     // Mark registers that won't be dirtied as not dirty
7707     if(wr) {
7708       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7709       for(r=0;r<HOST_REGS;r++) {
7710         if((will_dirty_i>>r)&1) {
7711           printf(" r%d",r);
7712         }
7713       }
7714       printf("\n");*/
7715
7716       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7717         regs[i].dirty|=will_dirty_i;
7718         #ifndef DESTRUCTIVE_WRITEBACK
7719         regs[i].dirty&=wont_dirty_i;
7720         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7721         {
7722           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7723             for(r=0;r<HOST_REGS;r++) {
7724               if(r!=EXCLUDE_REG) {
7725                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7726                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7727                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);/*assert(!((wont_dirty_i>>r)&1));*/}
7728               }
7729             }
7730           }
7731         }
7732         else
7733         {
7734           if(i<iend) {
7735             for(r=0;r<HOST_REGS;r++) {
7736               if(r!=EXCLUDE_REG) {
7737                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7738                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7739                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);/*assert(!((wont_dirty_i>>r)&1));*/}
7740               }
7741             }
7742           }
7743         }
7744         #endif
7745       //}
7746     }
7747     // Deal with changed mappings
7748     temp_will_dirty=will_dirty_i;
7749     temp_wont_dirty=wont_dirty_i;
7750     for(r=0;r<HOST_REGS;r++) {
7751       if(r!=EXCLUDE_REG) {
7752         int nr;
7753         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7754           if(wr) {
7755             #ifndef DESTRUCTIVE_WRITEBACK
7756             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7757             #endif
7758             regs[i].wasdirty|=will_dirty_i&(1<<r);
7759           }
7760         }
7761         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7762           // Register moved to a different register
7763           will_dirty_i&=~(1<<r);
7764           wont_dirty_i&=~(1<<r);
7765           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7766           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7767           if(wr) {
7768             #ifndef DESTRUCTIVE_WRITEBACK
7769             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7770             #endif
7771             regs[i].wasdirty|=will_dirty_i&(1<<r);
7772           }
7773         }
7774         else {
7775           will_dirty_i&=~(1<<r);
7776           wont_dirty_i&=~(1<<r);
7777           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7778             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7779             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7780           } else {
7781             wont_dirty_i|=1<<r;
7782             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r); assert(!((will_dirty>>r)&1));*/
7783           }
7784         }
7785       }
7786     }
7787   }
7788 }
7789
7790 #ifdef DISASM
7791   /* disassembly */
7792 void disassemble_inst(int i)
7793 {
7794     if (bt[i]) printf("*"); else printf(" ");
7795     switch(itype[i]) {
7796       case UJUMP:
7797         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7798       case CJUMP:
7799         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7800       case SJUMP:
7801         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7802       case FJUMP:
7803         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7804       case RJUMP:
7805         if (opcode[i]==0x9&&rt1[i]!=31)
7806           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7807         else
7808           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7809         break;
7810       case SPAN:
7811         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7812       case IMM16:
7813         if(opcode[i]==0xf) //LUI
7814           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7815         else
7816           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7817         break;
7818       case LOAD:
7819       case LOADLR:
7820         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7821         break;
7822       case STORE:
7823       case STORELR:
7824         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7825         break;
7826       case ALU:
7827       case SHIFT:
7828         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7829         break;
7830       case MULTDIV:
7831         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7832         break;
7833       case SHIFTIMM:
7834         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7835         break;
7836       case MOV:
7837         if((opcode2[i]&0x1d)==0x10)
7838           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7839         else if((opcode2[i]&0x1d)==0x11)
7840           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7841         else
7842           printf (" %x: %s\n",start+i*4,insn[i]);
7843         break;
7844       case COP0:
7845         if(opcode2[i]==0)
7846           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7847         else if(opcode2[i]==4)
7848           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7849         else printf (" %x: %s\n",start+i*4,insn[i]);
7850         break;
7851       case COP1:
7852         if(opcode2[i]<3)
7853           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7854         else if(opcode2[i]>3)
7855           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7856         else printf (" %x: %s\n",start+i*4,insn[i]);
7857         break;
7858       case COP2:
7859         if(opcode2[i]<3)
7860           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7861         else if(opcode2[i]>3)
7862           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7863         else printf (" %x: %s\n",start+i*4,insn[i]);
7864         break;
7865       case C1LS:
7866         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7867         break;
7868       case C2LS:
7869         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7870         break;
7871       case INTCALL:
7872         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7873         break;
7874       default:
7875         //printf (" %s %8x\n",insn[i],source[i]);
7876         printf (" %x: %s\n",start+i*4,insn[i]);
7877     }
7878 }
7879 #else
7880 static void disassemble_inst(int i) {}
7881 #endif // DISASM
7882
7883 // clear the state completely, instead of just marking
7884 // things invalid like invalidate_all_pages() does
7885 void new_dynarec_clear_full()
7886 {
7887   int n;
7888   out=(u_char *)BASE_ADDR;
7889   memset(invalid_code,1,sizeof(invalid_code));
7890   memset(hash_table,0xff,sizeof(hash_table));
7891   memset(mini_ht,-1,sizeof(mini_ht));
7892   memset(restore_candidate,0,sizeof(restore_candidate));
7893   memset(shadow,0,sizeof(shadow));
7894   copy=shadow;
7895   expirep=16384; // Expiry pointer, +2 blocks
7896   pending_exception=0;
7897   literalcount=0;
7898   stop_after_jal=0;
7899   inv_code_start=inv_code_end=~0;
7900   gte_reads_flags=0;
7901   // TLB
7902 #ifndef DISABLE_TLB
7903   using_tlb=0;
7904 #endif
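  // memory_map[] has one entry per 4KB page of the guest address space; each
  // entry stores (host_base - guest_page_base)>>2, so that
  //   host = guest_vaddr + (memory_map[vaddr>>12]<<2)
  // while -1 marks an unmapped page (see the lookup in new_recompile_block).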
7905   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7906     memory_map[n]=-1;
7907   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7908     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7909   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7910     memory_map[n]=-1;
7911   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7912   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7913   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7914 }
7915
7916 void new_dynarec_init()
7917 {
7918   printf("Init new dynarec\n");
7919   out=(u_char *)BASE_ADDR;
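  // Reserve the translation cache: a fixed read/write/execute mapping at
  // BASE_ADDR that all generated code is emitted into; out is the current
  // emit pointer within it.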
7920   if (mmap (out, 1<<TARGET_SIZE_2,
7921             PROT_READ | PROT_WRITE | PROT_EXEC,
7922             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7923             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7924 #ifdef MUPEN64
7925   rdword=&readmem_dword;
7926   fake_pc.f.r.rs=&readmem_dword;
7927   fake_pc.f.r.rt=&readmem_dword;
7928   fake_pc.f.r.rd=&readmem_dword;
7929 #endif
7930   int n;
7931   new_dynarec_clear_full();
7932 #ifdef HOST_IMM8
7933   // Copy this into local area so we don't have to put it in every literal pool
7934   invc_ptr=invalid_code;
7935 #endif
7936 #ifdef MUPEN64
7937   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7938     writemem[n] = write_nomem_new;
7939     writememb[n] = write_nomemb_new;
7940     writememh[n] = write_nomemh_new;
7941 #ifndef FORCE32
7942     writememd[n] = write_nomemd_new;
7943 #endif
7944     readmem[n] = read_nomem_new;
7945     readmemb[n] = read_nomemb_new;
7946     readmemh[n] = read_nomemh_new;
7947 #ifndef FORCE32
7948     readmemd[n] = read_nomemd_new;
7949 #endif
7950   }
7951   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7952     writemem[n] = write_rdram_new;
7953     writememb[n] = write_rdramb_new;
7954     writememh[n] = write_rdramh_new;
7955 #ifndef FORCE32
7956     writememd[n] = write_rdramd_new;
7957 #endif
7958   }
7959   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7960     writemem[n] = write_nomem_new;
7961     writememb[n] = write_nomemb_new;
7962     writememh[n] = write_nomemh_new;
7963 #ifndef FORCE32
7964     writememd[n] = write_nomemd_new;
7965 #endif
7966     readmem[n] = read_nomem_new;
7967     readmemb[n] = read_nomemb_new;
7968     readmemh[n] = read_nomemh_new;
7969 #ifndef FORCE32
7970     readmemd[n] = read_nomemd_new;
7971 #endif
7972   }
7973 #endif
7974   tlb_hacks();
7975   arch_init();
7976 }
7977
7978 void new_dynarec_cleanup()
7979 {
7980   int n;
7981   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7982   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7983   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7984   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7985   #ifdef ROM_COPY
7986   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7987   #endif
7988 }
7989
7990 int new_recompile_block(int addr)
7991 {
7992 /*
7993   if(addr==0x800cd050) {
7994     int block;
7995     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7996     int n;
7997     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7998   }
7999 */
8000   //if(Count==365117028) tracedebug=1;
8001   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8002   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8003   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
8004   //if(debug) 
8005   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
8006   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
8007   /*if(Count>=312978186) {
8008     rlist();
8009   }*/
8010   //rlist();
8011   start = (u_int)addr&~3;
8012   //assert(((u_int)addr&1)==0);
8013   new_dynarec_did_compile=1;
8014 #ifdef PCSX
8015   if (Config.HLE && start == 0x80001000) // hlecall
8016   {
8017     // XXX: is this enough? Maybe check hleSoftCall?
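    // The block generated here is just a trampoline: it records the PC in
    // pcaddr and exits through new_dyna_leave so the HLE BIOS call can be
    // handled outside the recompiled code.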
8018     u_int beginning=(u_int)out;
8019     u_int page=get_page(start);
8020     invalid_code[start>>12]=0;
8021     emit_movimm(start,0);
8022     emit_writeword(0,(int)&pcaddr);
8023     emit_jmp((int)new_dyna_leave);
8024     literal_pool(0);
8025 #ifdef __arm__
8026     __clear_cache((void *)beginning,out);
8027 #endif
8028     ll_add(jump_in+page,start,(void *)beginning);
8029     return 0;
8030   }
8031   else if ((u_int)addr < 0x00200000 ||
8032     (0xa0000000 <= addr && addr < 0xa0200000)) {
8033     // used for BIOS calls mostly?
8034     source = (u_int *)((u_int)rdram+(start&0x1fffff));
8035     pagelimit = (addr&0xa0000000)|0x00200000;
8036   }
8037   else if (!Config.HLE && (
8038 /*    (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
8039     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
8040     // BIOS
8041     source = (u_int *)((u_int)psxR+(start&0x7ffff));
8042     pagelimit = (addr&0xfff00000)|0x80000;
8043   }
8044   else
8045 #endif
8046 #ifdef MUPEN64
8047   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
8048     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
8049     pagelimit = 0xa4001000;
8050   }
8051   else
8052 #endif
8053   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
8054     source = (u_int *)((u_int)rdram+start-0x80000000);
8055     pagelimit = 0x80000000+RAM_SIZE;
8056   }
8057 #ifndef DISABLE_TLB
8058   else if ((signed int)addr >= (signed int)0xC0000000) {
8059     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
8060     //if(tlb_LUT_r[start>>12])
8061       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
8062     if((signed int)memory_map[start>>12]>=0) {
8063       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
8064       pagelimit=(start+4096)&0xFFFFF000;
8065       int map=memory_map[start>>12];
8066       int i;
8067       for(i=0;i<5;i++) {
8068         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
8069         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
8070       }
8071       assem_debug("pagelimit=%x\n",pagelimit);
8072       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
8073     }
8074     else {
8075       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
8076       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
8077       return -1; // Caller will invoke exception handler
8078     }
8079     //printf("source= %x\n",(int)source);
8080   }
8081 #endif
8082   else {
8083     printf("Compile at bogus memory address: %x \n", (int)addr);
8084     exit(1);
8085   }
8086
8087   /* Pass 1: disassemble */
8088   /* Pass 2: register dependencies, branch targets */
8089   /* Pass 3: register allocation */
8090   /* Pass 4: branch dependencies */
8091   /* Pass 5: pre-alloc */
8092   /* Pass 6: optimize clean/dirty state */
8093   /* Pass 7: flag 32-bit registers */
8094   /* Pass 8: assembly */
8095   /* Pass 9: linker */
8096   /* Pass 10: garbage collection / free memory */
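       /* Each pass works on per-instruction arrays (insn[], itype[], opcode[]/opcode2[],
          rs1[]/rs2[], rt1[]/rt2[], imm[], ba[], ...) indexed by the instruction's
          position i within the block, so later passes reuse what pass 1 decoded. */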
8097
8098   int i,j;
8099   int done=0;
8100   unsigned int type,op,op2;
8101
8102   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
8103   
8104   /* Pass 1 disassembly */
8105
8106   for(i=0;!done;i++) {
8107     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
8108     minimum_free_regs[i]=0;
8109     opcode[i]=op=source[i]>>26;
8110     switch(op)
8111     {
8112       case 0x00: strcpy(insn[i],"special"); type=NI;
8113         op2=source[i]&0x3f;
8114         switch(op2)
8115         {
8116           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
8117           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
8118           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
8119           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
8120           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
8121           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
8122           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
8123           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
8124           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
8125           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
8126           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
8127           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
8128           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
8129           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
8130           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
8131           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
8132           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
8133           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
8134           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
8135           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
8136           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
8137           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
8138           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
8139           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
8140           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
8141           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
8142           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
8143           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
8144           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
8145           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
8146           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
8147           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
8148           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
8149           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
8150           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
8151 #ifndef FORCE32
8152           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
8153           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
8154           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
8155           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
8156           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
8157           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
8158           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
8159           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
8160           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
8161           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
8162           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
8163           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
8164           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
8165           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
8166           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
8167           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
8168           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
8169 #endif
8170         }
8171         break;
8172       case 0x01: strcpy(insn[i],"regimm"); type=NI;
8173         op2=(source[i]>>16)&0x1f;
8174         switch(op2)
8175         {
8176           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
8177           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8178           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8179           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8180           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8181           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8182           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8183           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8184           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8185           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8186           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8187           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8188           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8189           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8190         }
8191         break;
8192       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8193       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8194       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8195       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8196       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8197       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8198       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8199       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8200       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8201       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8202       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8203       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8204       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8205       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8206       case 0x10: strcpy(insn[i],"cop0"); type=NI;
8207         op2=(source[i]>>21)&0x1f;
8208         switch(op2)
8209         {
8210           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8211           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8212           case 0x10: strcpy(insn[i],"tlb"); type=NI;
8213           switch(source[i]&0x3f)
8214           {
8215             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8216             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8217             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8218             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
8219 #ifdef PCSX
8220             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8221 #else
8222             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
8223 #endif
8224           }
8225         }
8226         break;
8227       case 0x11: strcpy(insn[i],"cop1"); type=NI;
8228         op2=(source[i]>>21)&0x1f;
8229         switch(op2)
8230         {
8231           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8232           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8233           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8234           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8235           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8236           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8237           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8238           switch((source[i]>>16)&0x3)
8239           {
8240             case 0x00: strcpy(insn[i],"BC1F"); break;
8241             case 0x01: strcpy(insn[i],"BC1T"); break;
8242             case 0x02: strcpy(insn[i],"BC1FL"); break;
8243             case 0x03: strcpy(insn[i],"BC1TL"); break;
8244           }
8245           break;
8246           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8247           switch(source[i]&0x3f)
8248           {
8249             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8250             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8251             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8252             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8253             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8254             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8255             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8256             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8257             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8258             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8259             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8260             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8261             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8262             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8263             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8264             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8265             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8266             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8267             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8268             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8269             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8270             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8271             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8272             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8273             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8274             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8275             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8276             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8277             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8278             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8279             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8280             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8281             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8282             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8283             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8284           }
8285           break;
8286           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8287           switch(source[i]&0x3f)
8288           {
8289             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8290             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8291             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8292             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8293             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8294             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8295             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8296             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8297             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8298             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8299             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8300             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8301             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8302             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8303             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8304             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8305             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8306             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8307             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8308             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8309             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8310             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8311             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8312             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8313             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8314             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8315             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8316             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8317             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8318             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8319             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8320             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8321             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8322             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8323             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8324           }
8325           break;
8326           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8327           switch(source[i]&0x3f)
8328           {
8329             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8330             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8331           }
8332           break;
8333           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8334           switch(source[i]&0x3f)
8335           {
8336             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8337             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8338           }
8339           break;
8340         }
8341         break;
8342 #ifndef FORCE32
8343       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8344       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8345       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8346       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8347       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8348       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8349       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8350       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8351 #endif
8352       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8353       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8354       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8355       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8356       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8357       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8358       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8359 #ifndef FORCE32
8360       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8361 #endif
8362       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8363       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8364       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8365       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8366 #ifndef FORCE32
8367       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8368       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8369 #endif
8370       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8371       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8372       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8373       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8374 #ifndef FORCE32
8375       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8376       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8377       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8378 #endif
8379       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8380       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8381 #ifndef FORCE32
8382       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8383       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8384       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8385 #endif
8386 #ifdef PCSX
8387       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8388         op2=(source[i]>>21)&0x1f;
8389         //if (op2 & 0x10) {
8390         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
8391           if (gte_handlers[source[i]&0x3f]!=NULL) {
8392             if (gte_regnames[source[i]&0x3f]!=NULL)
8393               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
8394             else
8395               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8396             type=C2OP;
8397           }
8398         }
8399         else switch(op2)
8400         {
8401           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8402           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8403           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8404           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8405         }
8406         break;
8407       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8408       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8409       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8410 #endif
8411       default: strcpy(insn[i],"???"); type=NI;
8412         printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8413         break;
8414     }
8415     itype[i]=type;
8416     opcode2[i]=op2;
8417     /* Get registers/immediates */
8418     lt1[i]=0;
8419     us1[i]=0;
8420     us2[i]=0;
8421     dep1[i]=0;
8422     dep2[i]=0;
8423     gte_rs[i]=gte_rt[i]=0;
8424     switch(type) {
8425       case LOAD:
8426         rs1[i]=(source[i]>>21)&0x1f;
8427         rs2[i]=0;
8428         rt1[i]=(source[i]>>16)&0x1f;
8429         rt2[i]=0;
8430         imm[i]=(short)source[i];
8431         break;
8432       case STORE:
8433       case STORELR:
8434         rs1[i]=(source[i]>>21)&0x1f;
8435         rs2[i]=(source[i]>>16)&0x1f;
8436         rt1[i]=0;
8437         rt2[i]=0;
8438         imm[i]=(short)source[i];
8439         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8440         break;
8441       case LOADLR:
8442         // LWL/LWR only load part of the register,
8443         // therefore the target register must be treated as a source too
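             // e.g. LWL merges only the bytes covered by the unaligned access into
             // rt; the remaining bytes keep their old values, so rt's prior contents matter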
8444         rs1[i]=(source[i]>>21)&0x1f;
8445         rs2[i]=(source[i]>>16)&0x1f;
8446         rt1[i]=(source[i]>>16)&0x1f;
8447         rt2[i]=0;
8448         imm[i]=(short)source[i];
8449         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8450         if(op==0x26) dep1[i]=rt1[i]; // LWR
8451         break;
8452       case IMM16:
8453         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8454         else rs1[i]=(source[i]>>21)&0x1f;
8455         rs2[i]=0;
8456         rt1[i]=(source[i]>>16)&0x1f;
8457         rt2[i]=0;
8458         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8459           imm[i]=(unsigned short)source[i];
8460         }else{
8461           imm[i]=(short)source[i];
8462         }
8463         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8464         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8465         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8466         break;
8467       case UJUMP:
8468         rs1[i]=0;
8469         rs2[i]=0;
8470         rt1[i]=0;
8471         rt2[i]=0;
8472         // The JAL instruction writes to r31.
8473         if (op&1) {
8474           rt1[i]=31;
8475         }
8476         rs2[i]=CCREG;
8477         break;
8478       case RJUMP:
8479         rs1[i]=(source[i]>>21)&0x1f;
8480         rs2[i]=0;
8481         rt1[i]=0;
8482         rt2[i]=0;
8483         // The JALR instruction writes to rd.
8484         if (op2&1) {
8485           rt1[i]=(source[i]>>11)&0x1f;
8486         }
8487         rs2[i]=CCREG;
8488         break;
8489       case CJUMP:
8490         rs1[i]=(source[i]>>21)&0x1f;
8491         rs2[i]=(source[i]>>16)&0x1f;
8492         rt1[i]=0;
8493         rt2[i]=0;
8494         if(op&2) { // BGTZ/BLEZ
8495           rs2[i]=0;
8496         }
8497         us1[i]=rs1[i];
8498         us2[i]=rs2[i];
8499         likely[i]=op>>4;
8500         break;
8501       case SJUMP:
8502         rs1[i]=(source[i]>>21)&0x1f;
8503         rs2[i]=CCREG;
8504         rt1[i]=0;
8505         rt2[i]=0;
8506         us1[i]=rs1[i];
8507         if(op2&0x10) { // BxxAL
8508           rt1[i]=31;
8509           // NOTE: If the branch is not taken, r31 is still overwritten
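               // e.g. "bltzal $zero, ..." never branches yet still sets $ra = PC+8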
8510         }
8511         likely[i]=(op2&2)>>1;
8512         break;
8513       case FJUMP:
8514         rs1[i]=FSREG;
8515         rs2[i]=CSREG;
8516         rt1[i]=0;
8517         rt2[i]=0;
8518         likely[i]=((source[i])>>17)&1;
8519         break;
8520       case ALU:
8521         rs1[i]=(source[i]>>21)&0x1f; // source
8522         rs2[i]=(source[i]>>16)&0x1f; // second source operand (subtrahend for SUB)
8523         rt1[i]=(source[i]>>11)&0x1f; // destination
8524         rt2[i]=0;
8525         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8526           us1[i]=rs1[i];us2[i]=rs2[i];
8527         }
8528         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8529           dep1[i]=rs1[i];dep2[i]=rs2[i];
8530         }
8531         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8532           dep1[i]=rs1[i];dep2[i]=rs2[i];
8533         }
8534         break;
8535       case MULTDIV:
8536         rs1[i]=(source[i]>>21)&0x1f; // source
8537         rs2[i]=(source[i]>>16)&0x1f; // divisor (multiplier for MULT/MULTU)
8538         rt1[i]=HIREG;
8539         rt2[i]=LOREG;
8540         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8541           us1[i]=rs1[i];us2[i]=rs2[i];
8542         }
8543         break;
8544       case MOV:
8545         rs1[i]=0;
8546         rs2[i]=0;
8547         rt1[i]=0;
8548         rt2[i]=0;
8549         if(op2==0x10) rs1[i]=HIREG; // MFHI
8550         if(op2==0x11) rt1[i]=HIREG; // MTHI
8551         if(op2==0x12) rs1[i]=LOREG; // MFLO
8552         if(op2==0x13) rt1[i]=LOREG; // MTLO
8553         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8554         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8555         dep1[i]=rs1[i];
8556         break;
8557       case SHIFT:
8558         rs1[i]=(source[i]>>16)&0x1f; // value to shift
8559         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8560         rt1[i]=(source[i]>>11)&0x1f; // destination
8561         rt2[i]=0;
8562         // DSLLV/DSRLV/DSRAV are 64-bit
8563         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8564         break;
8565       case SHIFTIMM:
8566         rs1[i]=(source[i]>>16)&0x1f;
8567         rs2[i]=0;
8568         rt1[i]=(source[i]>>11)&0x1f;
8569         rt2[i]=0;
8570         imm[i]=(source[i]>>6)&0x1f;
8571         // DSxx32 instructions
8572         if(op2>=0x3c) imm[i]|=0x20;
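             // DSLL32/DSRL32/DSRA32 shift by (sa+32), so fold the extra 32 into imm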
8573         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8574         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8575         break;
8576       case COP0:
8577         rs1[i]=0;
8578         rs2[i]=0;
8579         rt1[i]=0;
8580         rt2[i]=0;
8581         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8582         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8583         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8584         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8585         break;
8586       case COP1:
8587         rs1[i]=0;
8588         rs2[i]=0;
8589         rt1[i]=0;
8590         rt2[i]=0;
8591         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8592         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8593         if(op2==5) us1[i]=rs1[i]; // DMTC1
8594         rs2[i]=CSREG;
8595         break;
8596       case COP2:
8597         rs1[i]=0;
8598         rs2[i]=0;
8599         rt1[i]=0;
8600         rt2[i]=0;
8601         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
8602         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
8603         rs2[i]=CSREG;
8604         int gr=(source[i]>>11)&0x1F;
8605         switch(op2)
8606         {
8607           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
8608           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
8609           case 0x02: gte_rs[i]=1ll<<(gr+32); // CFC2
8610             if(gr==31&&!gte_reads_flags) {
8611               assem_debug("gte flag read encountered @%08x\n",addr + i*4);
8612               gte_reads_flags=1;
8613             }
8614             break;
8615           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
8616         }
8617         break;
8618       case C1LS:
8619         rs1[i]=(source[i]>>21)&0x1F;
8620         rs2[i]=CSREG;
8621         rt1[i]=0;
8622         rt2[i]=0;
8623         imm[i]=(short)source[i];
8624         break;
8625       case C2LS:
8626         rs1[i]=(source[i]>>21)&0x1F;
8627         rs2[i]=0;
8628         rt1[i]=0;
8629         rt2[i]=0;
8630         imm[i]=(short)source[i];
8631         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
8632         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
8633         break;
8634       case C2OP:
8635         rs1[i]=0;
8636         rs2[i]=0;
8637         rt1[i]=0;
8638         rt2[i]=0;
8639         gte_rt[i]=1ll<<63; // every op changes flags
8640         // TODO: other regs?
8641         break;
8642       case FLOAT:
8643       case FCONV:
8644         rs1[i]=0;
8645         rs2[i]=CSREG;
8646         rt1[i]=0;
8647         rt2[i]=0;
8648         break;
8649       case FCOMP:
8650         rs1[i]=FSREG;
8651         rs2[i]=CSREG;
8652         rt1[i]=FSREG;
8653         rt2[i]=0;
8654         break;
8655       case SYSCALL:
8656       case HLECALL:
8657       case INTCALL:
8658         rs1[i]=CCREG;
8659         rs2[i]=0;
8660         rt1[i]=0;
8661         rt2[i]=0;
8662         break;
8663       default:
8664         rs1[i]=0;
8665         rs2[i]=0;
8666         rt1[i]=0;
8667         rt2[i]=0;
8668     }
8669     /* Calculate branch target addresses */
8670     if(type==UJUMP)
8671       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
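           // J/JAL target = (delay slot address & 0xF0000000) | (26-bit index << 2);
           // (source[i]<<6)>>4 isolates the index field and scales it by 4 in one step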
8672     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8673       ba[i]=start+i*4+8; // Ignore never taken branch
8674     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8675       ba[i]=start+i*4+8; // Ignore never taken branch
8676     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8677       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
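           // conditional branch target = delay slot address + sign_extend(imm16)*4;
           // the <<16 followed by an arithmetic >>14 sign-extends and multiplies by 4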
8678     else ba[i]=-1;
8679 #ifdef PCSX
8680     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
8681       int do_in_intrp=0;
8682       // branch in delay slot?
8683       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8684         // don't compile the first branch; let the interpreter handle it if it's hit
8685         printf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
8686         do_in_intrp=1;
8687       }
8688       // basic load delay detection
8689       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
8690         int t=(ba[i-1]-start)/4;
8691         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
8692           // jump target wants DS result - potential load delay effect
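               // (on the R3000 a load's result is not visible to the very next
               //  instruction; the target may depend on the old register value,
               //  which the recompiled code would not reproduce)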
8693           printf("load delay @%08x (%08x)\n", addr + i*4, addr);
8694           do_in_intrp=1;
8695           bt[t+1]=1; // expected return from interpreter
8696         }
8697         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
8698               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
8699           // v0 overwrite like this is a sign of trouble, bail out
8700           printf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
8701           do_in_intrp=1;
8702         }
8703       }
8704       if(do_in_intrp) {
8705         rs1[i-1]=CCREG;
8706         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
8707         ba[i-1]=-1;
8708         itype[i-1]=INTCALL;
8709         done=2;
8710         i--; // don't compile the DS
8711       }
8712     }
8713 #endif
8714     /* Is this the end of the block? */
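         // (source[i-1]>>16)==0x1000 matches "beq $zero,$zero,..." - an
         // unconditional branch that ends a block just like J/JR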
8715     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8716       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8717         done=2;
8718       }
8719       else {
8720         if(stop_after_jal) done=1;
8721         // Stop on BREAK
8722         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8723       }
8724       // Don't recompile stuff that's already compiled
8725       if(check_addr(start+i*4+4)) done=1;
8726       // Don't get too close to the limit
8727       if(i>MAXBLOCK/2) done=1;
8728     }
8729     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8730     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8731     if(done==2) {
8732       // Does the block continue due to a branch?
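           // a prior branch targeting the delay slot or the next two words means
           // execution can re-enter here, so keep compiling (done=j=0 also ends the scan)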
8733       for(j=i-1;j>=0;j--)
8734       {
8735         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
8736         if(ba[j]==start+i*4+4) done=j=0;
8737         if(ba[j]==start+i*4+8) done=j=0;
8738       }
8739     }
8740     //assert(i<MAXBLOCK-1);
8741     if(start+i*4==pagelimit-4) done=1;
8742     assert(start+i*4<pagelimit);
8743     if (i==MAXBLOCK-1) done=1;
8744     // Stop if we're compiling junk
8745     if(itype[i]==NI&&opcode[i]==0x11) {
8746       done=stop_after_jal=1;
8747       printf("Disabled speculative precompilation\n");
8748     }
8749   }
8750   slen=i;
8751   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8752     if(start+i*4==pagelimit) {
8753       itype[i-1]=SPAN;
8754     }
8755   }
8756   assert(slen>0);
8757
8758   /* Pass 2 - Register dependencies and branch targets */
8759
8760   unneeded_registers(0,slen-1,0);
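       // fills unneeded_reg[] / unneeded_reg_upper[], which the allocator below
       // consults so dead values aren't kept live in host registers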
8761   
8762   /* Pass 3 - Register allocation */
8763
8764   struct regstat current; // Current register allocations/status
8765   current.is32=1;
8766   current.dirty=0;
8767   current.u=unneeded_reg[0];
8768   current.uu=unneeded_reg_upper[0];
8769   clear_all_regs(current.regmap);
8770   alloc_reg(&current,0,CCREG);
8771   dirty_reg(&current,CCREG);
8772   current.isconst=0;
8773   current.wasconst=0;
8774   int ds=0;
8775   int cc=0;
8776   int hr=-1;
8777
8778 #ifndef FORCE32
8779   provisional_32bit();
8780 #endif
8781   if((u_int)addr&1) {
8782     // First instruction is delay slot
8783     cc=-1;
8784     bt[1]=1;
8785     ds=1;
8786     unneeded_reg[0]=1;
8787     unneeded_reg_upper[0]=1;
8788     current.regmap[HOST_BTREG]=BTREG;
8789   }
8790   
8791   for(i=0;i<slen;i++)
8792   {
8793     if(bt[i])
8794     {
8795       int hr;
8796       for(hr=0;hr<HOST_REGS;hr++)
8797       {
8798         // Is this really necessary?
8799         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8800       }
8801       current.isconst=0;
8802     }
8803     if(i>1)
8804     {
8805       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8806       {
8807         if(rs1[i-2]==0||rs2[i-2]==0)
8808         {
8809           if(rs1[i-2]) {
8810             current.is32|=1LL<<rs1[i-2];
8811             int hr=get_reg(current.regmap,rs1[i-2]|64);
8812             if(hr>=0) current.regmap[hr]=-1;
8813           }
8814           if(rs2[i-2]) {
8815             current.is32|=1LL<<rs2[i-2];
8816             int hr=get_reg(current.regmap,rs2[i-2]|64);
8817             if(hr>=0) current.regmap[hr]=-1;
8818           }
8819         }
8820       }
8821     }
8822 #ifndef FORCE32
8823     // If something jumps here with 64-bit values
8824     // then promote those registers to 64 bits
8825     if(bt[i])
8826     {
8827       uint64_t temp_is32=current.is32;
8828       for(j=i-1;j>=0;j--)
8829       {
8830         if(ba[j]==start+i*4) 
8831           temp_is32&=branch_regs[j].is32;
8832       }
8833       for(j=i;j<slen;j++)
8834       {
8835         if(ba[j]==start+i*4) 
8836           //temp_is32=1;
8837           temp_is32&=p32[j];
8838       }
8839       if(temp_is32!=current.is32) {
8840         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8841         #ifndef DESTRUCTIVE_WRITEBACK
8842         if(ds)
8843         #endif
8844         for(hr=0;hr<HOST_REGS;hr++)
8845         {
8846           int r=current.regmap[hr];
8847           if(r>0&&r<64)
8848           {
8849             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8850               temp_is32|=1LL<<r;
8851               //printf("restore %d\n",r);
8852             }
8853           }
8854         }
8855         current.is32=temp_is32;
8856       }
8857     }
8858 #else
8859     current.is32=-1LL;
8860 #endif
8861
8862     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8863     regs[i].wasconst=current.isconst;
8864     regs[i].was32=current.is32;
8865     regs[i].wasdirty=current.dirty;
8866     #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
8867     // To change a dirty register from 32 to 64 bits, we must write
8868     // it out during the previous cycle (for branches, 2 cycles)
8869     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8870     {
8871       uint64_t temp_is32=current.is32;
8872       for(j=i-1;j>=0;j--)
8873       {
8874         if(ba[j]==start+i*4+4) 
8875           temp_is32&=branch_regs[j].is32;
8876       }
8877       for(j=i;j<slen;j++)
8878       {
8879         if(ba[j]==start+i*4+4) 
8880           //temp_is32=1;
8881           temp_is32&=p32[j];
8882       }
8883       if(temp_is32!=current.is32) {
8884         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8885         for(hr=0;hr<HOST_REGS;hr++)
8886         {
8887           int r=current.regmap[hr];
8888           if(r>0)
8889           {
8890             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8891               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8892               {
8893                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8894                 {
8895                   //printf("dump %d/r%d\n",hr,r);
8896                   current.regmap[hr]=-1;
8897                   if(get_reg(current.regmap,r|64)>=0) 
8898                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8899                 }
8900               }
8901             }
8902           }
8903         }
8904       }
8905     }
8906     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8907     {
8908       uint64_t temp_is32=current.is32;
8909       for(j=i-1;j>=0;j--)
8910       {
8911         if(ba[j]==start+i*4+8) 
8912           temp_is32&=branch_regs[j].is32;
8913       }
8914       for(j=i;j<slen;j++)
8915       {
8916         if(ba[j]==start+i*4+8) 
8917           //temp_is32=1;
8918           temp_is32&=p32[j];
8919       }
8920       if(temp_is32!=current.is32) {
8921         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8922         for(hr=0;hr<HOST_REGS;hr++)
8923         {
8924           int r=current.regmap[hr];
8925           if(r>0)
8926           {
8927             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8928               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8929               {
8930                 //printf("dump %d/r%d\n",hr,r);
8931                 current.regmap[hr]=-1;
8932                 if(get_reg(current.regmap,r|64)>=0) 
8933                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8934               }
8935             }
8936           }
8937         }
8938       }
8939     }
8940     #endif
8941     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8942       if(i+1<slen) {
8943         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8944         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8945         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8946         current.u|=1;
8947         current.uu|=1;
8948       } else {
8949         current.u=1;
8950         current.uu=1;
8951       }
8952     } else {
8953       if(i+1<slen) {
8954         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8955         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8956         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8957         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8958         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8959         current.u|=1;
8960         current.uu|=1;
8961       } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8962     }
8963     is_ds[i]=ds;
8964     if(ds) {
8965       ds=0; // Skip delay slot, already allocated as part of branch
8966       // ...but we need to alloc it in case something jumps here
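           // a separate allocation is computed into 'temp' below and recorded in
           // regs[i], so a branch that targets the delay slot gets a valid entry regmap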
8967       if(i+1<slen) {
8968         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8969         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8970       }else{
8971         current.u=branch_unneeded_reg[i-1];
8972         current.uu=branch_unneeded_reg_upper[i-1];
8973       }
8974       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8975       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8976       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8977       current.u|=1;
8978       current.uu|=1;
8979       struct regstat temp;
8980       memcpy(&temp,&current,sizeof(current));
8981       temp.wasdirty=temp.dirty;
8982       temp.was32=temp.is32;
8983       // TODO: Take into account unconditional branches, as below
8984       delayslot_alloc(&temp,i);
8985       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8986       regs[i].wasdirty=temp.wasdirty;
8987       regs[i].was32=temp.was32;
8988       regs[i].dirty=temp.dirty;
8989       regs[i].is32=temp.is32;
8990       regs[i].isconst=0;
8991       regs[i].wasconst=0;
8992       current.isconst=0;
8993       // Create entry (branch target) regmap
8994       for(hr=0;hr<HOST_REGS;hr++)
8995       {
8996         int r=temp.regmap[hr];
8997         if(r>=0) {
8998           if(r!=regmap_pre[i][hr]) {
8999             regs[i].regmap_entry[hr]=-1;
9000           }
9001           else
9002           {
9003             if(r<64){
9004               if((current.u>>r)&1) {
9005                 regs[i].regmap_entry[hr]=-1;
9006                 regs[i].regmap[hr]=-1;
9007                 //Don't clear regs in the delay slot as the branch might need them
9008                 //current.regmap[hr]=-1;
9009               }else
9010                 regs[i].regmap_entry[hr]=r;
9011             }
9012             else {
9013               if((current.uu>>(r&63))&1) {
9014                 regs[i].regmap_entry[hr]=-1;
9015                 regs[i].regmap[hr]=-1;
9016                 //Don't clear regs in the delay slot as the branch might need them
9017                 //current.regmap[hr]=-1;
9018               }else
9019                 regs[i].regmap_entry[hr]=r;
9020             }
9021           }
9022         } else {
9023           // First instruction expects CCREG to be allocated
9024           if(i==0&&hr==HOST_CCREG) 
9025             regs[i].regmap_entry[hr]=CCREG;
9026           else
9027             regs[i].regmap_entry[hr]=-1;
9028         }
9029       }
9030     }
9031     else { // Not delay slot
9032       switch(itype[i]) {
9033         case UJUMP:
9034           //current.isconst=0; // DEBUG
9035           //current.wasconst=0; // DEBUG
9036           //regs[i].wasconst=0; // DEBUG
9037           clear_const(&current,rt1[i]);
9038           alloc_cc(&current,i);
9039           dirty_reg(&current,CCREG);
9040           if (rt1[i]==31) {
9041             alloc_reg(&current,i,31);
9042             dirty_reg(&current,31);
9043             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
9044             //assert(rt1[i+1]!=rt1[i]);
9045             #ifdef REG_PREFETCH
9046             alloc_reg(&current,i,PTEMP);
9047             #endif
9048             //current.is32|=1LL<<rt1[i];
9049           }
9050           ooo[i]=1;
9051           delayslot_alloc(&current,i+1);
9052           //current.isconst=0; // DEBUG
9053           ds=1;
9054           //printf("i=%d, isconst=%x\n",i,current.isconst);
9055           break;
9056         case RJUMP:
9057           //current.isconst=0;
9058           //current.wasconst=0;
9059           //regs[i].wasconst=0;
9060           clear_const(&current,rs1[i]);
9061           clear_const(&current,rt1[i]);
9062           alloc_cc(&current,i);
9063           dirty_reg(&current,CCREG);
9064           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
9065             alloc_reg(&current,i,rs1[i]);
9066             if (rt1[i]!=0) {
9067               alloc_reg(&current,i,rt1[i]);
9068               dirty_reg(&current,rt1[i]);
9069               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
9070               assert(rt1[i+1]!=rt1[i]);
9071               #ifdef REG_PREFETCH
9072               alloc_reg(&current,i,PTEMP);
9073               #endif
9074             }
9075             #ifdef USE_MINI_HT
9076             if(rs1[i]==31) { // JALR
9077               alloc_reg(&current,i,RHASH);
9078               #ifndef HOST_IMM_ADDR32
9079               alloc_reg(&current,i,RHTBL);
9080               #endif
9081             }
9082             #endif
9083             delayslot_alloc(&current,i+1);
9084           } else {
9085             // The delay slot overwrites our source register, so
9086             // allocate a temporary register to hold the old value.
9087             current.isconst=0;
9088             current.wasconst=0;
9089             regs[i].wasconst=0;
9090             delayslot_alloc(&current,i+1);
9091             current.isconst=0;
9092             alloc_reg(&current,i,RTEMP);
9093           }
9094           //current.isconst=0; // DEBUG
9095           ooo[i]=1;
9096           ds=1;
9097           break;
9098         case CJUMP:
9099           //current.isconst=0;
9100           //current.wasconst=0;
9101           //regs[i].wasconst=0;
9102           clear_const(&current,rs1[i]);
9103           clear_const(&current,rs2[i]);
9104           if((opcode[i]&0x3E)==4) // BEQ/BNE
9105           {
9106             alloc_cc(&current,i);
9107             dirty_reg(&current,CCREG);
9108             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9109             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9110             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9111             {
9112               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9113               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9114             }
9115             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
9116                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
9117               // The delay slot overwrites one of our conditions.
9118               // Allocate the branch condition registers instead.
9119               current.isconst=0;
9120               current.wasconst=0;
9121               regs[i].wasconst=0;
9122               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9123               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9124               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9125               {
9126                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9127                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9128               }
9129             }
9130             else
9131             {
9132               ooo[i]=1;
9133               delayslot_alloc(&current,i+1);
9134             }
9135           }
9136           else
9137           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
9138           {
9139             alloc_cc(&current,i);
9140             dirty_reg(&current,CCREG);
9141             alloc_reg(&current,i,rs1[i]);
9142             if(!(current.is32>>rs1[i]&1))
9143             {
9144               alloc_reg64(&current,i,rs1[i]);
9145             }
9146             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
9147               // The delay slot overwrites one of our conditions.
9148               // Allocate the branch condition registers instead.
9149               current.isconst=0;
9150               current.wasconst=0;
9151               regs[i].wasconst=0;
9152               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9153               if(!((current.is32>>rs1[i])&1))
9154               {
9155                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9156               }
9157             }
9158             else
9159             {
9160               ooo[i]=1;
9161               delayslot_alloc(&current,i+1);
9162             }
9163           }
9164           else
9165           // Don't alloc the delay slot yet because we might not execute it
9166           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
9167           {
9168             current.isconst=0;
9169             current.wasconst=0;
9170             regs[i].wasconst=0;
9171             alloc_cc(&current,i);
9172             dirty_reg(&current,CCREG);
9173             alloc_reg(&current,i,rs1[i]);
9174             alloc_reg(&current,i,rs2[i]);
9175             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9176             {
9177               alloc_reg64(&current,i,rs1[i]);
9178               alloc_reg64(&current,i,rs2[i]);
9179             }
9180           }
9181           else
9182           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
9183           {
9184             current.isconst=0;
9185             current.wasconst=0;
9186             regs[i].wasconst=0;
9187             alloc_cc(&current,i);
9188             dirty_reg(&current,CCREG);
9189             alloc_reg(&current,i,rs1[i]);
9190             if(!(current.is32>>rs1[i]&1))
9191             {
9192               alloc_reg64(&current,i,rs1[i]);
9193             }
9194           }
9195           ds=1;
9196           //current.isconst=0;
9197           break;
9198         case SJUMP:
9199           //current.isconst=0;
9200           //current.wasconst=0;
9201           //regs[i].wasconst=0;
9202           clear_const(&current,rs1[i]);
9203           clear_const(&current,rt1[i]);
9204           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
9205           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
9206           {
9207             alloc_cc(&current,i);
9208             dirty_reg(&current,CCREG);
9209             alloc_reg(&current,i,rs1[i]);
9210             if(!(current.is32>>rs1[i]&1))
9211             {
9212               alloc_reg64(&current,i,rs1[i]);
9213             }
9214             if (rt1[i]==31) { // BLTZAL/BGEZAL
9215               alloc_reg(&current,i,31);
9216               dirty_reg(&current,31);
9217               //#ifdef REG_PREFETCH
9218               //alloc_reg(&current,i,PTEMP);
9219               //#endif
9220               //current.is32|=1LL<<rt1[i];
9221             }
9222             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
9223                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
9224               // Allocate the branch condition registers instead.
9225               current.isconst=0;
9226               current.wasconst=0;
9227               regs[i].wasconst=0;
9228               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9229               if(!((current.is32>>rs1[i])&1))
9230               {
9231                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9232               }
9233             }
9234             else
9235             {
9236               ooo[i]=1;
9237               delayslot_alloc(&current,i+1);
9238             }
9239           }
9240           else
9241           // Don't alloc the delay slot yet because we might not execute it
9242           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9243           {
9244             current.isconst=0;
9245             current.wasconst=0;
9246             regs[i].wasconst=0;
9247             alloc_cc(&current,i);
9248             dirty_reg(&current,CCREG);
9249             alloc_reg(&current,i,rs1[i]);
9250             if(!(current.is32>>rs1[i]&1))
9251             {
9252               alloc_reg64(&current,i,rs1[i]);
9253             }
9254           }
9255           ds=1;
9256           //current.isconst=0;
9257           break;
9258         case FJUMP:
9259           current.isconst=0;
9260           current.wasconst=0;
9261           regs[i].wasconst=0;
9262           if(likely[i]==0) // BC1F/BC1T
9263           {
9264             // TODO: Theoretically we can run out of registers here on x86.
9265             // The delay slot can allocate up to six, and we need to check
9266             // CSREG before executing the delay slot.  Possibly we can drop
9267             // the cycle count and then reload it after checking that the
9268             // FPU is in a usable state, or don't do out-of-order execution.
9269             alloc_cc(&current,i);
9270             dirty_reg(&current,CCREG);
9271             alloc_reg(&current,i,FSREG);
9272             alloc_reg(&current,i,CSREG);
9273             if(itype[i+1]==FCOMP) {
9274               // The delay slot overwrites the branch condition.
9275               // Allocate the branch condition registers instead.
9276               alloc_cc(&current,i);
9277               dirty_reg(&current,CCREG);
9278               alloc_reg(&current,i,CSREG);
9279               alloc_reg(&current,i,FSREG);
9280             }
9281             else {
9282               ooo[i]=1;
9283               delayslot_alloc(&current,i+1);
9284               alloc_reg(&current,i+1,CSREG);
9285             }
9286           }
9287           else
9288           // Don't alloc the delay slot yet because we might not execute it
9289           if(likely[i]) // BC1FL/BC1TL
9290           {
9291             alloc_cc(&current,i);
9292             dirty_reg(&current,CCREG);
9293             alloc_reg(&current,i,CSREG);
9294             alloc_reg(&current,i,FSREG);
9295           }
9296           ds=1;
9297           current.isconst=0;
9298           break;
9299         case IMM16:
9300           imm16_alloc(&current,i);
9301           break;
9302         case LOAD:
9303         case LOADLR:
9304           load_alloc(&current,i);
9305           break;
9306         case STORE:
9307         case STORELR:
9308           store_alloc(&current,i);
9309           break;
9310         case ALU:
9311           alu_alloc(&current,i);
9312           break;
9313         case SHIFT:
9314           shift_alloc(&current,i);
9315           break;
9316         case MULTDIV:
9317           multdiv_alloc(&current,i);
9318           break;
9319         case SHIFTIMM:
9320           shiftimm_alloc(&current,i);
9321           break;
9322         case MOV:
9323           mov_alloc(&current,i);
9324           break;
9325         case COP0:
9326           cop0_alloc(&current,i);
9327           break;
9328         case COP1:
9329         case COP2:
9330           cop1_alloc(&current,i);
9331           break;
9332         case C1LS:
9333           c1ls_alloc(&current,i);
9334           break;
9335         case C2LS:
9336           c2ls_alloc(&current,i);
9337           break;
9338         case C2OP:
9339           c2op_alloc(&current,i);
9340           break;
9341         case FCONV:
9342           fconv_alloc(&current,i);
9343           break;
9344         case FLOAT:
9345           float_alloc(&current,i);
9346           break;
9347         case FCOMP:
9348           fcomp_alloc(&current,i);
9349           break;
9350         case SYSCALL:
9351         case HLECALL:
9352         case INTCALL:
9353           syscall_alloc(&current,i);
9354           break;
9355         case SPAN:
9356           pagespan_alloc(&current,i);
9357           break;
9358       }
9359       
9360       // Drop the upper half of registers that have become 32-bit
9361       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9362       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9363         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9364         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9365         current.uu|=1;
9366       } else {
9367         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9368         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9369         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9370         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9371         current.uu|=1;
9372       }
9373
9374       // Create entry (branch target) regmap
9375       for(hr=0;hr<HOST_REGS;hr++)
9376       {
9377         int r,or,er;
9378         r=current.regmap[hr];
9379         if(r>=0) {
9380           if(r!=regmap_pre[i][hr]) {
9381             // TODO: delay slot (?)
9382             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9383             if(or<0||(r&63)>=TEMPREG){
9384               regs[i].regmap_entry[hr]=-1;
9385             }
9386             else
9387             {
9388               // Just move it to a different register
9389               regs[i].regmap_entry[hr]=r;
9390               // If it was dirty before, it's still dirty
9391               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9392             }
9393           }
9394           else
9395           {
9396             // Unneeded
9397             if(r==0){
9398               regs[i].regmap_entry[hr]=0;
9399             }
9400             else
9401             if(r<64){
9402               if((current.u>>r)&1) {
9403                 regs[i].regmap_entry[hr]=-1;
9404                 //regs[i].regmap[hr]=-1;
9405                 current.regmap[hr]=-1;
9406               }else
9407                 regs[i].regmap_entry[hr]=r;
9408             }
9409             else {
9410               if((current.uu>>(r&63))&1) {
9411                 regs[i].regmap_entry[hr]=-1;
9412                 //regs[i].regmap[hr]=-1;
9413                 current.regmap[hr]=-1;
9414               }else
9415                 regs[i].regmap_entry[hr]=r;
9416             }
9417           }
9418         } else {
9419           // Branches expect CCREG to be allocated at the target
9420           if(regmap_pre[i][hr]==CCREG) 
9421             regs[i].regmap_entry[hr]=CCREG;
9422           else
9423             regs[i].regmap_entry[hr]=-1;
9424         }
9425       }
9426       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9427     }
9428     /* Branch post-alloc */
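         // For the branch at i-1 (instruction i is its delay slot), build
         // branch_regs[i-1]: the register state used when assembling the branch
         // itself and its taken path.  For "likely" branches the delay slot is
         // allocated here, since it only executes when the branch is taken.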
9429     if(i>0)
9430     {
9431       current.was32=current.is32;
9432       current.wasdirty=current.dirty;
9433       switch(itype[i-1]) {
9434         case UJUMP:
9435           memcpy(&branch_regs[i-1],&current,sizeof(current));
9436           branch_regs[i-1].isconst=0;
9437           branch_regs[i-1].wasconst=0;
9438           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9439           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9440           alloc_cc(&branch_regs[i-1],i-1);
9441           dirty_reg(&branch_regs[i-1],CCREG);
9442           if(rt1[i-1]==31) { // JAL
9443             alloc_reg(&branch_regs[i-1],i-1,31);
9444             dirty_reg(&branch_regs[i-1],31);
9445             branch_regs[i-1].is32|=1LL<<31;
9446           }
9447           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9448           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9449           break;
9450         case RJUMP:
9451           memcpy(&branch_regs[i-1],&current,sizeof(current));
9452           branch_regs[i-1].isconst=0;
9453           branch_regs[i-1].wasconst=0;
9454           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9455           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9456           alloc_cc(&branch_regs[i-1],i-1);
9457           dirty_reg(&branch_regs[i-1],CCREG);
9458           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9459           if(rt1[i-1]!=0) { // JALR
9460             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9461             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9462             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9463           }
9464           #ifdef USE_MINI_HT
9465           if(rs1[i-1]==31) { // jump through $ra (return) - uses the mini hash table
9466             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9467             #ifndef HOST_IMM_ADDR32
9468             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9469             #endif
9470           }
9471           #endif
9472           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9473           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9474           break;
9475         case CJUMP:
9476           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9477           {
9478             alloc_cc(&current,i-1);
9479             dirty_reg(&current,CCREG);
9480             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9481                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9482               // The delay slot overwrote one of our conditions
9483               // Delay slot goes after the test (in order)
9484               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9485               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9486               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9487               current.u|=1;
9488               current.uu|=1;
9489               delayslot_alloc(&current,i);
9490               current.isconst=0;
9491             }
9492             else
9493             {
9494               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9495               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9496               // Alloc the branch condition registers
9497               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9498               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9499               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9500               {
9501                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9502                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9503               }
9504             }
9505             memcpy(&branch_regs[i-1],&current,sizeof(current));
9506             branch_regs[i-1].isconst=0;
9507             branch_regs[i-1].wasconst=0;
9508             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9509             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9510           }
9511           else
9512           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9513           {
9514             alloc_cc(&current,i-1);
9515             dirty_reg(&current,CCREG);
9516             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9517               // The delay slot overwrote the branch condition
9518               // Delay slot goes after the test (in order)
9519               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9520               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9521               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9522               current.u|=1;
9523               current.uu|=1;
9524               delayslot_alloc(&current,i);
9525               current.isconst=0;
9526             }
9527             else
9528             {
9529               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9530               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9531               // Alloc the branch condition register
9532               alloc_reg(&current,i-1,rs1[i-1]);
9533               if(!(current.is32>>rs1[i-1]&1))
9534               {
9535                 alloc_reg64(&current,i-1,rs1[i-1]);
9536               }
9537             }
9538             memcpy(&branch_regs[i-1],&current,sizeof(current));
9539             branch_regs[i-1].isconst=0;
9540             branch_regs[i-1].wasconst=0;
9541             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9542             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9543           }
9544           else
9545           // Alloc the delay slot in case the branch is taken
9546           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9547           {
9548             memcpy(&branch_regs[i-1],&current,sizeof(current));
9549             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9550             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9551             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9552             alloc_cc(&branch_regs[i-1],i);
9553             dirty_reg(&branch_regs[i-1],CCREG);
9554             delayslot_alloc(&branch_regs[i-1],i);
9555             branch_regs[i-1].isconst=0;
9556             alloc_reg(&current,i,CCREG); // Not taken path
9557             dirty_reg(&current,CCREG);
9558             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9559           }
9560           else
9561           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9562           {
9563             memcpy(&branch_regs[i-1],&current,sizeof(current));
9564             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9565             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9566             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9567             alloc_cc(&branch_regs[i-1],i);
9568             dirty_reg(&branch_regs[i-1],CCREG);
9569             delayslot_alloc(&branch_regs[i-1],i);
9570             branch_regs[i-1].isconst=0;
9571             alloc_reg(&current,i,CCREG); // Not taken path
9572             dirty_reg(&current,CCREG);
9573             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9574           }
9575           break;
9576         case SJUMP:
9577           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9578           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9579           {
9580             alloc_cc(&current,i-1);
9581             dirty_reg(&current,CCREG);
9582             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9583               // The delay slot overwrote the branch condition
9584               // Delay slot goes after the test (in order)
9585               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9586               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9587               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9588               current.u|=1;
9589               current.uu|=1;
9590               delayslot_alloc(&current,i);
9591               current.isconst=0;
9592             }
9593             else
9594             {
9595               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9596               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9597               // Alloc the branch condition register
9598               alloc_reg(&current,i-1,rs1[i-1]);
9599               if(!(current.is32>>rs1[i-1]&1))
9600               {
9601                 alloc_reg64(&current,i-1,rs1[i-1]);
9602               }
9603             }
9604             memcpy(&branch_regs[i-1],&current,sizeof(current));
9605             branch_regs[i-1].isconst=0;
9606             branch_regs[i-1].wasconst=0;
9607             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9608             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9609           }
9610           else
9611           // Alloc the delay slot in case the branch is taken
9612           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9613           {
9614             memcpy(&branch_regs[i-1],&current,sizeof(current));
9615             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9616             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9617             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9618             alloc_cc(&branch_regs[i-1],i);
9619             dirty_reg(&branch_regs[i-1],CCREG);
9620             delayslot_alloc(&branch_regs[i-1],i);
9621             branch_regs[i-1].isconst=0;
9622             alloc_reg(&current,i,CCREG); // Not taken path
9623             dirty_reg(&current,CCREG);
9624             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9625           }
9626           // FIXME: BLTZAL/BGEZAL
9627           if(opcode2[i-1]&0x10) { // BxxZAL
9628             alloc_reg(&branch_regs[i-1],i-1,31);
9629             dirty_reg(&branch_regs[i-1],31);
9630             branch_regs[i-1].is32|=1LL<<31;
9631           }
9632           break;
9633         case FJUMP:
9634           if(likely[i-1]==0) // BC1F/BC1T
9635           {
9636             alloc_cc(&current,i-1);
9637             dirty_reg(&current,CCREG);
9638             if(itype[i]==FCOMP) {
9639               // The delay slot overwrote the branch condition
9640               // Delay slot goes after the test (in order)
9641               delayslot_alloc(&current,i);
9642               current.isconst=0;
9643             }
9644             else
9645             {
9646               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9647               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9648               // Alloc the branch condition register
9649               alloc_reg(&current,i-1,FSREG);
9650             }
9651             memcpy(&branch_regs[i-1],&current,sizeof(current));
9652             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9653           }
9654           else // BC1FL/BC1TL
9655           {
9656             // Alloc the delay slot in case the branch is taken
9657             memcpy(&branch_regs[i-1],&current,sizeof(current));
9658             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9659             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9660             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9661             alloc_cc(&branch_regs[i-1],i);
9662             dirty_reg(&branch_regs[i-1],CCREG);
9663             delayslot_alloc(&branch_regs[i-1],i);
9664             branch_regs[i-1].isconst=0;
9665             alloc_reg(&current,i,CCREG); // Not taken path
9666             dirty_reg(&current,CCREG);
9667             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9668           }
9669           break;
9670       }
9671
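           // ((source>>16)==0x1000 matches "beq $0,$0", i.e. an unconditional
           //  branch with no link)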
9672       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9673       {
9674         if(rt1[i-1]==31) // JAL/JALR
9675         {
9676           // Subroutine call will return here, don't alloc any registers
9677           current.is32=1;
9678           current.dirty=0;
9679           clear_all_regs(current.regmap);
9680           alloc_reg(&current,i,CCREG);
9681           dirty_reg(&current,CCREG);
9682         }
9683         else if(i+1<slen)
9684         {
9685           // Internal branch will jump here, match registers to caller
9686           current.is32=0x3FFFFFFFFLL;
9687           current.dirty=0;
9688           clear_all_regs(current.regmap);
9689           alloc_reg(&current,i,CCREG);
9690           dirty_reg(&current,CCREG);
9691           for(j=i-1;j>=0;j--)
9692           {
9693             if(ba[j]==start+i*4+4) {
9694               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9695               current.is32=branch_regs[j].is32;
9696               current.dirty=branch_regs[j].dirty;
9697               break;
9698             }
9699           }
9700           while(j>=0) {
9701             if(ba[j]==start+i*4+4) {
9702               for(hr=0;hr<HOST_REGS;hr++) {
9703                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9704                   current.regmap[hr]=-1;
9705                 }
9706                 current.is32&=branch_regs[j].is32;
9707                 current.dirty&=branch_regs[j].dirty;
9708               }
9709             }
9710             j--;
9711           }
9712         }
9713       }
9714     }
9715
9716     // Count cycles in between branches
9717     ccadj[i]=cc;
9718     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9719     {
9720       cc=0;
9721     }
9722 #ifdef PCSX
9723     else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
9724     {
9725       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9726     }
9727     else if(itype[i]==C2LS)
9728     {
9729       cc+=4;
9730     }
9731 #endif
9732     else
9733     {
9734       cc++;
9735     }
9736
9737     flush_dirty_uppers(&current);
9738     if(!is_ds[i]) {
9739       regs[i].is32=current.is32;
9740       regs[i].dirty=current.dirty;
9741       regs[i].isconst=current.isconst;
9742       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9743     }
9744     for(hr=0;hr<HOST_REGS;hr++) {
9745       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9746         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9747           regs[i].wasconst&=~(1<<hr);
9748         }
9749       }
9750     }
9751     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9752   }
9753   
9754   /* Pass 4 - Cull unused host registers */
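       // Walk the block backwards keeping 'nr', a bitmask of host registers
       // whose current contents are still needed.  Anything not in 'nr' can be
       // dropped from regmap/regmap_entry so later passes don't have to
       // preserve it.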
9755   
9756   uint64_t nr=0;
9757   
9758   for (i=slen-1;i>=0;i--)
9759   {
9760     int hr;
9761     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9762     {
9763       if(ba[i]<start || ba[i]>=(start+slen*4))
9764       {
9765         // Branch out of this block, don't need anything
9766         nr=0;
9767       }
9768       else
9769       {
9770         // Internal branch
9771         // Need whatever matches the target
9772         nr=0;
9773         int t=(ba[i]-start)>>2;
9774         for(hr=0;hr<HOST_REGS;hr++)
9775         {
9776           if(regs[i].regmap_entry[hr]>=0) {
9777             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9778           }
9779         }
9780       }
9781       // Conditional branch may need registers for following instructions
9782       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9783       {
9784         if(i<slen-2) {
9785           nr|=needed_reg[i+2];
9786           for(hr=0;hr<HOST_REGS;hr++)
9787           {
9788             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9789             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9790           }
9791         }
9792       }
9793       // Don't need stuff which is overwritten
9794       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9795       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9796       // Merge in delay slot
9797       for(hr=0;hr<HOST_REGS;hr++)
9798       {
9799         if(!likely[i]) {
9800           // These are overwritten unless the branch is "likely",
9801           // in which case the delay slot is nullified when not taken
9802           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9803           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9804         }
9805         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9806         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9807         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9808         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9809         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9810         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9811         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9812         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9813         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9814           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9815           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9816         }
9817         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9818           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9819           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9820         }
9821         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9822           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9823           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9824         }
9825       }
9826     }
9827     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
9828     {
9829       // SYSCALL instruction (software interrupt)
9830       nr=0;
9831     }
9832     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9833     {
9834       // ERET instruction (return from interrupt)
9835       nr=0;
9836     }
9837     else // Non-branch
9838     {
9839       if(i<slen-1) {
9840         for(hr=0;hr<HOST_REGS;hr++) {
9841           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9842           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9843           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9844           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9845         }
9846       }
9847     }
9848     for(hr=0;hr<HOST_REGS;hr++)
9849     {
9850       // Overwritten registers are not needed
9851       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9852       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9853       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9854       // Source registers are needed
9855       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9856       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9857       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9858       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9859       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9860       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9861       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9862       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9863       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9864         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9865         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9866       }
9867       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9868         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9869         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9870       }
9871       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9872         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9873         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9874       }
9875       // Don't store a register immediately after writing it,
9876       // as doing so may prevent dual-issue.
9877       // But do so if this is a branch target, otherwise we
9878       // might have to load the register before the branch.
9879       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9880         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9881            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9882           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9883           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9884         }
9885         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9886            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9887           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9888           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9889         }
9890       }
9891     }
9892     // Cycle count is needed at branches.  Assume it is needed at the target too.
9893     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9894       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9895       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9896     }
9897     // Save it
9898     needed_reg[i]=nr;
9899     
9900     // Deallocate unneeded registers
9901     for(hr=0;hr<HOST_REGS;hr++)
9902     {
9903       if(!((nr>>hr)&1)) {
9904         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9905         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9906            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9907            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9908         {
9909           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9910           {
9911             if(likely[i]) {
9912               regs[i].regmap[hr]=-1;
9913               regs[i].isconst&=~(1<<hr);
9914               if(i<slen-2) {
9915                 regmap_pre[i+2][hr]=-1;
9916                 regs[i+2].wasconst&=~(1<<hr);
9917               }
9918             }
9919           }
9920         }
9921         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9922         {
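               // For branches: only free host registers that neither the branch
               // nor its delay slot still needs (sources, destinations, address
               // temporaries, map/hash registers, cycle count).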
9923           int d1=0,d2=0,map=0,temp=0;
9924           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9925           {
9926             d1=dep1[i+1];
9927             d2=dep2[i+1];
9928           }
9929           if(using_tlb) {
9930             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9931                itype[i+1]==STORE || itype[i+1]==STORELR ||
9932                itype[i+1]==C1LS || itype[i+1]==C2LS)
9933             map=TLREG;
9934           } else
9935           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9936              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9937             map=INVCP;
9938           }
9939           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9940              itype[i+1]==C1LS || itype[i+1]==C2LS)
9941             temp=FTEMP;
9942           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9943              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9944              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9945              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9946              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9947              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9948              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9949              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9950              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9951              regs[i].regmap[hr]!=map )
9952           {
9953             regs[i].regmap[hr]=-1;
9954             regs[i].isconst&=~(1<<hr);
9955             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9956                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9957                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9958                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9959                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9960                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9961                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9962                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9963                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9964                branch_regs[i].regmap[hr]!=map)
9965             {
9966               branch_regs[i].regmap[hr]=-1;
9967               branch_regs[i].regmap_entry[hr]=-1;
9968               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9969               {
9970                 if(!likely[i]&&i<slen-2) {
9971                   regmap_pre[i+2][hr]=-1;
9972                   regs[i+2].wasconst&=~(1<<hr);
9973                 }
9974               }
9975             }
9976           }
9977         }
9978         else
9979         {
9980           // Non-branch
9981           if(i>0)
9982           {
9983             int d1=0,d2=0,map=-1,temp=-1;
9984             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9985             {
9986               d1=dep1[i];
9987               d2=dep2[i];
9988             }
9989             if(using_tlb) {
9990               if(itype[i]==LOAD || itype[i]==LOADLR ||
9991                  itype[i]==STORE || itype[i]==STORELR ||
9992                  itype[i]==C1LS || itype[i]==C2LS)
9993               map=TLREG;
9994             } else if(itype[i]==STORE || itype[i]==STORELR ||
9995                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9996               map=INVCP;
9997             }
9998             if(itype[i]==LOADLR || itype[i]==STORELR ||
9999                itype[i]==C1LS || itype[i]==C2LS)
10000               temp=FTEMP;
10001             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10002                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
10003                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10004                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
10005                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
10006                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
10007             {
10008               if(i<slen-1&&!is_ds[i]) {
10009                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
10010                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
10011                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
10012                 {
10013                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
10014                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
10015                 }
10016                 regmap_pre[i+1][hr]=-1;
10017                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
10018                 regs[i+1].wasconst&=~(1<<hr);
10019               }
10020               regs[i].regmap[hr]=-1;
10021               regs[i].isconst&=~(1<<hr);
10022             }
10023           }
10024         }
10025       }
10026     }
10027   }
10028   
10029   /* Pass 5 - Pre-allocate registers */
10030   
10031   // If a register is allocated during a loop, try to allocate it for the
10032   // entire loop, if possible.  This avoids loading/storing registers
10033   // inside of the loop.
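        // Method: at each backward branch, track the preferred assignments in
        // f_regmap, then scan from the branch target t up to the branch; if a
        // register can be kept in the same host register over that whole range,
        // patch regs[], branch_regs[] and regmap_pre[] so it stays allocated
        // across the loop body.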
10034   
10035   signed char f_regmap[HOST_REGS];
10036   clear_all_regs(f_regmap);
10037   for(i=0;i<slen-1;i++)
10038   {
10039     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10040     {
10041       if(ba[i]>=start && ba[i]<(start+i*4)) 
10042       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
10043       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
10044       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
10045       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
10046       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
10047       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
10048       {
10049         int t=(ba[i]-start)>>2;
10050         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
10051         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
10052         for(hr=0;hr<HOST_REGS;hr++)
10053         {
10054           if(regs[i].regmap[hr]>64) {
10055             if(!((regs[i].dirty>>hr)&1))
10056               f_regmap[hr]=regs[i].regmap[hr];
10057             else f_regmap[hr]=-1;
10058           }
10059           else if(regs[i].regmap[hr]>=0) {
10060             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10061               // dealloc old register
10062               int n;
10063               for(n=0;n<HOST_REGS;n++)
10064               {
10065                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10066               }
10067               // and alloc new one
10068               f_regmap[hr]=regs[i].regmap[hr];
10069             }
10070           }
10071           if(branch_regs[i].regmap[hr]>64) {
10072             if(!((branch_regs[i].dirty>>hr)&1))
10073               f_regmap[hr]=branch_regs[i].regmap[hr];
10074             else f_regmap[hr]=-1;
10075           }
10076           else if(branch_regs[i].regmap[hr]>=0) {
10077             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
10078               // dealloc old register
10079               int n;
10080               for(n=0;n<HOST_REGS;n++)
10081               {
10082                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
10083               }
10084               // and alloc new one
10085               f_regmap[hr]=branch_regs[i].regmap[hr];
10086             }
10087           }
10088           if(ooo[i]) {
10089             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) 
10090               f_regmap[hr]=branch_regs[i].regmap[hr];
10091           }else{
10092             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) 
10093               f_regmap[hr]=branch_regs[i].regmap[hr];
10094           }
10095           // Avoid dirty->clean transition
10096           #ifdef DESTRUCTIVE_WRITEBACK
10097           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
10098           #endif
10099           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
10100           // case above; however, it's always a good idea.  We can't hoist the
10101           // load if the register was already allocated, so there's no point
10102           // wasting time analyzing most of these cases.  It only "succeeds"
10103           // when the mapping was different and the load can be replaced with
10104           // a mov, which is of negligible benefit.  So such cases are
10105           // skipped below.
10106           if(f_regmap[hr]>0) {
10107             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
10108               int r=f_regmap[hr];
10109               for(j=t;j<=i;j++)
10110               {
10111                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10112                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
10113                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
10114                 if(r>63) {
10115                   // NB This can exclude the case where the upper-half
10116                   // register is lower numbered than the lower-half
10117                   // register.  Not sure if it's worth fixing...
10118                   if(get_reg(regs[j].regmap,r&63)<0) break;
10119                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
10120                   if(regs[j].is32&(1LL<<(r&63))) break;
10121                 }
10122                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
10123                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10124                   int k;
10125                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
10126                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
10127                     if(r>63) {
10128                       if(get_reg(regs[i].regmap,r&63)<0) break;
10129                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
10130                     }
10131                     k=i;
10132                     while(k>1&&regs[k-1].regmap[hr]==-1) {
10133                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10134                         //printf("no free regs for store %x\n",start+(k-1)*4);
10135                         break;
10136                       }
10137                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
10138                         //printf("no-match due to different register\n");
10139                         break;
10140                       }
10141                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
10142                         //printf("no-match due to branch\n");
10143                         break;
10144                       }
10145                       // call/ret fast path assumes no registers allocated
10146                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
10147                         break;
10148                       }
10149                       if(r>63) {
10150                         // NB This can exclude the case where the upper-half
10151                         // register is lower numbered than the lower-half
10152                         // register.  Not sure if it's worth fixing...
10153                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
10154                         if(regs[k-1].is32&(1LL<<(r&63))) break;
10155                       }
10156                       k--;
10157                     }
10158                     if(i<slen-1) {
10159                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
10160                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
10161                         //printf("bad match after branch\n");
10162                         break;
10163                       }
10164                     }
10165                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
10166                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
10167                       while(k<i) {
10168                         regs[k].regmap_entry[hr]=f_regmap[hr];
10169                         regs[k].regmap[hr]=f_regmap[hr];
10170                         regmap_pre[k+1][hr]=f_regmap[hr];
10171                         regs[k].wasdirty&=~(1<<hr);
10172                         regs[k].dirty&=~(1<<hr);
10173                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
10174                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
10175                         regs[k].wasconst&=~(1<<hr);
10176                         regs[k].isconst&=~(1<<hr);
10177                         k++;
10178                       }
10179                     }
10180                     else {
10181                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
10182                       break;
10183                     }
10184                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
10185                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
10186                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
10187                       regs[i].regmap_entry[hr]=f_regmap[hr];
10188                       regs[i].regmap[hr]=f_regmap[hr];
10189                       regs[i].wasdirty&=~(1<<hr);
10190                       regs[i].dirty&=~(1<<hr);
10191                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
10192                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
10193                       regs[i].wasconst&=~(1<<hr);
10194                       regs[i].isconst&=~(1<<hr);
10195                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
10196                       branch_regs[i].wasdirty&=~(1<<hr);
10197                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
10198                       branch_regs[i].regmap[hr]=f_regmap[hr];
10199                       branch_regs[i].dirty&=~(1<<hr);
10200                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
10201                       branch_regs[i].wasconst&=~(1<<hr);
10202                       branch_regs[i].isconst&=~(1<<hr);
10203                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
10204                         regmap_pre[i+2][hr]=f_regmap[hr];
10205                         regs[i+2].wasdirty&=~(1<<hr);
10206                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
10207                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
10208                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
10209                       }
10210                     }
10211                   }
10212                   for(k=t;k<j;k++) {
10213                     // Alloc register clean at beginning of loop,
10214                     // but may dirty it in pass 6
10215                     regs[k].regmap_entry[hr]=f_regmap[hr];
10216                     regs[k].regmap[hr]=f_regmap[hr];
10217                     regs[k].dirty&=~(1<<hr);
10218                     regs[k].wasconst&=~(1<<hr);
10219                     regs[k].isconst&=~(1<<hr);
10220                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
10221                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
10222                       branch_regs[k].regmap[hr]=f_regmap[hr];
10223                       branch_regs[k].dirty&=~(1<<hr);
10224                       branch_regs[k].wasconst&=~(1<<hr);
10225                       branch_regs[k].isconst&=~(1<<hr);
10226                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
10227                         regmap_pre[k+2][hr]=f_regmap[hr];
10228                         regs[k+2].wasdirty&=~(1<<hr);
10229                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
10230                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
10231                       }
10232                     }
10233                     else
10234                     {
10235                       regmap_pre[k+1][hr]=f_regmap[hr];
10236                       regs[k+1].wasdirty&=~(1<<hr);
10237                     }
10238                   }
10239                   if(regs[j].regmap[hr]==f_regmap[hr])
10240                     regs[j].regmap_entry[hr]=f_regmap[hr];
10241                   break;
10242                 }
10243                 if(j==i) break;
10244                 if(regs[j].regmap[hr]>=0)
10245                   break;
10246                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10247                   //printf("no-match due to different register\n");
10248                   break;
10249                 }
10250                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10251                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10252                   break;
10253                 }
10254                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10255                 {
10256                   // Stop on unconditional branch
10257                   break;
10258                 }
10259                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10260                 {
10261                   if(ooo[j]) {
10262                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) 
10263                       break;
10264                   }else{
10265                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) 
10266                       break;
10267                   }
10268                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10269                     //printf("no-match due to different register (branch)\n");
10270                     break;
10271                   }
10272                 }
10273                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10274                   //printf("No free regs for store %x\n",start+j*4);
10275                   break;
10276                 }
10277                 if(f_regmap[hr]>=64) {
10278                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10279                     break;
10280                   }
10281                   else
10282                   {
10283                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10284                       break;
10285                     }
10286                   }
10287                 }
10288               }
10289             }
10290           }
10291         }
10292       }
10293     }else{
10294       // Non branch or undetermined branch target
10295       for(hr=0;hr<HOST_REGS;hr++)
10296       {
10297         if(hr!=EXCLUDE_REG) {
10298           if(regs[i].regmap[hr]>64) {
10299             if(!((regs[i].dirty>>hr)&1))
10300               f_regmap[hr]=regs[i].regmap[hr];
10301           }
10302           else if(regs[i].regmap[hr]>=0) {
10303             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10304               // dealloc old register
10305               int n;
10306               for(n=0;n<HOST_REGS;n++)
10307               {
10308                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10309               }
10310               // and alloc new one
10311               f_regmap[hr]=regs[i].regmap[hr];
10312             }
10313           }
10314         }
10315       }
10316       // Try to restore cycle count at branch targets
10317       if(bt[i]) {
10318         for(j=i;j<slen-1;j++) {
10319           if(regs[j].regmap[HOST_CCREG]!=-1) break;
10320           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10321             //printf("no free regs for store %x\n",start+j*4);
10322             break;
10323           }
10324         }
10325         if(regs[j].regmap[HOST_CCREG]==CCREG) {
10326           int k=i;
10327           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10328           while(k<j) {
10329             regs[k].regmap_entry[HOST_CCREG]=CCREG;
10330             regs[k].regmap[HOST_CCREG]=CCREG;
10331             regmap_pre[k+1][HOST_CCREG]=CCREG;
10332             regs[k+1].wasdirty|=1<<HOST_CCREG;
10333             regs[k].dirty|=1<<HOST_CCREG;
10334             regs[k].wasconst&=~(1<<HOST_CCREG);
10335             regs[k].isconst&=~(1<<HOST_CCREG);
10336             k++;
10337           }
10338           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
10339         }
10340         // Work backwards from the branch target
10341         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10342         {
10343           //printf("Extend backwards\n");
10344           int k;
10345           k=i;
10346           while(regs[k-1].regmap[HOST_CCREG]==-1) {
10347             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10348               //printf("no free regs for store %x\n",start+(k-1)*4);
10349               break;
10350             }
10351             k--;
10352           }
10353           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10354             //printf("Extend CC, %x ->\n",start+k*4);
10355             while(k<=i) {
10356               regs[k].regmap_entry[HOST_CCREG]=CCREG;
10357               regs[k].regmap[HOST_CCREG]=CCREG;
10358               regmap_pre[k+1][HOST_CCREG]=CCREG;
10359               regs[k+1].wasdirty|=1<<HOST_CCREG;
10360               regs[k].dirty|=1<<HOST_CCREG;
10361               regs[k].wasconst&=~(1<<HOST_CCREG);
10362               regs[k].isconst&=~(1<<HOST_CCREG);
10363               k++;
10364             }
10365           }
10366           else {
10367             //printf("Fail Extend CC, %x ->\n",start+k*4);
10368           }
10369         }
10370       }
10371       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10372          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10373          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10374          itype[i]!=FCONV&&itype[i]!=FCOMP)
10375       {
10376         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10377       }
10378     }
10379   }
10380   
10381   // Cache memory offset or tlb map pointer if a register is available
10382   #ifndef HOST_IMM_ADDR32
10383   #ifndef RAM_OFFSET
10384   if(using_tlb)
10385   #endif
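        // (If RAM_OFFSET is defined this block always runs; otherwise only when
        //  the TLB is in use.  'reg' below is the value being cached: the TLB
        //  map pointer (MMREG) or the RAM offset (ROREG).)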
10386   {
10387     int earliest_available[HOST_REGS];
10388     int loop_start[HOST_REGS];
10389     int score[HOST_REGS];
10390     int end[HOST_REGS];
10391     int reg=using_tlb?MMREG:ROREG;
10392
10393     // Init
10394     for(hr=0;hr<HOST_REGS;hr++) {
10395       score[hr]=0;earliest_available[hr]=0;
10396       loop_start[hr]=MAXBLOCK;
10397     }
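          // Heuristic: starting at each memory access, scan forward and score
          // each free host register by how many subsequent accesses (and
          // matching loop targets) it could serve before being claimed; the best
          // scorer, if it earns more than one point, is preloaded with 'reg'
          // over that range.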
10398     for(i=0;i<slen-1;i++)
10399     {
10400       // Can't do anything if no registers are available
10401       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
10402         for(hr=0;hr<HOST_REGS;hr++) {
10403           score[hr]=0;earliest_available[hr]=i+1;
10404           loop_start[hr]=MAXBLOCK;
10405         }
10406       }
10407       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10408         if(!ooo[i]) {
10409           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
10410             for(hr=0;hr<HOST_REGS;hr++) {
10411               score[hr]=0;earliest_available[hr]=i+1;
10412               loop_start[hr]=MAXBLOCK;
10413             }
10414           }
10415         }else{
10416           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
10417             for(hr=0;hr<HOST_REGS;hr++) {
10418               score[hr]=0;earliest_available[hr]=i+1;
10419               loop_start[hr]=MAXBLOCK;
10420             }
10421           }
10422         }
10423       }
10424       // Mark unavailable registers
10425       for(hr=0;hr<HOST_REGS;hr++) {
10426         if(regs[i].regmap[hr]>=0) {
10427           score[hr]=0;earliest_available[hr]=i+1;
10428           loop_start[hr]=MAXBLOCK;
10429         }
10430         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10431           if(branch_regs[i].regmap[hr]>=0) {
10432             score[hr]=0;earliest_available[hr]=i+2;
10433             loop_start[hr]=MAXBLOCK;
10434           }
10435         }
10436       }
10437       // No register allocations after unconditional jumps
10438       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10439       {
10440         for(hr=0;hr<HOST_REGS;hr++) {
10441           score[hr]=0;earliest_available[hr]=i+2;
10442           loop_start[hr]=MAXBLOCK;
10443         }
10444         i++; // Skip delay slot too
10445         //printf("skip delay slot: %x\n",start+i*4);
10446       }
10447       else
10448       // Possible match
10449       if(itype[i]==LOAD||itype[i]==LOADLR||
10450          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
10451         for(hr=0;hr<HOST_REGS;hr++) {
10452           if(hr!=EXCLUDE_REG) {
10453             end[hr]=i-1;
10454             for(j=i;j<slen-1;j++) {
10455               if(regs[j].regmap[hr]>=0) break;
10456               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10457                 if(branch_regs[j].regmap[hr]>=0) break;
10458                 if(ooo[j]) {
10459                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
10460                 }else{
10461                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
10462                 }
10463               }
10464               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
10465               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10466                 int t=(ba[j]-start)>>2;
10467                 if(t<j&&t>=earliest_available[hr]) {
10468                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
10469                     // Score a point for hoisting loop invariant
10470                     if(t<loop_start[hr]) loop_start[hr]=t;
10471                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
10472                     score[hr]++;
10473                     end[hr]=j;
10474                   }
10475                 }
10476                 else if(t<j) {
10477                   if(regs[t].regmap[hr]==reg) {
10478                     // Score a point if the branch target matches this register
10479                     score[hr]++;
10480                     end[hr]=j;
10481                   }
10482                 }
10483                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
10484                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
10485                   score[hr]++;
10486                   end[hr]=j;
10487                 }
10488               }
10489               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10490               {
10491                 // Stop on unconditional branch
10492                 break;
10493               }
10494               else
10495               if(itype[j]==LOAD||itype[j]==LOADLR||
10496                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
10497                 score[hr]++;
10498                 end[hr]=j;
10499               }
10500             }
10501           }
10502         }
10503         // Find highest score and allocate that register
10504         int maxscore=0;
10505         for(hr=0;hr<HOST_REGS;hr++) {
10506           if(hr!=EXCLUDE_REG) {
10507             if(score[hr]>score[maxscore]) {
10508               maxscore=hr;
10509               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
10510             }
10511           }
10512         }
10513         if(score[maxscore]>1)
10514         {
10515           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
10516           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
10517             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
10518             assert(regs[j].regmap[maxscore]<0);
10519             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
10520             regs[j].regmap[maxscore]=reg;
10521             regs[j].dirty&=~(1<<maxscore);
10522             regs[j].wasconst&=~(1<<maxscore);
10523             regs[j].isconst&=~(1<<maxscore);
10524             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10525               branch_regs[j].regmap[maxscore]=reg;
10526               branch_regs[j].wasdirty&=~(1<<maxscore);
10527               branch_regs[j].dirty&=~(1<<maxscore);
10528               branch_regs[j].wasconst&=~(1<<maxscore);
10529               branch_regs[j].isconst&=~(1<<maxscore);
10530               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
10531                 regmap_pre[j+2][maxscore]=reg;
10532                 regs[j+2].wasdirty&=~(1<<maxscore);
10533               }
10534               // loop optimization (loop_preload)
10535               int t=(ba[j]-start)>>2;
10536               if(t==loop_start[maxscore]) {
10537                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
10538                   regs[t].regmap_entry[maxscore]=reg;
10539               }
10540             }
10541             else
10542             {
10543               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
10544                 regmap_pre[j+1][maxscore]=reg;
10545                 regs[j+1].wasdirty&=~(1<<maxscore);
10546               }
10547             }
10548           }
10549           i=j-1;
10550           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
10551           for(hr=0;hr<HOST_REGS;hr++) {
10552             score[hr]=0;earliest_available[hr]=i+1;
10553             loop_start[hr]=MAXBLOCK;
10554           }
10555         }
10556       }
10557     }
10558   }
10559   #endif
10560   
10561   // This allocates registers (if possible) one instruction prior
10562   // to use, which can avoid a load-use penalty on certain CPUs.
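        // Only done when the previous instruction is not a branch (so i is not
        // itself a delay slot), i+1 is not a branch target, and instruction i is
        // cheap (ALU/MOV/LOAD/SHIFTIMM/IMM16 or a coprocessor read), so hoisting
        // the allocation cannot disturb a block entry point.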
10563   for(i=0;i<slen-1;i++)
10564   {
10565     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10566     {
10567       if(!bt[i+1])
10568       {
10569         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10570            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10571         {
10572           if(rs1[i+1]) {
10573             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10574             {
10575               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10576               {
10577                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10578                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10579                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10580                 regs[i].isconst&=~(1<<hr);
10581                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10582                 constmap[i][hr]=constmap[i+1][hr];
10583                 regs[i+1].wasdirty&=~(1<<hr);
10584                 regs[i].dirty&=~(1<<hr);
10585               }
10586             }
10587           }
10588           if(rs2[i+1]) {
10589             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10590             {
10591               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10592               {
10593                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10594                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10595                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10596                 regs[i].isconst&=~(1<<hr);
10597                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10598                 constmap[i][hr]=constmap[i+1][hr];
10599                 regs[i+1].wasdirty&=~(1<<hr);
10600                 regs[i].dirty&=~(1<<hr);
10601               }
10602             }
10603           }
10604           // Preload target address for load instruction (non-constant)
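          // (When the base register is not already mapped, the host reg that
          // will receive the load result is borrowed one insn early to carry
          // rs1[i+1], so the address is ready when the load is assembled.)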
10605           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10606             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10607             {
10608               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10609               {
10610                 regs[i].regmap[hr]=rs1[i+1];
10611                 regmap_pre[i+1][hr]=rs1[i+1];
10612                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10613                 regs[i].isconst&=~(1<<hr);
10614                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10615                 constmap[i][hr]=constmap[i+1][hr];
10616                 regs[i+1].wasdirty&=~(1<<hr);
10617                 regs[i].dirty&=~(1<<hr);
10618               }
10619             }
10620           }
10621           // Load source into target register 
10622           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10623             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10624             {
10625               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10626               {
10627                 regs[i].regmap[hr]=rs1[i+1];
10628                 regmap_pre[i+1][hr]=rs1[i+1];
10629                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10630                 regs[i].isconst&=~(1<<hr);
10631                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10632                 constmap[i][hr]=constmap[i+1][hr];
10633                 regs[i+1].wasdirty&=~(1<<hr);
10634                 regs[i].dirty&=~(1<<hr);
10635               }
10636             }
10637           }
10638           // Preload map address
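          // (Roughly: TLREG holds the memory-map lookup value for the access;
          // when the address register is a known constant, MGEN1/MGEN2,
          // chosen by instruction parity, name that value so it can be
          // generated one instruction early.)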
10639           #ifndef HOST_IMM_ADDR32
10640           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10641             hr=get_reg(regs[i+1].regmap,TLREG);
10642             if(hr>=0) {
10643               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10644               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10645                 int nr;
10646                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10647                 {
10648                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10649                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10650                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10651                   regs[i].isconst&=~(1<<hr);
10652                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10653                   constmap[i][hr]=constmap[i+1][hr];
10654                   regs[i+1].wasdirty&=~(1<<hr);
10655                   regs[i].dirty&=~(1<<hr);
10656                 }
10657                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10658                 {
10659                   // move it to another register
10660                   regs[i+1].regmap[hr]=-1;
10661                   regmap_pre[i+2][hr]=-1;
10662                   regs[i+1].regmap[nr]=TLREG;
10663                   regmap_pre[i+2][nr]=TLREG;
10664                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10665                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10666                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10667                   regs[i].isconst&=~(1<<nr);
10668                   regs[i+1].isconst&=~(1<<nr);
10669                   regs[i].dirty&=~(1<<nr);
10670                   regs[i+1].wasdirty&=~(1<<nr);
10671                   regs[i+1].dirty&=~(1<<nr);
10672                   regs[i+2].wasdirty&=~(1<<nr);
10673                 }
10674               }
10675             }
10676           }
10677           #endif
10678           // Address for store instruction (non-constant)
10679           if(itype[i+1]==STORE||itype[i+1]==STORELR
10680              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10681             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10682               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10683               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10684               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10685               assert(hr>=0);
10686               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10687               {
10688                 regs[i].regmap[hr]=rs1[i+1];
10689                 regmap_pre[i+1][hr]=rs1[i+1];
10690                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10691                 regs[i].isconst&=~(1<<hr);
10692                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10693                 constmap[i][hr]=constmap[i+1][hr];
10694                 regs[i+1].wasdirty&=~(1<<hr);
10695                 regs[i].dirty&=~(1<<hr);
10696               }
10697             }
10698           }
10699           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10700             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10701               int nr;
10702               hr=get_reg(regs[i+1].regmap,FTEMP);
10703               assert(hr>=0);
10704               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10705               {
10706                 regs[i].regmap[hr]=rs1[i+1];
10707                 regmap_pre[i+1][hr]=rs1[i+1];
10708                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10709                 regs[i].isconst&=~(1<<hr);
10710                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10711                 constmap[i][hr]=constmap[i+1][hr];
10712                 regs[i+1].wasdirty&=~(1<<hr);
10713                 regs[i].dirty&=~(1<<hr);
10714               }
10715               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10716               {
10717                 // move it to another register
10718                 regs[i+1].regmap[hr]=-1;
10719                 regmap_pre[i+2][hr]=-1;
10720                 regs[i+1].regmap[nr]=FTEMP;
10721                 regmap_pre[i+2][nr]=FTEMP;
10722                 regs[i].regmap[nr]=rs1[i+1];
10723                 regmap_pre[i+1][nr]=rs1[i+1];
10724                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10725                 regs[i].isconst&=~(1<<nr);
10726                 regs[i+1].isconst&=~(1<<nr);
10727                 regs[i].dirty&=~(1<<nr);
10728                 regs[i+1].wasdirty&=~(1<<nr);
10729                 regs[i+1].dirty&=~(1<<nr);
10730                 regs[i+2].wasdirty&=~(1<<nr);
10731               }
10732             }
10733           }
10734           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10735             if(itype[i+1]==LOAD) 
10736               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10737             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10738               hr=get_reg(regs[i+1].regmap,FTEMP);
10739             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10740               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10741               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10742             }
10743             if(hr>=0&&regs[i].regmap[hr]<0) {
10744               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10745               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10746                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10747                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10748                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10749                 regs[i].isconst&=~(1<<hr);
10750                 regs[i+1].wasdirty&=~(1<<hr);
10751                 regs[i].dirty&=~(1<<hr);
10752               }
10753             }
10754           }
10755         }
10756       }
10757     }
10758   }
10759   
10760   /* Pass 6 - Optimize clean/dirty state */
10761   clean_registers(0,slen-1,1);
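  // (Roughly: this pass decides where dirty host registers, i.e. values not
  // yet written back to the MIPS register file, must be flushed so that every
  // branch target and exit sees a consistent state, and where the writeback
  // can be deferred.)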
10762   
10763   /* Pass 7 - Identify 32-bit registers */
10764 #ifndef FORCE32
10765   provisional_r32();
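  // Walk the block backwards: r32 roughly collects the MIPS registers that
  // some later instruction needs as properly sign-extended 32-bit values, so
  // their upper halves cannot be discarded earlier. The per-instruction
  // result is stored in requires_32bit[]. (Only compiled when FORCE32 is not
  // defined.)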
10766
10767   u_int r32=0;
10768   
10769   for (i=slen-1;i>=0;i--)
10770   {
10771     int hr;
10772     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10773     {
10774       if(ba[i]<start || ba[i]>=(start+slen*4))
10775       {
10776         // Branch out of this block, don't need anything
10777         r32=0;
10778       }
10779       else
10780       {
10781         // Internal branch
10782         // Need whatever matches the target
10783         // (and doesn't get overwritten by the delay slot instruction)
10784         r32=0;
10785         int t=(ba[i]-start)>>2;
10786         if(ba[i]>start+i*4) {
10787           // Forward branch
10788           if(!(requires_32bit[t]&~regs[i].was32))
10789             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10790         }else{
10791           // Backward branch
10792           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10793           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10794           if(!(pr32[t]&~regs[i].was32))
10795             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10796         }
10797       }
10798       // Conditional branch may need registers for following instructions
10799       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10800       {
10801         if(i<slen-2) {
10802           r32|=requires_32bit[i+2];
10803           r32&=regs[i].was32;
10804           // Mark this address as a branch target since it may be called
10805           // upon return from interrupt
10806           bt[i+2]=1;
10807         }
10808       }
10809       // Merge in delay slot
10810       if(!likely[i]) {
10811         // These are overwritten unless the branch is "likely"
10812         // and the delay slot is nullified if not taken
10813         r32&=~(1LL<<rt1[i+1]);
10814         r32&=~(1LL<<rt2[i+1]);
10815       }
10816       // Assume these are needed (delay slot)
10817       if(us1[i+1]>0)
10818       {
10819         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10820       }
10821       if(us2[i+1]>0)
10822       {
10823         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10824       }
10825       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10826       {
10827         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10828       }
10829       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10830       {
10831         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10832       }
10833     }
10834     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10835     {
10836       // SYSCALL instruction (software interrupt)
10837       r32=0;
10838     }
10839     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10840     {
10841       // ERET instruction (return from interrupt)
10842       r32=0;
10843     }
10844     // Check 32 bits
10845     r32&=~(1LL<<rt1[i]);
10846     r32&=~(1LL<<rt2[i]);
10847     if(us1[i]>0)
10848     {
10849       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10850     }
10851     if(us2[i]>0)
10852     {
10853       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10854     }
10855     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10856     {
10857       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10858     }
10859     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10860     {
10861       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10862     }
10863     requires_32bit[i]=r32;
10864     
10865     // Dirty registers which are 32-bit, require 32-bit input
10866     // as they will be written as 32-bit values
10867     for(hr=0;hr<HOST_REGS;hr++)
10868     {
10869       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10870         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10871           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10872             requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10873         }
10874       }
10875     }
10876     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10877   }
10878 #else
10879   for (i=slen-1;i>=0;i--)
10880   {
10881     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10882     {
10883       // Conditional branch
10884       if((source[i]>>16)!=0x1000&&i<slen-2) {
10885         // Mark this address as a branch target since it may be called
10886         // upon return from interrupt
10887         bt[i+2]=1;
10888       }
10889     }
10890   }
10891 #endif
10892
10893   if(itype[slen-1]==SPAN) {
10894     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10895   }
10896
10897 #ifdef DISASM
10898   /* Debug/disassembly */
10899   for(i=0;i<slen;i++)
10900   {
10901     printf("U:");
10902     int r;
10903     for(r=1;r<=CCREG;r++) {
10904       if((unneeded_reg[i]>>r)&1) {
10905         if(r==HIREG) printf(" HI");
10906         else if(r==LOREG) printf(" LO");
10907         else printf(" r%d",r);
10908       }
10909     }
10910 #ifndef FORCE32
10911     printf(" UU:");
10912     for(r=1;r<=CCREG;r++) {
10913       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10914         if(r==HIREG) printf(" HI");
10915         else if(r==LOREG) printf(" LO");
10916         else printf(" r%d",r);
10917       }
10918     }
10919     printf(" 32:");
10920     for(r=0;r<=CCREG;r++) {
10921       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10922       if((regs[i].was32>>r)&1) {
10923         if(r==CCREG) printf(" CC");
10924         else if(r==HIREG) printf(" HI");
10925         else if(r==LOREG) printf(" LO");
10926         else printf(" r%d",r);
10927       }
10928     }
10929 #endif
10930     printf("\n");
10931     #if defined(__i386__) || defined(__x86_64__)
10932     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10933     #endif
10934     #ifdef __arm__
10935     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10936     #endif
10937     printf("needs: ");
10938     if(needed_reg[i]&1) printf("eax ");
10939     if((needed_reg[i]>>1)&1) printf("ecx ");
10940     if((needed_reg[i]>>2)&1) printf("edx ");
10941     if((needed_reg[i]>>3)&1) printf("ebx ");
10942     if((needed_reg[i]>>5)&1) printf("ebp ");
10943     if((needed_reg[i]>>6)&1) printf("esi ");
10944     if((needed_reg[i]>>7)&1) printf("edi ");
10945     printf("r:");
10946     for(r=0;r<=CCREG;r++) {
10947       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10948       if((requires_32bit[i]>>r)&1) {
10949         if(r==CCREG) printf(" CC");
10950         else if(r==HIREG) printf(" HI");
10951         else if(r==LOREG) printf(" LO");
10952         else printf(" r%d",r);
10953       }
10954     }
10955     printf("\n");
10956     /*printf("pr:");
10957     for(r=0;r<=CCREG;r++) {
10958       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10959       if((pr32[i]>>r)&1) {
10960         if(r==CCREG) printf(" CC");
10961         else if(r==HIREG) printf(" HI");
10962         else if(r==LOREG) printf(" LO");
10963         else printf(" r%d",r);
10964       }
10965     }
10966     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10967     printf("\n");*/
10968     #if defined(__i386__) || defined(__x86_64__)
10969     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10970     printf("dirty: ");
10971     if(regs[i].wasdirty&1) printf("eax ");
10972     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10973     if((regs[i].wasdirty>>2)&1) printf("edx ");
10974     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10975     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10976     if((regs[i].wasdirty>>6)&1) printf("esi ");
10977     if((regs[i].wasdirty>>7)&1) printf("edi ");
10978     #endif
10979     #ifdef __arm__
10980     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10981     printf("dirty: ");
10982     if(regs[i].wasdirty&1) printf("r0 ");
10983     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10984     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10985     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10986     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10987     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10988     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10989     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10990     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10991     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10992     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10993     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10994     #endif
10995     printf("\n");
10996     disassemble_inst(i);
10997     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10998     #if defined(__i386__) || defined(__x86_64__)
10999     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
11000     if(regs[i].dirty&1) printf("eax ");
11001     if((regs[i].dirty>>1)&1) printf("ecx ");
11002     if((regs[i].dirty>>2)&1) printf("edx ");
11003     if((regs[i].dirty>>3)&1) printf("ebx ");
11004     if((regs[i].dirty>>5)&1) printf("ebp ");
11005     if((regs[i].dirty>>6)&1) printf("esi ");
11006     if((regs[i].dirty>>7)&1) printf("edi ");
11007     #endif
11008     #ifdef __arm__
11009     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
11010     if(regs[i].dirty&1) printf("r0 ");
11011     if((regs[i].dirty>>1)&1) printf("r1 ");
11012     if((regs[i].dirty>>2)&1) printf("r2 ");
11013     if((regs[i].dirty>>3)&1) printf("r3 ");
11014     if((regs[i].dirty>>4)&1) printf("r4 ");
11015     if((regs[i].dirty>>5)&1) printf("r5 ");
11016     if((regs[i].dirty>>6)&1) printf("r6 ");
11017     if((regs[i].dirty>>7)&1) printf("r7 ");
11018     if((regs[i].dirty>>8)&1) printf("r8 ");
11019     if((regs[i].dirty>>9)&1) printf("r9 ");
11020     if((regs[i].dirty>>10)&1) printf("r10 ");
11021     if((regs[i].dirty>>12)&1) printf("r12 ");
11022     #endif
11023     printf("\n");
11024     if(regs[i].isconst) {
11025       printf("constants: ");
11026       #if defined(__i386__) || defined(__x86_64__)
11027       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
11028       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
11029       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
11030       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
11031       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
11032       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
11033       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
11034       #endif
11035       #ifdef __arm__
11036       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
11037       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
11038       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
11039       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
11040       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
11041       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
11042       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
11043       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
11044       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
11045       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
11046       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
11047       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
11048       #endif
11049       printf("\n");
11050     }
11051 #ifndef FORCE32
11052     printf(" 32:");
11053     for(r=0;r<=CCREG;r++) {
11054       if((regs[i].is32>>r)&1) {
11055         if(r==CCREG) printf(" CC");
11056         else if(r==HIREG) printf(" HI");
11057         else if(r==LOREG) printf(" LO");
11058         else printf(" r%d",r);
11059       }
11060     }
11061     printf("\n");
11062 #endif
11063     /*printf(" p32:");
11064     for(r=0;r<=CCREG;r++) {
11065       if((p32[i]>>r)&1) {
11066         if(r==CCREG) printf(" CC");
11067         else if(r==HIREG) printf(" HI");
11068         else if(r==LOREG) printf(" LO");
11069         else printf(" r%d",r);
11070       }
11071     }
11072     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
11073     else printf("\n");*/
11074     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
11075       #if defined(__i386__) || defined(__x86_64__)
11076       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
11077       if(branch_regs[i].dirty&1) printf("eax ");
11078       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
11079       if((branch_regs[i].dirty>>2)&1) printf("edx ");
11080       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
11081       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
11082       if((branch_regs[i].dirty>>6)&1) printf("esi ");
11083       if((branch_regs[i].dirty>>7)&1) printf("edi ");
11084       #endif
11085       #ifdef __arm__
11086       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
11087       if(branch_regs[i].dirty&1) printf("r0 ");
11088       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
11089       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
11090       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
11091       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
11092       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
11093       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
11094       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
11095       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
11096       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
11097       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
11098       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
11099       #endif
11100 #ifndef FORCE32
11101       printf(" 32:");
11102       for(r=0;r<=CCREG;r++) {
11103         if((branch_regs[i].is32>>r)&1) {
11104           if(r==CCREG) printf(" CC");
11105           else if(r==HIREG) printf(" HI");
11106           else if(r==LOREG) printf(" LO");
11107           else printf(" r%d",r);
11108         }
11109       }
11110       printf("\n");
11111 #endif
11112     }
11113   }
11114 #endif // DISASM
11115
11116   /* Pass 8 - Assembly */
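  // For each instruction: write back or reload host registers as needed to
  // match the allocation computed above, record the code address of the
  // instruction in instr_addr[] (used for branch targets and block entry),
  // then dispatch to the per-type assembler in the switch below.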
11117   linkcount=0;stubcount=0;
11118   ds=0;is_delayslot=0;
11119   cop1_usable=0;
11120   uint64_t is32_pre=0;
11121   u_int dirty_pre=0;
11122   u_int beginning=(u_int)out;
11123   if((u_int)addr&1) {
11124     ds=1;
11125     pagespan_ds();
11126   }
11127   u_int instr_addr0_override=0;
11128
11129 #ifdef PCSX
11130   if (start == 0x80030000) {
11131     // nasty hack for fastbios thing
11132     // override block entry to this code
11133     instr_addr0_override=(u_int)out;
11134     emit_movimm(start,0);
11135     // abuse io address var as a flag that we
11136     // have already returned here once
11137     emit_readword((int)&address,1);
11138     emit_writeword(0,(int)&pcaddr);
11139     emit_writeword(0,(int)&address);
11140     emit_cmp(0,1);
11141     emit_jne((int)new_dyna_leave);
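    // (The emitted sequence sets pcaddr and address to the block start and,
    // if the previously stored address value did not already equal start,
    // exits through new_dyna_leave instead of executing the block.)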
11142   }
11143 #endif
11144   for(i=0;i<slen;i++)
11145   {
11146     //if(ds) printf("ds: ");
11147     disassemble_inst(i);
11148     if(ds) {
11149       ds=0; // Skip delay slot
11150       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
11151       instr_addr[i]=0;
11152     } else {
11153       speculate_register_values(i);
11154       #ifndef DESTRUCTIVE_WRITEBACK
11155       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11156       {
11157         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
11158               unneeded_reg[i],unneeded_reg_upper[i]);
11159         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
11160               unneeded_reg[i],unneeded_reg_upper[i]);
11161       }
11162       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
11163         is32_pre=branch_regs[i].is32;
11164         dirty_pre=branch_regs[i].dirty;
11165       }else{
11166         is32_pre=regs[i].is32;
11167         dirty_pre=regs[i].dirty;
11168       }
11169       #endif
11170       // write back
11171       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11172       {
11173         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
11174                       unneeded_reg[i],unneeded_reg_upper[i]);
11175         loop_preload(regmap_pre[i],regs[i].regmap_entry);
11176       }
11177       // branch target entry point
11178       instr_addr[i]=(u_int)out;
11179       assem_debug("<->\n");
11180       // load regs
11181       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
11182         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
11183       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
11184       address_generation(i,&regs[i],regs[i].regmap_entry);
11185       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
11186       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11187       {
11188         // Load the delay slot registers if necessary
11189         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
11190           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11191         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
11192           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11193         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
11194           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11195       }
11196       else if(i+1<slen)
11197       {
11198         // Preload registers for following instruction
11199         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
11200           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
11201             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11202         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
11203           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
11204             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11205       }
11206       // TODO: if(is_ooo(i)) address_generation(i+1);
11207       if(itype[i]==CJUMP||itype[i]==FJUMP)
11208         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
11209       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
11210         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11211       if(bt[i]) cop1_usable=0;
11212       // assemble
11213       switch(itype[i]) {
11214         case ALU:
11215           alu_assemble(i,&regs[i]);break;
11216         case IMM16:
11217           imm16_assemble(i,&regs[i]);break;
11218         case SHIFT:
11219           shift_assemble(i,&regs[i]);break;
11220         case SHIFTIMM:
11221           shiftimm_assemble(i,&regs[i]);break;
11222         case LOAD:
11223           load_assemble(i,&regs[i]);break;
11224         case LOADLR:
11225           loadlr_assemble(i,&regs[i]);break;
11226         case STORE:
11227           store_assemble(i,&regs[i]);break;
11228         case STORELR:
11229           storelr_assemble(i,&regs[i]);break;
11230         case COP0:
11231           cop0_assemble(i,&regs[i]);break;
11232         case COP1:
11233           cop1_assemble(i,&regs[i]);break;
11234         case C1LS:
11235           c1ls_assemble(i,&regs[i]);break;
11236         case COP2:
11237           cop2_assemble(i,&regs[i]);break;
11238         case C2LS:
11239           c2ls_assemble(i,&regs[i]);break;
11240         case C2OP:
11241           c2op_assemble(i,&regs[i]);break;
11242         case FCONV:
11243           fconv_assemble(i,&regs[i]);break;
11244         case FLOAT:
11245           float_assemble(i,&regs[i]);break;
11246         case FCOMP:
11247           fcomp_assemble(i,&regs[i]);break;
11248         case MULTDIV:
11249           multdiv_assemble(i,&regs[i]);break;
11250         case MOV:
11251           mov_assemble(i,&regs[i]);break;
11252         case SYSCALL:
11253           syscall_assemble(i,&regs[i]);break;
11254         case HLECALL:
11255           hlecall_assemble(i,&regs[i]);break;
11256         case INTCALL:
11257           intcall_assemble(i,&regs[i]);break;
11258         case UJUMP:
11259           ujump_assemble(i,&regs[i]);ds=1;break;
11260         case RJUMP:
11261           rjump_assemble(i,&regs[i]);ds=1;break;
11262         case CJUMP:
11263           cjump_assemble(i,&regs[i]);ds=1;break;
11264         case SJUMP:
11265           sjump_assemble(i,&regs[i]);ds=1;break;
11266         case FJUMP:
11267           fjump_assemble(i,&regs[i]);ds=1;break;
11268         case SPAN:
11269           pagespan_assemble(i,&regs[i]);break;
11270       }
11271       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
11272         literal_pool(1024);
11273       else
11274         literal_pool_jumpover(256);
11275     }
11276   }
11277   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
11278   // If the block did not end with an unconditional branch,
11279   // add a jump to the next instruction.
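  // The fall-through exit stores any dirty registers, advances the cycle
  // counter in HOST_CCREG by CLOCK_DIVIDER*(ccadj+1), and emits a jump that
  // pass 9 below will patch to point at start+slen*4 (the next block).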
11280   if(i>1) {
11281     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
11282       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11283       assert(i==slen);
11284       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
11285         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11286         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11287           emit_loadreg(CCREG,HOST_CCREG);
11288         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11289       }
11290       else if(!likely[i-2])
11291       {
11292         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
11293         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
11294       }
11295       else
11296       {
11297         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
11298         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
11299       }
11300       add_to_linker((int)out,start+i*4,0);
11301       emit_jmp(0);
11302     }
11303   }
11304   else
11305   {
11306     assert(i>0);
11307     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11308     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11309     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11310       emit_loadreg(CCREG,HOST_CCREG);
11311     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11312     add_to_linker((int)out,start+i*4,0);
11313     emit_jmp(0);
11314   }
11315
11316   // TODO: delay slot stubs?
11317   // Stubs
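  // Stubs are out-of-line code emitted after the block body: slow paths for
  // loads/stores that miss the fast case, cycle-count/interrupt checks
  // (CC_STUB), self-modifying-code checks for stores (INVCODE_STUB),
  // coprocessor-unusable exceptions (FP_STUB) and unaligned store handling.
  // stubs[i][0] selects the handler that generates the code.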
11318   for(i=0;i<stubcount;i++)
11319   {
11320     switch(stubs[i][0])
11321     {
11322       case LOADB_STUB:
11323       case LOADH_STUB:
11324       case LOADW_STUB:
11325       case LOADD_STUB:
11326       case LOADBU_STUB:
11327       case LOADHU_STUB:
11328         do_readstub(i);break;
11329       case STOREB_STUB:
11330       case STOREH_STUB:
11331       case STOREW_STUB:
11332       case STORED_STUB:
11333         do_writestub(i);break;
11334       case CC_STUB:
11335         do_ccstub(i);break;
11336       case INVCODE_STUB:
11337         do_invstub(i);break;
11338       case FP_STUB:
11339         do_cop1stub(i);break;
11340       case STORELR_STUB:
11341         do_unalignedwritestub(i);break;
11342     }
11343   }
11344
11345   if (instr_addr0_override)
11346     instr_addr[0] = instr_addr0_override;
11347
11348   /* Pass 9 - Linker */
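  // link_addr[i][0] is the location of an emitted branch, [1] the target
  // virtual address, [2] nonzero for branches inside this block. External
  // targets get an extjump stub; if the target block already exists
  // (check_addr), the branch is pointed straight at it and a back-link is
  // recorded so it can be unlinked when that block is invalidated.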
11349   for(i=0;i<linkcount;i++)
11350   {
11351     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
11352     literal_pool(64);
11353     if(!link_addr[i][2])
11354     {
11355       void *stub=out;
11356       void *addr=check_addr(link_addr[i][1]);
11357       emit_extjump(link_addr[i][0],link_addr[i][1]);
11358       if(addr) {
11359         set_jump_target(link_addr[i][0],(int)addr);
11360         add_link(link_addr[i][1],stub);
11361       }
11362       else set_jump_target(link_addr[i][0],(int)stub);
11363     }
11364     else
11365     {
11366       // Internal branch
11367       int target=(link_addr[i][1]-start)>>2;
11368       assert(target>=0&&target<slen);
11369       assert(instr_addr[target]);
11370       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11371       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
11372       //#else
11373       set_jump_target(link_addr[i][0],instr_addr[target]);
11374       //#endif
11375     }
11376   }
11377   // External Branch Targets (jump_in)
11378   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
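  // Every branch target (and the block start) gets an entry in jump_dirty,
  // keyed by virtual page, pointing at a stub that re-checks the shadow copy
  // of the source against RAM before entering the block, and an entry in
  // jump_in, keyed by physical page, used for normal dispatch into the block.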
11379   for(i=0;i<slen;i++)
11380   {
11381     if(bt[i]||i==0)
11382     {
11383       if(instr_addr[i]) // TODO - delay slots (=null)
11384       {
11385         u_int vaddr=start+i*4;
11386         u_int page=get_page(vaddr);
11387         u_int vpage=get_vpage(vaddr);
11388         literal_pool(256);
11389         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
11390 #ifndef FORCE32
11391         if(!requires_32bit[i])
11392 #else
11393         if(1)
11394 #endif
11395         {
11396           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11397           assem_debug("jump_in: %x\n",start+i*4);
11398           ll_add(jump_dirty+vpage,vaddr,(void *)out);
11399           int entry_point=do_dirty_stub(i);
11400           ll_add(jump_in+page,vaddr,(void *)entry_point);
11401           // If there was an existing entry in the hash table,
11402           // replace it with the new address.
11403           // Don't add new entries.  We'll insert the
11404           // ones that actually get used in check_addr().
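          // Each hash bin holds two (vaddr, entry point) pairs:
          // {ht_bin[0],ht_bin[1]} and {ht_bin[2],ht_bin[3]}. A lookup is
          // essentially:
          //   int *b=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
          //   if(b[0]==vaddr) jump to b[1]; else if(b[2]==vaddr) jump to b[3];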
11405           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
11406           if(ht_bin[0]==vaddr) {
11407             ht_bin[1]=entry_point;
11408           }
11409           if(ht_bin[2]==vaddr) {
11410             ht_bin[3]=entry_point;
11411           }
11412         }
11413         else
11414         {
11415           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
11416           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11417           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
11418           //int entry_point=(int)out;
11419           ////assem_debug("entry_point: %x\n",entry_point);
11420           //load_regs_entry(i);
11421           //if(entry_point==(int)out)
11422           //  entry_point=instr_addr[i];
11423           //else
11424           //  emit_jmp(instr_addr[i]);
11425           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11426           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
11427           int entry_point=do_dirty_stub(i);
11428           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11429         }
11430       }
11431     }
11432   }
11433   // Write out the literal pool if necessary
11434   literal_pool(0);
11435   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11436   // Align code
11437   if(((u_int)out)&7) emit_addnop(13);
11438   #endif
11439   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
11440   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
11441   memcpy(copy,source,slen*4);
11442   copy+=slen*4;
11443   
11444   #ifdef __arm__
11445   __clear_cache((void *)beginning,out);
11446   #endif
11447   
11448   // If we're within 256K of the end of the buffer,
11449   // start over from the beginning. (Is 256K enough?)
11450   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11451   
11452   // Trap writes to any of the pages we compiled
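  // invalid_code[page]==0 marks a page as containing compiled code; the
  // generated store code tests this (via INVCP/do_invstub) so that writes
  // into such a page invalidate the affected blocks. (The 0x40000000 bit
  // set in memory_map below plays a similar role for the TLB-mapped path.)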
11453   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11454     invalid_code[i]=0;
11455 #ifndef DISABLE_TLB
11456     memory_map[i]|=0x40000000;
11457     if((signed int)start>=(signed int)0xC0000000) {
11458       assert(using_tlb);
11459       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11460       invalid_code[j]=0;
11461       memory_map[j]|=0x40000000;
11462       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11463     }
11464 #endif
11465   }
11466   inv_code_start=inv_code_end=~0;
11467 #ifdef PCSX
11468   // for PCSX we need to mark all mirrors too
11469   if(get_page(start)<(RAM_SIZE>>12))
11470     for(i=start>>12;i<=(start+slen*4)>>12;i++)
11471       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
11472       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
11473       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
11474 #endif
11475   
11476   /* Pass 10 - Free memory by expiring oldest blocks */
11477   
11478   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
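  // expirep is a 16-bit phase counter advanced until it catches up with
  // 'end' (derived from the current output position): bits 13-15 pick which
  // eighth of the translation cache is being reclaimed, bits 11-12 pick the
  // structure to clean (jump_in/jump_dirty lists, jump_out pointers, the
  // hash table, or jump_out lists), and the low 11 bits pick the page or
  // bucket within it.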
11479   while(expirep!=end)
11480   {
11481     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11482     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11483     inv_debug("EXP: Phase %d\n",expirep);
11484     switch((expirep>>11)&3)
11485     {
11486       case 0:
11487         // Clear jump_in and jump_dirty
11488         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11489         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11490         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11491         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11492         break;
11493       case 1:
11494         // Clear pointers
11495         ll_kill_pointers(jump_out[expirep&2047],base,shift);
11496         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11497         break;
11498       case 2:
11499         // Clear hash table
11500         for(i=0;i<32;i++) {
11501           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11502           if((ht_bin[3]>>shift)==(base>>shift) ||
11503              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11504             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11505             ht_bin[2]=ht_bin[3]=-1;
11506           }
11507           if((ht_bin[1]>>shift)==(base>>shift) ||
11508              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11509             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11510             ht_bin[0]=ht_bin[2];
11511             ht_bin[1]=ht_bin[3];
11512             ht_bin[2]=ht_bin[3]=-1;
11513           }
11514         }
11515         break;
11516       case 3:
11517         // Clear jump_out
11518         #ifdef __arm__
11519         if((expirep&2047)==0) 
11520           do_clear_cache();
11521         #endif
11522         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11523         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11524         break;
11525     }
11526     expirep=(expirep+1)&65535;
11527   }
11528   return 0;
11529 }
11530
11531 // vim:shiftwidth=2:expandtab