1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2010 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24
25 #include "emu_if.h" //emulator interface
26
27 #include <sys/mman.h>
28
29 #ifdef __i386__
30 #include "assem_x86.h"
31 #endif
32 #ifdef __x86_64__
33 #include "assem_x64.h"
34 #endif
35 #ifdef __arm__
36 #include "assem_arm.h"
37 #endif
38
39 #define MAXBLOCK 4096
40 #define MAX_OUTPUT_BLOCK_SIZE 262144
41 #define CLOCK_DIVIDER 2
42
43 struct regstat
44 {
45   signed char regmap_entry[HOST_REGS];
46   signed char regmap[HOST_REGS];
47   uint64_t was32;
48   uint64_t is32;
49   uint64_t wasdirty;
50   uint64_t dirty;
51   uint64_t u;
52   uint64_t uu;
53   u_int wasconst;
54   u_int isconst;
55   uint64_t constmap[HOST_REGS];
56 };
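/* Note (added for clarity, not in the original source): regstat tracks the
 * register-allocation state at one instruction.  regmap[hr] holds the guest
 * register currently assigned to host register hr (-1 = free) and
 * regmap_entry[] appears to be the mapping expected on entry; is32/was32 are
 * bitmasks of guest registers known to hold 32-bit (sign-extendable) values,
 * dirty/wasdirty mark host registers holding values not yet written back,
 * u/uu appear to be per-guest-register "unneeded" masks for the lower/upper
 * halves, and isconst/constmap implement constant propagation (see
 * set_const()/get_const() below). */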
57
58 struct ll_entry
59 {
60   u_int vaddr;
61   u_int reg32;
62   void *addr;
63   struct ll_entry *next;
64 };
65
66   u_int start;
67   u_int *source;
68   u_int pagelimit;
69   char insn[MAXBLOCK][10];
70   u_char itype[MAXBLOCK];
71   u_char opcode[MAXBLOCK];
72   u_char opcode2[MAXBLOCK];
73   u_char bt[MAXBLOCK];
74   u_char rs1[MAXBLOCK];
75   u_char rs2[MAXBLOCK];
76   u_char rt1[MAXBLOCK];
77   u_char rt2[MAXBLOCK];
78   u_char us1[MAXBLOCK];
79   u_char us2[MAXBLOCK];
80   u_char dep1[MAXBLOCK];
81   u_char dep2[MAXBLOCK];
82   u_char lt1[MAXBLOCK];
83   int imm[MAXBLOCK];
84   u_int ba[MAXBLOCK];
85   char likely[MAXBLOCK];
86   char is_ds[MAXBLOCK];
87   uint64_t unneeded_reg[MAXBLOCK];
88   uint64_t unneeded_reg_upper[MAXBLOCK];
89   uint64_t branch_unneeded_reg[MAXBLOCK];
90   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91   uint64_t p32[MAXBLOCK];
92   uint64_t pr32[MAXBLOCK];
93   signed char regmap_pre[MAXBLOCK][HOST_REGS];
94   signed char regmap[MAXBLOCK][HOST_REGS];
95   signed char regmap_entry[MAXBLOCK][HOST_REGS];
96   uint64_t constmap[MAXBLOCK][HOST_REGS];
97   uint64_t known_value[HOST_REGS];
98   u_int known_reg;
99   struct regstat regs[MAXBLOCK];
100   struct regstat branch_regs[MAXBLOCK];
101   u_int needed_reg[MAXBLOCK];
102   uint64_t requires_32bit[MAXBLOCK];
103   u_int wont_dirty[MAXBLOCK];
104   u_int will_dirty[MAXBLOCK];
105   int ccadj[MAXBLOCK];
106   int slen;
107   u_int instr_addr[MAXBLOCK];
108   u_int link_addr[MAXBLOCK][3];
109   int linkcount;
110   u_int stubs[MAXBLOCK*3][8];
111   int stubcount;
112   u_int literals[1024][2];
113   int literalcount;
114   int is_delayslot;
115   int cop1_usable;
116   u_char *out;
117   struct ll_entry *jump_in[4096];
118   struct ll_entry *jump_out[4096];
119   struct ll_entry *jump_dirty[4096];
120   u_int hash_table[65536][4]  __attribute__((aligned(16)));
121   char shadow[1048576]  __attribute__((aligned(16)));
122   void *copy;
123   int expirep;
124   u_int using_tlb;
125   u_int stop_after_jal;
126   extern u_char restore_candidate[512];
127   extern int cycle_count;
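/* Overview (illustrative note, not part of the original source): compiled
 * blocks are tracked per 4K guest page.  jump_in[page] lists entry points
 * that are known clean, jump_dirty[page] lists entry points whose source may
 * have been overwritten and must pass verify_dirty() before reuse, and
 * jump_out[page] records direct links patched into other blocks so they can
 * be unlinked when the page is invalidated.  hash_table is a small two-entry
 * per-bin cache in front of these lists (see get_addr_ht() below). */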
128
129   /* registers that may be allocated */
130   /* 1-31 gpr */
131 #define HIREG 32 // hi
132 #define LOREG 33 // lo
133 #define FSREG 34 // FPU status (FCSR)
134 #define CSREG 35 // Coprocessor status
135 #define CCREG 36 // Cycle count
136 #define INVCP 37 // Pointer to invalid_code
137 #define TEMPREG 38
138 #define FTEMP 38 // FPU/LDL/LDR temporary register
139 #define PTEMP 39 // Prefetch temporary register
140 #define TLREG 40 // TLB mapping offset
141 #define RHASH 41 // Return address hash
142 #define RHTBL 42 // Return address hash table address
143 #define RTEMP 43 // JR/JALR address register
144 #define MAXREG 43
145 #define AGEN1 44 // Address generation temporary register
146 #define AGEN2 45 // Address generation temporary register
147 #define MGEN1 46 // Maptable address generation temporary register
148 #define MGEN2 47 // Maptable address generation temporary register
149 #define BTREG 48 // Branch target temporary register
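/* Note (added, not in the original source): values stored in the regmap
 * arrays use these numbers; 1-31 are MIPS GPRs and 32+ are internal
 * pseudo-registers.  Throughout the code a regmap value is masked with &63,
 * and bit 6 (reg|64) appears to denote the upper 32 bits of a 64-bit guest
 * register kept in a separate host register. */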
150
151   /* instruction types */
152 #define NOP 0     // No operation
153 #define LOAD 1    // Load
154 #define STORE 2   // Store
155 #define LOADLR 3  // Unaligned load
156 #define STORELR 4 // Unaligned store
157 #define MOV 5     // Move 
158 #define ALU 6     // Arithmetic/logic
159 #define MULTDIV 7 // Multiply/divide
160 #define SHIFT 8   // Shift by register
161 #define SHIFTIMM 9// Shift by immediate
162 #define IMM16 10  // 16-bit immediate
163 #define RJUMP 11  // Unconditional jump to register
164 #define UJUMP 12  // Unconditional jump
165 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166 #define SJUMP 14  // Conditional branch (regimm format)
167 #define COP0 15   // Coprocessor 0
168 #define COP1 16   // Coprocessor 1
169 #define C1LS 17   // Coprocessor 1 load/store
170 #define FJUMP 18  // Conditional branch (floating point)
171 #define FLOAT 19  // Floating point unit
172 #define FCONV 20  // Convert integer to float
173 #define FCOMP 21  // Floating point compare (sets FSREG)
174 #define SYSCALL 22// SYSCALL
175 #define OTHER 23  // Other
176 #define SPAN 24   // Branch/delay slot spans 2 pages
177 #define NI 25     // Not implemented
178 #define HLECALL 26// PCSX fake opcodes for HLE
179 #define COP2 27   // Coprocessor 2 move
180 #define C2LS 28   // Coprocessor 2 load/store
181 #define C2OP 29   // Coprocessor 2 operation
182
183   /* stubs */
184 #define CC_STUB 1
185 #define FP_STUB 2
186 #define LOADB_STUB 3
187 #define LOADH_STUB 4
188 #define LOADW_STUB 5
189 #define LOADD_STUB 6
190 #define LOADBU_STUB 7
191 #define LOADHU_STUB 8
192 #define STOREB_STUB 9
193 #define STOREH_STUB 10
194 #define STOREW_STUB 11
195 #define STORED_STUB 12
196 #define STORELR_STUB 13
197 #define INVCODE_STUB 14
198
199   /* branch codes */
200 #define TAKEN 1
201 #define NOTTAKEN 2
202 #define NULLDS 3
203
204 // asm linkage
205 int new_recompile_block(int addr);
206 void *get_addr_ht(u_int vaddr);
207 void invalidate_block(u_int block);
208 void invalidate_addr(u_int addr);
209 void remove_hash(int vaddr);
210 void jump_vaddr();
211 void dyna_linker();
212 void dyna_linker_ds();
213 void verify_code();
214 void verify_code_vm();
215 void verify_code_ds();
216 void cc_interrupt();
217 void fp_exception();
218 void fp_exception_ds();
219 void jump_syscall();
220 void jump_syscall_hle();
221 void jump_eret();
222 void jump_hlecall();
223 void new_dyna_leave();
224
225 // TLB
226 void TLBWI_new();
227 void TLBWR_new();
228 void read_nomem_new();
229 void read_nomemb_new();
230 void read_nomemh_new();
231 void read_nomemd_new();
232 void write_nomem_new();
233 void write_nomemb_new();
234 void write_nomemh_new();
235 void write_nomemd_new();
236 void write_rdram_new();
237 void write_rdramb_new();
238 void write_rdramh_new();
239 void write_rdramd_new();
240 extern u_int memory_map[1048576];
241
242 // Needed by assembler
243 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
244 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
245 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
246 void load_all_regs(signed char i_regmap[]);
247 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
248 void load_regs_entry(int t);
249 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
250
251 int tracedebug=0;
252
253 //#define DEBUG_CYCLE_COUNT 1
254
255 void nullf() {}
256 //#define assem_debug printf
257 //#define inv_debug printf
258 #define assem_debug nullf
259 #define inv_debug nullf
260
261 static void tlb_hacks()
262 {
263 #ifndef DISABLE_TLB
264   // Goldeneye hack
265   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
266   {
267     u_int addr;
268     int n;
269     switch (ROM_HEADER->Country_code&0xFF) 
270     {
271       case 0x45: // U
272         addr=0x34b30;
273         break;                   
274       case 0x4A: // J 
275         addr=0x34b70;    
276         break;    
277       case 0x50: // E 
278         addr=0x329f0;
279         break;                        
280       default: 
281         // Unknown country code
282         addr=0;
283         break;
284     }
285     u_int rom_addr=(u_int)rom;
286     #ifdef ROM_COPY
287     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
288     // in the lower 4G of memory to use this hack.  Copy it if necessary.
289     if((void *)rom>(void *)0xffffffff) {
290       munmap(ROM_COPY, 67108864);
291       if(mmap(ROM_COPY, 12582912,
292               PROT_READ | PROT_WRITE,
293               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
294               -1, 0) <= 0) {printf("mmap() failed\n");}
295       memcpy(ROM_COPY,rom,12582912);
296       rom_addr=(u_int)ROM_COPY;
297     }
298     #endif
299     if(addr) {
300       for(n=0x7F000;n<0x80000;n++) {
301         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
302       }
303     }
304   }
305 #endif
306 }
307
308 static u_int get_page(u_int vaddr)
309 {
310   u_int page=(vaddr^0x80000000)>>12;
311 #ifndef DISABLE_TLB
312   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
313 #endif
314   if(page>2048) page=2048+(page&2047);
315   return page;
316 }
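/* Worked example (illustration only, not in the original source): for a
 * KSEG0 address such as 0x80123000, get_page() returns
 * (0x80123000^0x80000000)>>12 = 0x123.  Pages beyond 2048 (e.g. TLB-mapped
 * or non-RAM addresses) are folded into the range 2048-4095 so the
 * 4096-entry jump_in[]/jump_out[] tables cover every case. */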
317
318 static u_int get_vpage(u_int vaddr)
319 {
320   u_int vpage=(vaddr^0x80000000)>>12;
321 #ifndef DISABLE_TLB
322   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
323 #endif
324   if(vpage>2048) vpage=2048+(vpage&2047);
325   return vpage;
326 }
327
328 // Get address from virtual address
329 // This is called from the recompiled JR/JALR instructions
330 void *get_addr(u_int vaddr)
331 {
332   u_int page=get_page(vaddr);
333   u_int vpage=get_vpage(vaddr);
334   struct ll_entry *head;
335   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
336   head=jump_in[page];
337   while(head!=NULL) {
338     if(head->vaddr==vaddr&&head->reg32==0) {
339   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
340       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
341       ht_bin[3]=ht_bin[1];
342       ht_bin[2]=ht_bin[0];
343       ht_bin[1]=(int)head->addr;
344       ht_bin[0]=vaddr;
345       return head->addr;
346     }
347     head=head->next;
348   }
349   head=jump_dirty[vpage];
350   while(head!=NULL) {
351     if(head->vaddr==vaddr&&head->reg32==0) {
352       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
353       // Don't restore blocks which are about to expire from the cache
354       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
355       if(verify_dirty(head->addr)) {
356         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
357         invalid_code[vaddr>>12]=0;
358         memory_map[vaddr>>12]|=0x40000000;
359         if(vpage<2048) {
360 #ifndef DISABLE_TLB
361           if(tlb_LUT_r[vaddr>>12]) {
362             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
363             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
364           }
365 #endif
366           restore_candidate[vpage>>3]|=1<<(vpage&7);
367         }
368         else restore_candidate[page>>3]|=1<<(page&7);
369         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
370         if(ht_bin[0]==vaddr) {
371           ht_bin[1]=(int)head->addr; // Replace existing entry
372         }
373         else
374         {
375           ht_bin[3]=ht_bin[1];
376           ht_bin[2]=ht_bin[0];
377           ht_bin[1]=(int)head->addr;
378           ht_bin[0]=vaddr;
379         }
380         return head->addr;
381       }
382     }
383     head=head->next;
384   }
385   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
386   int r=new_recompile_block(vaddr);
387   if(r==0) return get_addr(vaddr);
388   // Execute in unmapped page, generate pagefault exception
389   Status|=2;
390   Cause=(vaddr<<31)|0x8;
391   EPC=(vaddr&1)?vaddr-5:vaddr;
392   BadVAddr=(vaddr&~1);
393   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
394   EntryHi=BadVAddr&0xFFFFE000;
395   return get_addr_ht(0x80000000);
396 }
397 // Look up address in hash table first
398 void *get_addr_ht(u_int vaddr)
399 {
400   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
401   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
402   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
403   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
404   return get_addr(vaddr);
405 }
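/* Illustration (added, not in the original source): the hash index
 *
 *   ((vaddr>>16)^vaddr)&0xFFFF    // e.g. 0x8003a1f0 -> 0x8003^0xa1f0 = 0x21f3
 *
 * selects one of 65536 bins, each caching two (vaddr, host address) pairs in
 * ht_bin[0..3].  get_addr() inserts new translations at slot 0 and demotes
 * the previous slot-0 pair to slot 1, so the hash table behaves like a tiny
 * two-entry most-recently-used cache in front of the jump_in lists. */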
406
407 void *get_addr_32(u_int vaddr,u_int flags)
408 {
409 #ifdef FORCE32
410   return get_addr(vaddr);
411 #endif
412   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
413   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
414   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
415   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
416   u_int page=get_page(vaddr);
417   u_int vpage=get_vpage(vaddr);
418   struct ll_entry *head;
419   head=jump_in[page];
420   while(head!=NULL) {
421     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
422       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
423       if(head->reg32==0) {
424         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
425         if(ht_bin[0]==-1) {
426           ht_bin[1]=(int)head->addr;
427           ht_bin[0]=vaddr;
428         }else if(ht_bin[2]==-1) {
429           ht_bin[3]=(int)head->addr;
430           ht_bin[2]=vaddr;
431         }
432         //ht_bin[3]=ht_bin[1];
433         //ht_bin[2]=ht_bin[0];
434         //ht_bin[1]=(int)head->addr;
435         //ht_bin[0]=vaddr;
436       }
437       return head->addr;
438     }
439     head=head->next;
440   }
441   head=jump_dirty[vpage];
442   while(head!=NULL) {
443     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
444       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
445       // Don't restore blocks which are about to expire from the cache
446       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
447       if(verify_dirty(head->addr)) {
448         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
449         invalid_code[vaddr>>12]=0;
450         memory_map[vaddr>>12]|=0x40000000;
451         if(vpage<2048) {
452 #ifndef DISABLE_TLB
453           if(tlb_LUT_r[vaddr>>12]) {
454             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
455             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
456           }
457 #endif
458           restore_candidate[vpage>>3]|=1<<(vpage&7);
459         }
460         else restore_candidate[page>>3]|=1<<(page&7);
461         if(head->reg32==0) {
462           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
463           if(ht_bin[0]==-1) {
464             ht_bin[1]=(int)head->addr;
465             ht_bin[0]=vaddr;
466           }else if(ht_bin[2]==-1) {
467             ht_bin[3]=(int)head->addr;
468             ht_bin[2]=vaddr;
469           }
470           //ht_bin[3]=ht_bin[1];
471           //ht_bin[2]=ht_bin[0];
472           //ht_bin[1]=(int)head->addr;
473           //ht_bin[0]=vaddr;
474         }
475         return head->addr;
476       }
477     }
478     head=head->next;
479   }
480   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
481   int r=new_recompile_block(vaddr);
482   if(r==0) return get_addr(vaddr);
483   // Execute in unmapped page, generate pagefault exception
484   Status|=2;
485   Cause=(vaddr<<31)|0x8;
486   EPC=(vaddr&1)?vaddr-5:vaddr;
487   BadVAddr=(vaddr&~1);
488   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
489   EntryHi=BadVAddr&0xFFFFE000;
490   return get_addr_ht(0x80000000);
491 }
492
493 void clear_all_regs(signed char regmap[])
494 {
495   int hr;
496   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
497 }
498
499 signed char get_reg(signed char regmap[],int r)
500 {
501   int hr;
502   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
503   return -1;
504 }
505
506 // Find a register that is available for two consecutive cycles
507 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
508 {
509   int hr;
510   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
511   return -1;
512 }
513
514 int count_free_regs(signed char regmap[])
515 {
516   int count=0;
517   int hr;
518   for(hr=0;hr<HOST_REGS;hr++)
519   {
520     if(hr!=EXCLUDE_REG) {
521       if(regmap[hr]<0) count++;
522     }
523   }
524   return count;
525 }
526
527 void dirty_reg(struct regstat *cur,signed char reg)
528 {
529   int hr;
530   if(!reg) return;
531   for (hr=0;hr<HOST_REGS;hr++) {
532     if((cur->regmap[hr]&63)==reg) {
533       cur->dirty|=1<<hr;
534     }
535   }
536 }
537
538 // If we dirty the lower half of a 64 bit register which is now being
539 // sign-extended, we need to dump the upper half.
540 // Note: Do this only after completion of the instruction, because
541 // some instructions may need to read the full 64-bit value even if
542 // overwriting it (eg SLTI, DSRA32).
543 static void flush_dirty_uppers(struct regstat *cur)
544 {
545   int hr,reg;
546   for (hr=0;hr<HOST_REGS;hr++) {
547     if((cur->dirty>>hr)&1) {
548       reg=cur->regmap[hr];
549       if(reg>=64) 
550         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
551     }
552   }
553 }
554
555 void set_const(struct regstat *cur,signed char reg,uint64_t value)
556 {
557   int hr;
558   if(!reg) return;
559   for (hr=0;hr<HOST_REGS;hr++) {
560     if(cur->regmap[hr]==reg) {
561       cur->isconst|=1<<hr;
562       cur->constmap[hr]=value;
563     }
564     else if((cur->regmap[hr]^64)==reg) {
565       cur->isconst|=1<<hr;
566       cur->constmap[hr]=value>>32;
567     }
568   }
569 }
570
571 void clear_const(struct regstat *cur,signed char reg)
572 {
573   int hr;
574   if(!reg) return;
575   for (hr=0;hr<HOST_REGS;hr++) {
576     if((cur->regmap[hr]&63)==reg) {
577       cur->isconst&=~(1<<hr);
578     }
579   }
580 }
581
582 int is_const(struct regstat *cur,signed char reg)
583 {
584   int hr;
585   if(!reg) return 1;
586   for (hr=0;hr<HOST_REGS;hr++) {
587     if((cur->regmap[hr]&63)==reg) {
588       return (cur->isconst>>hr)&1;
589     }
590   }
591   return 0;
592 }
593 uint64_t get_const(struct regstat *cur,signed char reg)
594 {
595   int hr;
596   if(!reg) return 0;
597   for (hr=0;hr<HOST_REGS;hr++) {
598     if(cur->regmap[hr]==reg) {
599       return cur->constmap[hr];
600     }
601   }
602   printf("Unknown constant in r%d\n",reg);
603   exit(1);
604 }
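/* Example of the constant-propagation helpers above (illustrative only):
 * when a block contains e.g. LUI t0,0x1f80 / ORI t0,t0,0x1010, imm16_alloc()
 * calls set_const() for the LUI result and then set_const(v|imm) for the ORI,
 * so later instructions can query is_const()/get_const() and fold the
 * constant address instead of recomputing it at run time. */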
605
606 // Least soon needed registers
607 // Look at the next ten instructions and see which registers
608 // will be used.  Try not to reallocate these.
609 void lsn(u_char hsn[], int i, int *preferred_reg)
610 {
611   int j;
612   int b=-1;
613   for(j=0;j<9;j++)
614   {
615     if(i+j>=slen) {
616       j=slen-i-1;
617       break;
618     }
619     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
620     {
621       // Don't go past an unconditional jump
622       j++;
623       break;
624     }
625   }
626   for(;j>=0;j--)
627   {
628     if(rs1[i+j]) hsn[rs1[i+j]]=j;
629     if(rs2[i+j]) hsn[rs2[i+j]]=j;
630     if(rt1[i+j]) hsn[rt1[i+j]]=j;
631     if(rt2[i+j]) hsn[rt2[i+j]]=j;
632     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
633       // Stores can allocate zero
634       hsn[rs1[i+j]]=j;
635       hsn[rs2[i+j]]=j;
636     }
637     // On some architectures stores need invc_ptr
638     #if defined(HOST_IMM8)
639     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
640       hsn[INVCP]=j;
641     }
642     #endif
643     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
644     {
645       hsn[CCREG]=j;
646       b=j;
647     }
648   }
649   if(b>=0)
650   {
651     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
652     {
653       // Follow first branch
654       int t=(ba[i+b]-start)>>2;
655       j=7-b;if(t+j>=slen) j=slen-t-1;
656       for(;j>=0;j--)
657       {
658         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
659         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
660         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
661         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
662       }
663     }
664     // TODO: preferred register based on backward branch
665   }
666   // Delay slot should preferably not overwrite branch conditions or cycle count
667   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
668     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
669     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
670     hsn[CCREG]=1;
671     // ...or hash tables
672     hsn[RHASH]=1;
673     hsn[RHTBL]=1;
674   }
675   // Coprocessor load/store needs FTEMP, even if not declared
676   if(itype[i]==C1LS||itype[i]==C2LS) {
677     hsn[FTEMP]=0;
678   }
679   // Load L/R also uses FTEMP as a temporary register
680   if(itype[i]==LOADLR) {
681     hsn[FTEMP]=0;
682   }
683   // Also SWL/SWR/SDL/SDR
684   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
685     hsn[FTEMP]=0;
686   }
687   // Don't remove the TLB registers either
688   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
689     hsn[TLREG]=0;
690   }
691   // Don't remove the miniht registers
692   if(itype[i]==UJUMP||itype[i]==RJUMP)
693   {
694     hsn[RHASH]=0;
695     hsn[RHTBL]=0;
696   }
697 }
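/* Note (added for clarity): hsn[] reads as "how soon needed" - after lsn()
 * runs, hsn[r] is the distance in instructions to the nearest upcoming use of
 * guest register r (0 = used by the current instruction), with 10 meaning
 * "not needed within the lookahead window"; see needed_again() below, which
 * initializes the array to 10. */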
698
699 // We only want to allocate registers if we're going to use them again soon
700 int needed_again(int r, int i)
701 {
702   int j;
703   int b=-1;
704   int rn=10;
705   int hr;
706   u_char hsn[MAXREG+1];
707   int preferred_reg;
708   
709   memset(hsn,10,sizeof(hsn));
710   lsn(hsn,i,&preferred_reg);
711   
712   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
713   {
714     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
715       return 0; // Don't need any registers if exiting the block
716   }
717   for(j=0;j<9;j++)
718   {
719     if(i+j>=slen) {
720       j=slen-i-1;
721       break;
722     }
723     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
724     {
725       // Don't go past an unconditional jump
726       j++;
727       break;
728     }
729     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||((source[i+j]&0xfc00003f)==0x0d))
730     {
731       break;
732     }
733   }
734   for(;j>=1;j--)
735   {
736     if(rs1[i+j]==r) rn=j;
737     if(rs2[i+j]==r) rn=j;
738     if((unneeded_reg[i+j]>>r)&1) rn=10;
739     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
740     {
741       b=j;
742     }
743   }
744   /*
745   if(b>=0)
746   {
747     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
748     {
749       // Follow first branch
750       int o=rn;
751       int t=(ba[i+b]-start)>>2;
752       j=7-b;if(t+j>=slen) j=slen-t-1;
753       for(;j>=0;j--)
754       {
755         if(!((unneeded_reg[t+j]>>r)&1)) {
756           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
757           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
758         }
759         else rn=o;
760       }
761     }
762   }*/
763   for(hr=0;hr<HOST_REGS;hr++) {
764     if(hr!=EXCLUDE_REG) {
765       if(rn<hsn[hr]) return 1;
766     }
767   }
768   return 0;
769 }
770
771 // Try to match register allocations at the end of a loop with those
772 // at the beginning
773 int loop_reg(int i, int r, int hr)
774 {
775   int j,k;
776   for(j=0;j<9;j++)
777   {
778     if(i+j>=slen) {
779       j=slen-i-1;
780       break;
781     }
782     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
783     {
784       // Don't go past an unconditional jump
785       j++;
786       break;
787     }
788   }
789   k=0;
790   if(i>0){
791     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
792       k--;
793   }
794   for(;k<j;k++)
795   {
796     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
797     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
798     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
799     {
800       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
801       {
802         int t=(ba[i+k]-start)>>2;
803         int reg=get_reg(regs[t].regmap_entry,r);
804         if(reg>=0) return reg;
805         //reg=get_reg(regs[t+1].regmap_entry,r);
806         //if(reg>=0) return reg;
807       }
808     }
809   }
810   return hr;
811 }
812
813
814 // Allocate every register, preserving source/target regs
815 void alloc_all(struct regstat *cur,int i)
816 {
817   int hr;
818   
819   for(hr=0;hr<HOST_REGS;hr++) {
820     if(hr!=EXCLUDE_REG) {
821       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
822          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
823       {
824         cur->regmap[hr]=-1;
825         cur->dirty&=~(1<<hr);
826       }
827       // Don't need zeros
828       if((cur->regmap[hr]&63)==0)
829       {
830         cur->regmap[hr]=-1;
831         cur->dirty&=~(1<<hr);
832       }
833     }
834   }
835 }
836
837
838 void div64(int64_t dividend,int64_t divisor)
839 {
840   lo=dividend/divisor;
841   hi=dividend%divisor;
842   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
843   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
844 }
845 void divu64(uint64_t dividend,uint64_t divisor)
846 {
847   lo=dividend/divisor;
848   hi=dividend%divisor;
849   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
850   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
851 }
852
853 void mult64(uint64_t m1,uint64_t m2)
854 {
855    unsigned long long int op1, op2, op3, op4;
856    unsigned long long int result1, result2, result3, result4;
857    unsigned long long int temp1, temp2, temp3, temp4;
858    int sign = 0;
859    
860    if ((int64_t)m1 < 0)  // parameters are unsigned; compare as signed
861      {
862     op2 = -m1;
863     sign = 1 - sign;
864      }
865    else op2 = m1;
866    if ((int64_t)m2 < 0)
867      {
868     op4 = -m2;
869     sign = 1 - sign;
870      }
871    else op4 = m2;
872    
873    op1 = op2 & 0xFFFFFFFF;
874    op2 = (op2 >> 32) & 0xFFFFFFFF;
875    op3 = op4 & 0xFFFFFFFF;
876    op4 = (op4 >> 32) & 0xFFFFFFFF;
877    
878    temp1 = op1 * op3;
879    temp2 = (temp1 >> 32) + op1 * op4;
880    temp3 = op2 * op3;
881    temp4 = (temp3 >> 32) + op2 * op4;
882    
883    result1 = temp1 & 0xFFFFFFFF;
884    result2 = temp2 + (temp3 & 0xFFFFFFFF);
885    result3 = (result2 >> 32) + temp4;
886    result4 = (result3 >> 32);
887    
888    lo = result1 | (result2 << 32);
889    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
890    if (sign)
891      {
892     hi = ~hi;
893     if (!lo) hi++;
894     else lo = ~lo + 1;
895      }
896 }
897
898 void multu64(uint64_t m1,uint64_t m2)
899 {
900    unsigned long long int op1, op2, op3, op4;
901    unsigned long long int result1, result2, result3, result4;
902    unsigned long long int temp1, temp2, temp3, temp4;
903    
904    op1 = m1 & 0xFFFFFFFF;
905    op2 = (m1 >> 32) & 0xFFFFFFFF;
906    op3 = m2 & 0xFFFFFFFF;
907    op4 = (m2 >> 32) & 0xFFFFFFFF;
908    
909    temp1 = op1 * op3;
910    temp2 = (temp1 >> 32) + op1 * op4;
911    temp3 = op2 * op3;
912    temp4 = (temp3 >> 32) + op2 * op4;
913    
914    result1 = temp1 & 0xFFFFFFFF;
915    result2 = temp2 + (temp3 & 0xFFFFFFFF);
916    result3 = (result2 >> 32) + temp4;
917    result4 = (result3 >> 32);
918    
919    lo = result1 | (result2 << 32);
920    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
921    
922   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
923   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
924 }
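/* The two helpers above compute a full 64x64->128 bit product from 32-bit
 * partial products (schoolbook method).  Writing m1 = a1*2^32 + a0 and
 * m2 = b1*2^32 + b0:
 *
 *   m1*m2 = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
 *
 * temp1..temp4 and result1..result4 accumulate these terms with their
 * carries; lo receives the low 64 bits and hi the high 64 bits.  (Comment
 * added for clarity, not in the original source.) */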
925
926 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
927 {
928   if(bits) {
929     original<<=64-bits;
930     original>>=64-bits;
931     loaded<<=bits;
932     original|=loaded;
933   }
934   else original=loaded;
935   return original;
936 }
937 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
938 {
939   if(bits^56) {
940     original>>=64-(bits^56);
941     original<<=64-(bits^56);
942     loaded>>=bits^56;
943     original|=loaded;
944   }
945   else original=loaded;
946   return original;
947 }
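/* Example (illustration only): ldl_merge(original, loaded, 16) keeps the low
 * 16 bits of `original` and ORs in `loaded`<<16 for the upper 48 bits, i.e.
 * the partially loaded doubleword is merged with the bytes that the unaligned
 * LDL must leave untouched; ldr_merge() performs the mirror-image merge for
 * LDR. */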
948
949 #ifdef __i386__
950 #include "assem_x86.c"
951 #endif
952 #ifdef __x86_64__
953 #include "assem_x64.c"
954 #endif
955 #ifdef __arm__
956 #include "assem_arm.c"
957 #endif
958
959 // Add virtual address mapping to linked list
960 void ll_add(struct ll_entry **head,int vaddr,void *addr)
961 {
962   struct ll_entry *new_entry;
963   new_entry=malloc(sizeof(struct ll_entry));
964   assert(new_entry!=NULL);
965   new_entry->vaddr=vaddr;
966   new_entry->reg32=0;
967   new_entry->addr=addr;
968   new_entry->next=*head;
969   *head=new_entry;
970 }
971
972 // Add virtual address mapping for 32-bit compiled block
973 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
974 {
975   ll_add(head,vaddr,addr);
976 #ifndef FORCE32
977   (*head)->reg32=reg32;
978 #endif
979 }
980
981 // Check if an address is already compiled
982 // but don't return addresses which are about to expire from the cache
983 void *check_addr(u_int vaddr)
984 {
985   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
986   if(ht_bin[0]==vaddr) {
987     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
988       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
989   }
990   if(ht_bin[2]==vaddr) {
991     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
992       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
993   }
994   u_int page=get_page(vaddr);
995   struct ll_entry *head;
996   head=jump_in[page];
997   while(head!=NULL) {
998     if(head->vaddr==vaddr&&head->reg32==0) {
999       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1000         // Update existing entry with current address
1001         if(ht_bin[0]==vaddr) {
1002           ht_bin[1]=(int)head->addr;
1003           return head->addr;
1004         }
1005         if(ht_bin[2]==vaddr) {
1006           ht_bin[3]=(int)head->addr;
1007           return head->addr;
1008         }
1009         // Insert into hash table with low priority.
1010         // Don't evict existing entries, as they are probably
1011         // addresses that are being accessed frequently.
1012         if(ht_bin[0]==-1) {
1013           ht_bin[1]=(int)head->addr;
1014           ht_bin[0]=vaddr;
1015         }else if(ht_bin[2]==-1) {
1016           ht_bin[3]=(int)head->addr;
1017           ht_bin[2]=vaddr;
1018         }
1019         return head->addr;
1020       }
1021     }
1022     head=head->next;
1023   }
1024   return 0;
1025 }
1026
1027 void remove_hash(int vaddr)
1028 {
1029   //printf("remove hash: %x\n",vaddr);
1030   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1031   if(ht_bin[2]==vaddr) {
1032     ht_bin[2]=ht_bin[3]=-1;
1033   }
1034   if(ht_bin[0]==vaddr) {
1035     ht_bin[0]=ht_bin[2];
1036     ht_bin[1]=ht_bin[3];
1037     ht_bin[2]=ht_bin[3]=-1;
1038   }
1039 }
1040
1041 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1042 {
1043   struct ll_entry *next;
1044   while(*head) {
1045     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1046        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1047     {
1048       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1049       remove_hash((*head)->vaddr);
1050       next=(*head)->next;
1051       free(*head);
1052       *head=next;
1053     }
1054     else
1055     {
1056       head=&((*head)->next);
1057     }
1058   }
1059 }
1060
1061 // Remove all entries from linked list
1062 void ll_clear(struct ll_entry **head)
1063 {
1064   struct ll_entry *cur;
1065   struct ll_entry *next;
1066   if(cur=*head) {
1067     *head=0;
1068     while(cur) {
1069       next=cur->next;
1070       free(cur);
1071       cur=next;
1072     }
1073   }
1074 }
1075
1076 // Dereference the pointers and remove them if they match
1077 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1078 {
1079   u_int old_host_addr=0;
1080   while(head) {
1081     int ptr=get_pointer(head->addr);
1082     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1083     if(((ptr>>shift)==(addr>>shift)) ||
1084        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1085     {
1086       printf("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1087       u_int host_addr=(u_int)kill_pointer(head->addr);
1088
1089       if((host_addr>>12)!=(old_host_addr>>12)) {
1090         #ifdef __arm__
1091         __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1092         #endif
1093         old_host_addr=host_addr;
1094       }
1095     }
1096     head=head->next;
1097   }
1098   #ifdef __arm__
1099   if (old_host_addr)
1100     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1101   #endif
1102 }
1103
1104 // This is called when we write to a compiled block (see do_invstub)
1105 void invalidate_page(u_int page)
1106 {
1107   struct ll_entry *head;
1108   struct ll_entry *next;
1109   u_int old_host_addr=0;
1110   head=jump_in[page];
1111   jump_in[page]=0;
1112   while(head!=NULL) {
1113     inv_debug("INVALIDATE: %x\n",head->vaddr);
1114     remove_hash(head->vaddr);
1115     next=head->next;
1116     free(head);
1117     head=next;
1118   }
1119   head=jump_out[page];
1120   jump_out[page]=0;
1121   while(head!=NULL) {
1122     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1123     u_int host_addr=(u_int)kill_pointer(head->addr);
1124
1125     if((host_addr>>12)!=(old_host_addr>>12)) {
1126       #ifdef __arm__
1127       __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1128       #endif
1129       old_host_addr=host_addr;
1130     }
1131     next=head->next;
1132     free(head);
1133     head=next;
1134   }
1135   #ifdef __arm__
1136   if (old_host_addr)
1137     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1138   #endif
1139 }
1140 void invalidate_block(u_int block)
1141 {
1142   u_int page=get_page(block<<12);
1143   u_int vpage=get_vpage(block<<12);
1144   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1145   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1146   u_int first,last;
1147   first=last=page;
1148   struct ll_entry *head;
1149   head=jump_dirty[vpage];
1150   //printf("page=%d vpage=%d\n",page,vpage);
1151   while(head!=NULL) {
1152     u_int start,end;
1153     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1154       get_bounds((int)head->addr,&start,&end);
1155       //printf("start: %x end: %x\n",start,end);
1156       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1157         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1158           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1159           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1160         }
1161       }
1162 #ifndef DISABLE_TLB
1163       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1164         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1165           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1166           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1167         }
1168       }
1169 #endif
1170     }
1171     head=head->next;
1172   }
1173   //printf("first=%d last=%d\n",first,last);
1174   invalidate_page(page);
1175   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1176   assert(last<page+5);
1177   // Invalidate the adjacent pages if a block crosses a 4K boundary
1178   while(first<page) {
1179     invalidate_page(first);
1180     first++;
1181   }
1182   for(first=page+1;first<last;first++) {
1183     invalidate_page(first);
1184   }
1185   
1186   // Don't trap writes
1187   invalid_code[block]=1;
1188 #ifndef DISABLE_TLB
1189   // If there is a valid TLB entry for this page, remove write protect
1190   if(tlb_LUT_w[block]) {
1191     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1192     // CHECK: Is this right?
1193     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1194     u_int real_block=tlb_LUT_w[block]>>12;
1195     invalid_code[real_block]=1;
1196     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1197   }
1198   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1199 #endif
1200
1201   #ifdef USE_MINI_HT
1202   memset(mini_ht,-1,sizeof(mini_ht));
1203   #endif
1204 }
1205 void invalidate_addr(u_int addr)
1206 {
1207   invalidate_block(addr>>12);
1208 }
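/* Worked example (added for clarity): a write hitting compiled code at
 * 0x80123456 reaches invalidate_addr(), which invalidates block 0x80123;
 * invalidate_block() then walks jump_dirty[] to find how far the affected
 * blocks extend, calls invalidate_page() for every 4K page they touch (which
 * frees the jump_in entries and unlinks the jump_out pointers), and finally
 * sets invalid_code[0x80123]=1 so further writes to that page are not
 * trapped. */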
1209 void invalidate_all_pages()
1210 {
1211   u_int page,n;
1212   for(page=0;page<4096;page++)
1213     invalidate_page(page);
1214   for(page=0;page<1048576;page++)
1215     if(!invalid_code[page]) {
1216       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1217       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1218     }
1219   #ifdef __arm__
1220   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1221   #endif
1222   #ifdef USE_MINI_HT
1223   memset(mini_ht,-1,sizeof(mini_ht));
1224   #endif
1225   #ifndef DISABLE_TLB
1226   // TLB
1227   for(page=0;page<0x100000;page++) {
1228     if(tlb_LUT_r[page]) {
1229       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1230       if(!tlb_LUT_w[page]||!invalid_code[page])
1231         memory_map[page]|=0x40000000; // Write protect
1232     }
1233     else memory_map[page]=-1;
1234     if(page==0x80000) page=0xC0000;
1235   }
1236   tlb_hacks();
1237   #endif
1238 }
1239
1240 // Add an entry to jump_out after making a link
1241 void add_link(u_int vaddr,void *src)
1242 {
1243   u_int page=get_page(vaddr);
1244   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1245   ll_add(jump_out+page,vaddr,src);
1246   //int ptr=get_pointer(src);
1247   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1248 }
1249
1250 // If a code block was found to be unmodified (bit was set in
1251 // restore_candidate) and it remains unmodified (bit is clear
1252 // in invalid_code) then move the entries for that 4K page from
1253 // the dirty list to the clean list.
1254 void clean_blocks(u_int page)
1255 {
1256   struct ll_entry *head;
1257   inv_debug("INV: clean_blocks page=%d\n",page);
1258   head=jump_dirty[page];
1259   while(head!=NULL) {
1260     if(!invalid_code[head->vaddr>>12]) {
1261       // Don't restore blocks which are about to expire from the cache
1262       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1263         u_int start,end;
1264         if(verify_dirty((int)head->addr)) {
1265           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1266           u_int i;
1267           u_int inv=0;
1268           get_bounds((int)head->addr,&start,&end);
1269           if(start-(u_int)rdram<RAM_SIZE) {
1270             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1271               inv|=invalid_code[i];
1272             }
1273           }
1274           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1275             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1276             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1277             if(addr<start||addr>=end) inv=1;
1278           }
1279           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1280             inv=1;
1281           }
1282           if(!inv) {
1283             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1284             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1285               u_int ppage=page;
1286 #ifndef DISABLE_TLB
1287               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1288 #endif
1289               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1290               //printf("page=%x, addr=%x\n",page,head->vaddr);
1291               //assert(head->vaddr>>12==(page|0x80000));
1292               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1293               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1294               if(!head->reg32) {
1295                 if(ht_bin[0]==head->vaddr) {
1296                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1297                 }
1298                 if(ht_bin[2]==head->vaddr) {
1299                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1300                 }
1301               }
1302             }
1303           }
1304         }
1305       }
1306     }
1307     head=head->next;
1308   }
1309 }
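/* Note (added, not in the original source): this appears to be the consumer
 * of the restore_candidate bitmap set in get_addr()/invalidate_all_pages().
 * A dirty block whose source still verifies clean is re-registered in
 * jump_in and the hash table at its "clean" entry point, so subsequent
 * lookups skip the verify_dirty() check. */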
1310
1311
1312 void mov_alloc(struct regstat *current,int i)
1313 {
1314   // Note: Don't need to actually alloc the source registers
1315   if((~current->is32>>rs1[i])&1) {
1316     //alloc_reg64(current,i,rs1[i]);
1317     alloc_reg64(current,i,rt1[i]);
1318     current->is32&=~(1LL<<rt1[i]);
1319   } else {
1320     //alloc_reg(current,i,rs1[i]);
1321     alloc_reg(current,i,rt1[i]);
1322     current->is32|=(1LL<<rt1[i]);
1323   }
1324   clear_const(current,rs1[i]);
1325   clear_const(current,rt1[i]);
1326   dirty_reg(current,rt1[i]);
1327 }
1328
1329 void shiftimm_alloc(struct regstat *current,int i)
1330 {
1331   clear_const(current,rs1[i]);
1332   clear_const(current,rt1[i]);
1333   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1334   {
1335     if(rt1[i]) {
1336       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1337       else lt1[i]=rs1[i];
1338       alloc_reg(current,i,rt1[i]);
1339       current->is32|=1LL<<rt1[i];
1340       dirty_reg(current,rt1[i]);
1341     }
1342   }
1343   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1344   {
1345     if(rt1[i]) {
1346       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1347       alloc_reg64(current,i,rt1[i]);
1348       current->is32&=~(1LL<<rt1[i]);
1349       dirty_reg(current,rt1[i]);
1350     }
1351   }
1352   if(opcode2[i]==0x3c) // DSLL32
1353   {
1354     if(rt1[i]) {
1355       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1356       alloc_reg64(current,i,rt1[i]);
1357       current->is32&=~(1LL<<rt1[i]);
1358       dirty_reg(current,rt1[i]);
1359     }
1360   }
1361   if(opcode2[i]==0x3e) // DSRL32
1362   {
1363     if(rt1[i]) {
1364       alloc_reg64(current,i,rs1[i]);
1365       if(imm[i]==32) {
1366         alloc_reg64(current,i,rt1[i]);
1367         current->is32&=~(1LL<<rt1[i]);
1368       } else {
1369         alloc_reg(current,i,rt1[i]);
1370         current->is32|=1LL<<rt1[i];
1371       }
1372       dirty_reg(current,rt1[i]);
1373     }
1374   }
1375   if(opcode2[i]==0x3f) // DSRA32
1376   {
1377     if(rt1[i]) {
1378       alloc_reg64(current,i,rs1[i]);
1379       alloc_reg(current,i,rt1[i]);
1380       current->is32|=1LL<<rt1[i];
1381       dirty_reg(current,rt1[i]);
1382     }
1383   }
1384 }
1385
1386 void shift_alloc(struct regstat *current,int i)
1387 {
1388   if(rt1[i]) {
1389     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1390     {
1391       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1392       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1393       alloc_reg(current,i,rt1[i]);
1394       if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1395       current->is32|=1LL<<rt1[i];
1396     } else { // DSLLV/DSRLV/DSRAV
1397       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1398       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1399       alloc_reg64(current,i,rt1[i]);
1400       current->is32&=~(1LL<<rt1[i]);
1401       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1402         alloc_reg_temp(current,i,-1);
1403     }
1404     clear_const(current,rs1[i]);
1405     clear_const(current,rs2[i]);
1406     clear_const(current,rt1[i]);
1407     dirty_reg(current,rt1[i]);
1408   }
1409 }
1410
1411 void alu_alloc(struct regstat *current,int i)
1412 {
1413   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1414     if(rt1[i]) {
1415       if(rs1[i]&&rs2[i]) {
1416         alloc_reg(current,i,rs1[i]);
1417         alloc_reg(current,i,rs2[i]);
1418       }
1419       else {
1420         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1421         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1422       }
1423       alloc_reg(current,i,rt1[i]);
1424     }
1425     current->is32|=1LL<<rt1[i];
1426   }
1427   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1428     if(rt1[i]) {
1429       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1430       {
1431         alloc_reg64(current,i,rs1[i]);
1432         alloc_reg64(current,i,rs2[i]);
1433         alloc_reg(current,i,rt1[i]);
1434       } else {
1435         alloc_reg(current,i,rs1[i]);
1436         alloc_reg(current,i,rs2[i]);
1437         alloc_reg(current,i,rt1[i]);
1438       }
1439     }
1440     current->is32|=1LL<<rt1[i];
1441   }
1442   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1443     if(rt1[i]) {
1444       if(rs1[i]&&rs2[i]) {
1445         alloc_reg(current,i,rs1[i]);
1446         alloc_reg(current,i,rs2[i]);
1447       }
1448       else
1449       {
1450         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1451         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1452       }
1453       alloc_reg(current,i,rt1[i]);
1454       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1455       {
1456         if(!((current->uu>>rt1[i])&1)) {
1457           alloc_reg64(current,i,rt1[i]);
1458         }
1459         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1460           if(rs1[i]&&rs2[i]) {
1461             alloc_reg64(current,i,rs1[i]);
1462             alloc_reg64(current,i,rs2[i]);
1463           }
1464           else
1465           {
1466             // Is it really worth it to keep 64-bit values in registers?
1467             #ifdef NATIVE_64BIT
1468             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1469             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1470             #endif
1471           }
1472         }
1473         current->is32&=~(1LL<<rt1[i]);
1474       } else {
1475         current->is32|=1LL<<rt1[i];
1476       }
1477     }
1478   }
1479   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1480     if(rt1[i]) {
1481       if(rs1[i]&&rs2[i]) {
1482         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1483           alloc_reg64(current,i,rs1[i]);
1484           alloc_reg64(current,i,rs2[i]);
1485           alloc_reg64(current,i,rt1[i]);
1486         } else {
1487           alloc_reg(current,i,rs1[i]);
1488           alloc_reg(current,i,rs2[i]);
1489           alloc_reg(current,i,rt1[i]);
1490         }
1491       }
1492       else {
1493         alloc_reg(current,i,rt1[i]);
1494         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1495           // DADD used as move, or zeroing
1496           // If we have a 64-bit source, then make the target 64 bits too
1497           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1498             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1499             alloc_reg64(current,i,rt1[i]);
1500           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1501             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1502             alloc_reg64(current,i,rt1[i]);
1503           }
1504           if(opcode2[i]>=0x2e&&rs2[i]) {
1505             // DSUB used as negation - 64-bit result
1506             // If we have a 32-bit register, extend it to 64 bits
1507             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1508             alloc_reg64(current,i,rt1[i]);
1509           }
1510         }
1511       }
1512       if(rs1[i]&&rs2[i]) {
1513         current->is32&=~(1LL<<rt1[i]);
1514       } else if(rs1[i]) {
1515         current->is32&=~(1LL<<rt1[i]);
1516         if((current->is32>>rs1[i])&1)
1517           current->is32|=1LL<<rt1[i];
1518       } else if(rs2[i]) {
1519         current->is32&=~(1LL<<rt1[i]);
1520         if((current->is32>>rs2[i])&1)
1521           current->is32|=1LL<<rt1[i];
1522       } else {
1523         current->is32|=1LL<<rt1[i];
1524       }
1525     }
1526   }
1527   clear_const(current,rs1[i]);
1528   clear_const(current,rs2[i]);
1529   clear_const(current,rt1[i]);
1530   dirty_reg(current,rt1[i]);
1531 }
1532
1533 void imm16_alloc(struct regstat *current,int i)
1534 {
1535   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1536   else lt1[i]=rs1[i];
1537   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1538   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1539     current->is32&=~(1LL<<rt1[i]);
1540     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1541       // TODO: Could preserve the 32-bit flag if the immediate is zero
1542       alloc_reg64(current,i,rt1[i]);
1543       alloc_reg64(current,i,rs1[i]);
1544     }
1545     clear_const(current,rs1[i]);
1546     clear_const(current,rt1[i]);
1547   }
1548   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1549     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1550     current->is32|=1LL<<rt1[i];
1551     clear_const(current,rs1[i]);
1552     clear_const(current,rt1[i]);
1553   }
1554   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1555     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1556       if(rs1[i]!=rt1[i]) {
1557         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1558         alloc_reg64(current,i,rt1[i]);
1559         current->is32&=~(1LL<<rt1[i]);
1560       }
1561     }
1562     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1563     if(is_const(current,rs1[i])) {
1564       int v=get_const(current,rs1[i]);
1565       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1566       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1567       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1568     }
1569     else clear_const(current,rt1[i]);
1570   }
1571   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1572     if(is_const(current,rs1[i])) {
1573       int v=get_const(current,rs1[i]);
1574       set_const(current,rt1[i],v+imm[i]);
1575     }
1576     else clear_const(current,rt1[i]);
1577     current->is32|=1LL<<rt1[i];
1578   }
1579   else {
1580     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1581     current->is32|=1LL<<rt1[i];
1582   }
1583   dirty_reg(current,rt1[i]);
1584 }
1585
1586 void load_alloc(struct regstat *current,int i)
1587 {
1588   clear_const(current,rt1[i]);
1589   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1590   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1591   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1592   if(rt1[i]) {
1593     alloc_reg(current,i,rt1[i]);
1594     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1595     {
1596       current->is32&=~(1LL<<rt1[i]);
1597       alloc_reg64(current,i,rt1[i]);
1598     }
1599     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1600     {
1601       current->is32&=~(1LL<<rt1[i]);
1602       alloc_reg64(current,i,rt1[i]);
1603       alloc_all(current,i);
1604       alloc_reg64(current,i,FTEMP);
1605     }
1606     else current->is32|=1LL<<rt1[i];
1607     dirty_reg(current,rt1[i]);
1608     // If using TLB, need a register for pointer to the mapping table
1609     if(using_tlb) alloc_reg(current,i,TLREG);
1610     // LWL/LWR need a temporary register for the old value
1611     if(opcode[i]==0x22||opcode[i]==0x26)
1612     {
1613       alloc_reg(current,i,FTEMP);
1614       alloc_reg_temp(current,i,-1);
1615     }
1616   }
1617   else
1618   {
1619     // Load to r0 (dummy load)
1620     // but we still need a register to calculate the address
1621     alloc_reg_temp(current,i,-1);
1622   }
1623 }
1624
1625 void store_alloc(struct regstat *current,int i)
1626 {
1627   clear_const(current,rs2[i]);
1628   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1629   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1630   alloc_reg(current,i,rs2[i]);
1631   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1632     alloc_reg64(current,i,rs2[i]);
1633     if(rs2[i]) alloc_reg(current,i,FTEMP);
1634   }
1635   // If using TLB, need a register for pointer to the mapping table
1636   if(using_tlb) alloc_reg(current,i,TLREG);
1637   #if defined(HOST_IMM8)
1638   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1639   else alloc_reg(current,i,INVCP);
1640   #endif
1641   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1642     alloc_reg(current,i,FTEMP);
1643   }
1644   // We need a temporary register for address generation
1645   alloc_reg_temp(current,i,-1);
1646 }
1647
1648 void c1ls_alloc(struct regstat *current,int i)
1649 {
1650   //clear_const(current,rs1[i]); // FIXME
1651   clear_const(current,rt1[i]);
1652   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1653   alloc_reg(current,i,CSREG); // Status
1654   alloc_reg(current,i,FTEMP);
1655   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1656     alloc_reg64(current,i,FTEMP);
1657   }
1658   // If using TLB, need a register for pointer to the mapping table
1659   if(using_tlb) alloc_reg(current,i,TLREG);
1660   #if defined(HOST_IMM8)
1661   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1662   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1663     alloc_reg(current,i,INVCP);
1664   #endif
1665   // We need a temporary register for address generation
1666   alloc_reg_temp(current,i,-1);
1667 }
1668
1669 void c2ls_alloc(struct regstat *current,int i)
1670 {
1671   clear_const(current,rt1[i]);
1672   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1673   alloc_reg(current,i,FTEMP);
1674   // If using TLB, need a register for pointer to the mapping table
1675   if(using_tlb) alloc_reg(current,i,TLREG);
1676   #if defined(HOST_IMM8)
1677   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1678   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1679     alloc_reg(current,i,INVCP);
1680   #endif
1681   // We need a temporary register for address generation
1682   alloc_reg_temp(current,i,-1);
1683 }
1684
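// Generic register allocation for MULT/DIV and their 64-bit forms; an
// architecture-specific assembler header may provide its own version via
// the multdiv_alloc macro.  HI and LO become live and dirty, and the
// 64-bit forms flush everything with alloc_all.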
1685 #ifndef multdiv_alloc
1686 void multdiv_alloc(struct regstat *current,int i)
1687 {
1688   //  case 0x18: MULT
1689   //  case 0x19: MULTU
1690   //  case 0x1A: DIV
1691   //  case 0x1B: DIVU
1692   //  case 0x1C: DMULT
1693   //  case 0x1D: DMULTU
1694   //  case 0x1E: DDIV
1695   //  case 0x1F: DDIVU
1696   clear_const(current,rs1[i]);
1697   clear_const(current,rs2[i]);
1698   if(rs1[i]&&rs2[i])
1699   {
1700     if((opcode2[i]&4)==0) // 32-bit
1701     {
1702       current->u&=~(1LL<<HIREG);
1703       current->u&=~(1LL<<LOREG);
1704       alloc_reg(current,i,HIREG);
1705       alloc_reg(current,i,LOREG);
1706       alloc_reg(current,i,rs1[i]);
1707       alloc_reg(current,i,rs2[i]);
1708       current->is32|=1LL<<HIREG;
1709       current->is32|=1LL<<LOREG;
1710       dirty_reg(current,HIREG);
1711       dirty_reg(current,LOREG);
1712     }
1713     else // 64-bit
1714     {
1715       current->u&=~(1LL<<HIREG);
1716       current->u&=~(1LL<<LOREG);
1717       current->uu&=~(1LL<<HIREG);
1718       current->uu&=~(1LL<<LOREG);
1719       alloc_reg64(current,i,HIREG);
1720       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1721       alloc_reg64(current,i,rs1[i]);
1722       alloc_reg64(current,i,rs2[i]);
1723       alloc_all(current,i);
1724       current->is32&=~(1LL<<HIREG);
1725       current->is32&=~(1LL<<LOREG);
1726       dirty_reg(current,HIREG);
1727       dirty_reg(current,LOREG);
1728     }
1729   }
1730   else
1731   {
1732     // Multiply by zero is zero.
1733     // MIPS does not have a divide by zero exception.
1734     // The result is undefined; we return zero.
1735     alloc_reg(current,i,HIREG);
1736     alloc_reg(current,i,LOREG);
1737     current->is32|=1LL<<HIREG;
1738     current->is32|=1LL<<LOREG;
1739     dirty_reg(current,HIREG);
1740     dirty_reg(current,LOREG);
1741   }
1742 }
1743 #endif
1744
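// Register allocation for COP0 (MFC0/MTC0 and the TLB/ERET group).
// All variants flush the host register state with alloc_all before the
// operation.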
1745 void cop0_alloc(struct regstat *current,int i)
1746 {
1747   if(opcode2[i]==0) // MFC0
1748   {
1749     if(rt1[i]) {
1750       clear_const(current,rt1[i]);
1751       alloc_all(current,i);
1752       alloc_reg(current,i,rt1[i]);
1753       current->is32|=1LL<<rt1[i];
1754       dirty_reg(current,rt1[i]);
1755     }
1756   }
1757   else if(opcode2[i]==4) // MTC0
1758   {
1759     if(rs1[i]){
1760       clear_const(current,rs1[i]);
1761       alloc_reg(current,i,rs1[i]);
1762       alloc_all(current,i);
1763     }
1764     else {
1765       alloc_all(current,i); // FIXME: Keep r0
1766       current->u&=~1LL;
1767       alloc_reg(current,i,0);
1768     }
1769   }
1770   else
1771   {
1772     // TLBR/TLBWI/TLBWR/TLBP/ERET
1773     assert(opcode2[i]==0x10);
1774     alloc_all(current,i);
1775   }
1776 }
1777
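// Register allocation for COP1/COP2 register transfers:
// MFC1/DMFC1/CFC1 write rt1, MTC1/DMTC1/CTC1 read rs1; CSREG is loaded
// for the usability check and a temporary is reserved (presumably for the
// FPU register address).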
1778 void cop1_alloc(struct regstat *current,int i)
1779 {
1780   alloc_reg(current,i,CSREG); // Load status
1781   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1782   {
1783     assert(rt1[i]);
1784     clear_const(current,rt1[i]);
1785     if(opcode2[i]==1) {
1786       alloc_reg64(current,i,rt1[i]); // DMFC1
1787       current->is32&=~(1LL<<rt1[i]);
1788     }else{
1789       alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1790       current->is32|=1LL<<rt1[i];
1791     }
1792     dirty_reg(current,rt1[i]);
1793     alloc_reg_temp(current,i,-1);
1794   }
1795   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1796   {
1797     if(rs1[i]){
1798       clear_const(current,rs1[i]);
1799       if(opcode2[i]==5)
1800         alloc_reg64(current,i,rs1[i]); // DMTC1
1801       else
1802         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1803       alloc_reg_temp(current,i,-1);
1804     }
1805     else {
1806       current->u&=~1LL;
1807       alloc_reg(current,i,0);
1808       alloc_reg_temp(current,i,-1);
1809     }
1810   }
1811 }
1812 void fconv_alloc(struct regstat *current,int i)
1813 {
1814   alloc_reg(current,i,CSREG); // Load status
1815   alloc_reg_temp(current,i,-1);
1816 }
1817 void float_alloc(struct regstat *current,int i)
1818 {
1819   alloc_reg(current,i,CSREG); // Load status
1820   alloc_reg_temp(current,i,-1);
1821 }
1822 void c2op_alloc(struct regstat *current,int i)
1823 {
1824   alloc_reg_temp(current,i,-1);
1825 }
1826 void fcomp_alloc(struct regstat *current,int i)
1827 {
1828   alloc_reg(current,i,CSREG); // Load status
1829   alloc_reg(current,i,FSREG); // Load flags
1830   dirty_reg(current,FSREG); // Flag will be modified
1831   alloc_reg_temp(current,i,-1);
1832 }
1833
1834 void syscall_alloc(struct regstat *current,int i)
1835 {
1836   alloc_cc(current,i);
1837   dirty_reg(current,CCREG);
1838   alloc_all(current,i);
1839   current->isconst=0;
1840 }
1841
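// Allocate registers for the instruction sitting in a branch delay slot,
// dispatching on its itype.  A branch inside a delay slot is not
// supported; if one shows up, speculative precompilation is disabled
// (stop_after_jal).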
1842 void delayslot_alloc(struct regstat *current,int i)
1843 {
1844   switch(itype[i]) {
1845     case UJUMP:
1846     case CJUMP:
1847     case SJUMP:
1848     case RJUMP:
1849     case FJUMP:
1850     case SYSCALL:
1851     case HLECALL:
1852     case SPAN:
1853       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1854       printf("Disabled speculative precompilation\n");
1855       stop_after_jal=1;
1856       break;
1857     case IMM16:
1858       imm16_alloc(current,i);
1859       break;
1860     case LOAD:
1861     case LOADLR:
1862       load_alloc(current,i);
1863       break;
1864     case STORE:
1865     case STORELR:
1866       store_alloc(current,i);
1867       break;
1868     case ALU:
1869       alu_alloc(current,i);
1870       break;
1871     case SHIFT:
1872       shift_alloc(current,i);
1873       break;
1874     case MULTDIV:
1875       multdiv_alloc(current,i);
1876       break;
1877     case SHIFTIMM:
1878       shiftimm_alloc(current,i);
1879       break;
1880     case MOV:
1881       mov_alloc(current,i);
1882       break;
1883     case COP0:
1884       cop0_alloc(current,i);
1885       break;
1886     case COP1:
1887     case COP2:
1888       cop1_alloc(current,i);
1889       break;
1890     case C1LS:
1891       c1ls_alloc(current,i);
1892       break;
1893     case C2LS:
1894       c2ls_alloc(current,i);
1895       break;
1896     case FCONV:
1897       fconv_alloc(current,i);
1898       break;
1899     case FLOAT:
1900       float_alloc(current,i);
1901       break;
1902     case FCOMP:
1903       fcomp_alloc(current,i);
1904       break;
1905     case C2OP:
1906       c2op_alloc(current,i);
1907       break;
1908   }
1909 }
1910
1911 // Special case where a branch and delay slot span two pages in virtual memory
1912 static void pagespan_alloc(struct regstat *current,int i)
1913 {
1914   current->isconst=0;
1915   current->wasconst=0;
1916   regs[i].wasconst=0;
1917   alloc_all(current,i);
1918   alloc_cc(current,i);
1919   dirty_reg(current,CCREG);
1920   if(opcode[i]==3) // JAL
1921   {
1922     alloc_reg(current,i,31);
1923     dirty_reg(current,31);
1924   }
1925   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1926   {
1927     alloc_reg(current,i,rs1[i]);
1928     if (rt1[i]!=0) {
1929       alloc_reg(current,i,rt1[i]);
1930       dirty_reg(current,rt1[i]);
1931     }
1932   }
1933   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1934   {
1935     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1936     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1937     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1938     {
1939       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1940       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1941     }
1942   }
1943   else
1944   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1945   {
1946     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1947     if(!((current->is32>>rs1[i])&1))
1948     {
1949       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1950     }
1951   }
1952   else
1953   if(opcode[i]==0x11) // BC1
1954   {
1955     alloc_reg(current,i,FSREG);
1956     alloc_reg(current,i,CSREG);
1957   }
1958   //else ...
1959 }
1960
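/* Record a patch site to be resolved later by the stub generator.
 * stubs[n][0..7] hold: stub type, the address of the placeholder branch,
 * the return address in the output buffer, and five type-specific
 * arguments.  A typical call sequence from the *_assemble routines below
 * (illustrative only; argument meaning varies per stub type):
 *
 *   jaddr=(int)out;  // remember where the placeholder branch was emitted
 *   emit_jno(0);     // branch target gets patched when the stub is built
 *   add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
 */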
1961 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1962 {
1963   stubs[stubcount][0]=type;
1964   stubs[stubcount][1]=addr;
1965   stubs[stubcount][2]=retaddr;
1966   stubs[stubcount][3]=a;
1967   stubs[stubcount][4]=b;
1968   stubs[stubcount][5]=c;
1969   stubs[stubcount][6]=d;
1970   stubs[stubcount][7]=e;
1971   stubcount++;
1972 }
1973
1974 // Write out a single register
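// Note: regmap entries use the (reg|64) convention for the upper half of a
// 64-bit guest register.  When only the 32-bit half is mapped and the value
// is known sign-extended (is32), the high word is regenerated with an
// arithmetic shift rather than stored from a separate host register.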
1975 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1976 {
1977   int hr;
1978   for(hr=0;hr<HOST_REGS;hr++) {
1979     if(hr!=EXCLUDE_REG) {
1980       if((regmap[hr]&63)==r) {
1981         if((dirty>>hr)&1) {
1982           if(regmap[hr]<64) {
1983             emit_storereg(r,hr);
1984 #ifndef FORCE32
1985             if((is32>>regmap[hr])&1) {
1986               emit_sarimm(hr,31,hr);
1987               emit_storereg(r|64,hr);
1988             }
1989 #endif
1990           }else{
1991             emit_storereg(r|64,hr);
1992           }
1993         }
1994       }
1995     }
1996   }
1997 }
1998
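// Debug helpers: mchecksum/rchecksum hash RDRAM and the register file,
// rlist dumps the guest register state, and memdebug can be called from
// generated code (see the commented-out blocks in the *_assemble routines)
// to trace memory accesses around a given cycle count.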
1999 int mchecksum()
2000 {
2001   //if(!tracedebug) return 0;
2002   int i;
2003   int sum=0;
2004   for(i=0;i<2097152;i++) {
2005     unsigned int temp=sum;
2006     sum<<=1;
2007     sum|=(~temp)>>31;
2008     sum^=((u_int *)rdram)[i];
2009   }
2010   return sum;
2011 }
2012 int rchecksum()
2013 {
2014   int i;
2015   int sum=0;
2016   for(i=0;i<64;i++)
2017     sum^=((u_int *)reg)[i];
2018   return sum;
2019 }
2020 void rlist()
2021 {
2022   int i;
2023   printf("TRACE: ");
2024   for(i=0;i<32;i++)
2025     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2026   printf("\n");
2027 #ifndef DISABLE_COP1
2028   printf("TRACE: ");
2029   for(i=0;i<32;i++)
2030     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2031   printf("\n");
2032 #endif
2033 }
2034
2035 void enabletrace()
2036 {
2037   tracedebug=1;
2038 }
2039
2040 void memdebug(int i)
2041 {
2042   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2043   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2044   //rlist();
2045   //if(tracedebug) {
2046   //if(Count>=-2084597794) {
2047   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2048   //if(0) {
2049     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2050     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2051     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2052     rlist();
2053     #ifdef __i386__
2054     printf("TRACE: %x\n",(&i)[-1]);
2055     #endif
2056     #ifdef __arm__
2057     int j;
2058     printf("TRACE: %x \n",(&j)[10]);
2059     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2060     #endif
2061     //fflush(stdout);
2062   }
2063   //printf("TRACE: %x\n",(&i)[-1]);
2064 }
2065
2066 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2067 {
2068   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2069 }
2070
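// Emit code for register-register ALU ops: ADD/ADDU/SUB/SUBU, the 64-bit
// DADD/DSUB family, SLT/SLTU and AND/OR/XOR/NOR.  Operands known to be r0
// are strength-reduced to moves, negations, zeroing or loading -1 instead
// of emitting the full operation.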
2071 void alu_assemble(int i,struct regstat *i_regs)
2072 {
2073   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2074     if(rt1[i]) {
2075       signed char s1,s2,t;
2076       t=get_reg(i_regs->regmap,rt1[i]);
2077       if(t>=0) {
2078         s1=get_reg(i_regs->regmap,rs1[i]);
2079         s2=get_reg(i_regs->regmap,rs2[i]);
2080         if(rs1[i]&&rs2[i]) {
2081           assert(s1>=0);
2082           assert(s2>=0);
2083           if(opcode2[i]&2) emit_sub(s1,s2,t);
2084           else emit_add(s1,s2,t);
2085         }
2086         else if(rs1[i]) {
2087           if(s1>=0) emit_mov(s1,t);
2088           else emit_loadreg(rs1[i],t);
2089         }
2090         else if(rs2[i]) {
2091           if(s2>=0) {
2092             if(opcode2[i]&2) emit_neg(s2,t);
2093             else emit_mov(s2,t);
2094           }
2095           else {
2096             emit_loadreg(rs2[i],t);
2097             if(opcode2[i]&2) emit_neg(t,t);
2098           }
2099         }
2100         else emit_zeroreg(t);
2101       }
2102     }
2103   }
2104   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2105     if(rt1[i]) {
2106       signed char s1l,s2l,s1h,s2h,tl,th;
2107       tl=get_reg(i_regs->regmap,rt1[i]);
2108       th=get_reg(i_regs->regmap,rt1[i]|64);
2109       if(tl>=0) {
2110         s1l=get_reg(i_regs->regmap,rs1[i]);
2111         s2l=get_reg(i_regs->regmap,rs2[i]);
2112         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2113         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2114         if(rs1[i]&&rs2[i]) {
2115           assert(s1l>=0);
2116           assert(s2l>=0);
2117           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2118           else emit_adds(s1l,s2l,tl);
2119           if(th>=0) {
2120             #ifdef INVERTED_CARRY
2121             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2122             #else
2123             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2124             #endif
2125             else emit_add(s1h,s2h,th);
2126           }
2127         }
2128         else if(rs1[i]) {
2129           if(s1l>=0) emit_mov(s1l,tl);
2130           else emit_loadreg(rs1[i],tl);
2131           if(th>=0) {
2132             if(s1h>=0) emit_mov(s1h,th);
2133             else emit_loadreg(rs1[i]|64,th);
2134           }
2135         }
2136         else if(rs2[i]) {
2137           if(s2l>=0) {
2138             if(opcode2[i]&2) emit_negs(s2l,tl);
2139             else emit_mov(s2l,tl);
2140           }
2141           else {
2142             emit_loadreg(rs2[i],tl);
2143             if(opcode2[i]&2) emit_negs(tl,tl);
2144           }
2145           if(th>=0) {
2146             #ifdef INVERTED_CARRY
2147             if(s2h>=0) emit_mov(s2h,th);
2148             else emit_loadreg(rs2[i]|64,th);
2149             if(opcode2[i]&2) {
2150               emit_adcimm(-1,th); // x86 has inverted carry flag
2151               emit_not(th,th);
2152             }
2153             #else
2154             if(opcode2[i]&2) {
2155               if(s2h>=0) emit_rscimm(s2h,0,th);
2156               else {
2157                 emit_loadreg(rs2[i]|64,th);
2158                 emit_rscimm(th,0,th);
2159               }
2160             }else{
2161               if(s2h>=0) emit_mov(s2h,th);
2162               else emit_loadreg(rs2[i]|64,th);
2163             }
2164             #endif
2165           }
2166         }
2167         else {
2168           emit_zeroreg(tl);
2169           if(th>=0) emit_zeroreg(th);
2170         }
2171       }
2172     }
2173   }
2174   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2175     if(rt1[i]) {
2176       signed char s1l,s1h,s2l,s2h,t;
2177       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2178       {
2179         t=get_reg(i_regs->regmap,rt1[i]);
2180         //assert(t>=0);
2181         if(t>=0) {
2182           s1l=get_reg(i_regs->regmap,rs1[i]);
2183           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2184           s2l=get_reg(i_regs->regmap,rs2[i]);
2185           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2186           if(rs2[i]==0) // rx<r0
2187           {
2188             assert(s1h>=0);
2189             if(opcode2[i]==0x2a) // SLT
2190               emit_shrimm(s1h,31,t);
2191             else // SLTU (unsigned can not be less than zero)
2192               emit_zeroreg(t);
2193           }
2194           else if(rs1[i]==0) // r0<rx
2195           {
2196             assert(s2h>=0);
2197             if(opcode2[i]==0x2a) // SLT
2198               emit_set_gz64_32(s2h,s2l,t);
2199             else // SLTU (set if not zero)
2200               emit_set_nz64_32(s2h,s2l,t);
2201           }
2202           else {
2203             assert(s1l>=0);assert(s1h>=0);
2204             assert(s2l>=0);assert(s2h>=0);
2205             if(opcode2[i]==0x2a) // SLT
2206               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2207             else // SLTU
2208               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2209           }
2210         }
2211       } else {
2212         t=get_reg(i_regs->regmap,rt1[i]);
2213         //assert(t>=0);
2214         if(t>=0) {
2215           s1l=get_reg(i_regs->regmap,rs1[i]);
2216           s2l=get_reg(i_regs->regmap,rs2[i]);
2217           if(rs2[i]==0) // rx<r0
2218           {
2219             assert(s1l>=0);
2220             if(opcode2[i]==0x2a) // SLT
2221               emit_shrimm(s1l,31,t);
2222             else // SLTU (unsigned can not be less than zero)
2223               emit_zeroreg(t);
2224           }
2225           else if(rs1[i]==0) // r0<rx
2226           {
2227             assert(s2l>=0);
2228             if(opcode2[i]==0x2a) // SLT
2229               emit_set_gz32(s2l,t);
2230             else // SLTU (set if not zero)
2231               emit_set_nz32(s2l,t);
2232           }
2233           else{
2234             assert(s1l>=0);assert(s2l>=0);
2235             if(opcode2[i]==0x2a) // SLT
2236               emit_set_if_less32(s1l,s2l,t);
2237             else // SLTU
2238               emit_set_if_carry32(s1l,s2l,t);
2239           }
2240         }
2241       }
2242     }
2243   }
2244   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2245     if(rt1[i]) {
2246       signed char s1l,s1h,s2l,s2h,th,tl;
2247       tl=get_reg(i_regs->regmap,rt1[i]);
2248       th=get_reg(i_regs->regmap,rt1[i]|64);
2249       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2250       {
2251         assert(tl>=0);
2252         if(tl>=0) {
2253           s1l=get_reg(i_regs->regmap,rs1[i]);
2254           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2255           s2l=get_reg(i_regs->regmap,rs2[i]);
2256           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2257           if(rs1[i]&&rs2[i]) {
2258             assert(s1l>=0);assert(s1h>=0);
2259             assert(s2l>=0);assert(s2h>=0);
2260             if(opcode2[i]==0x24) { // AND
2261               emit_and(s1l,s2l,tl);
2262               emit_and(s1h,s2h,th);
2263             } else
2264             if(opcode2[i]==0x25) { // OR
2265               emit_or(s1l,s2l,tl);
2266               emit_or(s1h,s2h,th);
2267             } else
2268             if(opcode2[i]==0x26) { // XOR
2269               emit_xor(s1l,s2l,tl);
2270               emit_xor(s1h,s2h,th);
2271             } else
2272             if(opcode2[i]==0x27) { // NOR
2273               emit_or(s1l,s2l,tl);
2274               emit_or(s1h,s2h,th);
2275               emit_not(tl,tl);
2276               emit_not(th,th);
2277             }
2278           }
2279           else
2280           {
2281             if(opcode2[i]==0x24) { // AND
2282               emit_zeroreg(tl);
2283               emit_zeroreg(th);
2284             } else
2285             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2286               if(rs1[i]){
2287                 if(s1l>=0) emit_mov(s1l,tl);
2288                 else emit_loadreg(rs1[i],tl);
2289                 if(s1h>=0) emit_mov(s1h,th);
2290                 else emit_loadreg(rs1[i]|64,th);
2291               }
2292               else
2293               if(rs2[i]){
2294                 if(s2l>=0) emit_mov(s2l,tl);
2295                 else emit_loadreg(rs2[i],tl);
2296                 if(s2h>=0) emit_mov(s2h,th);
2297                 else emit_loadreg(rs2[i]|64,th);
2298               }
2299               else{
2300                 emit_zeroreg(tl);
2301                 emit_zeroreg(th);
2302               }
2303             } else
2304             if(opcode2[i]==0x27) { // NOR
2305               if(rs1[i]){
2306                 if(s1l>=0) emit_not(s1l,tl);
2307                 else{
2308                   emit_loadreg(rs1[i],tl);
2309                   emit_not(tl,tl);
2310                 }
2311                 if(s1h>=0) emit_not(s1h,th);
2312                 else{
2313                   emit_loadreg(rs1[i]|64,th);
2314                   emit_not(th,th);
2315                 }
2316               }
2317               else
2318               if(rs2[i]){
2319                 if(s2l>=0) emit_not(s2l,tl);
2320                 else{
2321                   emit_loadreg(rs2[i],tl);
2322                   emit_not(tl,tl);
2323                 }
2324                 if(s2h>=0) emit_not(s2h,th);
2325                 else{
2326                   emit_loadreg(rs2[i]|64,th);
2327                   emit_not(th,th);
2328                 }
2329               }
2330               else {
2331                 emit_movimm(-1,tl);
2332                 emit_movimm(-1,th);
2333               }
2334             }
2335           }
2336         }
2337       }
2338       else
2339       {
2340         // 32 bit
2341         if(tl>=0) {
2342           s1l=get_reg(i_regs->regmap,rs1[i]);
2343           s2l=get_reg(i_regs->regmap,rs2[i]);
2344           if(rs1[i]&&rs2[i]) {
2345             assert(s1l>=0);
2346             assert(s2l>=0);
2347             if(opcode2[i]==0x24) { // AND
2348               emit_and(s1l,s2l,tl);
2349             } else
2350             if(opcode2[i]==0x25) { // OR
2351               emit_or(s1l,s2l,tl);
2352             } else
2353             if(opcode2[i]==0x26) { // XOR
2354               emit_xor(s1l,s2l,tl);
2355             } else
2356             if(opcode2[i]==0x27) { // NOR
2357               emit_or(s1l,s2l,tl);
2358               emit_not(tl,tl);
2359             }
2360           }
2361           else
2362           {
2363             if(opcode2[i]==0x24) { // AND
2364               emit_zeroreg(tl);
2365             } else
2366             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2367               if(rs1[i]){
2368                 if(s1l>=0) emit_mov(s1l,tl);
2369                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2370               }
2371               else
2372               if(rs2[i]){
2373                 if(s2l>=0) emit_mov(s2l,tl);
2374                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2375               }
2376               else emit_zeroreg(tl);
2377             } else
2378             if(opcode2[i]==0x27) { // NOR
2379               if(rs1[i]){
2380                 if(s1l>=0) emit_not(s1l,tl);
2381                 else {
2382                   emit_loadreg(rs1[i],tl);
2383                   emit_not(tl,tl);
2384                 }
2385               }
2386               else
2387               if(rs2[i]){
2388                 if(s2l>=0) emit_not(s2l,tl);
2389                 else {
2390                   emit_loadreg(rs2[i],tl);
2391                   emit_not(tl,tl);
2392                 }
2393               }
2394               else emit_movimm(-1,tl);
2395             }
2396           }
2397         }
2398       }
2399     }
2400   }
2401 }
2402
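// Emit code for immediate-format ops: LUI, ADDI(U)/DADDI(U), SLTI(U) and
// ANDI/ORI/XORI.  When the source register holds a known constant, the
// result is folded at translation time and loaded with emit_movimm.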
2403 void imm16_assemble(int i,struct regstat *i_regs)
2404 {
2405   if (opcode[i]==0x0f) { // LUI
2406     if(rt1[i]) {
2407       signed char t;
2408       t=get_reg(i_regs->regmap,rt1[i]);
2409       //assert(t>=0);
2410       if(t>=0) {
2411         if(!((i_regs->isconst>>t)&1))
2412           emit_movimm(imm[i]<<16,t);
2413       }
2414     }
2415   }
2416   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2417     if(rt1[i]) {
2418       signed char s,t;
2419       t=get_reg(i_regs->regmap,rt1[i]);
2420       s=get_reg(i_regs->regmap,rs1[i]);
2421       if(rs1[i]) {
2422         //assert(t>=0);
2423         //assert(s>=0);
2424         if(t>=0) {
2425           if(!((i_regs->isconst>>t)&1)) {
2426             if(s<0) {
2427               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2428               emit_addimm(t,imm[i],t);
2429             }else{
2430               if(!((i_regs->wasconst>>s)&1))
2431                 emit_addimm(s,imm[i],t);
2432               else
2433                 emit_movimm(constmap[i][s]+imm[i],t);
2434             }
2435           }
2436         }
2437       } else {
2438         if(t>=0) {
2439           if(!((i_regs->isconst>>t)&1))
2440             emit_movimm(imm[i],t);
2441         }
2442       }
2443     }
2444   }
2445   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2446     if(rt1[i]) {
2447       signed char sh,sl,th,tl;
2448       th=get_reg(i_regs->regmap,rt1[i]|64);
2449       tl=get_reg(i_regs->regmap,rt1[i]);
2450       sh=get_reg(i_regs->regmap,rs1[i]|64);
2451       sl=get_reg(i_regs->regmap,rs1[i]);
2452       if(tl>=0) {
2453         if(rs1[i]) {
2454           assert(sh>=0);
2455           assert(sl>=0);
2456           if(th>=0) {
2457             emit_addimm64_32(sh,sl,imm[i],th,tl);
2458           }
2459           else {
2460             emit_addimm(sl,imm[i],tl);
2461           }
2462         } else {
2463           emit_movimm(imm[i],tl);
2464           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2465         }
2466       }
2467     }
2468   }
2469   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2470     if(rt1[i]) {
2471       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2472       signed char sh,sl,t;
2473       t=get_reg(i_regs->regmap,rt1[i]);
2474       sh=get_reg(i_regs->regmap,rs1[i]|64);
2475       sl=get_reg(i_regs->regmap,rs1[i]);
2476       //assert(t>=0);
2477       if(t>=0) {
2478         if(rs1[i]>0) {
2479           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2480           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2481             if(opcode[i]==0x0a) { // SLTI
2482               if(sl<0) {
2483                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2484                 emit_slti32(t,imm[i],t);
2485               }else{
2486                 emit_slti32(sl,imm[i],t);
2487               }
2488             }
2489             else { // SLTIU
2490               if(sl<0) {
2491                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2492                 emit_sltiu32(t,imm[i],t);
2493               }else{
2494                 emit_sltiu32(sl,imm[i],t);
2495               }
2496             }
2497           }else{ // 64-bit
2498             assert(sl>=0);
2499             if(opcode[i]==0x0a) // SLTI
2500               emit_slti64_32(sh,sl,imm[i],t);
2501             else // SLTIU
2502               emit_sltiu64_32(sh,sl,imm[i],t);
2503           }
2504         }else{
2505           // SLTI(U) with r0 as the source is pointless,
2506           // but examples can nonetheless be found in real code
2507           if(opcode[i]==0x0a) // SLTI
2508             if(0<imm[i]) emit_movimm(1,t);
2509             else emit_zeroreg(t);
2510           else // SLTIU
2511           {
2512             if(imm[i]) emit_movimm(1,t);
2513             else emit_zeroreg(t);
2514           }
2515         }
2516       }
2517     }
2518   }
2519   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2520     if(rt1[i]) {
2521       signed char sh,sl,th,tl;
2522       th=get_reg(i_regs->regmap,rt1[i]|64);
2523       tl=get_reg(i_regs->regmap,rt1[i]);
2524       sh=get_reg(i_regs->regmap,rs1[i]|64);
2525       sl=get_reg(i_regs->regmap,rs1[i]);
2526       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2527         if(opcode[i]==0x0c) //ANDI
2528         {
2529           if(rs1[i]) {
2530             if(sl<0) {
2531               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2532               emit_andimm(tl,imm[i],tl);
2533             }else{
2534               if(!((i_regs->wasconst>>sl)&1))
2535                 emit_andimm(sl,imm[i],tl);
2536               else
2537                 emit_movimm(constmap[i][sl]&imm[i],tl);
2538             }
2539           }
2540           else
2541             emit_zeroreg(tl);
2542           if(th>=0) emit_zeroreg(th);
2543         }
2544         else
2545         {
2546           if(rs1[i]) {
2547             if(sl<0) {
2548               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2549             }
2550             if(th>=0) {
2551               if(sh<0) {
2552                 emit_loadreg(rs1[i]|64,th);
2553               }else{
2554                 emit_mov(sh,th);
2555               }
2556             }
2557             if(opcode[i]==0x0d) //ORI
2558             if(sl<0) {
2559               emit_orimm(tl,imm[i],tl);
2560             }else{
2561               if(!((i_regs->wasconst>>sl)&1))
2562                 emit_orimm(sl,imm[i],tl);
2563               else
2564                 emit_movimm(constmap[i][sl]|imm[i],tl);
2565             }
2566             if(opcode[i]==0x0e) //XORI
2567             if(sl<0) {
2568               emit_xorimm(tl,imm[i],tl);
2569             }else{
2570               if(!((i_regs->wasconst>>sl)&1))
2571                 emit_xorimm(sl,imm[i],tl);
2572               else
2573                 emit_movimm(constmap[i][sl]^imm[i],tl);
2574             }
2575           }
2576           else {
2577             emit_movimm(imm[i],tl);
2578             if(th>=0) emit_zeroreg(th);
2579           }
2580         }
2581       }
2582     }
2583   }
2584 }
2585
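// Emit code for shift-by-immediate ops: SLL/SRL/SRA, the doubleword
// DSLL/DSRL/DSRA, and the DSxx32 variants, which handle the implicit
// shift by 32 by moving between the low and high host registers.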
2586 void shiftimm_assemble(int i,struct regstat *i_regs)
2587 {
2588   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2589   {
2590     if(rt1[i]) {
2591       signed char s,t;
2592       t=get_reg(i_regs->regmap,rt1[i]);
2593       s=get_reg(i_regs->regmap,rs1[i]);
2594       //assert(t>=0);
2595       if(t>=0){
2596         if(rs1[i]==0)
2597         {
2598           emit_zeroreg(t);
2599         }
2600         else
2601         {
2602           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2603           if(imm[i]) {
2604             if(opcode2[i]==0) // SLL
2605             {
2606               emit_shlimm(s<0?t:s,imm[i],t);
2607             }
2608             if(opcode2[i]==2) // SRL
2609             {
2610               emit_shrimm(s<0?t:s,imm[i],t);
2611             }
2612             if(opcode2[i]==3) // SRA
2613             {
2614               emit_sarimm(s<0?t:s,imm[i],t);
2615             }
2616           }else{
2617             // Shift by zero
2618             if(s>=0 && s!=t) emit_mov(s,t);
2619           }
2620         }
2621       }
2622       //emit_storereg(rt1[i],t); //DEBUG
2623     }
2624   }
2625   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2626   {
2627     if(rt1[i]) {
2628       signed char sh,sl,th,tl;
2629       th=get_reg(i_regs->regmap,rt1[i]|64);
2630       tl=get_reg(i_regs->regmap,rt1[i]);
2631       sh=get_reg(i_regs->regmap,rs1[i]|64);
2632       sl=get_reg(i_regs->regmap,rs1[i]);
2633       if(tl>=0) {
2634         if(rs1[i]==0)
2635         {
2636           emit_zeroreg(tl);
2637           if(th>=0) emit_zeroreg(th);
2638         }
2639         else
2640         {
2641           assert(sl>=0);
2642           assert(sh>=0);
2643           if(imm[i]) {
2644             if(opcode2[i]==0x38) // DSLL
2645             {
2646               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2647               emit_shlimm(sl,imm[i],tl);
2648             }
2649             if(opcode2[i]==0x3a) // DSRL
2650             {
2651               emit_shrdimm(sl,sh,imm[i],tl);
2652               if(th>=0) emit_shrimm(sh,imm[i],th);
2653             }
2654             if(opcode2[i]==0x3b) // DSRA
2655             {
2656               emit_shrdimm(sl,sh,imm[i],tl);
2657               if(th>=0) emit_sarimm(sh,imm[i],th);
2658             }
2659           }else{
2660             // Shift by zero
2661             if(sl!=tl) emit_mov(sl,tl);
2662             if(th>=0&&sh!=th) emit_mov(sh,th);
2663           }
2664         }
2665       }
2666     }
2667   }
2668   if(opcode2[i]==0x3c) // DSLL32
2669   {
2670     if(rt1[i]) {
2671       signed char sl,tl,th;
2672       tl=get_reg(i_regs->regmap,rt1[i]);
2673       th=get_reg(i_regs->regmap,rt1[i]|64);
2674       sl=get_reg(i_regs->regmap,rs1[i]);
2675       if(th>=0||tl>=0){
2676         assert(tl>=0);
2677         assert(th>=0);
2678         assert(sl>=0);
2679         emit_mov(sl,th);
2680         emit_zeroreg(tl);
2681         if(imm[i]>32)
2682         {
2683           emit_shlimm(th,imm[i]&31,th);
2684         }
2685       }
2686     }
2687   }
2688   if(opcode2[i]==0x3e) // DSRL32
2689   {
2690     if(rt1[i]) {
2691       signed char sh,tl,th;
2692       tl=get_reg(i_regs->regmap,rt1[i]);
2693       th=get_reg(i_regs->regmap,rt1[i]|64);
2694       sh=get_reg(i_regs->regmap,rs1[i]|64);
2695       if(tl>=0){
2696         assert(sh>=0);
2697         emit_mov(sh,tl);
2698         if(th>=0) emit_zeroreg(th);
2699         if(imm[i]>32)
2700         {
2701           emit_shrimm(tl,imm[i]&31,tl);
2702         }
2703       }
2704     }
2705   }
2706   if(opcode2[i]==0x3f) // DSRA32
2707   {
2708     if(rt1[i]) {
2709       signed char sh,tl;
2710       tl=get_reg(i_regs->regmap,rt1[i]);
2711       sh=get_reg(i_regs->regmap,rs1[i]|64);
2712       if(tl>=0){
2713         assert(sh>=0);
2714         emit_mov(sh,tl);
2715         if(imm[i]>32)
2716         {
2717           emit_sarimm(tl,imm[i]&31,tl);
2718         }
2719       }
2720     }
2721   }
2722 }
2723
2724 #ifndef shift_assemble
2725 void shift_assemble(int i,struct regstat *i_regs)
2726 {
2727   printf("Need shift_assemble for this architecture.\n");
2728   exit(1);
2729 }
2730 #endif
2731
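// Emit code for aligned loads.  The address is range-checked against
// RAM_SIZE (or run through the TLB map when using_tlb), a slow-path stub
// is queued for out-of-range addresses, and known-constant addresses
// outside RAM go through inline_readstub.  On PCSX builds the read is
// forced even when the result is unused, since the target may be a
// hardware FIFO.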
2732 void load_assemble(int i,struct regstat *i_regs)
2733 {
2734   int s,th,tl,addr,map=-1;
2735   int offset;
2736   int jaddr=0;
2737   int memtarget=0,c=0;
2738   u_int hr,reglist=0;
2739   th=get_reg(i_regs->regmap,rt1[i]|64);
2740   tl=get_reg(i_regs->regmap,rt1[i]);
2741   s=get_reg(i_regs->regmap,rs1[i]);
2742   offset=imm[i];
2743   for(hr=0;hr<HOST_REGS;hr++) {
2744     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2745   }
2746   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2747   if(s>=0) {
2748     c=(i_regs->wasconst>>s)&1;
2749     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2750     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2751   }
2752   //printf("load_assemble: c=%d\n",c);
2753   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2754   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2755 #ifdef PCSX
2756   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2757     ||rt1[i]==0) {
2758       // the target could be a hardware FIFO, so the read must still be performed,
2759       // or this is a dummy read into r0
2760       assem_debug("(forced read)\n");
2761       tl=get_reg(i_regs->regmap,-1);
2762       assert(tl>=0);
2763   }
2764 #endif
2765   if(offset||s<0||c) addr=tl;
2766   else addr=s;
2767   if(tl>=0) {
2768     //assert(tl>=0);
2769     //assert(rt1[i]);
2770     reglist&=~(1<<tl);
2771     if(th>=0) reglist&=~(1<<th);
2772     if(!using_tlb) {
2773       if(!c) {
2774 //#define R29_HACK 1
2775         #ifdef R29_HACK
2776         // Strmnnrmn's speed hack
2777         if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2778         #endif
2779         {
2780           emit_cmpimm(addr,RAM_SIZE);
2781           jaddr=(int)out;
2782           #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2783           // Hint to branch predictor that the branch is unlikely to be taken
2784           if(rs1[i]>=28)
2785             emit_jno_unlikely(0);
2786           else
2787           #endif
2788           emit_jno(0);
2789         }
2790       }
2791     }else{ // using tlb
2792       int x=0;
2793       if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2794       if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2795       map=get_reg(i_regs->regmap,TLREG);
2796       assert(map>=0);
2797       map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2798       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2799     }
2800     if (opcode[i]==0x20) { // LB
2801       if(!c||memtarget) {
2802         #ifdef HOST_IMM_ADDR32
2803         if(c)
2804           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2805         else
2806         #endif
2807         {
2808           //emit_xorimm(addr,3,tl);
2809           //gen_tlb_addr_r(tl,map);
2810           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2811           int x=0;
2812 #ifdef BIG_ENDIAN_MIPS
2813           if(!c) emit_xorimm(addr,3,tl);
2814           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2815 #else
2816           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2817           else if (tl!=addr) emit_mov(addr,tl);
2818 #endif
2819           emit_movsbl_indexed_tlb(x,tl,map,tl);
2820         }
2821         if(jaddr)
2822           add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2823       }
2824       else
2825         inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2826     }
2827     if (opcode[i]==0x21) { // LH
2828       if(!c||memtarget) {
2829         #ifdef HOST_IMM_ADDR32
2830         if(c)
2831           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2832         else
2833         #endif
2834         {
2835           int x=0;
2836 #ifdef BIG_ENDIAN_MIPS
2837           if(!c) emit_xorimm(addr,2,tl);
2838           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2839 #else
2840           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2841           else if (tl!=addr) emit_mov(addr,tl);
2842 #endif
2843           //#ifdef
2844           //emit_movswl_indexed_tlb(x,tl,map,tl);
2845           //else
2846           if(map>=0) {
2847             gen_tlb_addr_r(tl,map);
2848             emit_movswl_indexed(x,tl,tl);
2849           }else
2850             emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
2851         }
2852         if(jaddr)
2853           add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2854       }
2855       else
2856         inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2857     }
2858     if (opcode[i]==0x23) { // LW
2859       if(!c||memtarget) {
2860         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2861         #ifdef HOST_IMM_ADDR32
2862         if(c)
2863           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2864         else
2865         #endif
2866         emit_readword_indexed_tlb(0,addr,map,tl);
2867         if(jaddr)
2868           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2869       }
2870       else
2871         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2872     }
2873     if (opcode[i]==0x24) { // LBU
2874       if(!c||memtarget) {
2875         #ifdef HOST_IMM_ADDR32
2876         if(c)
2877           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2878         else
2879         #endif
2880         {
2881           //emit_xorimm(addr,3,tl);
2882           //gen_tlb_addr_r(tl,map);
2883           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2884           int x=0;
2885 #ifdef BIG_ENDIAN_MIPS
2886           if(!c) emit_xorimm(addr,3,tl);
2887           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2888 #else
2889           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2890           else if (tl!=addr) emit_mov(addr,tl);
2891 #endif
2892           emit_movzbl_indexed_tlb(x,tl,map,tl);
2893         }
2894         if(jaddr)
2895           add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2896       }
2897       else
2898         inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2899     }
2900     if (opcode[i]==0x25) { // LHU
2901       if(!c||memtarget) {
2902         #ifdef HOST_IMM_ADDR32
2903         if(c)
2904           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2905         else
2906         #endif
2907         {
2908           int x=0;
2909 #ifdef BIG_ENDIAN_MIPS
2910           if(!c) emit_xorimm(addr,2,tl);
2911           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2912 #else
2913           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2914           else if (tl!=addr) emit_mov(addr,tl);
2915 #endif
2916           //#ifdef
2917           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2918           //#else
2919           if(map>=0) {
2920             gen_tlb_addr_r(tl,map);
2921             emit_movzwl_indexed(x,tl,tl);
2922           }else
2923             emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
2924           if(jaddr)
2925             add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2926         }
2927       }
2928       else
2929         inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2930     }
2931     if (opcode[i]==0x27) { // LWU
2932       assert(th>=0);
2933       if(!c||memtarget) {
2934         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2935         #ifdef HOST_IMM_ADDR32
2936         if(c)
2937           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2938         else
2939         #endif
2940         emit_readword_indexed_tlb(0,addr,map,tl);
2941         if(jaddr)
2942           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2943       }
2944       else {
2945         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2946       }
2947       emit_zeroreg(th);
2948     }
2949     if (opcode[i]==0x37) { // LD
2950       if(!c||memtarget) {
2951         //gen_tlb_addr_r(tl,map);
2952         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2953         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2954         #ifdef HOST_IMM_ADDR32
2955         if(c)
2956           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2957         else
2958         #endif
2959         emit_readdword_indexed_tlb(0,addr,map,th,tl);
2960         if(jaddr)
2961           add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2962       }
2963       else
2964         inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2965     }
2966     //emit_storereg(rt1[i],tl); // DEBUG
2967   }
2968   //if(opcode[i]==0x23)
2969   //if(opcode[i]==0x24)
2970   //if(opcode[i]==0x23||opcode[i]==0x24)
2971   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2972   {
2973     //emit_pusha();
2974     save_regs(0x100f);
2975         emit_readword((int)&last_count,ECX);
2976         #ifdef __i386__
2977         if(get_reg(i_regs->regmap,CCREG)<0)
2978           emit_loadreg(CCREG,HOST_CCREG);
2979         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2980         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2981         emit_writeword(HOST_CCREG,(int)&Count);
2982         #endif
2983         #ifdef __arm__
2984         if(get_reg(i_regs->regmap,CCREG)<0)
2985           emit_loadreg(CCREG,0);
2986         else
2987           emit_mov(HOST_CCREG,0);
2988         emit_add(0,ECX,0);
2989         emit_addimm(0,2*ccadj[i],0);
2990         emit_writeword(0,(int)&Count);
2991         #endif
2992     emit_call((int)memdebug);
2993     //emit_popa();
2994     restore_regs(0x100f);
2995   }/**/
2996 }
2997
2998 #ifndef loadlr_assemble
2999 void loadlr_assemble(int i,struct regstat *i_regs)
3000 {
3001   printf("Need loadlr_assemble for this architecture.\n");
3002   exit(1);
3003 }
3004 #endif
3005
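// Emit code for aligned stores (SB/SH/SW/SD).  Besides the RAM_SIZE/TLB
// check and slow-path stub, the address is compared against invalid_code
// so that a store into a page holding translated code raises an
// INVCODE_STUB (presumably to invalidate the affected blocks).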
3006 void store_assemble(int i,struct regstat *i_regs)
3007 {
3008   int s,th,tl,map=-1;
3009   int addr,temp;
3010   int offset;
3011   int jaddr=0,jaddr2,type;
3012   int memtarget=0,c=0;
3013   int agr=AGEN1+(i&1);
3014   u_int hr,reglist=0;
3015   th=get_reg(i_regs->regmap,rs2[i]|64);
3016   tl=get_reg(i_regs->regmap,rs2[i]);
3017   s=get_reg(i_regs->regmap,rs1[i]);
3018   temp=get_reg(i_regs->regmap,agr);
3019   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3020   offset=imm[i];
3021   if(s>=0) {
3022     c=(i_regs->wasconst>>s)&1;
3023     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3024     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3025   }
3026   assert(tl>=0);
3027   assert(temp>=0);
3028   for(hr=0;hr<HOST_REGS;hr++) {
3029     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3030   }
3031   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3032   if(offset||s<0||c) addr=temp;
3033   else addr=s;
3034   if(!using_tlb) {
3035     if(!c) {
3036       #ifdef R29_HACK
3037       // Strmnnrmn's speed hack
3038       memtarget=1;
3039       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3040       #endif
3041       emit_cmpimm(addr,RAM_SIZE);
3042       #ifdef DESTRUCTIVE_SHIFT
3043       if(s==addr) emit_mov(s,temp);
3044       #endif
3045       #ifdef R29_HACK
3046       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3047       #endif
3048       {
3049         jaddr=(int)out;
3050         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3051         // Hint to branch predictor that the branch is unlikely to be taken
3052         if(rs1[i]>=28)
3053           emit_jno_unlikely(0);
3054         else
3055         #endif
3056         emit_jno(0);
3057       }
3058     }
3059   }else{ // using tlb
3060     int x=0;
3061     if (opcode[i]==0x28) x=3; // SB
3062     if (opcode[i]==0x29) x=2; // SH
3063     map=get_reg(i_regs->regmap,TLREG);
3064     assert(map>=0);
3065     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3066     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3067   }
3068
3069   if (opcode[i]==0x28) { // SB
3070     if(!c||memtarget) {
3071       int x=0;
3072 #ifdef BIG_ENDIAN_MIPS
3073       if(!c) emit_xorimm(addr,3,temp);
3074       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3075 #else
3076       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3077       else if (addr!=temp) emit_mov(addr,temp);
3078 #endif
3079       //gen_tlb_addr_w(temp,map);
3080       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3081       emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3082     }
3083     type=STOREB_STUB;
3084   }
3085   if (opcode[i]==0x29) { // SH
3086     if(!c||memtarget) {
3087       int x=0;
3088 #ifdef BIG_ENDIAN_MIPS
3089       if(!c) emit_xorimm(addr,2,temp);
3090       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3091 #else
3092       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3093       else if (addr!=temp) emit_mov(addr,temp);
3094 #endif
3095       //#ifdef
3096       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3097       //#else
3098       if(map>=0) {
3099         gen_tlb_addr_w(temp,map);
3100         emit_writehword_indexed(tl,x,temp);
3101       }else
3102         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3103     }
3104     type=STOREH_STUB;
3105   }
3106   if (opcode[i]==0x2B) { // SW
3107     if(!c||memtarget)
3108       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3109       emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3110     type=STOREW_STUB;
3111   }
3112   if (opcode[i]==0x3F) { // SD
3113     if(!c||memtarget) {
3114       if(rs2[i]) {
3115         assert(th>=0);
3116         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3117         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3118         emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3119       }else{
3120         // Store zero
3121         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3122         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3123         emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3124       }
3125     }
3126     type=STORED_STUB;
3127   }
3128   if(!using_tlb&&(!c||memtarget))
3129     // addr could be a temp, make sure it survives STORE*_STUB
3130     reglist|=1<<addr;
3131   if(jaddr) {
3132     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3133   } else if(!memtarget) {
3134     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3135   }
3136   if(!using_tlb) {
3137     if(!c||memtarget) {
3138       #ifdef DESTRUCTIVE_SHIFT
3139       // The x86 shift operation is 'destructive'; it overwrites the
3140       // source register, so we need to make a copy first and use that.
3141       addr=temp;
3142       #endif
3143       #if defined(HOST_IMM8)
3144       int ir=get_reg(i_regs->regmap,INVCP);
3145       assert(ir>=0);
3146       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3147       #else
3148       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3149       #endif
3150       jaddr2=(int)out;
3151       emit_jne(0);
3152       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3153     }
3154   }
3155   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3156   //if(opcode[i]==0x2B || opcode[i]==0x28)
3157   //if(opcode[i]==0x2B || opcode[i]==0x29)
3158   //if(opcode[i]==0x2B)
3159   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3160   {
3161     //emit_pusha();
3162     save_regs(0x100f);
3163         emit_readword((int)&last_count,ECX);
3164         #ifdef __i386__
3165         if(get_reg(i_regs->regmap,CCREG)<0)
3166           emit_loadreg(CCREG,HOST_CCREG);
3167         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3168         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3169         emit_writeword(HOST_CCREG,(int)&Count);
3170         #endif
3171         #ifdef __arm__
3172         if(get_reg(i_regs->regmap,CCREG)<0)
3173           emit_loadreg(CCREG,0);
3174         else
3175           emit_mov(HOST_CCREG,0);
3176         emit_add(0,ECX,0);
3177         emit_addimm(0,2*ccadj[i],0);
3178         emit_writeword(0,(int)&Count);
3179         #endif
3180     emit_call((int)memdebug);
3181     //emit_popa();
3182     restore_regs(0x100f);
3183   }/**/
3184 }
3185
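// Emit code for unaligned stores (SWL/SWR/SDL/SDR).  The low address bits
// select one of four alignment cases; each case writes the partial word
// using rotate/shift sequences, and SDL/SDR additionally write the second
// word of the doubleword when the access crosses into it.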
3186 void storelr_assemble(int i,struct regstat *i_regs)
3187 {
3188   int s,th,tl;
3189   int temp;
3190   int temp2;
3191   int offset;
3192   int jaddr=0,jaddr2;
3193   int case1,case2,case3;
3194   int done0,done1,done2;
3195   int memtarget,c=0;
3196   int agr=AGEN1+(i&1);
3197   u_int hr,reglist=0;
3198   th=get_reg(i_regs->regmap,rs2[i]|64);
3199   tl=get_reg(i_regs->regmap,rs2[i]);
3200   s=get_reg(i_regs->regmap,rs1[i]);
3201   temp=get_reg(i_regs->regmap,agr);
3202   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3203   offset=imm[i];
3204   if(s>=0) {
3205     c=(i_regs->isconst>>s)&1;
3206     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3207     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3208   }
3209   assert(tl>=0);
3210   for(hr=0;hr<HOST_REGS;hr++) {
3211     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3212   }
3213   if(tl>=0) {
3214     assert(temp>=0);
3215     if(!using_tlb) {
3216       if(!c) {
3217         emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3218         if(!offset&&s!=temp) emit_mov(s,temp);
3219         jaddr=(int)out;
3220         emit_jno(0);
3221       }
3222       else
3223       {
3224         if(!memtarget||!rs1[i]) {
3225           jaddr=(int)out;
3226           emit_jmp(0);
3227         }
3228       }
3229       if((u_int)rdram!=0x80000000) 
3230         emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3231     }else{ // using tlb
3232       int map=get_reg(i_regs->regmap,TLREG);
3233       assert(map>=0);
3234       map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3235       if(!c&&!offset&&s>=0) emit_mov(s,temp);
3236       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3237       if(!jaddr&&!memtarget) {
3238         jaddr=(int)out;
3239         emit_jmp(0);
3240       }
3241       gen_tlb_addr_w(temp,map);
3242     }
3243
3244     if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3245       temp2=get_reg(i_regs->regmap,FTEMP);
3246       if(!rs2[i]) temp2=th=tl;
3247     }
3248
3249 #ifndef BIG_ENDIAN_MIPS
3250     emit_xorimm(temp,3,temp);
3251 #endif
3252     emit_testimm(temp,2);
3253     case2=(int)out;
3254     emit_jne(0);
3255     emit_testimm(temp,1);
3256     case1=(int)out;
3257     emit_jne(0);
3258     // 0
3259     if (opcode[i]==0x2A) { // SWL
3260       emit_writeword_indexed(tl,0,temp);
3261     }
3262     if (opcode[i]==0x2E) { // SWR
3263       emit_writebyte_indexed(tl,3,temp);
3264     }
3265     if (opcode[i]==0x2C) { // SDL
3266       emit_writeword_indexed(th,0,temp);
3267       if(rs2[i]) emit_mov(tl,temp2);
3268     }
3269     if (opcode[i]==0x2D) { // SDR
3270       emit_writebyte_indexed(tl,3,temp);
3271       if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3272     }
3273     done0=(int)out;
3274     emit_jmp(0);
3275     // 1
3276     set_jump_target(case1,(int)out);
3277     if (opcode[i]==0x2A) { // SWL
3278       // Write 3 msb into three least significant bytes
3279       if(rs2[i]) emit_rorimm(tl,8,tl);
3280       emit_writehword_indexed(tl,-1,temp);
3281       if(rs2[i]) emit_rorimm(tl,16,tl);
3282       emit_writebyte_indexed(tl,1,temp);
3283       if(rs2[i]) emit_rorimm(tl,8,tl);
3284     }
3285     if (opcode[i]==0x2E) { // SWR
3286       // Write two lsb into two most significant bytes
3287       emit_writehword_indexed(tl,1,temp);
3288     }
3289     if (opcode[i]==0x2C) { // SDL
3290       if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3291       // Write 3 msb into three least significant bytes
3292       if(rs2[i]) emit_rorimm(th,8,th);
3293       emit_writehword_indexed(th,-1,temp);
3294       if(rs2[i]) emit_rorimm(th,16,th);
3295       emit_writebyte_indexed(th,1,temp);
3296       if(rs2[i]) emit_rorimm(th,8,th);
3297     }
3298     if (opcode[i]==0x2D) { // SDR
3299       if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3300       // Write two lsb into two most significant bytes
3301       emit_writehword_indexed(tl,1,temp);
3302     }
3303     done1=(int)out;
3304     emit_jmp(0);
3305     // 2
3306     set_jump_target(case2,(int)out);
3307     emit_testimm(temp,1);
3308     case3=(int)out;
3309     emit_jne(0);
3310     if (opcode[i]==0x2A) { // SWL
3311       // Write two msb into two least significant bytes
3312       if(rs2[i]) emit_rorimm(tl,16,tl);
3313       emit_writehword_indexed(tl,-2,temp);
3314       if(rs2[i]) emit_rorimm(tl,16,tl);
3315     }
3316     if (opcode[i]==0x2E) { // SWR
3317       // Write 3 lsb into three most significant bytes
3318       emit_writebyte_indexed(tl,-1,temp);
3319       if(rs2[i]) emit_rorimm(tl,8,tl);
3320       emit_writehword_indexed(tl,0,temp);
3321       if(rs2[i]) emit_rorimm(tl,24,tl);
3322     }
3323     if (opcode[i]==0x2C) { // SDL
3324       if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3325       // Write two msb into two least significant bytes
3326       if(rs2[i]) emit_rorimm(th,16,th);
3327       emit_writehword_indexed(th,-2,temp);
3328       if(rs2[i]) emit_rorimm(th,16,th);
3329     }
3330     if (opcode[i]==0x2D) { // SDR
3331       if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3332       // Write 3 lsb into three most significant bytes
3333       emit_writebyte_indexed(tl,-1,temp);
3334       if(rs2[i]) emit_rorimm(tl,8,tl);
3335       emit_writehword_indexed(tl,0,temp);
3336       if(rs2[i]) emit_rorimm(tl,24,tl);
3337     }
3338     done2=(int)out;
3339     emit_jmp(0);
3340     // 3
3341     set_jump_target(case3,(int)out);
3342     if (opcode[i]==0x2A) { // SWL
3343       // Write msb into least significant byte
3344       if(rs2[i]) emit_rorimm(tl,24,tl);
3345       emit_writebyte_indexed(tl,-3,temp);
3346       if(rs2[i]) emit_rorimm(tl,8,tl);
3347     }
3348     if (opcode[i]==0x2E) { // SWR
3349       // Write entire word
3350       emit_writeword_indexed(tl,-3,temp);
3351     }
3352     if (opcode[i]==0x2C) { // SDL
3353       if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3354       // Write msb into least significant byte
3355       if(rs2[i]) emit_rorimm(th,24,th);
3356       emit_writebyte_indexed(th,-3,temp);
3357       if(rs2[i]) emit_rorimm(th,8,th);
3358     }
3359     if (opcode[i]==0x2D) { // SDR
3360       if(rs2[i]) emit_mov(th,temp2);
3361       // Write entire word
3362       emit_writeword_indexed(tl,-3,temp);
3363     }
3364     set_jump_target(done0,(int)out);
3365     set_jump_target(done1,(int)out);
3366     set_jump_target(done2,(int)out);
3367     if (opcode[i]==0x2C) { // SDL
3368       emit_testimm(temp,4);
3369       done0=(int)out;
3370       emit_jne(0);
3371       emit_andimm(temp,~3,temp);
3372       emit_writeword_indexed(temp2,4,temp);
3373       set_jump_target(done0,(int)out);
3374     }
3375     if (opcode[i]==0x2D) { // SDR
3376       emit_testimm(temp,4);
3377       done0=(int)out;
3378       emit_jeq(0);
3379       emit_andimm(temp,~3,temp);
3380       emit_writeword_indexed(temp2,-4,temp);
3381       set_jump_target(done0,(int)out);
3382     }
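    // The cases above store only the bytes of the source register that
    // fall inside the aligned word, selected by the low address bits:
    // SWL writes the upper bytes of rt into the low end of the word and
    // SWR writes the lower bytes into the high end (SDL/SDR handle the
    // 64-bit register pair the same way).  Each rotate before a store is
    // undone by the rotates after it, so tl/th keep their original
    // values; the rotates are skipped when rs2 is $zero since the value
    // is all zeroes anyway.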
3383     if(!c||!memtarget)
3384       add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3385   }
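  // Self-modifying code check for the store: look up invalid_code[] for
  // the page just written (address>>12) and, if that page is not already
  // marked invalid, branch out to an INVCODE_STUB so any compiled blocks
  // covering it get invalidated.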
3386   if(!using_tlb) {
3387     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3388     #if defined(HOST_IMM8)
3389     int ir=get_reg(i_regs->regmap,INVCP);
3390     assert(ir>=0);
3391     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3392     #else
3393     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3394     #endif
3395     jaddr2=(int)out;
3396     emit_jne(0);
3397     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3398   }
3399   /*
3400     emit_pusha();
3401     //save_regs(0x100f);
3402         emit_readword((int)&last_count,ECX);
3403         if(get_reg(i_regs->regmap,CCREG)<0)
3404           emit_loadreg(CCREG,HOST_CCREG);
3405         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3406         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3407         emit_writeword(HOST_CCREG,(int)&Count);
3408     emit_call((int)memdebug);
3409     emit_popa();
3410     //restore_regs(0x100f);
3411   /**/
3412 }
3413
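// Assemble FPU loads and stores (LWC1/LDC1/SWC1/SDC1).  The host address
// of the FPR is fetched from reg_cop1_simple[]/reg_cop1_double[], COP1
// usability (Status bit 29) is checked via an FP_STUB the first time it
// is needed in the block, and addresses outside RAM fall back to the
// LOADW/LOADD/STOREW/STORED stubs.  Stores also run the invalid_code
// check below to catch self-modifying code.  When COP1 is disabled
// (PCSX builds with DISABLE_COP1) the instruction is routed to
// cop1_unusable() instead.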
3414 void c1ls_assemble(int i,struct regstat *i_regs)
3415 {
3416 #ifndef DISABLE_COP1
3417   int s,th,tl;
3418   int temp,ar;
3419   int map=-1;
3420   int offset;
3421   int c=0;
3422   int jaddr,jaddr2=0,jaddr3,type;
3423   int agr=AGEN1+(i&1);
3424   u_int hr,reglist=0;
3425   th=get_reg(i_regs->regmap,FTEMP|64);
3426   tl=get_reg(i_regs->regmap,FTEMP);
3427   s=get_reg(i_regs->regmap,rs1[i]);
3428   temp=get_reg(i_regs->regmap,agr);
3429   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3430   offset=imm[i];
3431   assert(tl>=0);
3432   assert(rs1[i]>0);
3433   assert(temp>=0);
3434   for(hr=0;hr<HOST_REGS;hr++) {
3435     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3436   }
3437   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3438   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3439   {
3440     // Loads use a temporary register which we need to save
3441     reglist|=1<<temp;
3442   }
3443   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3444     ar=temp;
3445   else // LWC1/LDC1
3446     ar=tl;
3447   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3448   //else c=(i_regs->wasconst>>s)&1;
3449   if(s>=0) c=(i_regs->wasconst>>s)&1;
3450   // Check cop1 unusable
3451   if(!cop1_usable) {
3452     signed char rs=get_reg(i_regs->regmap,CSREG);
3453     assert(rs>=0);
3454     emit_testimm(rs,0x20000000);
3455     jaddr=(int)out;
3456     emit_jeq(0);
3457     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3458     cop1_usable=1;
3459   }
3460   if (opcode[i]==0x39) { // SWC1 (get float address)
3461     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3462   }
3463   if (opcode[i]==0x3D) { // SDC1 (get double address)
3464     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3465   }
3466   // Generate address + offset
3467   if(!using_tlb) {
3468     if(!c)
3469       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3470   }
3471   else
3472   {
3473     map=get_reg(i_regs->regmap,TLREG);
3474     assert(map>=0);
3475     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3476       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3477     }
3478     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3479       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3480     }
3481   }
3482   if (opcode[i]==0x39) { // SWC1 (read float)
3483     emit_readword_indexed(0,tl,tl);
3484   }
3485   if (opcode[i]==0x3D) { // SDC1 (read double)
3486     emit_readword_indexed(4,tl,th);
3487     emit_readword_indexed(0,tl,tl);
3488   }
3489   if (opcode[i]==0x31) { // LWC1 (get target address)
3490     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3491   }
3492   if (opcode[i]==0x35) { // LDC1 (get target address)
3493     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3494   }
3495   if(!using_tlb) {
3496     if(!c) {
3497       jaddr2=(int)out;
3498       emit_jno(0);
3499     }
3500     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3501       jaddr2=(int)out;
3502       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3503     }
3504     #ifdef DESTRUCTIVE_SHIFT
3505     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3506       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3507     }
3508     #endif
3509   }else{
3510     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3511       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3512     }
3513     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3514       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3515     }
3516   }
3517   if (opcode[i]==0x31) { // LWC1
3518     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3519     //gen_tlb_addr_r(ar,map);
3520     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3521     #ifdef HOST_IMM_ADDR32
3522     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3523     else
3524     #endif
3525     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3526     type=LOADW_STUB;
3527   }
3528   if (opcode[i]==0x35) { // LDC1
3529     assert(th>=0);
3530     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3531     //gen_tlb_addr_r(ar,map);
3532     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3533     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3534     #ifdef HOST_IMM_ADDR32
3535     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3536     else
3537     #endif
3538     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3539     type=LOADD_STUB;
3540   }
3541   if (opcode[i]==0x39) { // SWC1
3542     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3543     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3544     type=STOREW_STUB;
3545   }
3546   if (opcode[i]==0x3D) { // SDC1
3547     assert(th>=0);
3548     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3549     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3550     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3551     type=STORED_STUB;
3552   }
3553   if(!using_tlb) {
3554     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3555       #ifndef DESTRUCTIVE_SHIFT
3556       temp=offset||c||s<0?ar:s;
3557       #endif
3558       #if defined(HOST_IMM8)
3559       int ir=get_reg(i_regs->regmap,INVCP);
3560       assert(ir>=0);
3561       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3562       #else
3563       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3564       #endif
3565       jaddr3=(int)out;
3566       emit_jne(0);
3567       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3568     }
3569   }
3570   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3571   if (opcode[i]==0x31) { // LWC1 (write float)
3572     emit_writeword_indexed(tl,0,temp);
3573   }
3574   if (opcode[i]==0x35) { // LDC1 (write double)
3575     emit_writeword_indexed(th,4,temp);
3576     emit_writeword_indexed(tl,0,temp);
3577   }
3578   //if(opcode[i]==0x39)
3579   /*if(opcode[i]==0x39||opcode[i]==0x31)
3580   {
3581     emit_pusha();
3582         emit_readword((int)&last_count,ECX);
3583         if(get_reg(i_regs->regmap,CCREG)<0)
3584           emit_loadreg(CCREG,HOST_CCREG);
3585         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3586         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3587         emit_writeword(HOST_CCREG,(int)&Count);
3588     emit_call((int)memdebug);
3589     emit_popa();
3590   }/**/
3591 #else
3592   cop1_unusable(i, i_regs);
3593 #endif
3594 }
3595
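// Assemble GTE loads and stores (LWC2/SWC2).  For SWC2 the GTE data
// register is copied out with cop2_get_dreg() before the store; for LWC2
// the loaded word is written back with cop2_put_dreg() afterwards.
// Addresses outside RAM go through LOADW/STOREW stubs, and SWC2 performs
// the same invalid_code check as the other stores.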
3596 void c2ls_assemble(int i,struct regstat *i_regs)
3597 {
3598   int s,tl;
3599   int ar;
3600   int offset;
3601   int memtarget=0,c=0;
3602   int jaddr2=0,jaddr3,type;
3603   int agr=AGEN1+(i&1);
3604   u_int hr,reglist=0;
3605   u_int copr=(source[i]>>16)&0x1f;
3606   s=get_reg(i_regs->regmap,rs1[i]);
3607   tl=get_reg(i_regs->regmap,FTEMP);
3608   offset=imm[i];
3609   assert(rs1[i]>0);
3610   assert(tl>=0);
3611   assert(!using_tlb);
3612
3613   for(hr=0;hr<HOST_REGS;hr++) {
3614     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3615   }
3616   if(i_regs->regmap[HOST_CCREG]==CCREG)
3617     reglist&=~(1<<HOST_CCREG);
3618
3619   // get the address
3620   if (opcode[i]==0x3a) { // SWC2
3621     ar=get_reg(i_regs->regmap,agr);
3622     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3623     reglist|=1<<ar;
3624   } else { // LWC2
3625     ar=tl;
3626   }
3627   if(s>=0) c=(i_regs->wasconst>>s)&1;
3628   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3629   if (!offset&&!c&&s>=0) ar=s;
3630   assert(ar>=0);
3631
3632   if (opcode[i]==0x3a) { // SWC2
3633     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3634     type=STOREW_STUB;
3635   }
3636   else
3637     type=LOADW_STUB;
3638
3639   if(c&&!memtarget) {
3640     jaddr2=(int)out;
3641     emit_jmp(0); // inline_readstub/inline_writestub?
3642   }
3643   else {
3644     if(!c) {
3645       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3646       jaddr2=(int)out;
3647       emit_jno(0);
3648     }
3649     if (opcode[i]==0x32) { // LWC2
3650       #ifdef HOST_IMM_ADDR32
3651       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3652       else
3653       #endif
3654       emit_readword_indexed(0,ar,tl);
3655     }
3656     if (opcode[i]==0x3a) { // SWC2
3657       #ifdef DESTRUCTIVE_SHIFT
3658       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3659       #endif
3660       emit_writeword_indexed(tl,0,ar);
3661     }
3662   }
3663   if(jaddr2)
3664     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3665   if (opcode[i]==0x3a) { // SWC2
3666 #if defined(HOST_IMM8)
3667     int ir=get_reg(i_regs->regmap,INVCP);
3668     assert(ir>=0);
3669     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3670 #else
3671     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3672 #endif
3673     jaddr3=(int)out;
3674     emit_jne(0);
3675     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3676   }
3677   if (opcode[i]==0x32) { // LWC2
3678     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3679   }
3680 }
3681
3682 #ifndef multdiv_assemble
3683 void multdiv_assemble(int i,struct regstat *i_regs)
3684 {
3685   printf("Need multdiv_assemble for this architecture.\n");
3686   exit(1);
3687 }
3688 #endif
3689
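// MFHI/MFLO/MTHI/MTLO are assembled as plain register-to-register moves:
// copy the low (and, if allocated, high) half of rs1 into rt1, loading
// from the register file when the source is not currently mapped.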
3690 void mov_assemble(int i,struct regstat *i_regs)
3691 {
3692   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3693   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3694   assert(rt1[i]>0);
3695   if(rt1[i]) {
3696     signed char sh,sl,th,tl;
3697     th=get_reg(i_regs->regmap,rt1[i]|64);
3698     tl=get_reg(i_regs->regmap,rt1[i]);
3699     //assert(tl>=0);
3700     if(tl>=0) {
3701       sh=get_reg(i_regs->regmap,rs1[i]|64);
3702       sl=get_reg(i_regs->regmap,rs1[i]);
3703       if(sl>=0) emit_mov(sl,tl);
3704       else emit_loadreg(rs1[i],tl);
3705       if(th>=0) {
3706         if(sh>=0) emit_mov(sh,th);
3707         else emit_loadreg(rs1[i]|64,th);
3708       }
3709     }
3710   }
3711 }
3712
3713 #ifndef fconv_assemble
3714 void fconv_assemble(int i,struct regstat *i_regs)
3715 {
3716   printf("Need fconv_assemble for this architecture.\n");
3717   exit(1);
3718 }
3719 #endif
3720
3721 #if 0
3722 void float_assemble(int i,struct regstat *i_regs)
3723 {
3724   printf("Need float_assemble for this architecture.\n");
3725   exit(1);
3726 }
3727 #endif
3728
3729 void syscall_assemble(int i,struct regstat *i_regs)
3730 {
3731   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3732   assert(ccreg==HOST_CCREG);
3733   assert(!is_delayslot);
3734   emit_movimm(start+i*4,EAX); // Get PC
3735   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3736   emit_jmp((int)jump_syscall_hle); // XXX
3737 }
3738
3739 void hlecall_assemble(int i,struct regstat *i_regs)
3740 {
3741   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3742   assert(ccreg==HOST_CCREG);
3743   assert(!is_delayslot);
3744   emit_movimm(start+i*4+4,0); // Get PC
3745   emit_movimm((int)psxHLEt[source[i]&7],1);
3746   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3747   emit_jmp((int)jump_hlecall);
3748 }
3749
3750 void ds_assemble(int i,struct regstat *i_regs)
3751 {
3752   is_delayslot=1;
3753   switch(itype[i]) {
3754     case ALU:
3755       alu_assemble(i,i_regs);break;
3756     case IMM16:
3757       imm16_assemble(i,i_regs);break;
3758     case SHIFT:
3759       shift_assemble(i,i_regs);break;
3760     case SHIFTIMM:
3761       shiftimm_assemble(i,i_regs);break;
3762     case LOAD:
3763       load_assemble(i,i_regs);break;
3764     case LOADLR:
3765       loadlr_assemble(i,i_regs);break;
3766     case STORE:
3767       store_assemble(i,i_regs);break;
3768     case STORELR:
3769       storelr_assemble(i,i_regs);break;
3770     case COP0:
3771       cop0_assemble(i,i_regs);break;
3772     case COP1:
3773       cop1_assemble(i,i_regs);break;
3774     case C1LS:
3775       c1ls_assemble(i,i_regs);break;
3776     case COP2:
3777       cop2_assemble(i,i_regs);break;
3778     case C2LS:
3779       c2ls_assemble(i,i_regs);break;
3780     case C2OP:
3781       c2op_assemble(i,i_regs);break;
3782     case FCONV:
3783       fconv_assemble(i,i_regs);break;
3784     case FLOAT:
3785       float_assemble(i,i_regs);break;
3786     case FCOMP:
3787       fcomp_assemble(i,i_regs);break;
3788     case MULTDIV:
3789       multdiv_assemble(i,i_regs);break;
3790     case MOV:
3791       mov_assemble(i,i_regs);break;
3792     case SYSCALL:
3793     case HLECALL:
3794     case SPAN:
3795     case UJUMP:
3796     case RJUMP:
3797     case CJUMP:
3798     case SJUMP:
3799     case FJUMP:
3800       printf("Jump in the delay slot.  This is probably a bug.\n");
3801   }
3802   is_delayslot=0;
3803 }
3804
3805 // Is the branch target a valid internal jump?
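// An odd address marks a target that is not statically known (register
// jump), so it can never be an internal branch.  A target inside the
// current block is also rejected if it requires registers to be 32-bit
// that this path does not guarantee (requires_32bit), since that
// transition would need a recompile.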
3806 int internal_branch(uint64_t i_is32,int addr)
3807 {
3808   if(addr&1) return 0; // Indirect (register) jump
3809   if(addr>=start && addr<start+slen*4-4)
3810   {
3811     int t=(addr-start)>>2;
3812     // Delay slots are not valid branch targets
3813     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3814     // 64 -> 32 bit transition requires a recompile
3815     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3816     {
3817       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3818       else printf("optimizable: yes\n");
3819     }*/
3820     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3821     if(requires_32bit[t]&~i_is32) return 0;
3822     else return 1;
3823   }
3824   return 0;
3825 }
3826
3827 #ifndef wb_invalidate
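// Write back cached registers whose mapping changes between the previous
// instruction's allocation (pre[]) and the current entry allocation
// (entry[]): dirty values whose guest register is no longer mapped
// anywhere are stored back to the register file, while values that just
// moved to a different host register are copied instead of written back.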
3828 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3829   uint64_t u,uint64_t uu)
3830 {
3831   int hr;
3832   for(hr=0;hr<HOST_REGS;hr++) {
3833     if(hr!=EXCLUDE_REG) {
3834       if(pre[hr]!=entry[hr]) {
3835         if(pre[hr]>=0) {
3836           if((dirty>>hr)&1) {
3837             if(get_reg(entry,pre[hr])<0) {
3838               if(pre[hr]<64) {
3839                 if(!((u>>pre[hr])&1)) {
3840                   emit_storereg(pre[hr],hr);
3841                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3842                     emit_sarimm(hr,31,hr);
3843                     emit_storereg(pre[hr]|64,hr);
3844                   }
3845                 }
3846               }else{
3847                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3848                   emit_storereg(pre[hr],hr);
3849                 }
3850               }
3851             }
3852           }
3853         }
3854       }
3855     }
3856   }
3857   // Move from one register to another (no writeback)
3858   for(hr=0;hr<HOST_REGS;hr++) {
3859     if(hr!=EXCLUDE_REG) {
3860       if(pre[hr]!=entry[hr]) {
3861         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3862           int nr;
3863           if((nr=get_reg(entry,pre[hr]))>=0) {
3864             emit_mov(hr,nr);
3865           }
3866         }
3867       }
3868     }
3869   }
3870 }
3871 #endif
3872
3873 // Load the specified registers
3874 // This only loads the registers given as arguments because
3875 // we don't want to load things that will be overwritten
3876 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3877 {
3878   int hr;
3879   // Load 32-bit regs
3880   for(hr=0;hr<HOST_REGS;hr++) {
3881     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3882       if(entry[hr]!=regmap[hr]) {
3883         if(regmap[hr]==rs1||regmap[hr]==rs2)
3884         {
3885           if(regmap[hr]==0) {
3886             emit_zeroreg(hr);
3887           }
3888           else
3889           {
3890             emit_loadreg(regmap[hr],hr);
3891           }
3892         }
3893       }
3894     }
3895   }
3896   // Load 64-bit regs
3897   for(hr=0;hr<HOST_REGS;hr++) {
3898     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3899       if(entry[hr]!=regmap[hr]) {
3900         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3901         {
3902           assert(regmap[hr]!=64);
3903           if((is32>>(regmap[hr]&63))&1) {
3904             int lr=get_reg(regmap,regmap[hr]-64);
3905             if(lr>=0)
3906               emit_sarimm(lr,31,hr);
3907             else
3908               emit_loadreg(regmap[hr],hr);
3909           }
3910           else
3911           {
3912             emit_loadreg(regmap[hr],hr);
3913           }
3914         }
3915       }
3916     }
3917   }
3918 }
3919
3920 // Load registers prior to the start of a loop
3921 // so that they are not loaded within the loop
3922 static void loop_preload(signed char pre[],signed char entry[])
3923 {
3924   int hr;
3925   for(hr=0;hr<HOST_REGS;hr++) {
3926     if(hr!=EXCLUDE_REG) {
3927       if(pre[hr]!=entry[hr]) {
3928         if(entry[hr]>=0) {
3929           if(get_reg(pre,entry[hr])<0) {
3930             assem_debug("loop preload:\n");
3931             //printf("loop preload: %d\n",hr);
3932             if(entry[hr]==0) {
3933               emit_zeroreg(hr);
3934             }
3935             else if(entry[hr]<TEMPREG)
3936             {
3937               emit_loadreg(entry[hr],hr);
3938             }
3939             else if(entry[hr]-64<TEMPREG)
3940             {
3941               emit_loadreg(entry[hr],hr);
3942             }
3943           }
3944         }
3945       }
3946     }
3947   }
3948 }
3949
3950 // Generate address for load/store instruction
3951 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
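// For a constant base the effective address is materialized directly
// (masked to the containing word for LWL/LWR and doubleword for LDL/LDR),
// otherwise the base register is loaded and the offset added into the
// AGEN register.  On TLB builds the mapper entry (TLREG/MGEN) is
// preloaded as well.  The second half of this function pre-computes the
// constants needed by the *next* load/store so that work overlaps with
// the current instruction.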
3952 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3953 {
3954   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3955     int ra;
3956     int agr=AGEN1+(i&1);
3957     int mgr=MGEN1+(i&1);
3958     if(itype[i]==LOAD) {
3959       ra=get_reg(i_regs->regmap,rt1[i]);
3960       //if(rt1[i]) assert(ra>=0);
3961     }
3962     if(itype[i]==LOADLR) {
3963       ra=get_reg(i_regs->regmap,FTEMP);
3964     }
3965     if(itype[i]==STORE||itype[i]==STORELR) {
3966       ra=get_reg(i_regs->regmap,agr);
3967       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3968     }
3969     if(itype[i]==C1LS||itype[i]==C2LS) {
3970       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3971         ra=get_reg(i_regs->regmap,FTEMP);
3972       else { // SWC1/SDC1/SWC2/SDC2
3973         ra=get_reg(i_regs->regmap,agr);
3974         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3975       }
3976     }
3977     int rs=get_reg(i_regs->regmap,rs1[i]);
3978     int rm=get_reg(i_regs->regmap,TLREG);
3979     if(ra>=0) {
3980       int offset=imm[i];
3981       int c=(i_regs->wasconst>>rs)&1;
3982       if(rs1[i]==0) {
3983         // Using r0 as a base address
3984         /*if(rm>=0) {
3985           if(!entry||entry[rm]!=mgr) {
3986             generate_map_const(offset,rm);
3987           } // else did it in the previous cycle
3988         }*/
3989         if(!entry||entry[ra]!=agr) {
3990           if (opcode[i]==0x22||opcode[i]==0x26) {
3991             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3992           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3993             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3994           }else{
3995             emit_movimm(offset,ra);
3996           }
3997         } // else did it in the previous cycle
3998       }
3999       else if(rs<0) {
4000         if(!entry||entry[ra]!=rs1[i])
4001           emit_loadreg(rs1[i],ra);
4002         //if(!entry||entry[ra]!=rs1[i])
4003         //  printf("poor load scheduling!\n");
4004       }
4005       else if(c) {
4006         if(rm>=0) {
4007           if(!entry||entry[rm]!=mgr) {
4008             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4009               // Stores to memory go through the mapper to detect self-modifying
4010               // code, loads don't.
4011               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4012                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4013                 generate_map_const(constmap[i][rs]+offset,rm);
4014             }else{
4015               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4016                 generate_map_const(constmap[i][rs]+offset,rm);
4017             }
4018           }
4019         }
4020         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4021           if(!entry||entry[ra]!=agr) {
4022             if (opcode[i]==0x22||opcode[i]==0x26) {
4023               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4024             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4025               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4026             }else{
4027               #ifdef HOST_IMM_ADDR32
4028               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4029                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4030               #endif
4031               emit_movimm(constmap[i][rs]+offset,ra);
4032             }
4033           } // else did it in the previous cycle
4034         } // else load_consts already did it
4035       }
4036       if(offset&&!c&&rs1[i]) {
4037         if(rs>=0) {
4038           emit_addimm(rs,offset,ra);
4039         }else{
4040           emit_addimm(ra,offset,ra);
4041         }
4042       }
4043     }
4044   }
4045   // Preload constants for next instruction
4046   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4047     int agr,ra;
4048     #ifndef HOST_IMM_ADDR32
4049     // Mapper entry
4050     agr=MGEN1+((i+1)&1);
4051     ra=get_reg(i_regs->regmap,agr);
4052     if(ra>=0) {
4053       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4054       int offset=imm[i+1];
4055       int c=(regs[i+1].wasconst>>rs)&1;
4056       if(c) {
4057         if(itype[i+1]==STORE||itype[i+1]==STORELR
4058            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4059           // Stores to memory go through the mapper to detect self-modifying
4060           // code, loads don't.
4061           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4062              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4063             generate_map_const(constmap[i+1][rs]+offset,ra);
4064         }else{
4065           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4066             generate_map_const(constmap[i+1][rs]+offset,ra);
4067         }
4068       }
4069       /*else if(rs1[i]==0) {
4070         generate_map_const(offset,ra);
4071       }*/
4072     }
4073     #endif
4074     // Actual address
4075     agr=AGEN1+((i+1)&1);
4076     ra=get_reg(i_regs->regmap,agr);
4077     if(ra>=0) {
4078       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4079       int offset=imm[i+1];
4080       int c=(regs[i+1].wasconst>>rs)&1;
4081       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4082         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4083           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4084         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4085           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4086         }else{
4087           #ifdef HOST_IMM_ADDR32
4088           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4089              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4090           #endif
4091           emit_movimm(constmap[i+1][rs]+offset,ra);
4092         }
4093       }
4094       else if(rs1[i+1]==0) {
4095         // Using r0 as a base address
4096         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4097           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4098         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4099           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4100         }else{
4101           emit_movimm(offset,ra);
4102         }
4103       }
4104     }
4105   }
4106 }
4107
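// Constant-propagation helper: walk forward while host register hr keeps
// holding the same constant guest register, and return the final value
// of that run so only one movimm needs to be emitted.  A constant that is
// immediately used as a load address (including the out-of-order case
// around a branch delay slot) has the load offset folded in, and 0 is
// returned when the value turns out to be unneeded afterwards.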
4108 int get_final_value(int hr, int i, int *value)
4109 {
4110   int reg=regs[i].regmap[hr];
4111   while(i<slen-1) {
4112     if(regs[i+1].regmap[hr]!=reg) break;
4113     if(!((regs[i+1].isconst>>hr)&1)) break;
4114     if(bt[i+1]) break;
4115     i++;
4116   }
4117   if(i<slen-1) {
4118     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4119       *value=constmap[i][hr];
4120       return 1;
4121     }
4122     if(!bt[i+1]) {
4123       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4124         // Load in delay slot, out-of-order execution
4125         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4126         {
4127           #ifdef HOST_IMM_ADDR32
4128           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4129           #endif
4130           // Precompute load address
4131           *value=constmap[i][hr]+imm[i+2];
4132           return 1;
4133         }
4134       }
4135       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4136       {
4137         #ifdef HOST_IMM_ADDR32
4138         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4139         #endif
4140         // Precompute load address
4141         *value=constmap[i][hr]+imm[i+1];
4142         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4143         return 1;
4144       }
4145     }
4146   }
4147   *value=constmap[i][hr];
4148   //printf("c=%x\n",(int)constmap[i][hr]);
4149   if(i==slen-1) return 1;
4150   if(reg<64) {
4151     return !((unneeded_reg[i+1]>>reg)&1);
4152   }else{
4153     return !((unneeded_reg_upper[i+1]>>reg)&1);
4154   }
4155 }
4156
4157 // Load registers with known constants
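// Only constants that changed since the previous instruction (or that
// must be re-established at a branch target, bt[i]) are materialized,
// and get_final_value() lets intermediate values in a run of constants
// be skipped entirely.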
4158 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4159 {
4160   int hr;
4161   // Load 32-bit regs
4162   for(hr=0;hr<HOST_REGS;hr++) {
4163     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4164       //if(entry[hr]!=regmap[hr]) {
4165       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4166         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4167           int value;
4168           if(get_final_value(hr,i,&value)) {
4169             if(value==0) {
4170               emit_zeroreg(hr);
4171             }
4172             else {
4173               emit_movimm(value,hr);
4174             }
4175           }
4176         }
4177       }
4178     }
4179   }
4180   // Load 64-bit regs
4181   for(hr=0;hr<HOST_REGS;hr++) {
4182     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4183       //if(entry[hr]!=regmap[hr]) {
4184       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4185         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4186           if((is32>>(regmap[hr]&63))&1) {
4187             int lr=get_reg(regmap,regmap[hr]-64);
4188             assert(lr>=0);
4189             emit_sarimm(lr,31,hr);
4190           }
4191           else
4192           {
4193             int value;
4194             if(get_final_value(hr,i,&value)) {
4195               if(value==0) {
4196                 emit_zeroreg(hr);
4197               }
4198               else {
4199                 emit_movimm(value,hr);
4200               }
4201             }
4202           }
4203         }
4204       }
4205     }
4206   }
4207 }
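// Like load_consts, but materializes every dirty constant register using
// constmap[i] directly instead of get_final_value(); for paths that need
// the complete register state rather than just the final values.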
4208 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4209 {
4210   int hr;
4211   // Load 32-bit regs
4212   for(hr=0;hr<HOST_REGS;hr++) {
4213     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4214       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4215         int value=constmap[i][hr];
4216         if(value==0) {
4217           emit_zeroreg(hr);
4218         }
4219         else {
4220           emit_movimm(value,hr);
4221         }
4222       }
4223     }
4224   }
4225   // Load 64-bit regs
4226   for(hr=0;hr<HOST_REGS;hr++) {
4227     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4228       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4229         if((is32>>(regmap[hr]&63))&1) {
4230           int lr=get_reg(regmap,regmap[hr]-64);
4231           assert(lr>=0);
4232           emit_sarimm(lr,31,hr);
4233         }
4234         else
4235         {
4236           int value=constmap[i][hr];
4237           if(value==0) {
4238             emit_zeroreg(hr);
4239           }
4240           else {
4241             emit_movimm(value,hr);
4242           }
4243         }
4244       }
4245     }
4246   }
4247 }
4248
4249 // Write out all dirty registers (except cycle count)
4250 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4251 {
4252   int hr;
4253   for(hr=0;hr<HOST_REGS;hr++) {
4254     if(hr!=EXCLUDE_REG) {
4255       if(i_regmap[hr]>0) {
4256         if(i_regmap[hr]!=CCREG) {
4257           if((i_dirty>>hr)&1) {
4258             if(i_regmap[hr]<64) {
4259               emit_storereg(i_regmap[hr],hr);
4260 #ifndef FORCE32
4261               if( ((i_is32>>i_regmap[hr])&1) ) {
4262                 #ifdef DESTRUCTIVE_WRITEBACK
4263                 emit_sarimm(hr,31,hr);
4264                 emit_storereg(i_regmap[hr]|64,hr);
4265                 #else
4266                 emit_sarimm(hr,31,HOST_TEMPREG);
4267                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4268                 #endif
4269               }
4270 #endif
4271             }else{
4272               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4273                 emit_storereg(i_regmap[hr],hr);
4274               }
4275             }
4276           }
4277         }
4278       }
4279     }
4280   }
4281 }
4282 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4283 // This writes the registers not written by store_regs_bt
4284 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4285 {
4286   int hr;
4287   int t=(addr-start)>>2;
4288   for(hr=0;hr<HOST_REGS;hr++) {
4289     if(hr!=EXCLUDE_REG) {
4290       if(i_regmap[hr]>0) {
4291         if(i_regmap[hr]!=CCREG) {
4292           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4293             if((i_dirty>>hr)&1) {
4294               if(i_regmap[hr]<64) {
4295                 emit_storereg(i_regmap[hr],hr);
4296 #ifndef FORCE32
4297                 if( ((i_is32>>i_regmap[hr])&1) ) {
4298                   #ifdef DESTRUCTIVE_WRITEBACK
4299                   emit_sarimm(hr,31,hr);
4300                   emit_storereg(i_regmap[hr]|64,hr);
4301                   #else
4302                   emit_sarimm(hr,31,HOST_TEMPREG);
4303                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4304                   #endif
4305                 }
4306 #endif
4307               }else{
4308                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4309                   emit_storereg(i_regmap[hr],hr);
4310                 }
4311               }
4312             }
4313           }
4314         }
4315       }
4316     }
4317   }
4318 }
4319
4320 // Load all registers (except cycle count)
4321 void load_all_regs(signed char i_regmap[])
4322 {
4323   int hr;
4324   for(hr=0;hr<HOST_REGS;hr++) {
4325     if(hr!=EXCLUDE_REG) {
4326       if(i_regmap[hr]==0) {
4327         emit_zeroreg(hr);
4328       }
4329       else
4330       if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4331       {
4332         emit_loadreg(i_regmap[hr],hr);
4333       }
4334     }
4335   }
4336 }
4337
4338 // Load all current registers also needed by next instruction
4339 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4340 {
4341   int hr;
4342   for(hr=0;hr<HOST_REGS;hr++) {
4343     if(hr!=EXCLUDE_REG) {
4344       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4345         if(i_regmap[hr]==0) {
4346           emit_zeroreg(hr);
4347         }
4348         else
4349         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4350         {
4351           emit_loadreg(i_regmap[hr],hr);
4352         }
4353       }
4354     }
4355   }
4356 }
4357
4358 // Load all regs, storing cycle count if necessary
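// The cycle counter is rebased first: entering at a delay slot adds one
// CLOCK_DIVIDER tick, otherwise ccadj[t]*CLOCK_DIVIDER is subtracted so
// the count matches what the code at t expects.  CCREG is spilled to the
// register file when the entry map does not keep it in HOST_CCREG.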
4359 void load_regs_entry(int t)
4360 {
4361   int hr;
4362   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4363   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4364   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4365     emit_storereg(CCREG,HOST_CCREG);
4366   }
4367   // Load 32-bit regs
4368   for(hr=0;hr<HOST_REGS;hr++) {
4369     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4370       if(regs[t].regmap_entry[hr]==0) {
4371         emit_zeroreg(hr);
4372       }
4373       else if(regs[t].regmap_entry[hr]!=CCREG)
4374       {
4375         emit_loadreg(regs[t].regmap_entry[hr],hr);
4376       }
4377     }
4378   }
4379   // Load 64-bit regs
4380   for(hr=0;hr<HOST_REGS;hr++) {
4381     if(regs[t].regmap_entry[hr]>=64) {
4382       assert(regs[t].regmap_entry[hr]!=64);
4383       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4384         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4385         if(lr<0) {
4386           emit_loadreg(regs[t].regmap_entry[hr],hr);
4387         }
4388         else
4389         {
4390           emit_sarimm(lr,31,hr);
4391         }
4392       }
4393       else
4394       {
4395         emit_loadreg(regs[t].regmap_entry[hr],hr);
4396       }
4397     }
4398   }
4399 }
4400
4401 // Store dirty registers prior to branch
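// For an internal branch, only dirty registers that do not already match
// the target's cached entry state are written back, and values the
// target marks as unneeded are skipped; a branch out of the block
// flushes everything via wb_dirtys().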
4402 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4403 {
4404   if(internal_branch(i_is32,addr))
4405   {
4406     int t=(addr-start)>>2;
4407     int hr;
4408     for(hr=0;hr<HOST_REGS;hr++) {
4409       if(hr!=EXCLUDE_REG) {
4410         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4411           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4412             if((i_dirty>>hr)&1) {
4413               if(i_regmap[hr]<64) {
4414                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4415                   emit_storereg(i_regmap[hr],hr);
4416                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4417                     #ifdef DESTRUCTIVE_WRITEBACK
4418                     emit_sarimm(hr,31,hr);
4419                     emit_storereg(i_regmap[hr]|64,hr);
4420                     #else
4421                     emit_sarimm(hr,31,HOST_TEMPREG);
4422                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4423                     #endif
4424                   }
4425                 }
4426               }else{
4427                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4428                   emit_storereg(i_regmap[hr],hr);
4429                 }
4430               }
4431             }
4432           }
4433         }
4434       }
4435     }
4436   }
4437   else
4438   {
4439     // Branch out of this block, write out all dirty regs
4440     wb_dirtys(i_regmap,i_is32,i_dirty);
4441   }
4442 }
4443
4444 // Load all needed registers for branch target
4445 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4446 {
4447   //if(addr>=start && addr<(start+slen*4))
4448   if(internal_branch(i_is32,addr))
4449   {
4450     int t=(addr-start)>>2;
4451     int hr;
4452     // Store the cycle count before loading something else
4453     if(i_regmap[HOST_CCREG]!=CCREG) {
4454       assert(i_regmap[HOST_CCREG]==-1);
4455     }
4456     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4457       emit_storereg(CCREG,HOST_CCREG);
4458     }
4459     // Load 32-bit regs
4460     for(hr=0;hr<HOST_REGS;hr++) {
4461       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4462         #ifdef DESTRUCTIVE_WRITEBACK
4463         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4464         #else
4465         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4466         #endif
4467           if(regs[t].regmap_entry[hr]==0) {
4468             emit_zeroreg(hr);
4469           }
4470           else if(regs[t].regmap_entry[hr]!=CCREG)
4471           {
4472             emit_loadreg(regs[t].regmap_entry[hr],hr);
4473           }
4474         }
4475       }
4476     }
4477     // Load 64-bit regs
4478     for(hr=0;hr<HOST_REGS;hr++) {
4479       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4480         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4481           assert(regs[t].regmap_entry[hr]!=64);
4482           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4483             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4484             if(lr<0) {
4485               emit_loadreg(regs[t].regmap_entry[hr],hr);
4486             }
4487             else
4488             {
4489               emit_sarimm(lr,31,hr);
4490             }
4491           }
4492           else
4493           {
4494             emit_loadreg(regs[t].regmap_entry[hr],hr);
4495           }
4496         }
4497         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4498           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4499           assert(lr>=0);
4500           emit_sarimm(lr,31,hr);
4501         }
4502       }
4503     }
4504   }
4505 }
4506
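// Decide whether the current register state is compatible with the cached
// state at a branch target, i.e. whether the branch can jump there
// directly with no writeback or reload.  Any mismatch in mapping,
// dirtiness or 32/64-bit width that the target cares about makes it
// fail, and delay-slot targets never match since they need extra
// processing.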
4507 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4508 {
4509   if(addr>=start && addr<start+slen*4-4)
4510   {
4511     int t=(addr-start)>>2;
4512     int hr;
4513     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4514     for(hr=0;hr<HOST_REGS;hr++)
4515     {
4516       if(hr!=EXCLUDE_REG)
4517       {
4518         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4519         {
4520           if(regs[t].regmap_entry[hr]!=-1)
4521           {
4522             return 0;
4523           }
4524           else 
4525           if((i_dirty>>hr)&1)
4526           {
4527             if(i_regmap[hr]<64)
4528             {
4529               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4530                 return 0;
4531             }
4532             else
4533             {
4534               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4535                 return 0;
4536             }
4537           }
4538         }
4539         else // Same register but is it 32-bit or dirty?
4540         if(i_regmap[hr]>=0)
4541         {
4542           if(!((regs[t].dirty>>hr)&1))
4543           {
4544             if((i_dirty>>hr)&1)
4545             {
4546               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4547               {
4548                 //printf("%x: dirty no match\n",addr);
4549                 return 0;
4550               }
4551             }
4552           }
4553           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4554           {
4555             //printf("%x: is32 no match\n",addr);
4556             return 0;
4557           }
4558         }
4559       }
4560     }
4561     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4562     if(requires_32bit[t]&~i_is32) return 0;
4563     // Delay slots are not valid branch targets
4564     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4565     // Delay slots require additional processing, so do not match
4566     if(is_ds[t]) return 0;
4567   }
4568   else
4569   {
4570     int hr;
4571     for(hr=0;hr<HOST_REGS;hr++)
4572     {
4573       if(hr!=EXCLUDE_REG)
4574       {
4575         if(i_regmap[hr]>=0)
4576         {
4577           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4578           {
4579             if((i_dirty>>hr)&1)
4580             {
4581               return 0;
4582             }
4583           }
4584         }
4585       }
4586     }
4587   }
4588   return 1;
4589 }
4590
4591 // Used when a branch jumps into the delay slot of another branch
4592 void ds_assemble_entry(int i)
4593 {
4594   int t=(ba[i]-start)>>2;
4595   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4596   assem_debug("Assemble delay slot at %x\n",ba[i]);
4597   assem_debug("<->\n");
4598   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4599     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4600   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4601   address_generation(t,&regs[t],regs[t].regmap_entry);
4602   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4603     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4604   cop1_usable=0;
4605   is_delayslot=0;
4606   switch(itype[t]) {
4607     case ALU:
4608       alu_assemble(t,&regs[t]);break;
4609     case IMM16:
4610       imm16_assemble(t,&regs[t]);break;
4611     case SHIFT:
4612       shift_assemble(t,&regs[t]);break;
4613     case SHIFTIMM:
4614       shiftimm_assemble(t,&regs[t]);break;
4615     case LOAD:
4616       load_assemble(t,&regs[t]);break;
4617     case LOADLR:
4618       loadlr_assemble(t,&regs[t]);break;
4619     case STORE:
4620       store_assemble(t,&regs[t]);break;
4621     case STORELR:
4622       storelr_assemble(t,&regs[t]);break;
4623     case COP0:
4624       cop0_assemble(t,&regs[t]);break;
4625     case COP1:
4626       cop1_assemble(t,&regs[t]);break;
4627     case C1LS:
4628       c1ls_assemble(t,&regs[t]);break;
4629     case COP2:
4630       cop2_assemble(t,&regs[t]);break;
4631     case C2LS:
4632       c2ls_assemble(t,&regs[t]);break;
4633     case C2OP:
4634       c2op_assemble(t,&regs[t]);break;
4635     case FCONV:
4636       fconv_assemble(t,&regs[t]);break;
4637     case FLOAT:
4638       float_assemble(t,&regs[t]);break;
4639     case FCOMP:
4640       fcomp_assemble(t,&regs[t]);break;
4641     case MULTDIV:
4642       multdiv_assemble(t,&regs[t]);break;
4643     case MOV:
4644       mov_assemble(t,&regs[t]);break;
4645     case SYSCALL:
4646     case HLECALL:
4647     case SPAN:
4648     case UJUMP:
4649     case RJUMP:
4650     case CJUMP:
4651     case SJUMP:
4652     case FJUMP:
4653       printf("Jump in the delay slot.  This is probably a bug.\n");
4654   }
4655   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4656   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4657   if(internal_branch(regs[t].is32,ba[i]+4))
4658     assem_debug("branch: internal\n");
4659   else
4660     assem_debug("branch: external\n");
4661   assert(internal_branch(regs[t].is32,ba[i]+4));
4662   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4663   emit_jmp(0);
4664 }
4665
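// Emit the cycle-count check for a branch.  *adj is set to the target's
// ccadj for internal branches (-1 when branching into a delay slot,
// 0 otherwise).  A branch-to-self with a nop in the delay slot is treated
// as an idle loop and jumps straight to the CC_STUB.  The common case is
// roughly (sketch):
//     add  cc, cc, #CLOCK_DIVIDER*(count+2)   @ sets flags
//     jns  <CC_STUB>                          @ counter >= 0 -> cc_interrupt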
4666 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4667 {
4668   int count;
4669   int jaddr;
4670   int idle=0;
4671   if(itype[i]==RJUMP)
4672   {
4673     *adj=0;
4674   }
4675   //if(ba[i]>=start && ba[i]<(start+slen*4))
4676   if(internal_branch(branch_regs[i].is32,ba[i]))
4677   {
4678     int t=(ba[i]-start)>>2;
4679     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4680     else *adj=ccadj[t];
4681   }
4682   else
4683   {
4684     *adj=0;
4685   }
4686   count=ccadj[i];
4687   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4688     // Idle loop
4689     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4690     idle=(int)out;
4691     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4692     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4693     jaddr=(int)out;
4694     emit_jmp(0);
4695   }
4696   else if(*adj==0||invert) {
4697     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4698     jaddr=(int)out;
4699     emit_jns(0);
4700   }
4701   else
4702   {
4703     emit_cmpimm(HOST_CCREG,-2*(count+2));
4704     jaddr=(int)out;
4705     emit_jns(0);
4706   }
4707   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4708 }
4709
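// Out-of-line path taken when the cycle counter expires at a branch:
// write back whatever is dirty for the direction taken, store the return
// PC in pcaddr (recomputing it with conditional moves when the branch
// direction was not known when the stub was created), call cc_interrupt,
// then reload registers and jump back into the compiled code.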
4710 void do_ccstub(int n)
4711 {
4712   literal_pool(256);
4713   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4714   set_jump_target(stubs[n][1],(int)out);
4715   int i=stubs[n][4];
4716   if(stubs[n][6]==NULLDS) {
4717     // Delay slot instruction is nullified ("likely" branch)
4718     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4719   }
4720   else if(stubs[n][6]!=TAKEN) {
4721     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4722   }
4723   else {
4724     if(internal_branch(branch_regs[i].is32,ba[i]))
4725       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4726   }
4727   if(stubs[n][5]!=-1)
4728   {
4729     // Save PC as return address
4730     emit_movimm(stubs[n][5],EAX);
4731     emit_writeword(EAX,(int)&pcaddr);
4732   }
4733   else
4734   {
4735     // Return address depends on which way the branch goes
4736     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4737     {
4738       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4739       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4740       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4741       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4742       if(rs1[i]==0)
4743       {
4744         s1l=s2l;s1h=s2h;
4745         s2l=s2h=-1;
4746       }
4747       else if(rs2[i]==0)
4748       {
4749         s2l=s2h=-1;
4750       }
4751       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4752         s1h=s2h=-1;
4753       }
4754       assert(s1l>=0);
4755       #ifdef DESTRUCTIVE_WRITEBACK
4756       if(rs1[i]) {
4757         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4758           emit_loadreg(rs1[i],s1l);
4759       } 
4760       else {
4761         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4762           emit_loadreg(rs2[i],s1l);
4763       }
4764       if(s2l>=0)
4765         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4766           emit_loadreg(rs2[i],s2l);
4767       #endif
4768       int hr=0;
4769       int addr,alt,ntaddr;
4770       while(hr<HOST_REGS)
4771       {
4772         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4773            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4774            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4775         {
4776           addr=hr++;break;
4777         }
4778         hr++;
4779       }
4780       while(hr<HOST_REGS)
4781       {
4782         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4783            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4784            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4785         {
4786           alt=hr++;break;
4787         }
4788         hr++;
4789       }
4790       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4791       {
4792         while(hr<HOST_REGS)
4793         {
4794           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4795              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4796              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4797           {
4798             ntaddr=hr;break;
4799           }
4800           hr++;
4801         }
4802         assert(hr<HOST_REGS);
4803       }
4804       if((opcode[i]&0x2f)==4) // BEQ
4805       {
4806         #ifdef HAVE_CMOV_IMM
4807         if(s1h<0) {
4808           if(s2l>=0) emit_cmp(s1l,s2l);
4809           else emit_test(s1l,s1l);
4810           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4811         }
4812         else
4813         #endif
4814         {
4815           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4816           if(s1h>=0) {
4817             if(s2h>=0) emit_cmp(s1h,s2h);
4818             else emit_test(s1h,s1h);
4819             emit_cmovne_reg(alt,addr);
4820           }
4821           if(s2l>=0) emit_cmp(s1l,s2l);
4822           else emit_test(s1l,s1l);
4823           emit_cmovne_reg(alt,addr);
4824         }
4825       }
4826       if((opcode[i]&0x2f)==5) // BNE
4827       {
4828         #ifdef HAVE_CMOV_IMM
4829         if(s1h<0) {
4830           if(s2l>=0) emit_cmp(s1l,s2l);
4831           else emit_test(s1l,s1l);
4832           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4833         }
4834         else
4835         #endif
4836         {
4837           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4838           if(s1h>=0) {
4839             if(s2h>=0) emit_cmp(s1h,s2h);
4840             else emit_test(s1h,s1h);
4841             emit_cmovne_reg(alt,addr);
4842           }
4843           if(s2l>=0) emit_cmp(s1l,s2l);
4844           else emit_test(s1l,s1l);
4845           emit_cmovne_reg(alt,addr);
4846         }
4847       }
4848       if((opcode[i]&0x2f)==6) // BLEZ
4849       {
4850         //emit_movimm(ba[i],alt);
4851         //emit_movimm(start+i*4+8,addr);
4852         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4853         emit_cmpimm(s1l,1);
4854         if(s1h>=0) emit_mov(addr,ntaddr);
4855         emit_cmovl_reg(alt,addr);
4856         if(s1h>=0) {
4857           emit_test(s1h,s1h);
4858           emit_cmovne_reg(ntaddr,addr);
4859           emit_cmovs_reg(alt,addr);
4860         }
4861       }
4862       if((opcode[i]&0x2f)==7) // BGTZ
4863       {
4864         //emit_movimm(ba[i],addr);
4865         //emit_movimm(start+i*4+8,ntaddr);
4866         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4867         emit_cmpimm(s1l,1);
4868         if(s1h>=0) emit_mov(addr,alt);
4869         emit_cmovl_reg(ntaddr,addr);
4870         if(s1h>=0) {
4871           emit_test(s1h,s1h);
4872           emit_cmovne_reg(alt,addr);
4873           emit_cmovs_reg(ntaddr,addr);
4874         }
4875       }
4876       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4877       {
4878         //emit_movimm(ba[i],alt);
4879         //emit_movimm(start+i*4+8,addr);
4880         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4881         if(s1h>=0) emit_test(s1h,s1h);
4882         else emit_test(s1l,s1l);
4883         emit_cmovs_reg(alt,addr);
4884       }
4885       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4886       {
4887         //emit_movimm(ba[i],addr);
4888         //emit_movimm(start+i*4+8,alt);
4889         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4890         if(s1h>=0) emit_test(s1h,s1h);
4891         else emit_test(s1l,s1l);
4892         emit_cmovs_reg(alt,addr);
4893       }
4894       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4895         if(source[i]&0x10000) // BC1T
4896         {
4897           //emit_movimm(ba[i],alt);
4898           //emit_movimm(start+i*4+8,addr);
4899           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4900           emit_testimm(s1l,0x800000);
4901           emit_cmovne_reg(alt,addr);
4902         }
4903         else // BC1F
4904         {
4905           //emit_movimm(ba[i],addr);
4906           //emit_movimm(start+i*4+8,alt);
4907           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4908           emit_testimm(s1l,0x800000);
4909           emit_cmovne_reg(alt,addr);
4910         }
4911       }
4912       emit_writeword(addr,(int)&pcaddr);
4913     }
4914     else
4915     if(itype[i]==RJUMP)
4916     {
4917       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4918       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4919         r=get_reg(branch_regs[i].regmap,RTEMP);
4920       }
4921       emit_writeword(r,(int)&pcaddr);
4922     }
4923     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4924   }
4925   // Update cycle count
4926   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4927   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4928   emit_call((int)cc_interrupt);
4929   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4930   if(stubs[n][6]==TAKEN) {
4931     if(internal_branch(branch_regs[i].is32,ba[i]))
4932       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4933     else if(itype[i]==RJUMP) {
4934       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4935         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4936       else
4937         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4938     }
4939   }else if(stubs[n][6]==NOTTAKEN) {
4940     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4941     else load_all_regs(branch_regs[i].regmap);
4942   }else if(stubs[n][6]==NULLDS) {
4943     // Delay slot instruction is nullified ("likely" branch)
4944     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4945     else load_all_regs(regs[i].regmap);
4946   }else{
4947     load_all_regs(branch_regs[i].regmap);
4948   }
4949   emit_jmp(stubs[n][2]); // return address
4950   
4951   /* This works but uses a lot of memory...
4952   emit_readword((int)&last_count,ECX);
4953   emit_add(HOST_CCREG,ECX,EAX);
4954   emit_writeword(EAX,(int)&Count);
4955   emit_call((int)gen_interupt);
4956   emit_readword((int)&Count,HOST_CCREG);
4957   emit_readword((int)&next_interupt,EAX);
4958   emit_readword((int)&pending_exception,EBX);
4959   emit_writeword(EAX,(int)&last_count);
4960   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4961   emit_test(EBX,EBX);
4962   int jne_instr=(int)out;
4963   emit_jne(0);
4964   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4965   load_all_regs(branch_regs[i].regmap);
4966   emit_jmp(stubs[n][2]); // return address
4967   set_jump_target(jne_instr,(int)out);
4968   emit_readword((int)&pcaddr,EAX);
4969   // Call get_addr_ht instead of doing the hash table here.
4970   // This code is executed infrequently and takes up a lot of space
4971   // so smaller is better.
4972   emit_storereg(CCREG,HOST_CCREG);
4973   emit_pushreg(EAX);
4974   emit_call((int)get_addr_ht);
4975   emit_loadreg(CCREG,HOST_CCREG);
4976   emit_addimm(ESP,4,ESP);
4977   emit_jmpreg(EAX);*/
4978 }
4979
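// Record a direct jump that still has to be patched: the branch location,
// its target and the internal_branch() classification are queued in
// link_addr[] so they can be linked once the block is assembled.
// Typical use (see the branch assemblers below):
//   add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
//   emit_jmp(0);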
4980 void add_to_linker(int addr,int target,int ext)
4981 {
4982   link_addr[linkcount][0]=addr;
4983   link_addr[linkcount][1]=target;
4984   link_addr[linkcount][2]=ext;  
4985   linkcount++;
4986 }
4987
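// Assemble J/JAL.  The delay slot is emitted first (ds_assemble), then
// registers that are dead across the branch are invalidated
// (wb_invalidate), the return address (PC+8) is placed in $31 for JAL,
// dirty registers are stored for the target, the cycle check is emitted,
// and the jump is either assembled inline (when the target is the delay
// slot of another branch in this block) or queued with add_to_linker()
// to be patched later.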
4988 void ujump_assemble(int i,struct regstat *i_regs)
4989 {
4990   signed char *i_regmap=i_regs->regmap;
4991   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4992   address_generation(i+1,i_regs,regs[i].regmap_entry);
4993   #ifdef REG_PREFETCH
4994   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4995   if(rt1[i]==31&&temp>=0) 
4996   {
4997     int return_address=start+i*4+8;
4998     if(get_reg(branch_regs[i].regmap,31)>0) 
4999     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5000   }
5001   #endif
5002   ds_assemble(i+1,i_regs);
5003   uint64_t bc_unneeded=branch_regs[i].u;
5004   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5005   bc_unneeded|=1|(1LL<<rt1[i]);
5006   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5007   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5008                 bc_unneeded,bc_unneeded_upper);
5009   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5010   if(rt1[i]==31) {
5011     int rt;
5012     unsigned int return_address;
5013     assert(rt1[i+1]!=31);
5014     assert(rt2[i+1]!=31);
5015     rt=get_reg(branch_regs[i].regmap,31);
5016     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5017     //assert(rt>=0);
5018     return_address=start+i*4+8;
5019     if(rt>=0) {
5020       #ifdef USE_MINI_HT
5021       if(internal_branch(branch_regs[i].is32,return_address)) {
5022         int temp=rt+1;
5023         if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5024            branch_regs[i].regmap[temp]>=0)
5025         {
5026           temp=get_reg(branch_regs[i].regmap,-1);
5027         }
5028         #ifdef HOST_TEMPREG
5029         if(temp<0) temp=HOST_TEMPREG;
5030         #endif
5031         if(temp>=0) do_miniht_insert(return_address,rt,temp);
5032         else emit_movimm(return_address,rt);
5033       }
5034       else
5035       #endif
5036       {
5037         #ifdef REG_PREFETCH
5038         if(temp>=0) 
5039         {
5040           if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5041         }
5042         #endif
5043         emit_movimm(return_address,rt); // PC into link register
5044         #ifdef IMM_PREFETCH
5045         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5046         #endif
5047       }
5048     }
5049   }
5050   int cc,adj;
5051   cc=get_reg(branch_regs[i].regmap,CCREG);
5052   assert(cc==HOST_CCREG);
5053   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5054   #ifdef REG_PREFETCH
5055   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5056   #endif
5057   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5058   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5059   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5060   if(internal_branch(branch_regs[i].is32,ba[i]))
5061     assem_debug("branch: internal\n");
5062   else
5063     assem_debug("branch: external\n");
5064   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5065     ds_assemble_entry(i);
5066   }
5067   else {
5068     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5069     emit_jmp(0);
5070   }
5071 }
5072
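// Assemble a register-indirect jump (JR/JALR).  If the delay slot overwrites
// the source register, its value is first copied to RTEMP.  JR $ra may go
// through the mini hash table (USE_MINI_HT); otherwise the generated code
// dispatches through jump_vaddr_reg[] with the target still in a host
// register.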
5073 void rjump_assemble(int i,struct regstat *i_regs)
5074 {
5075   signed char *i_regmap=i_regs->regmap;
5076   int temp;
5077   int rs,cc,adj;
5078   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5079   assert(rs>=0);
5080   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5081     // Delay slot abuse, make a copy of the branch address register
5082     temp=get_reg(branch_regs[i].regmap,RTEMP);
5083     assert(temp>=0);
5084     assert(regs[i].regmap[temp]==RTEMP);
5085     emit_mov(rs,temp);
5086     rs=temp;
5087   }
5088   address_generation(i+1,i_regs,regs[i].regmap_entry);
5089   #ifdef REG_PREFETCH
5090   if(rt1[i]==31) 
5091   {
5092     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5093       int return_address=start+i*4+8;
5094       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5095     }
5096   }
5097   #endif
5098   #ifdef USE_MINI_HT
5099   if(rs1[i]==31) {
5100     int rh=get_reg(regs[i].regmap,RHASH);
5101     if(rh>=0) do_preload_rhash(rh);
5102   }
5103   #endif
5104   ds_assemble(i+1,i_regs);
5105   uint64_t bc_unneeded=branch_regs[i].u;
5106   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5107   bc_unneeded|=1|(1LL<<rt1[i]);
5108   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5109   bc_unneeded&=~(1LL<<rs1[i]);
5110   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5111                 bc_unneeded,bc_unneeded_upper);
5112   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5113   if(rt1[i]!=0) {
5114     int rt,return_address;
5115     assert(rt1[i+1]!=rt1[i]);
5116     assert(rt2[i+1]!=rt1[i]);
5117     rt=get_reg(branch_regs[i].regmap,rt1[i]);
5118     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5119     assert(rt>=0);
5120     return_address=start+i*4+8;
5121     #ifdef REG_PREFETCH
5122     if(temp>=0) 
5123     {
5124       if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5125     }
5126     #endif
5127     emit_movimm(return_address,rt); // PC into link register
5128     #ifdef IMM_PREFETCH
5129     emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5130     #endif
5131   }
5132   cc=get_reg(branch_regs[i].regmap,CCREG);
5133   assert(cc==HOST_CCREG);
5134   #ifdef USE_MINI_HT
5135   int rh=get_reg(branch_regs[i].regmap,RHASH);
5136   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5137   if(rs1[i]==31) {
5138     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5139     do_preload_rhtbl(ht);
5140     do_rhash(rs,rh);
5141   }
5142   #endif
5143   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5144   #ifdef DESTRUCTIVE_WRITEBACK
5145   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5146     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5147       emit_loadreg(rs1[i],rs);
5148     }
5149   }
5150   #endif
5151   #ifdef REG_PREFETCH
5152   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5153   #endif
5154   #ifdef USE_MINI_HT
5155   if(rs1[i]==31) {
5156     do_miniht_load(ht,rh);
5157   }
5158   #endif
5159   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5160   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5161   //assert(adj==0);
5162   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5163   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5164   emit_jns(0);
5165   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5166   #ifdef USE_MINI_HT
5167   if(rs1[i]==31) {
5168     do_miniht_jump(rs,rh,ht);
5169   }
5170   else
5171   #endif
5172   {
5173     //if(rs!=EAX) emit_mov(rs,EAX);
5174     //emit_jmp((int)jump_vaddr_eax);
5175     emit_jmp(jump_vaddr_reg[rs]);
5176   }
5177   /* Check hash table
5178   temp=!rs;
5179   emit_mov(rs,temp);
5180   emit_shrimm(rs,16,rs);
5181   emit_xor(temp,rs,rs);
5182   emit_movzwl_reg(rs,rs);
5183   emit_shlimm(rs,4,rs);
5184   emit_cmpmem_indexed((int)hash_table,rs,temp);
5185   emit_jne((int)out+14);
5186   emit_readword_indexed((int)hash_table+4,rs,rs);
5187   emit_jmpreg(rs);
5188   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5189   emit_addimm_no_flags(8,rs);
5190   emit_jeq((int)out-17);
5191   // No hit on hash table, call compiler
5192   emit_pushreg(temp);
5193 //DEBUG >
5194 #ifdef DEBUG_CYCLE_COUNT
5195   emit_readword((int)&last_count,ECX);
5196   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5197   emit_readword((int)&next_interupt,ECX);
5198   emit_writeword(HOST_CCREG,(int)&Count);
5199   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5200   emit_writeword(ECX,(int)&last_count);
5201 #endif
5202 //DEBUG <
5203   emit_storereg(CCREG,HOST_CCREG);
5204   emit_call((int)get_addr);
5205   emit_loadreg(CCREG,HOST_CCREG);
5206   emit_addimm(ESP,4,ESP);
5207   emit_jmpreg(EAX);*/
5208   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5209   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5210   #endif
5211 }
5212
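// Assemble BEQ/BNE/BLEZ/BGTZ and their "likely" forms.  The delay slot is
// normally emitted before the compare (out of order), unless it writes one of
// the compared registers or the branch is "likely".  When the register
// mapping at the target does not match the current one (match_bt), the branch
// sense is inverted so the register moves can be emitted on the taken path.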
5213 void cjump_assemble(int i,struct regstat *i_regs)
5214 {
5215   signed char *i_regmap=i_regs->regmap;
5216   int cc;
5217   int match;
5218   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5219   assem_debug("match=%d\n",match);
5220   int s1h,s1l,s2h,s2l;
5221   int prev_cop1_usable=cop1_usable;
5222   int unconditional=0,nop=0;
5223   int only32=0;
5224   int ooo=1;
5225   int invert=0;
5226   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5227   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5228   if(likely[i]) ooo=0;
5229   if(!match) invert=1;
5230   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5231   if(i>(ba[i]-start)>>2) invert=1;
5232   #endif
5233     
5234   if(ooo)
5235     if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5236        (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5237   {
5238     // Write-after-read dependency prevents out of order execution
5239     // First test branch condition, then execute delay slot, then branch
5240     ooo=0;
5241   }
5242
5243   if(ooo) {
5244     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5245     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5246     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5247     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5248   }
5249   else {
5250     s1l=get_reg(i_regmap,rs1[i]);
5251     s1h=get_reg(i_regmap,rs1[i]|64);
5252     s2l=get_reg(i_regmap,rs2[i]);
5253     s2h=get_reg(i_regmap,rs2[i]|64);
5254   }
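  // Classify the branch: with both sources $zero it is either always taken
  // (BEQ/BLEZ) or never taken (BNE/BGTZ); otherwise only32 records whether
  // the live operands are known to be 32-bit, in which case the high-word
  // comparison below can be skipped.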
5255   if(rs1[i]==0&&rs2[i]==0)
5256   {
5257     if(opcode[i]&1) nop=1;
5258     else unconditional=1;
5259     //assert(opcode[i]!=5);
5260     //assert(opcode[i]!=7);
5261     //assert(opcode[i]!=0x15);
5262     //assert(opcode[i]!=0x17);
5263   }
5264   else if(rs1[i]==0)
5265   {
5266     s1l=s2l;s1h=s2h;
5267     s2l=s2h=-1;
5268     only32=(regs[i].was32>>rs2[i])&1;
5269   }
5270   else if(rs2[i]==0)
5271   {
5272     s2l=s2h=-1;
5273     only32=(regs[i].was32>>rs1[i])&1;
5274   }
5275   else {
5276     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5277   }
5278
5279   if(ooo) {
5280     // Out of order execution (delay slot first)
5281     //printf("OOOE\n");
5282     address_generation(i+1,i_regs,regs[i].regmap_entry);
5283     ds_assemble(i+1,i_regs);
5284     int adj;
5285     uint64_t bc_unneeded=branch_regs[i].u;
5286     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5287     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5288     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5289     bc_unneeded|=1;
5290     bc_unneeded_upper|=1;
5291     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5292                   bc_unneeded,bc_unneeded_upper);
5293     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5294     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5295     cc=get_reg(branch_regs[i].regmap,CCREG);
5296     assert(cc==HOST_CCREG);
5297     if(unconditional) 
5298       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5299     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5300     //assem_debug("cycle count (adj)\n");
5301     if(unconditional) {
5302       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5303       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5304         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5305         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5306         if(internal)
5307           assem_debug("branch: internal\n");
5308         else
5309           assem_debug("branch: external\n");
5310         if(internal&&is_ds[(ba[i]-start)>>2]) {
5311           ds_assemble_entry(i);
5312         }
5313         else {
5314           add_to_linker((int)out,ba[i],internal);
5315           emit_jmp(0);
5316         }
5317         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5318         if(((u_int)out)&7) emit_addnop(0);
5319         #endif
5320       }
5321     }
5322     else if(nop) {
5323       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5324       int jaddr=(int)out;
5325       emit_jns(0);
5326       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5327     }
5328     else {
5329       int taken=0,nottaken=0,nottaken1=0;
5330       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5331       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5332       if(!only32)
5333       {
5334         assert(s1h>=0);
5335         if(opcode[i]==4) // BEQ
5336         {
5337           if(s2h>=0) emit_cmp(s1h,s2h);
5338           else emit_test(s1h,s1h);
5339           nottaken1=(int)out;
5340           emit_jne(1);
5341         }
5342         if(opcode[i]==5) // BNE
5343         {
5344           if(s2h>=0) emit_cmp(s1h,s2h);
5345           else emit_test(s1h,s1h);
5346           if(invert) taken=(int)out;
5347           else add_to_linker((int)out,ba[i],internal);
5348           emit_jne(0);
5349         }
5350         if(opcode[i]==6) // BLEZ
5351         {
5352           emit_test(s1h,s1h);
5353           if(invert) taken=(int)out;
5354           else add_to_linker((int)out,ba[i],internal);
5355           emit_js(0);
5356           nottaken1=(int)out;
5357           emit_jne(1);
5358         }
5359         if(opcode[i]==7) // BGTZ
5360         {
5361           emit_test(s1h,s1h);
5362           nottaken1=(int)out;
5363           emit_js(1);
5364           if(invert) taken=(int)out;
5365           else add_to_linker((int)out,ba[i],internal);
5366           emit_jne(0);
5367         }
5368       } // if(!only32)
5369           
5370       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5371       assert(s1l>=0);
5372       if(opcode[i]==4) // BEQ
5373       {
5374         if(s2l>=0) emit_cmp(s1l,s2l);
5375         else emit_test(s1l,s1l);
5376         if(invert){
5377           nottaken=(int)out;
5378           emit_jne(1);
5379         }else{
5380           add_to_linker((int)out,ba[i],internal);
5381           emit_jeq(0);
5382         }
5383       }
5384       if(opcode[i]==5) // BNE
5385       {
5386         if(s2l>=0) emit_cmp(s1l,s2l);
5387         else emit_test(s1l,s1l);
5388         if(invert){
5389           nottaken=(int)out;
5390           emit_jeq(1);
5391         }else{
5392           add_to_linker((int)out,ba[i],internal);
5393           emit_jne(0);
5394         }
5395       }
5396       if(opcode[i]==6) // BLEZ
5397       {
5398         emit_cmpimm(s1l,1);
5399         if(invert){
5400           nottaken=(int)out;
5401           emit_jge(1);
5402         }else{
5403           add_to_linker((int)out,ba[i],internal);
5404           emit_jl(0);
5405         }
5406       }
5407       if(opcode[i]==7) // BGTZ
5408       {
5409         emit_cmpimm(s1l,1);
5410         if(invert){
5411           nottaken=(int)out;
5412           emit_jl(1);
5413         }else{
5414           add_to_linker((int)out,ba[i],internal);
5415           emit_jge(0);
5416         }
5417       }
5418       if(invert) {
5419         if(taken) set_jump_target(taken,(int)out);
5420         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5421         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5422           if(adj) {
5423             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5424             add_to_linker((int)out,ba[i],internal);
5425           }else{
5426             emit_addnop(13);
5427             add_to_linker((int)out,ba[i],internal*2);
5428           }
5429           emit_jmp(0);
5430         }else
5431         #endif
5432         {
5433           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5434           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5435           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5436           if(internal)
5437             assem_debug("branch: internal\n");
5438           else
5439             assem_debug("branch: external\n");
5440           if(internal&&is_ds[(ba[i]-start)>>2]) {
5441             ds_assemble_entry(i);
5442           }
5443           else {
5444             add_to_linker((int)out,ba[i],internal);
5445             emit_jmp(0);
5446           }
5447         }
5448         set_jump_target(nottaken,(int)out);
5449       }
5450
5451       if(nottaken1) set_jump_target(nottaken1,(int)out);
5452       if(adj) {
5453         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5454       }
5455     } // (!unconditional)
5456   } // if(ooo)
5457   else
5458   {
5459     // In-order execution (branch first)
5460     //if(likely[i]) printf("IOL\n");
5461     //else
5462     //printf("IOE\n");
5463     int taken=0,nottaken=0,nottaken1=0;
5464     if(!unconditional&&!nop) {
5465       if(!only32)
5466       {
5467         assert(s1h>=0);
5468         if((opcode[i]&0x2f)==4) // BEQ
5469         {
5470           if(s2h>=0) emit_cmp(s1h,s2h);
5471           else emit_test(s1h,s1h);
5472           nottaken1=(int)out;
5473           emit_jne(2);
5474         }
5475         if((opcode[i]&0x2f)==5) // BNE
5476         {
5477           if(s2h>=0) emit_cmp(s1h,s2h);
5478           else emit_test(s1h,s1h);
5479           taken=(int)out;
5480           emit_jne(1);
5481         }
5482         if((opcode[i]&0x2f)==6) // BLEZ
5483         {
5484           emit_test(s1h,s1h);
5485           taken=(int)out;
5486           emit_js(1);
5487           nottaken1=(int)out;
5488           emit_jne(2);
5489         }
5490         if((opcode[i]&0x2f)==7) // BGTZ
5491         {
5492           emit_test(s1h,s1h);
5493           nottaken1=(int)out;
5494           emit_js(2);
5495           taken=(int)out;
5496           emit_jne(1);
5497         }
5498       } // if(!only32)
5499           
5500       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5501       assert(s1l>=0);
5502       if((opcode[i]&0x2f)==4) // BEQ
5503       {
5504         if(s2l>=0) emit_cmp(s1l,s2l);
5505         else emit_test(s1l,s1l);
5506         nottaken=(int)out;
5507         emit_jne(2);
5508       }
5509       if((opcode[i]&0x2f)==5) // BNE
5510       {
5511         if(s2l>=0) emit_cmp(s1l,s2l);
5512         else emit_test(s1l,s1l);
5513         nottaken=(int)out;
5514         emit_jeq(2);
5515       }
5516       if((opcode[i]&0x2f)==6) // BLEZ
5517       {
5518         emit_cmpimm(s1l,1);
5519         nottaken=(int)out;
5520         emit_jge(2);
5521       }
5522       if((opcode[i]&0x2f)==7) // BGTZ
5523       {
5524         emit_cmpimm(s1l,1);
5525         nottaken=(int)out;
5526         emit_jl(2);
5527       }
5528     } // if(!unconditional)
5529     int adj;
5530     uint64_t ds_unneeded=branch_regs[i].u;
5531     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5532     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5533     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5534     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5535     ds_unneeded|=1;
5536     ds_unneeded_upper|=1;
5537     // branch taken
5538     if(!nop) {
5539       if(taken) set_jump_target(taken,(int)out);
5540       assem_debug("1:\n");
5541       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5542                     ds_unneeded,ds_unneeded_upper);
5543       // load regs
5544       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5545       address_generation(i+1,&branch_regs[i],0);
5546       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5547       ds_assemble(i+1,&branch_regs[i]);
5548       cc=get_reg(branch_regs[i].regmap,CCREG);
5549       if(cc==-1) {
5550         emit_loadreg(CCREG,cc=HOST_CCREG);
5551         // CHECK: Is the following instruction (fall thru) allocated ok?
5552       }
5553       assert(cc==HOST_CCREG);
5554       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5555       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5556       assem_debug("cycle count (adj)\n");
5557       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5558       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5559       if(internal)
5560         assem_debug("branch: internal\n");
5561       else
5562         assem_debug("branch: external\n");
5563       if(internal&&is_ds[(ba[i]-start)>>2]) {
5564         ds_assemble_entry(i);
5565       }
5566       else {
5567         add_to_linker((int)out,ba[i],internal);
5568         emit_jmp(0);
5569       }
5570     }
5571     // branch not taken
5572     cop1_usable=prev_cop1_usable;
5573     if(!unconditional) {
5574       if(nottaken1) set_jump_target(nottaken1,(int)out);
5575       set_jump_target(nottaken,(int)out);
5576       assem_debug("2:\n");
5577       if(!likely[i]) {
5578         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5579                       ds_unneeded,ds_unneeded_upper);
5580         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5581         address_generation(i+1,&branch_regs[i],0);
5582         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5583         ds_assemble(i+1,&branch_regs[i]);
5584       }
5585       cc=get_reg(branch_regs[i].regmap,CCREG);
5586       if(cc==-1&&!likely[i]) {
5587         // Cycle count isn't in a register, temporarily load it then write it out
5588         emit_loadreg(CCREG,HOST_CCREG);
5589         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5590         int jaddr=(int)out;
5591         emit_jns(0);
5592         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5593         emit_storereg(CCREG,HOST_CCREG);
5594       }
5595       else{
5596         cc=get_reg(i_regmap,CCREG);
5597         assert(cc==HOST_CCREG);
5598         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5599         int jaddr=(int)out;
5600         emit_jns(0);
5601         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5602       }
5603     }
5604   }
5605 }
5606
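// Assemble the REGIMM branches (BLTZ/BGEZ, the "and link" AL forms and their
// likely variants).  The structure mirrors cjump_assemble: delay slot first
// when possible, in-order otherwise; the AL forms also write the return
// address into $31 even when the branch is not taken.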
5607 void sjump_assemble(int i,struct regstat *i_regs)
5608 {
5609   signed char *i_regmap=i_regs->regmap;
5610   int cc;
5611   int match;
5612   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5613   assem_debug("smatch=%d\n",match);
5614   int s1h,s1l;
5615   int prev_cop1_usable=cop1_usable;
5616   int unconditional=0,nevertaken=0;
5617   int only32=0;
5618   int ooo=1;
5619   int invert=0;
5620   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5621   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5622   if(likely[i]) ooo=0;
5623   if(!match) invert=1;
5624   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5625   if(i>(ba[i]-start)>>2) invert=1;
5626   #endif
5627
5628   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5629   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5630
5631   if(ooo)
5632     if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5633   {
5634     // Write-after-read dependency prevents out of order execution
5635     // First test branch condition, then execute delay slot, then branch
5636     ooo=0;
5637   }
5638   assert(opcode2[i]<0x10||ooo); // FIXME (BxxZALL)
5639
5640   if(ooo) {
5641     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5642     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5643   }
5644   else {
5645     s1l=get_reg(i_regmap,rs1[i]);
5646     s1h=get_reg(i_regmap,rs1[i]|64);
5647   }
5648   if(rs1[i]==0)
5649   {
5650     if(opcode2[i]&1) unconditional=1;
5651     else nevertaken=1;
5652     // These are never taken (r0 is never less than zero)
5653     //assert(opcode2[i]!=0);
5654     //assert(opcode2[i]!=2);
5655     //assert(opcode2[i]!=0x10);
5656     //assert(opcode2[i]!=0x12);
5657   }
5658   else {
5659     only32=(regs[i].was32>>rs1[i])&1;
5660   }
5661
5662   if(ooo) {
5663     // Out of order execution (delay slot first)
5664     //printf("OOOE\n");
5665     address_generation(i+1,i_regs,regs[i].regmap_entry);
5666     ds_assemble(i+1,i_regs);
5667     int adj;
5668     uint64_t bc_unneeded=branch_regs[i].u;
5669     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5670     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5671     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5672     bc_unneeded|=1;
5673     bc_unneeded_upper|=1;
5674     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5675                   bc_unneeded,bc_unneeded_upper);
5676     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5677     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5678     if(rt1[i]==31) {
5679       int rt,return_address;
5680       assert(rt1[i+1]!=31);
5681       assert(rt2[i+1]!=31);
5682       rt=get_reg(branch_regs[i].regmap,31);
5683       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5684       if(rt>=0) {
5685         // Save the PC even if the branch is not taken
5686         return_address=start+i*4+8;
5687         emit_movimm(return_address,rt); // PC into link register
5688         #ifdef IMM_PREFETCH
5689         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5690         #endif
5691       }
5692     }
5693     cc=get_reg(branch_regs[i].regmap,CCREG);
5694     assert(cc==HOST_CCREG);
5695     if(unconditional) 
5696       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5697     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5698     assem_debug("cycle count (adj)\n");
5699     if(unconditional) {
5700       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5701       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5702         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5703         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5704         if(internal)
5705           assem_debug("branch: internal\n");
5706         else
5707           assem_debug("branch: external\n");
5708         if(internal&&is_ds[(ba[i]-start)>>2]) {
5709           ds_assemble_entry(i);
5710         }
5711         else {
5712           add_to_linker((int)out,ba[i],internal);
5713           emit_jmp(0);
5714         }
5715         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5716         if(((u_int)out)&7) emit_addnop(0);
5717         #endif
5718       }
5719     }
5720     else if(nevertaken) {
5721       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5722       int jaddr=(int)out;
5723       emit_jns(0);
5724       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5725     }
5726     else {
5727       int nottaken=0;
5728       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5729       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5730       if(!only32)
5731       {
5732         assert(s1h>=0);
5733         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5734         {
5735           emit_test(s1h,s1h);
5736           if(invert){
5737             nottaken=(int)out;
5738             emit_jns(1);
5739           }else{
5740             add_to_linker((int)out,ba[i],internal);
5741             emit_js(0);
5742           }
5743         }
5744         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5745         {
5746           emit_test(s1h,s1h);
5747           if(invert){
5748             nottaken=(int)out;
5749             emit_js(1);
5750           }else{
5751             add_to_linker((int)out,ba[i],internal);
5752             emit_jns(0);
5753           }
5754         }
5755       } // if(!only32)
5756       else
5757       {
5758         assert(s1l>=0);
5759         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5760         {
5761           emit_test(s1l,s1l);
5762           if(invert){
5763             nottaken=(int)out;
5764             emit_jns(1);
5765           }else{
5766             add_to_linker((int)out,ba[i],internal);
5767             emit_js(0);
5768           }
5769         }
5770         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5771         {
5772           emit_test(s1l,s1l);
5773           if(invert){
5774             nottaken=(int)out;
5775             emit_js(1);
5776           }else{
5777             add_to_linker((int)out,ba[i],internal);
5778             emit_jns(0);
5779           }
5780         }
5781       } // if(!only32)
5782           
5783       if(invert) {
5784         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5785         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5786           if(adj) {
5787             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5788             add_to_linker((int)out,ba[i],internal);
5789           }else{
5790             emit_addnop(13);
5791             add_to_linker((int)out,ba[i],internal*2);
5792           }
5793           emit_jmp(0);
5794         }else
5795         #endif
5796         {
5797           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5798           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5799           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5800           if(internal)
5801             assem_debug("branch: internal\n");
5802           else
5803             assem_debug("branch: external\n");
5804           if(internal&&is_ds[(ba[i]-start)>>2]) {
5805             ds_assemble_entry(i);
5806           }
5807           else {
5808             add_to_linker((int)out,ba[i],internal);
5809             emit_jmp(0);
5810           }
5811         }
5812         set_jump_target(nottaken,(int)out);
5813       }
5814
5815       if(adj) {
5816         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5817       }
5818     } // (!unconditional)
5819   } // if(ooo)
5820   else
5821   {
5822     // In-order execution (branch first)
5823     //printf("IOE\n");
5824     int nottaken=0;
5825     if(!unconditional) {
5826       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5827       if(!only32)
5828       {
5829         assert(s1h>=0);
5830         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5831         {
5832           emit_test(s1h,s1h);
5833           nottaken=(int)out;
5834           emit_jns(1);
5835         }
5836         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5837         {
5838           emit_test(s1h,s1h);
5839           nottaken=(int)out;
5840           emit_js(1);
5841         }
5842       } // if(!only32)
5843       else
5844       {
5845         assert(s1l>=0);
5846         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5847         {
5848           emit_test(s1l,s1l);
5849           nottaken=(int)out;
5850           emit_jns(1);
5851         }
5852         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5853         {
5854           emit_test(s1l,s1l);
5855           nottaken=(int)out;
5856           emit_js(1);
5857         }
5858       }
5859     } // if(!unconditional)
5860     int adj;
5861     uint64_t ds_unneeded=branch_regs[i].u;
5862     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5863     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5864     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5865     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5866     ds_unneeded|=1;
5867     ds_unneeded_upper|=1;
5868     // branch taken
5869     if(!nevertaken) {
5870       //assem_debug("1:\n");
5871       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5872                     ds_unneeded,ds_unneeded_upper);
5873       // load regs
5874       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5875       address_generation(i+1,&branch_regs[i],0);
5876       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5877       ds_assemble(i+1,&branch_regs[i]);
5878       cc=get_reg(branch_regs[i].regmap,CCREG);
5879       if(cc==-1) {
5880         emit_loadreg(CCREG,cc=HOST_CCREG);
5881         // CHECK: Is the following instruction (fall thru) allocated ok?
5882       }
5883       assert(cc==HOST_CCREG);
5884       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5885       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5886       assem_debug("cycle count (adj)\n");
5887       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5888       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5889       if(internal)
5890         assem_debug("branch: internal\n");
5891       else
5892         assem_debug("branch: external\n");
5893       if(internal&&is_ds[(ba[i]-start)>>2]) {
5894         ds_assemble_entry(i);
5895       }
5896       else {
5897         add_to_linker((int)out,ba[i],internal);
5898         emit_jmp(0);
5899       }
5900     }
5901     // branch not taken
5902     cop1_usable=prev_cop1_usable;
5903     if(!unconditional) {
5904       set_jump_target(nottaken,(int)out);
5905       assem_debug("1:\n");
5906       if(!likely[i]) {
5907         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5908                       ds_unneeded,ds_unneeded_upper);
5909         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5910         address_generation(i+1,&branch_regs[i],0);
5911         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5912         ds_assemble(i+1,&branch_regs[i]);
5913       }
5914       cc=get_reg(branch_regs[i].regmap,CCREG);
5915       if(cc==-1&&!likely[i]) {
5916         // Cycle count isn't in a register, temporarily load it then write it out
5917         emit_loadreg(CCREG,HOST_CCREG);
5918         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5919         int jaddr=(int)out;
5920         emit_jns(0);
5921         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5922         emit_storereg(CCREG,HOST_CCREG);
5923       }
5924       else{
5925         cc=get_reg(i_regmap,CCREG);
5926         assert(cc==HOST_CCREG);
5927         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5928         int jaddr=(int)out;
5929         emit_jns(0);
5930         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5931       }
5932     }
5933   }
5934 }
5935
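// Assemble the COP1 condition branches (BC1T/BC1F and their likely forms).
// A coprocessor-unusable check is emitted first if COP1 use has not yet been
// verified in this block, then bit 0x800000 (presumably the FCR31 condition
// bit held in the FSREG mapping) is tested to pick the taken or fall-through
// path.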
5936 void fjump_assemble(int i,struct regstat *i_regs)
5937 {
5938   signed char *i_regmap=i_regs->regmap;
5939   int cc;
5940   int match;
5941   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5942   assem_debug("fmatch=%d\n",match);
5943   int fs,cs;
5944   int eaddr;
5945   int ooo=1;
5946   int invert=0;
5947   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5948   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5949   if(likely[i]) ooo=0;
5950   if(!match) invert=1;
5951   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5952   if(i>(ba[i]-start)>>2) invert=1;
5953   #endif
5954
5955   if(ooo)
5956     if(itype[i+1]==FCOMP)
5957   {
5958     // Write-after-read dependency prevents out of order execution
5959     // First test branch condition, then execute delay slot, then branch
5960     ooo=0;
5961   }
5962
5963   if(ooo) {
5964     fs=get_reg(branch_regs[i].regmap,FSREG);
5965     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5966   }
5967   else {
5968     fs=get_reg(i_regmap,FSREG);
5969   }
5970
5971   // Check cop1 unusable
5972   if(!cop1_usable) {
5973     cs=get_reg(i_regmap,CSREG);
5974     assert(cs>=0);
5975     emit_testimm(cs,0x20000000);
5976     eaddr=(int)out;
5977     emit_jeq(0);
5978     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5979     cop1_usable=1;
5980   }
5981
5982   if(ooo) {
5983     // Out of order execution (delay slot first)
5984     //printf("OOOE\n");
5985     ds_assemble(i+1,i_regs);
5986     int adj;
5987     uint64_t bc_unneeded=branch_regs[i].u;
5988     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5989     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5990     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5991     bc_unneeded|=1;
5992     bc_unneeded_upper|=1;
5993     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5994                   bc_unneeded,bc_unneeded_upper);
5995     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5996     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5997     cc=get_reg(branch_regs[i].regmap,CCREG);
5998     assert(cc==HOST_CCREG);
5999     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6000     assem_debug("cycle count (adj)\n");
6001     if(1) {
6002       int nottaken=0;
6003       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6004       if(1) {
6005         assert(fs>=0);
6006         emit_testimm(fs,0x800000);
6007         if(source[i]&0x10000) // BC1T
6008         {
6009           if(invert){
6010             nottaken=(int)out;
6011             emit_jeq(1);
6012           }else{
6013             add_to_linker((int)out,ba[i],internal);
6014             emit_jne(0);
6015           }
6016         }
6017         else // BC1F
6018           if(invert){
6019             nottaken=(int)out;
6020             emit_jne(1);
6021           }else{
6022             add_to_linker((int)out,ba[i],internal);
6023             emit_jeq(0);
6024           }
6025         {
6026         }
6027       } // if(1) (no only32 case for COP1 branches)
6028           
6029       if(invert) {
6030         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6031         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6032         else if(match) emit_addnop(13);
6033         #endif
6034         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6035         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6036         if(internal)
6037           assem_debug("branch: internal\n");
6038         else
6039           assem_debug("branch: external\n");
6040         if(internal&&is_ds[(ba[i]-start)>>2]) {
6041           ds_assemble_entry(i);
6042         }
6043         else {
6044           add_to_linker((int)out,ba[i],internal);
6045           emit_jmp(0);
6046         }
6047         set_jump_target(nottaken,(int)out);
6048       }
6049
6050       if(adj) {
6051         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6052       }
6053     } // (!unconditional)
6054   } // if(ooo)
6055   else
6056   {
6057     // In-order execution (branch first)
6058     //printf("IOE\n");
6059     int nottaken=0;
6060     if(1) {
6061       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6062       if(1) {
6063         assert(fs>=0);
6064         emit_testimm(fs,0x800000);
6065         if(source[i]&0x10000) // BC1T
6066         {
6067           nottaken=(int)out;
6068           emit_jeq(1);
6069         }
6070         else // BC1F
6071         {
6072           nottaken=(int)out;
6073           emit_jne(1);
6074         }
6075       }
6076     } // if(!unconditional)
6077     int adj;
6078     uint64_t ds_unneeded=branch_regs[i].u;
6079     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6080     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6081     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6082     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6083     ds_unneeded|=1;
6084     ds_unneeded_upper|=1;
6085     // branch taken
6086     //assem_debug("1:\n");
6087     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6088                   ds_unneeded,ds_unneeded_upper);
6089     // load regs
6090     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6091     address_generation(i+1,&branch_regs[i],0);
6092     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6093     ds_assemble(i+1,&branch_regs[i]);
6094     cc=get_reg(branch_regs[i].regmap,CCREG);
6095     if(cc==-1) {
6096       emit_loadreg(CCREG,cc=HOST_CCREG);
6097       // CHECK: Is the following instruction (fall thru) allocated ok?
6098     }
6099     assert(cc==HOST_CCREG);
6100     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6101     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6102     assem_debug("cycle count (adj)\n");
6103     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6104     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6105     if(internal)
6106       assem_debug("branch: internal\n");
6107     else
6108       assem_debug("branch: external\n");
6109     if(internal&&is_ds[(ba[i]-start)>>2]) {
6110       ds_assemble_entry(i);
6111     }
6112     else {
6113       add_to_linker((int)out,ba[i],internal);
6114       emit_jmp(0);
6115     }
6116
6117     // branch not taken
6118     if(1) { // <- FIXME (don't need this)
6119       set_jump_target(nottaken,(int)out);
6120       assem_debug("1:\n");
6121       if(!likely[i]) {
6122         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6123                       ds_unneeded,ds_unneeded_upper);
6124         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6125         address_generation(i+1,&branch_regs[i],0);
6126         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6127         ds_assemble(i+1,&branch_regs[i]);
6128       }
6129       cc=get_reg(branch_regs[i].regmap,CCREG);
6130       if(cc==-1&&!likely[i]) {
6131         // Cycle count isn't in a register, temporarily load it then write it out
6132         emit_loadreg(CCREG,HOST_CCREG);
6133         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6134         int jaddr=(int)out;
6135         emit_jns(0);
6136         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6137         emit_storereg(CCREG,HOST_CCREG);
6138       }
6139       else{
6140         cc=get_reg(i_regmap,CCREG);
6141         assert(cc==HOST_CCREG);
6142         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6143         int jaddr=(int)out;
6144         emit_jns(0);
6145         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6146       }
6147     }
6148   }
6149 }
6150
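// Assemble a branch whose delay slot lies on the next virtual page, so the
// pair cannot be compiled as a unit.  The branch target (or the fall-through
// address) is computed into HOST_BTREG and the block exits through an
// external-jump stub; pagespan_ds() below then compiles the delay slot as a
// separate entry point and dispatches on the saved target.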
6151 static void pagespan_assemble(int i,struct regstat *i_regs)
6152 {
6153   int s1l=get_reg(i_regs->regmap,rs1[i]);
6154   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6155   int s2l=get_reg(i_regs->regmap,rs2[i]);
6156   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6157   void *nt_branch=NULL;
6158   int taken=0;
6159   int nottaken=0;
6160   int unconditional=0;
6161   if(rs1[i]==0)
6162   {
6163     s1l=s2l;s1h=s2h;
6164     s2l=s2h=-1;
6165   }
6166   else if(rs2[i]==0)
6167   {
6168     s2l=s2h=-1;
6169   }
6170   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6171     s1h=s2h=-1;
6172   }
6173   int hr=0;
6174   int addr,alt,ntaddr;
6175   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6176   else {
6177     while(hr<HOST_REGS)
6178     {
6179       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6180          (i_regs->regmap[hr]&63)!=rs1[i] &&
6181          (i_regs->regmap[hr]&63)!=rs2[i] )
6182       {
6183         addr=hr++;break;
6184       }
6185       hr++;
6186     }
6187   }
6188   while(hr<HOST_REGS)
6189   {
6190     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6191        (i_regs->regmap[hr]&63)!=rs1[i] &&
6192        (i_regs->regmap[hr]&63)!=rs2[i] )
6193     {
6194       alt=hr++;break;
6195     }
6196     hr++;
6197   }
6198   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6199   {
6200     while(hr<HOST_REGS)
6201     {
6202       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6203          (i_regs->regmap[hr]&63)!=rs1[i] &&
6204          (i_regs->regmap[hr]&63)!=rs2[i] )
6205       {
6206         ntaddr=hr;break;
6207       }
6208       hr++;
6209     }
6210   }
6211   assert(hr<HOST_REGS);
6212   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6213     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6214   }
6215   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6216   if(opcode[i]==2) // J
6217   {
6218     unconditional=1;
6219   }
6220   if(opcode[i]==3) // JAL
6221   {
6222     // TODO: mini_ht
6223     int rt=get_reg(i_regs->regmap,31);
6224     emit_movimm(start+i*4+8,rt);
6225     unconditional=1;
6226   }
6227   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6228   {
6229     emit_mov(s1l,addr);
6230     if(opcode2[i]==9) // JALR
6231     {
6232       int rt=get_reg(i_regs->regmap,rt1[i]);
6233       emit_movimm(start+i*4+8,rt);
6234     }
6235   }
6236   if((opcode[i]&0x3f)==4) // BEQ
6237   {
6238     if(rs1[i]==rs2[i])
6239     {
6240       unconditional=1;
6241     }
6242     else
6243     #ifdef HAVE_CMOV_IMM
6244     if(s1h<0) {
6245       if(s2l>=0) emit_cmp(s1l,s2l);
6246       else emit_test(s1l,s1l);
6247       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6248     }
6249     else
6250     #endif
6251     {
6252       assert(s1l>=0);
6253       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6254       if(s1h>=0) {
6255         if(s2h>=0) emit_cmp(s1h,s2h);
6256         else emit_test(s1h,s1h);
6257         emit_cmovne_reg(alt,addr);
6258       }
6259       if(s2l>=0) emit_cmp(s1l,s2l);
6260       else emit_test(s1l,s1l);
6261       emit_cmovne_reg(alt,addr);
6262     }
6263   }
6264   if((opcode[i]&0x3f)==5) // BNE
6265   {
6266     #ifdef HAVE_CMOV_IMM
6267     if(s1h<0) {
6268       if(s2l>=0) emit_cmp(s1l,s2l);
6269       else emit_test(s1l,s1l);
6270       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6271     }
6272     else
6273     #endif
6274     {
6275       assert(s1l>=0);
6276       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6277       if(s1h>=0) {
6278         if(s2h>=0) emit_cmp(s1h,s2h);
6279         else emit_test(s1h,s1h);
6280         emit_cmovne_reg(alt,addr);
6281       }
6282       if(s2l>=0) emit_cmp(s1l,s2l);
6283       else emit_test(s1l,s1l);
6284       emit_cmovne_reg(alt,addr);
6285     }
6286   }
6287   if((opcode[i]&0x3f)==0x14) // BEQL
6288   {
6289     if(s1h>=0) {
6290       if(s2h>=0) emit_cmp(s1h,s2h);
6291       else emit_test(s1h,s1h);
6292       nottaken=(int)out;
6293       emit_jne(0);
6294     }
6295     if(s2l>=0) emit_cmp(s1l,s2l);
6296     else emit_test(s1l,s1l);
6297     if(nottaken) set_jump_target(nottaken,(int)out);
6298     nottaken=(int)out;
6299     emit_jne(0);
6300   }
6301   if((opcode[i]&0x3f)==0x15) // BNEL
6302   {
6303     if(s1h>=0) {
6304       if(s2h>=0) emit_cmp(s1h,s2h);
6305       else emit_test(s1h,s1h);
6306       taken=(int)out;
6307       emit_jne(0);
6308     }
6309     if(s2l>=0) emit_cmp(s1l,s2l);
6310     else emit_test(s1l,s1l);
6311     nottaken=(int)out;
6312     emit_jeq(0);
6313     if(taken) set_jump_target(taken,(int)out);
6314   }
6315   if((opcode[i]&0x3f)==6) // BLEZ
6316   {
6317     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6318     emit_cmpimm(s1l,1);
6319     if(s1h>=0) emit_mov(addr,ntaddr);
6320     emit_cmovl_reg(alt,addr);
6321     if(s1h>=0) {
6322       emit_test(s1h,s1h);
6323       emit_cmovne_reg(ntaddr,addr);
6324       emit_cmovs_reg(alt,addr);
6325     }
6326   }
6327   if((opcode[i]&0x3f)==7) // BGTZ
6328   {
6329     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6330     emit_cmpimm(s1l,1);
6331     if(s1h>=0) emit_mov(addr,alt);
6332     emit_cmovl_reg(ntaddr,addr);
6333     if(s1h>=0) {
6334       emit_test(s1h,s1h);
6335       emit_cmovne_reg(alt,addr);
6336       emit_cmovs_reg(ntaddr,addr);
6337     }
6338   }
6339   if((opcode[i]&0x3f)==0x16) // BLEZL
6340   {
6341     assert((opcode[i]&0x3f)!=0x16);
6342   }
6343   if((opcode[i]&0x3f)==0x17) // BGTZL
6344   {
6345     assert((opcode[i]&0x3f)!=0x17);
6346   }
6347   assert(opcode[i]!=1); // BLTZ/BGEZ
6348
6349   //FIXME: Check CSREG
6350   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6351     if((source[i]&0x30000)==0) // BC1F
6352     {
6353       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6354       emit_testimm(s1l,0x800000);
6355       emit_cmovne_reg(alt,addr);
6356     }
6357     if((source[i]&0x30000)==0x10000) // BC1T
6358     {
6359       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6360       emit_testimm(s1l,0x800000);
6361       emit_cmovne_reg(alt,addr);
6362     }
6363     if((source[i]&0x30000)==0x20000) // BC1FL
6364     {
6365       emit_testimm(s1l,0x800000);
6366       nottaken=(int)out;
6367       emit_jne(0);
6368     }
6369     if((source[i]&0x30000)==0x30000) // BC1TL
6370     {
6371       emit_testimm(s1l,0x800000);
6372       nottaken=(int)out;
6373       emit_jeq(0);
6374     }
6375   }
6376
6377   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6378   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6379   if(likely[i]||unconditional)
6380   {
6381     emit_movimm(ba[i],HOST_BTREG);
6382   }
6383   else if(addr!=HOST_BTREG)
6384   {
6385     emit_mov(addr,HOST_BTREG);
6386   }
6387   void *branch_addr=out;
6388   emit_jmp(0);
6389   int target_addr=start+i*4+5;
6390   void *stub=out;
6391   void *compiled_target_addr=check_addr(target_addr);
6392   emit_extjump_ds((int)branch_addr,target_addr);
6393   if(compiled_target_addr) {
6394     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6395     add_link(target_addr,stub);
6396   }
6397   else set_jump_target((int)branch_addr,(int)stub);
6398   if(likely[i]) {
6399     // Not-taken path
6400     set_jump_target((int)nottaken,(int)out);
6401     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6402     void *branch_addr=out;
6403     emit_jmp(0);
6404     int target_addr=start+i*4+8;
6405     void *stub=out;
6406     void *compiled_target_addr=check_addr(target_addr);
6407     emit_extjump_ds((int)branch_addr,target_addr);
6408     if(compiled_target_addr) {
6409       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6410       add_link(target_addr,stub);
6411     }
6412     else set_jump_target((int)branch_addr,(int)stub);
6413   }
6414 }
6415
6416 // Assemble the delay slot for the above
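// The delay slot becomes its own entry point (the odd vaddr start+1 appears
// to tag it as a delay-slot entry).  After executing it, the generated code
// compares the saved branch target with the fall-through address start+4 and
// either continues within this block or dispatches through jump_vaddr_reg[].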
6417 static void pagespan_ds()
6418 {
6419   assem_debug("initial delay slot:\n");
6420   u_int vaddr=start+1;
6421   u_int page=get_page(vaddr);
6422   u_int vpage=get_vpage(vaddr);
6423   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6424   do_dirty_stub_ds();
6425   ll_add(jump_in+page,vaddr,(void *)out);
6426   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6427   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6428     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6429   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6430     emit_writeword(HOST_BTREG,(int)&branch_target);
6431   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6432   address_generation(0,&regs[0],regs[0].regmap_entry);
6433   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6434     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6435   cop1_usable=0;
6436   is_delayslot=0;
6437   switch(itype[0]) {
6438     case ALU:
6439       alu_assemble(0,&regs[0]);break;
6440     case IMM16:
6441       imm16_assemble(0,&regs[0]);break;
6442     case SHIFT:
6443       shift_assemble(0,&regs[0]);break;
6444     case SHIFTIMM:
6445       shiftimm_assemble(0,&regs[0]);break;
6446     case LOAD:
6447       load_assemble(0,&regs[0]);break;
6448     case LOADLR:
6449       loadlr_assemble(0,&regs[0]);break;
6450     case STORE:
6451       store_assemble(0,&regs[0]);break;
6452     case STORELR:
6453       storelr_assemble(0,&regs[0]);break;
6454     case COP0:
6455       cop0_assemble(0,&regs[0]);break;
6456     case COP1:
6457       cop1_assemble(0,&regs[0]);break;
6458     case C1LS:
6459       c1ls_assemble(0,&regs[0]);break;
6460     case COP2:
6461       cop2_assemble(0,&regs[0]);break;
6462     case C2LS:
6463       c2ls_assemble(0,&regs[0]);break;
6464     case C2OP:
6465       c2op_assemble(0,&regs[0]);break;
6466     case FCONV:
6467       fconv_assemble(0,&regs[0]);break;
6468     case FLOAT:
6469       float_assemble(0,&regs[0]);break;
6470     case FCOMP:
6471       fcomp_assemble(0,&regs[0]);break;
6472     case MULTDIV:
6473       multdiv_assemble(0,&regs[0]);break;
6474     case MOV:
6475       mov_assemble(0,&regs[0]);break;
6476     case SYSCALL:
6477     case HLECALL:
6478     case SPAN:
6479     case UJUMP:
6480     case RJUMP:
6481     case CJUMP:
6482     case SJUMP:
6483     case FJUMP:
6484       printf("Jump in the delay slot.  This is probably a bug.\n");
6485   }
6486   int btaddr=get_reg(regs[0].regmap,BTREG);
6487   if(btaddr<0) {
6488     btaddr=get_reg(regs[0].regmap,-1);
6489     emit_readword((int)&branch_target,btaddr);
6490   }
6491   assert(btaddr!=HOST_CCREG);
6492   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6493 #ifdef HOST_IMM8
6494   emit_movimm(start+4,HOST_TEMPREG);
6495   emit_cmp(btaddr,HOST_TEMPREG);
6496 #else
6497   emit_cmpimm(btaddr,start+4);
6498 #endif
6499   int branch=(int)out;
6500   emit_jeq(0);
6501   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6502   emit_jmp(jump_vaddr_reg[btaddr]);
6503   set_jump_target(branch,(int)out);
6504   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6505   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6506 }
6507
6508 // Basic liveness analysis for MIPS registers
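// Walks the block backwards computing, per instruction, a bitmask of MIPS
// registers whose current value is dead ("unneeded"): u covers the low 32
// bits, uu the upper halves of 64-bit values.  Roughly, for each instruction:
//   u |= (1LL<<rt1[i]) | (1LL<<rt2[i]);    // a value about to be overwritten is dead
//   u &= ~((1LL<<rs1[i]) | (1LL<<rs2[i])); // a value about to be read is live
// with extra merging for branches and their delay slots; recursion into
// backward branch targets is capped (r<2) to bound compilation time.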
6509 void unneeded_registers(int istart,int iend,int r)
6510 {
6511   int i;
6512   uint64_t u,uu,b,bu;
6513   uint64_t temp_u,temp_uu;
6514   uint64_t tdep;
6515   if(iend==slen-1) {
6516     u=1;uu=1;
6517   }else{
6518     u=unneeded_reg[iend+1];
6519     uu=unneeded_reg_upper[iend+1];
6520     u=1;uu=1;
6521   }
6522   for (i=iend;i>=istart;i--)
6523   {
6524     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6525     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6526     {
6527       // If subroutine call, flag return address as a possible branch target
6528       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6529       
6530       if(ba[i]<start || ba[i]>=(start+slen*4))
6531       {
6532         // Branch out of this block, flush all regs
6533         u=1;
6534         uu=1;
6535         /* Hexagon hack 
6536         if(itype[i]==UJUMP&&rt1[i]==31)
6537         {
6538           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6539         }
6540         if(itype[i]==RJUMP&&rs1[i]==31)
6541         {
6542           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6543         }
6544         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6545           if(itype[i]==UJUMP&&rt1[i]==31)
6546           {
6547             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6548             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6549           }
6550           if(itype[i]==RJUMP&&rs1[i]==31)
6551           {
6552             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6553             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6554           }
6555         }*/
6556         branch_unneeded_reg[i]=u;
6557         branch_unneeded_reg_upper[i]=uu;
6558         // Merge in delay slot
6559         tdep=(~uu>>rt1[i+1])&1;
6560         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6561         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6562         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6563         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6564         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6565         u|=1;uu|=1;
6566         // If branch is "likely" (and conditional)
6567         // then we skip the delay slot on the fall-thru path
6568         if(likely[i]) {
6569           if(i<slen-1) {
6570             u&=unneeded_reg[i+2];
6571             uu&=unneeded_reg_upper[i+2];
6572           }
6573           else
6574           {
6575             u=1;
6576             uu=1;
6577           }
6578         }
6579       }
6580       else
6581       {
6582         // Internal branch, flag target
6583         bt[(ba[i]-start)>>2]=1;
6584         if(ba[i]<=start+i*4) {
6585           // Backward branch
6586           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6587           {
6588             // Unconditional branch
6589             temp_u=1;temp_uu=1;
6590           } else {
6591             // Conditional branch (not taken case)
6592             temp_u=unneeded_reg[i+2];
6593             temp_uu=unneeded_reg_upper[i+2];
6594           }
6595           // Merge in delay slot
6596           tdep=(~temp_uu>>rt1[i+1])&1;
6597           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6598           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6599           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6600           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6601           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6602           temp_u|=1;temp_uu|=1;
6603           // If branch is "likely" (and conditional)
6604           // then we skip the delay slot on the fall-thru path
6605           if(likely[i]) {
6606             if(i<slen-1) {
6607               temp_u&=unneeded_reg[i+2];
6608               temp_uu&=unneeded_reg_upper[i+2];
6609             }
6610             else
6611             {
6612               temp_u=1;
6613               temp_uu=1;
6614             }
6615           }
6616           tdep=(~temp_uu>>rt1[i])&1;
6617           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6618           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6619           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6620           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6621           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6622           temp_u|=1;temp_uu|=1;
6623           unneeded_reg[i]=temp_u;
6624           unneeded_reg_upper[i]=temp_uu;
6625           // Only go three levels deep.  This recursion can take an
6626           // excessive amount of time if there are a lot of nested loops.
6627           if(r<2) {
6628             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6629           }else{
6630             unneeded_reg[(ba[i]-start)>>2]=1;
6631             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6632           }
6633         } /*else*/ if(1) {
6634           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6635           {
6636             // Unconditional branch
6637             u=unneeded_reg[(ba[i]-start)>>2];
6638             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6639             branch_unneeded_reg[i]=u;
6640             branch_unneeded_reg_upper[i]=uu;
6641         //u=1;
6642         //uu=1;
6643         //branch_unneeded_reg[i]=u;
6644         //branch_unneeded_reg_upper[i]=uu;
6645             // Merge in delay slot
6646             tdep=(~uu>>rt1[i+1])&1;
6647             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6648             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6649             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6650             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6651             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6652             u|=1;uu|=1;
6653           } else {
6654             // Conditional branch
6655             b=unneeded_reg[(ba[i]-start)>>2];
6656             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6657             branch_unneeded_reg[i]=b;
6658             branch_unneeded_reg_upper[i]=bu;
6659         //b=1;
6660         //bu=1;
6661         //branch_unneeded_reg[i]=b;
6662         //branch_unneeded_reg_upper[i]=bu;
6663             // Branch delay slot
6664             tdep=(~uu>>rt1[i+1])&1;
6665             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6666             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6667             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6668             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6669             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6670             b|=1;bu|=1;
6671             // If branch is "likely" then we skip the
6672             // delay slot on the fall-thru path
6673             if(likely[i]) {
6674               u=b;
6675               uu=bu;
6676               if(i<slen-1) {
6677                 u&=unneeded_reg[i+2];
6678                 uu&=unneeded_reg_upper[i+2];
6679         //u=1;
6680         //uu=1;
6681               }
6682             } else {
6683               u&=b;
6684               uu&=bu;
6685         //u=1;
6686         //uu=1;
6687             }
6688             if(i<slen-1) {
6689               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6690               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6691         //branch_unneeded_reg[i]=1;
6692         //branch_unneeded_reg_upper[i]=1;
6693             } else {
6694               branch_unneeded_reg[i]=1;
6695               branch_unneeded_reg_upper[i]=1;
6696             }
6697           }
6698         }
6699       }
6700     }
6701     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
6702     {
6703       // SYSCALL instruction (software interrupt)
6704       u=1;
6705       uu=1;
6706     }
6707     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6708     {
6709       // ERET instruction (return from interrupt)
6710       u=1;
6711       uu=1;
6712     }
6713     //u=uu=1; // DEBUG
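    // tdep is 1 when the upper half of rt1[i]'s result is still needed below
    // this point; in that case dep1/dep2 (the registers whose sign-extension
    // produces that upper half) must keep their upper halves, which is what
    // the "source-target dependencies" masking below does.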
6714     tdep=(~uu>>rt1[i])&1;
6715     // Written registers are unneeded
6716     u|=1LL<<rt1[i];
6717     u|=1LL<<rt2[i];
6718     uu|=1LL<<rt1[i];
6719     uu|=1LL<<rt2[i];
6720     // Accessed registers are needed
6721     u&=~(1LL<<rs1[i]);
6722     u&=~(1LL<<rs2[i]);
6723     uu&=~(1LL<<us1[i]);
6724     uu&=~(1LL<<us2[i]);
6725     // Source-target dependencies
6726     uu&=~(tdep<<dep1[i]);
6727     uu&=~(tdep<<dep2[i]);
6728     // R0 is always unneeded
6729     u|=1;uu|=1;
6730     // Save it
6731     unneeded_reg[i]=u;
6732     unneeded_reg_upper[i]=uu;
6733     /*
6734     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6735     printf("U:");
6736     int r;
6737     for(r=1;r<=CCREG;r++) {
6738       if((unneeded_reg[i]>>r)&1) {
6739         if(r==HIREG) printf(" HI");
6740         else if(r==LOREG) printf(" LO");
6741         else printf(" r%d",r);
6742       }
6743     }
6744     printf(" UU:");
6745     for(r=1;r<=CCREG;r++) {
6746       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6747         if(r==HIREG) printf(" HI");
6748         else if(r==LOREG) printf(" LO");
6749         else printf(" r%d",r);
6750       }
6751     }
6752     printf("\n");*/
6753   }
6754 #ifdef FORCE32
6755   for (i=iend;i>=istart;i--)
6756   {
6757     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6758   }
6759 #endif
6760 }
6761
6762 // Identify registers which are likely to contain 32-bit values
6763 // This is used to predict whether any branches will jump to a
6764 // location with 64-bit values in registers.
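// is32 is a bitmask over MIPS registers: bit r set means register r is
// provisionally assumed to hold a 32-bit value (its upper word is just the
// sign extension of the lower word).  The per-instruction result is recorded
// in p32[i].  For example ADDIU and LUI always set the destination's bit,
// while DADDIU and the 64-bit loads (LD/LWU/LDL/LDR) clear it.  On the 32-bit
// PSX core this distinction is largely moot; it matters for the 64-bit
// (MUPEN64) build.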
6765 static void provisional_32bit()
6766 {
6767   int i,j;
6768   uint64_t is32=1;
6769   uint64_t lastbranch=1;
6770   
6771   for(i=0;i<slen;i++)
6772   {
6773     if(i>0) {
6774       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6775         if(i>1) is32=lastbranch;
6776         else is32=1;
6777       }
6778     }
6779     if(i>1)
6780     {
6781       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6782         if(likely[i-2]) {
6783           if(i>2) is32=lastbranch;
6784           else is32=1;
6785         }
6786       }
6787       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6788       {
6789         if(rs1[i-2]==0||rs2[i-2]==0)
6790         {
6791           if(rs1[i-2]) {
6792             is32|=1LL<<rs1[i-2];
6793           }
6794           if(rs2[i-2]) {
6795             is32|=1LL<<rs2[i-2];
6796           }
6797         }
6798       }
6799     }
6800     // If something jumps here with 64-bit values
6801     // then promote those registers to 64 bits
6802     if(bt[i])
6803     {
6804       uint64_t temp_is32=is32;
6805       for(j=i-1;j>=0;j--)
6806       {
6807         if(ba[j]==start+i*4) 
6808           //temp_is32&=branch_regs[j].is32;
6809           temp_is32&=p32[j];
6810       }
6811       for(j=i;j<slen;j++)
6812       {
6813         if(ba[j]==start+i*4) 
6814           temp_is32=1;
6815       }
6816       is32=temp_is32;
6817     }
6818     int type=itype[i];
6819     int op=opcode[i];
6820     int op2=opcode2[i];
6821     int rt=rt1[i];
6822     int s1=rs1[i];
6823     int s2=rs2[i];
6824     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6825       // Branches don't write registers, consider the delay slot instead.
6826       type=itype[i+1];
6827       op=opcode[i+1];
6828       op2=opcode2[i+1];
6829       rt=rt1[i+1];
6830       s1=rs1[i+1];
6831       s2=rs2[i+1];
6832       lastbranch=is32;
6833     }
6834     switch(type) {
6835       case LOAD:
6836         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6837            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6838           is32&=~(1LL<<rt);
6839         else
6840           is32|=1LL<<rt;
6841         break;
6842       case STORE:
6843       case STORELR:
6844         break;
6845       case LOADLR:
6846         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6847         if(op==0x22) is32|=1LL<<rt; // LWL
6848         break;
6849       case IMM16:
6850         if (op==0x08||op==0x09|| // ADDI/ADDIU
6851             op==0x0a||op==0x0b|| // SLTI/SLTIU
6852             op==0x0c|| // ANDI
6853             op==0x0f)  // LUI
6854         {
6855           is32|=1LL<<rt;
6856         }
6857         if(op==0x18||op==0x19) { // DADDI/DADDIU
6858           is32&=~(1LL<<rt);
6859           //if(imm[i]==0)
6860           //  is32|=((is32>>s1)&1LL)<<rt;
6861         }
6862         if(op==0x0d||op==0x0e) { // ORI/XORI
6863           uint64_t sr=((is32>>s1)&1LL);
6864           is32&=~(1LL<<rt);
6865           is32|=sr<<rt;
6866         }
6867         break;
6868       case UJUMP:
6869         break;
6870       case RJUMP:
6871         break;
6872       case CJUMP:
6873         break;
6874       case SJUMP:
6875         break;
6876       case FJUMP:
6877         break;
6878       case ALU:
6879         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6880           is32|=1LL<<rt;
6881         }
6882         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6883           is32|=1LL<<rt;
6884         }
6885         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6886           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6887           is32&=~(1LL<<rt);
6888           is32|=sr<<rt;
6889         }
6890         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6891           if(s1==0&&s2==0) {
6892             is32|=1LL<<rt;
6893           }
6894           else if(s2==0) {
6895             uint64_t sr=((is32>>s1)&1LL);
6896             is32&=~(1LL<<rt);
6897             is32|=sr<<rt;
6898           }
6899           else if(s1==0) {
6900             uint64_t sr=((is32>>s2)&1LL);
6901             is32&=~(1LL<<rt);
6902             is32|=sr<<rt;
6903           }
6904           else {
6905             is32&=~(1LL<<rt);
6906           }
6907         }
6908         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6909           if(s1==0&&s2==0) {
6910             is32|=1LL<<rt;
6911           }
6912           else if(s2==0) {
6913             uint64_t sr=((is32>>s1)&1LL);
6914             is32&=~(1LL<<rt);
6915             is32|=sr<<rt;
6916           }
6917           else {
6918             is32&=~(1LL<<rt);
6919           }
6920         }
6921         break;
6922       case MULTDIV:
6923         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6924           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
6925         }
6926         else {
6927           is32|=(1LL<<HIREG)|(1LL<<LOREG);
6928         }
6929         break;
6930       case MOV:
6931         {
6932           uint64_t sr=((is32>>s1)&1LL);
6933           is32&=~(1LL<<rt);
6934           is32|=sr<<rt;
6935         }
6936         break;
6937       case SHIFT:
6938         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
6939         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
6940         break;
6941       case SHIFTIMM:
6942         is32|=1LL<<rt;
6943         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
6944         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
6945         break;
6946       case COP0:
6947         if(op2==0) is32|=1LL<<rt; // MFC0
6948         break;
6949       case COP1:
6950       case COP2:
6951         if(op2==0) is32|=1LL<<rt; // MFC1
6952         if(op2==1) is32&=~(1LL<<rt); // DMFC1
6953         if(op2==2) is32|=1LL<<rt; // CFC1
6954         break;
6955       case C1LS:
6956       case C2LS:
6957         break;
6958       case FLOAT:
6959       case FCONV:
6960         break;
6961       case FCOMP:
6962         break;
6963       case C2OP:
6964       case SYSCALL:
6965       case HLECALL:
6966         break;
6967       default:
6968         break;
6969     }
6970     is32|=1;
6971     p32[i]=is32;
6972
6973     if(i>0)
6974     {
6975       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
6976       {
6977         if(rt1[i-1]==31) // JAL/JALR
6978         {
6979           // Subroutine call will return here, don't alloc any registers
6980           is32=1;
6981         }
6982         else if(i+1<slen)
6983         {
6984           // Internal branch will jump here, match registers to caller
6985           is32=0x3FFFFFFFFLL;
6986         }
6987       }
6988     }
6989   }
6990 }
6991
6992 // Identify registers which may be assumed to contain 32-bit values
6993 // and where optimizations will rely on this.
6994 // This is used to determine whether backward branches can safely
6995 // jump to a location with 64-bit values in registers.
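// pr32[i] (the provisional counterpart of requires_32bit) collects the MIPS
// registers that must really be 32-bit at instruction i: a backwards scan
// adds registers whose 32-bit-ness is consumed (us1/us2, dep1/dep2, and dirty
// 32-bit entries that will be written back as 32-bit values) and drops
// registers that are overwritten before being used.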
6996 static void provisional_r32()
6997 {
6998   u_int r32=0;
6999   int i;
7000   
7001   for (i=slen-1;i>=0;i--)
7002   {
7003     int hr;
7004     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7005     {
7006       if(ba[i]<start || ba[i]>=(start+slen*4))
7007       {
7008         // Branch out of this block, don't need anything
7009         r32=0;
7010       }
7011       else
7012       {
7013         // Internal branch
7014         // Need whatever matches the target
7015         // (and doesn't get overwritten by the delay slot instruction)
7016         r32=0;
7017         int t=(ba[i]-start)>>2;
7018         if(ba[i]>start+i*4) {
7019           // Forward branch
7020           //if(!(requires_32bit[t]&~regs[i].was32))
7021           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7022           if(!(pr32[t]&~regs[i].was32))
7023             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7024         }else{
7025           // Backward branch
7026           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7027             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7028         }
7029       }
7030       // Conditional branch may need registers for following instructions
7031       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7032       {
7033         if(i<slen-2) {
7034           //r32|=requires_32bit[i+2];
7035           r32|=pr32[i+2];
7036           r32&=regs[i].was32;
7037           // Mark this address as a branch target since it may be called
7038           // upon return from interrupt
7039           //bt[i+2]=1;
7040         }
7041       }
7042       // Merge in delay slot
7043       if(!likely[i]) {
7044         // These are overwritten unless the branch is "likely"
7045         // and the delay slot is nullified if not taken
7046         r32&=~(1LL<<rt1[i+1]);
7047         r32&=~(1LL<<rt2[i+1]);
7048       }
7049       // Assume these are needed (delay slot)
7050       if(us1[i+1]>0)
7051       {
7052         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7053       }
7054       if(us2[i+1]>0)
7055       {
7056         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7057       }
7058       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7059       {
7060         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7061       }
7062       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7063       {
7064         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7065       }
7066     }
7067     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7068     {
7069       // SYSCALL instruction (software interrupt)
7070       r32=0;
7071     }
7072     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7073     {
7074       // ERET instruction (return from interrupt)
7075       r32=0;
7076     }
7077     // Check 32 bits
7078     r32&=~(1LL<<rt1[i]);
7079     r32&=~(1LL<<rt2[i]);
7080     if(us1[i]>0)
7081     {
7082       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7083     }
7084     if(us2[i]>0)
7085     {
7086       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7087     }
7088     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7089     {
7090       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7091     }
7092     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7093     {
7094       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7095     }
7096     //requires_32bit[i]=r32;
7097     pr32[i]=r32;
7098     
7099     // Dirty registers which are 32-bit, require 32-bit input
7100     // as they will be written as 32-bit values
7101     for(hr=0;hr<HOST_REGS;hr++)
7102     {
7103       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7104         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7105           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7106             pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7107           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7108         }
7109       }
7110     }
7111   }
7112 }
7113
7114 // Write back dirty registers as soon as we will no longer modify them,
7115 // so that we don't end up with lots of writes at the branches.
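// will_dirty[i] and wont_dirty[i] are bitmasks over *host* registers (1<<hr),
// filled in by a backwards scan much like unneeded_registers().  Roughly,
// will_dirty marks host registers that are certain to be dirtied again on
// every path before the block is left (so writing them back now would be
// wasted work), while wont_dirty marks those that will not be modified again
// (so they can be written back early and marked clean).  When wr is nonzero
// the results are folded into the dirty/wasdirty bits of regs[] and
// branch_regs[]; the recursive calls for backward branches pass wr=0 and only
// record the masks.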
7116 void clean_registers(int istart,int iend,int wr)
7117 {
7118   int i;
7119   int r;
7120   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7121   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7122   if(iend==slen-1) {
7123     will_dirty_i=will_dirty_next=0;
7124     wont_dirty_i=wont_dirty_next=0;
7125   }else{
7126     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7127     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7128   }
7129   for (i=iend;i>=istart;i--)
7130   {
7131     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7132     {
7133       if(ba[i]<start || ba[i]>=(start+slen*4))
7134       {
7135         // Branch out of this block, flush all regs
7136         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7137         {
7138           // Unconditional branch
7139           will_dirty_i=0;
7140           wont_dirty_i=0;
7141           // Merge in delay slot (will dirty)
7142           for(r=0;r<HOST_REGS;r++) {
7143             if(r!=EXCLUDE_REG) {
7144               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7145               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7146               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7147               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7148               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7149               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7150               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7151               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7152               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7153               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7154               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7155               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7156               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7157               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7158             }
7159           }
7160         }
7161         else
7162         {
7163           // Conditional branch
7164           will_dirty_i=0;
7165           wont_dirty_i=wont_dirty_next;
7166           // Merge in delay slot (will dirty)
7167           for(r=0;r<HOST_REGS;r++) {
7168             if(r!=EXCLUDE_REG) {
7169               if(!likely[i]) {
7170                 // Might not dirty if likely branch is not taken
7171                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7172                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7173                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7174                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7175                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7176                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7177                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7178                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7179                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7180                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7181                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7182                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7183                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7184                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7185               }
7186             }
7187           }
7188         }
7189         // Merge in delay slot (wont dirty)
7190         for(r=0;r<HOST_REGS;r++) {
7191           if(r!=EXCLUDE_REG) {
7192             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7193             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7194             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7195             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7196             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7197             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7198             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7199             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7200             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7201             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7202           }
7203         }
7204         if(wr) {
7205           #ifndef DESTRUCTIVE_WRITEBACK
7206           branch_regs[i].dirty&=wont_dirty_i;
7207           #endif
7208           branch_regs[i].dirty|=will_dirty_i;
7209         }
7210       }
7211       else
7212       {
7213         // Internal branch
7214         if(ba[i]<=start+i*4) {
7215           // Backward branch
7216           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7217           {
7218             // Unconditional branch
7219             temp_will_dirty=0;
7220             temp_wont_dirty=0;
7221             // Merge in delay slot (will dirty)
7222             for(r=0;r<HOST_REGS;r++) {
7223               if(r!=EXCLUDE_REG) {
7224                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7225                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7226                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7227                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7228                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7229                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7230                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7231                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7232                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7233                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7234                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7235                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7236                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7237                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7238               }
7239             }
7240           } else {
7241             // Conditional branch (not taken case)
7242             temp_will_dirty=will_dirty_next;
7243             temp_wont_dirty=wont_dirty_next;
7244             // Merge in delay slot (will dirty)
7245             for(r=0;r<HOST_REGS;r++) {
7246               if(r!=EXCLUDE_REG) {
7247                 if(!likely[i]) {
7248                   // Will not dirty if likely branch is not taken
7249                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7250                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7251                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7252                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7253                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7254                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7255                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7256                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7257                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7258                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7259                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7260                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7261                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7262                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7263                 }
7264               }
7265             }
7266           }
7267           // Merge in delay slot (wont dirty)
7268           for(r=0;r<HOST_REGS;r++) {
7269             if(r!=EXCLUDE_REG) {
7270               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7271               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7272               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7273               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7274               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7275               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7276               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7277               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7278               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7279               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7280             }
7281           }
7282           // Deal with changed mappings
7283           if(i<iend) {
7284             for(r=0;r<HOST_REGS;r++) {
7285               if(r!=EXCLUDE_REG) {
7286                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7287                   temp_will_dirty&=~(1<<r);
7288                   temp_wont_dirty&=~(1<<r);
7289                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7290                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7291                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7292                   } else {
7293                     temp_will_dirty|=1<<r;
7294                     temp_wont_dirty|=1<<r;
7295                   }
7296                 }
7297               }
7298             }
7299           }
7300           if(wr) {
7301             will_dirty[i]=temp_will_dirty;
7302             wont_dirty[i]=temp_wont_dirty;
7303             clean_registers((ba[i]-start)>>2,i-1,0);
7304           }else{
7305             // Limit recursion.  It can take an excessive amount
7306             // of time if there are a lot of nested loops.
7307             will_dirty[(ba[i]-start)>>2]=0;
7308             wont_dirty[(ba[i]-start)>>2]=-1;
7309           }
7310         }
7311         /*else*/ if(1)
7312         {
7313           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7314           {
7315             // Unconditional branch
7316             will_dirty_i=0;
7317             wont_dirty_i=0;
7318           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7319             for(r=0;r<HOST_REGS;r++) {
7320               if(r!=EXCLUDE_REG) {
7321                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7322                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7323                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7324                 }
7325               }
7326             }
7327           //}
7328             // Merge in delay slot
7329             for(r=0;r<HOST_REGS;r++) {
7330               if(r!=EXCLUDE_REG) {
7331                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7332                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7333                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7334                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7335                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7336                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7337                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7338                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7339                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7340                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7341                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7342                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7343                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7344                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7345               }
7346             }
7347           } else {
7348             // Conditional branch
7349             will_dirty_i=will_dirty_next;
7350             wont_dirty_i=wont_dirty_next;
7351           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7352             for(r=0;r<HOST_REGS;r++) {
7353               if(r!=EXCLUDE_REG) {
7354                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7355                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7356                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7357                 }
7358                 else
7359                 {
7360                   will_dirty_i&=~(1<<r);
7361                 }
7362                 // Treat delay slot as part of branch too
7363                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7364                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7365                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7366                 }
7367                 else
7368                 {
7369                   will_dirty[i+1]&=~(1<<r);
7370                 }*/
7371               }
7372             }
7373           //}
7374             // Merge in delay slot
7375             for(r=0;r<HOST_REGS;r++) {
7376               if(r!=EXCLUDE_REG) {
7377                 if(!likely[i]) {
7378                   // Might not dirty if likely branch is not taken
7379                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7380                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7381                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7382                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7383                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7384                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7385                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7386                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7387                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7388                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7389                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7390                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7391                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7392                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7393                 }
7394               }
7395             }
7396           }
7397           // Merge in delay slot
7398           for(r=0;r<HOST_REGS;r++) {
7399             if(r!=EXCLUDE_REG) {
7400               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7401               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7402               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7403               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7404               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7405               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7406               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7407               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7408               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7409               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7410             }
7411           }
7412           if(wr) {
7413             #ifndef DESTRUCTIVE_WRITEBACK
7414             branch_regs[i].dirty&=wont_dirty_i;
7415             #endif
7416             branch_regs[i].dirty|=will_dirty_i;
7417           }
7418         }
7419       }
7420     }
7421     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7422     {
7423       // SYSCALL instruction (software interrupt)
7424       will_dirty_i=0;
7425       wont_dirty_i=0;
7426     }
7427     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7428     {
7429       // ERET instruction (return from interrupt)
7430       will_dirty_i=0;
7431       wont_dirty_i=0;
7432     }
7433     will_dirty_next=will_dirty_i;
7434     wont_dirty_next=wont_dirty_i;
7435     for(r=0;r<HOST_REGS;r++) {
7436       if(r!=EXCLUDE_REG) {
7437         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7438         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7439         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7440         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7441         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7442         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7443         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7444         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7445         if(i>istart) {
7446           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7447           {
7448             // Don't store a register immediately after writing it,
7449             // as that may prevent dual-issue.
7450             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7451             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7452           }
7453         }
7454       }
7455     }
7456     // Save it
7457     will_dirty[i]=will_dirty_i;
7458     wont_dirty[i]=wont_dirty_i;
7459     // Mark registers that won't be dirtied as not dirty
7460     if(wr) {
7461       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7462       for(r=0;r<HOST_REGS;r++) {
7463         if((will_dirty_i>>r)&1) {
7464           printf(" r%d",r);
7465         }
7466       }
7467       printf("\n");*/
7468
7469       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7470         regs[i].dirty|=will_dirty_i;
7471         #ifndef DESTRUCTIVE_WRITEBACK
7472         regs[i].dirty&=wont_dirty_i;
7473         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7474         {
7475           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7476             for(r=0;r<HOST_REGS;r++) {
7477               if(r!=EXCLUDE_REG) {
7478                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7479                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7480                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7481               }
7482             }
7483           }
7484         }
7485         else
7486         {
7487           if(i<iend) {
7488             for(r=0;r<HOST_REGS;r++) {
7489               if(r!=EXCLUDE_REG) {
7490                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7491                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7492                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7493               }
7494             }
7495           }
7496         }
7497         #endif
7498       //}
7499     }
7500     // Deal with changed mappings
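    // If the host-register assignment changed at this instruction, the bits
    // computed above must follow the MIPS register rather than the host
    // register: when the same MIPS register is found in another host register
    // nr, its will/wont bits are copied over from nr; when it is no longer
    // mapped at all, the bits fall back to whether that register is unneeded
    // here.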
7501     temp_will_dirty=will_dirty_i;
7502     temp_wont_dirty=wont_dirty_i;
7503     for(r=0;r<HOST_REGS;r++) {
7504       if(r!=EXCLUDE_REG) {
7505         int nr;
7506         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7507           if(wr) {
7508             #ifndef DESTRUCTIVE_WRITEBACK
7509             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7510             #endif
7511             regs[i].wasdirty|=will_dirty_i&(1<<r);
7512           }
7513         }
7514         else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7515           // Register moved to a different register
7516           will_dirty_i&=~(1<<r);
7517           wont_dirty_i&=~(1<<r);
7518           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7519           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7520           if(wr) {
7521             #ifndef DESTRUCTIVE_WRITEBACK
7522             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7523             #endif
7524             regs[i].wasdirty|=will_dirty_i&(1<<r);
7525           }
7526         }
7527         else {
7528           will_dirty_i&=~(1<<r);
7529           wont_dirty_i&=~(1<<r);
7530           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7531             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7532             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7533           } else {
7534             wont_dirty_i|=1<<r;
7535             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7536           }
7537         }
7538       }
7539     }
7540   }
7541 }
7542
7543   /* disassembly */
7544 void disassemble_inst(int i)
7545 {
7546     if (bt[i]) printf("*"); else printf(" ");
7547     switch(itype[i]) {
7548       case UJUMP:
7549         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7550       case CJUMP:
7551         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7552       case SJUMP:
7553         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7554       case FJUMP:
7555         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7556       case RJUMP:
7557         if (opcode[i]==0x9&&rt1[i]!=31)
7558           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7559         else
7560           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7561         break;
7562       case SPAN:
7563         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7564       case IMM16:
7565         if(opcode[i]==0xf) //LUI
7566           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7567         else
7568           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7569         break;
7570       case LOAD:
7571       case LOADLR:
7572         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7573         break;
7574       case STORE:
7575       case STORELR:
7576         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7577         break;
7578       case ALU:
7579       case SHIFT:
7580         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7581         break;
7582       case MULTDIV:
7583         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7584         break;
7585       case SHIFTIMM:
7586         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7587         break;
7588       case MOV:
7589         if((opcode2[i]&0x1d)==0x10)
7590           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7591         else if((opcode2[i]&0x1d)==0x11)
7592           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7593         else
7594           printf (" %x: %s\n",start+i*4,insn[i]);
7595         break;
7596       case COP0:
7597         if(opcode2[i]==0)
7598           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7599         else if(opcode2[i]==4)
7600           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7601         else printf (" %x: %s\n",start+i*4,insn[i]);
7602         break;
7603       case COP1:
7604         if(opcode2[i]<3)
7605           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7606         else if(opcode2[i]>3)
7607           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7608         else printf (" %x: %s\n",start+i*4,insn[i]);
7609         break;
7610       case COP2:
7611         if(opcode2[i]<3)
7612           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7613         else if(opcode2[i]>3)
7614           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7615         else printf (" %x: %s\n",start+i*4,insn[i]);
7616         break;
7617       case C1LS:
7618         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7619         break;
7620       case C2LS:
7621         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7622         break;
7623       default:
7624         //printf (" %s %8x\n",insn[i],source[i]);
7625         printf (" %x: %s\n",start+i*4,insn[i]);
7626     }
7627 }
7628
7629 void new_dynarec_init()
7630 {
7631   printf("Init new dynarec\n");
7632   out=(u_char *)BASE_ADDR;
7633   if (mmap (out, 1<<TARGET_SIZE_2,
7634             PROT_READ | PROT_WRITE | PROT_EXEC,
7635             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7636             -1, 0) <= 0) {printf("mmap() failed\n");}
7637 #ifdef MUPEN64
7638   rdword=&readmem_dword;
7639   fake_pc.f.r.rs=&readmem_dword;
7640   fake_pc.f.r.rt=&readmem_dword;
7641   fake_pc.f.r.rd=&readmem_dword;
7642 #endif
7643   int n;
7644   for(n=0x80000;n<0x80800;n++)
7645     invalid_code[n]=1;
7646   for(n=0;n<65536;n++)
7647     hash_table[n][0]=hash_table[n][2]=-1;
7648   memset(mini_ht,-1,sizeof(mini_ht));
7649   memset(restore_candidate,0,sizeof(restore_candidate));
7650   copy=shadow;
7651   expirep=16384; // Expiry pointer, +2 blocks
7652   pending_exception=0;
7653   literalcount=0;
7654 #ifdef HOST_IMM8
7655   // Copy this into local area so we don't have to put it in every literal pool
7656   invc_ptr=invalid_code;
7657 #endif
7658   stop_after_jal=0;
7659   // TLB
7660   using_tlb=0;
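  // memory_map[] is indexed by guest page (vaddr>>12, 4KB pages) and stores
  // (host_base - guest_base)>>2, so code in this file locates a guest word as
  //   host = (u_int *)(vaddr + (memory_map[vaddr>>12]<<2));
  // An entry of -1 marks an unmapped page.  Below, the 0x80000000-0x807FFFFF
  // window is pointed at rdram and everything else is left unmapped.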
7661   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7662     memory_map[n]=-1;
7663   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7664     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7665   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7666     memory_map[n]=-1;
7667 #ifdef MUPEN64
7668   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7669     writemem[n] = write_nomem_new;
7670     writememb[n] = write_nomemb_new;
7671     writememh[n] = write_nomemh_new;
7672 #ifndef FORCE32
7673     writememd[n] = write_nomemd_new;
7674 #endif
7675     readmem[n] = read_nomem_new;
7676     readmemb[n] = read_nomemb_new;
7677     readmemh[n] = read_nomemh_new;
7678 #ifndef FORCE32
7679     readmemd[n] = read_nomemd_new;
7680 #endif
7681   }
7682   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7683     writemem[n] = write_rdram_new;
7684     writememb[n] = write_rdramb_new;
7685     writememh[n] = write_rdramh_new;
7686 #ifndef FORCE32
7687     writememd[n] = write_rdramd_new;
7688 #endif
7689   }
7690   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7691     writemem[n] = write_nomem_new;
7692     writememb[n] = write_nomemb_new;
7693     writememh[n] = write_nomemh_new;
7694 #ifndef FORCE32
7695     writememd[n] = write_nomemd_new;
7696 #endif
7697     readmem[n] = read_nomem_new;
7698     readmemb[n] = read_nomemb_new;
7699     readmemh[n] = read_nomemh_new;
7700 #ifndef FORCE32
7701     readmemd[n] = read_nomemd_new;
7702 #endif
7703   }
7704 #endif
7705   tlb_hacks();
7706   arch_init();
7707 }
7708
7709 void new_dynarec_cleanup()
7710 {
7711   int n;
7712   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7713   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7714   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7715   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7716   #ifdef ROM_COPY
7717   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7718   #endif
7719 }
7720
7721 int new_recompile_block(int addr)
7722 {
7723 /*
7724   if(addr==0x800cd050) {
7725     int block;
7726     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7727     int n;
7728     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7729   }
7730 */
7731   //if(Count==365117028) tracedebug=1;
7732   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7733   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7734   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7735   //if(debug) 
7736   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7737   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7738   /*if(Count>=312978186) {
7739     rlist();
7740   }*/
7741   //rlist();
7742   start = (u_int)addr&~3;
7743   //assert(((u_int)addr&1)==0);
7744 #ifdef PCSX
7745   if (Config.HLE && start == 0x80001000) {
7746     // XXX: is this enough? Maybe check hleSoftCall?
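    // The stub emitted here does no translation at all: it simply records the
    // PC (pcaddr = start) and exits through new_dyna_leave, presumably so the
    // HLE BIOS call is serviced outside of translated code.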
7747     u_int beginning=(u_int)out;
7748     u_int page=get_page(start);
7749     ll_add(jump_in+page,start,out);
7750     invalid_code[start>>12]=0;
7751     emit_movimm(start,0);
7752     emit_writeword(0,(int)&pcaddr);
7753     emit_jmp((int)new_dyna_leave);
7754 #ifdef __arm__
7755     __clear_cache((void *)beginning,out);
7756 #endif
7757     return 0;
7758   }
7759   else if ((u_int)addr < 0x00200000) {
7760     // used for BIOS calls mostly?
7761     source = (u_int *)((u_int)rdram+start-0);
7762     pagelimit = 0x00200000;
7763   }
7764   else
7765 #endif
7766 #ifdef MUPEN64
7767   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7768     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7769     pagelimit = 0xa4001000;
7770   }
7771   else
7772 #endif
7773   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
7774     source = (u_int *)((u_int)rdram+start-0x80000000);
7775     pagelimit = 0x80000000+RAM_SIZE;
7776   }
7777 #ifndef DISABLE_TLB
7778   else if ((signed int)addr >= (signed int)0xC0000000) {
7779     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7780     //if(tlb_LUT_r[start>>12])
7781       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7782     if((signed int)memory_map[start>>12]>=0) {
7783       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7784       pagelimit=(start+4096)&0xFFFFF000;
7785       int map=memory_map[start>>12];
7786       int i;
7787       for(i=0;i<5;i++) {
7788         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7789         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7790       }
7791       assem_debug("pagelimit=%x\n",pagelimit);
7792       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7793     }
7794     else {
7795       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7796       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
7797       return 1; // Caller will invoke exception handler
7798     }
7799     //printf("source= %x\n",(int)source);
7800   }
7801 #endif
7802   else {
7803     printf("Compile at bogus memory address: %x \n", (int)addr);
7804     exit(1);
7805   }
7806
7807   /* Pass 1: disassemble */
7808   /* Pass 2: register dependencies, branch targets */
7809   /* Pass 3: register allocation */
7810   /* Pass 4: branch dependencies */
7811   /* Pass 5: pre-alloc */
7812   /* Pass 6: optimize clean/dirty state */
7813   /* Pass 7: flag 32-bit registers */
7814   /* Pass 8: assembly */
7815   /* Pass 9: linker */
7816   /* Pass 10: garbage collection / free memory */
7817
7818   int i,j;
7819   int done=0;
7820   unsigned int type,op,op2;
7821
7822   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7823   
7824   /* Pass 1 disassembly */
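  // Standard MIPS field extraction used by the decoder below:
  //   op  = source[i]>>26         bits 31..26, primary opcode
  //   op2 = source[i]&0x3f        bits  5..0,  funct      (special, tlb, C1.S)
  //   op2 = (source[i]>>16)&0x1f  bits 20..16, rt field   (regimm)
  //   op2 = (source[i]>>21)&0x1f  bits 25..21, rs/format  (cop0/cop1)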
7825
7826   for(i=0;!done;i++) {
7827     bt[i]=0;likely[i]=0;op2=0;
7828     opcode[i]=op=source[i]>>26;
7829     switch(op)
7830     {
7831       case 0x00: strcpy(insn[i],"special"); type=NI;
7832         op2=source[i]&0x3f;
7833         switch(op2)
7834         {
7835           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7836           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7837           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7838           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7839           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7840           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7841           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7842           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7843           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7844           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7845           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7846           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7847           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7848           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7849           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7850           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7851           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7852           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7853           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7854           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7855           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7856           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7857           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7858           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7859           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7860           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7861           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7862           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7863           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7864           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7865           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7866           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7867           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7868           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7869           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7870           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7871           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7872           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7873           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7874           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7875           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7876           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7877           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7878           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7879           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7880           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7881           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7882           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7883           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7884           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7885           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7886           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7887         }
7888         break;
7889       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7890         op2=(source[i]>>16)&0x1f;
7891         switch(op2)
7892         {
7893           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7894           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7895           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7896           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7897           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7898           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7899           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7900           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7901           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7902           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7903           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7904           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7905           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7906           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7907         }
7908         break;
7909       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7910       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7911       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7912       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7913       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7914       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7915       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7916       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7917       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7918       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7919       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7920       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7921       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7922       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7923       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7924         op2=(source[i]>>21)&0x1f;
7925         switch(op2)
7926         {
7927           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7928           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7929           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7930           switch(source[i]&0x3f)
7931           {
7932             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7933             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7934             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7935             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7936 #ifdef PCSX
7937             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7938 #else
7939             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7940 #endif
7941           }
7942         }
7943         break;
7944       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7945         op2=(source[i]>>21)&0x1f;
7946         switch(op2)
7947         {
7948           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7949           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7950           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7951           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7952           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7953           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7954           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7955           switch((source[i]>>16)&0x3)
7956           {
7957             case 0x00: strcpy(insn[i],"BC1F"); break;
7958             case 0x01: strcpy(insn[i],"BC1T"); break;
7959             case 0x02: strcpy(insn[i],"BC1FL"); break;
7960             case 0x03: strcpy(insn[i],"BC1TL"); break;
7961           }
7962           break;
7963           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7964           switch(source[i]&0x3f)
7965           {
7966             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7967             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7968             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7969             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7970             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7971             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7972             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7973             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7974             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7975             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7976             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7977             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7978             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7979             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7980             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7981             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7982             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7983             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7984             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7985             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7986             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7987             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7988             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7989             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7990             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7991             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7992             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7993             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7994             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7995             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7996             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7997             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7998             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7999             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8000             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8001           }
8002           break;
8003           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8004           switch(source[i]&0x3f)
8005           {
8006             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8007             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8008             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8009             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8010             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8011             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8012             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8013             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8014             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8015             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8016             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8017             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8018             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8019             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8020             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8021             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8022             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8023             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8024             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8025             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8026             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8027             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8028             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8029             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8030             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8031             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8032             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8033             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8034             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8035             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8036             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8037             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8038             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8039             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8040             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8041           }
8042           break;
8043           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8044           switch(source[i]&0x3f)
8045           {
8046             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8047             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8048           }
8049           break;
8050           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8051           switch(source[i]&0x3f)
8052           {
8053             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8054             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8055           }
8056           break;
8057         }
8058         break;
8059       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8060       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8061       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8062       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8063 #ifndef FORCE32
8064       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8065       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8066       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8067       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8068 #endif
8069       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8070       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8071       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8072       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8073       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8074       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8075       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8076       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8077       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8078       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8079       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8080       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8081 #ifndef FORCE32
8082       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8083       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8084 #endif
8085       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8086       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8087       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8088       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8089 #ifndef FORCE32
8090       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8091       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8092       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8093 #endif
8094       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8095       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8096 #ifndef FORCE32
8097       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8098       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8099       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8100 #endif
8101 #ifdef PCSX
8102       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8103         op2=(source[i]>>21)&0x1f;
8104         switch(op2)
8105         {
8106           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8107           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8108           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8109           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8110           default:
8111             if (gte_handlers[source[i]&0x3f]!=NULL) {
8112               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8113               type=C2OP;
8114             }
8115             break;
8116         }
8117         break;
8118       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8119       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8120       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8121 #endif
8122       default: strcpy(insn[i],"???"); type=NI;
8123         printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8124         break;
8125     }
8126     itype[i]=type;
8127     opcode2[i]=op2;
8128     /* Get registers/immediates */
8129     lt1[i]=0;
8130     us1[i]=0;
8131     us2[i]=0;
8132     dep1[i]=0;
8133     dep2[i]=0;
8134     switch(type) {
8135       case LOAD:
8136         rs1[i]=(source[i]>>21)&0x1f;
8137         rs2[i]=0;
8138         rt1[i]=(source[i]>>16)&0x1f;
8139         rt2[i]=0;
8140         imm[i]=(short)source[i];
8141         break;
8142       case STORE:
8143       case STORELR:
8144         rs1[i]=(source[i]>>21)&0x1f;
8145         rs2[i]=(source[i]>>16)&0x1f;
8146         rt1[i]=0;
8147         rt2[i]=0;
8148         imm[i]=(short)source[i];
8149         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8150         break;
8151       case LOADLR:
8152         // LWL/LWR only load part of the register,
8153         // therefore the target register must be treated as a source too
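             // (only the bytes selected by the low address bits are replaced
             //  from memory; the remaining bytes keep rt's previous contents)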
8154         rs1[i]=(source[i]>>21)&0x1f;
8155         rs2[i]=(source[i]>>16)&0x1f;
8156         rt1[i]=(source[i]>>16)&0x1f;
8157         rt2[i]=0;
8158         imm[i]=(short)source[i];
8159         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8160         if(op==0x26) dep1[i]=rt1[i]; // LWR
8161         break;
8162       case IMM16:
8163         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8164         else rs1[i]=(source[i]>>21)&0x1f;
8165         rs2[i]=0;
8166         rt1[i]=(source[i]>>16)&0x1f;
8167         rt2[i]=0;
8168         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8169           imm[i]=(unsigned short)source[i];
8170         }else{
8171           imm[i]=(short)source[i];
8172         }
8173         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8174         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8175         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8176         break;
8177       case UJUMP:
8178         rs1[i]=0;
8179         rs2[i]=0;
8180         rt1[i]=0;
8181         rt2[i]=0;
8182         // The JAL instruction writes to r31.
8183         if (op&1) {
8184           rt1[i]=31;
8185         }
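             // Jumps also read CCREG (the emulated cycle counter); listing it
             // as a source keeps the cycle count live across the transfer.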
8186         rs2[i]=CCREG;
8187         break;
8188       case RJUMP:
8189         rs1[i]=(source[i]>>21)&0x1f;
8190         rs2[i]=0;
8191         rt1[i]=0;
8192         rt2[i]=0;
8193         // The JALR instruction writes to rd.
8194         if (op2&1) {
8195           rt1[i]=(source[i]>>11)&0x1f;
8196         }
8197         rs2[i]=CCREG;
8198         break;
8199       case CJUMP:
8200         rs1[i]=(source[i]>>21)&0x1f;
8201         rs2[i]=(source[i]>>16)&0x1f;
8202         rt1[i]=0;
8203         rt2[i]=0;
8204         if(op&2) { // BGTZ/BLEZ
8205           rs2[i]=0;
8206         }
8207         us1[i]=rs1[i];
8208         us2[i]=rs2[i];
8209         likely[i]=op>>4;
8210         break;
8211       case SJUMP:
8212         rs1[i]=(source[i]>>21)&0x1f;
8213         rs2[i]=CCREG;
8214         rt1[i]=0;
8215         rt2[i]=0;
8216         us1[i]=rs1[i];
8217         if(op2&0x10) { // BxxAL
8218           rt1[i]=31;
8219           // NOTE: If the branch is not taken, r31 is still overwritten
8220         }
8221         likely[i]=(op2&2)>>1;
8222         break;
8223       case FJUMP:
8224         rs1[i]=FSREG;
8225         rs2[i]=CSREG;
8226         rt1[i]=0;
8227         rt2[i]=0;
8228         likely[i]=((source[i])>>17)&1;
8229         break;
8230       case ALU:
8231         rs1[i]=(source[i]>>21)&0x1f; // source
8232         rs2[i]=(source[i]>>16)&0x1f; // second operand (rt)
8233         rt1[i]=(source[i]>>11)&0x1f; // destination
8234         rt2[i]=0;
8235         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8236           us1[i]=rs1[i];us2[i]=rs2[i];
8237         }
8238         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8239           dep1[i]=rs1[i];dep2[i]=rs2[i];
8240         }
8241         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8242           dep1[i]=rs1[i];dep2[i]=rs2[i];
8243         }
8244         break;
8245       case MULTDIV:
8246         rs1[i]=(source[i]>>21)&0x1f; // source
8247         rs2[i]=(source[i]>>16)&0x1f; // second operand (multiplier/divisor)
8248         rt1[i]=HIREG;
8249         rt2[i]=LOREG;
8250         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8251           us1[i]=rs1[i];us2[i]=rs2[i];
8252         }
8253         break;
8254       case MOV:
8255         rs1[i]=0;
8256         rs2[i]=0;
8257         rt1[i]=0;
8258         rt2[i]=0;
8259         if(op2==0x10) rs1[i]=HIREG; // MFHI
8260         if(op2==0x11) rt1[i]=HIREG; // MTHI
8261         if(op2==0x12) rs1[i]=LOREG; // MFLO
8262         if(op2==0x13) rt1[i]=LOREG; // MTLO
8263         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8264         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8265         dep1[i]=rs1[i];
8266         break;
8267       case SHIFT:
8268         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8269         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8270         rt1[i]=(source[i]>>11)&0x1f; // destination
8271         rt2[i]=0;
8272         // DSLLV/DSRLV/DSRAV are 64-bit
8273         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8274         break;
8275       case SHIFTIMM:
8276         rs1[i]=(source[i]>>16)&0x1f;
8277         rs2[i]=0;
8278         rt1[i]=(source[i]>>11)&0x1f;
8279         rt2[i]=0;
8280         imm[i]=(source[i]>>6)&0x1f;
8281         // DSxx32 instructions
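             // e.g. DSLL32 shifts by sa+32, so fold the extra 32 into the
             // immediate by setting bit 5 of the shift amount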
8282         if(op2>=0x3c) imm[i]|=0x20;
8283         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8284         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8285         break;
8286       case COP0:
8287         rs1[i]=0;
8288         rs2[i]=0;
8289         rt1[i]=0;
8290         rt2[i]=0;
8291         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8292         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8293         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8294         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8295         break;
8296       case COP1:
8297       case COP2:
8298         rs1[i]=0;
8299         rs2[i]=0;
8300         rt1[i]=0;
8301         rt2[i]=0;
8302         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8303         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8304         if(op2==5) us1[i]=rs1[i]; // DMTC1
8305         rs2[i]=CSREG;
8306         break;
8307       case C1LS:
8308         rs1[i]=(source[i]>>21)&0x1F;
8309         rs2[i]=CSREG;
8310         rt1[i]=0;
8311         rt2[i]=0;
8312         imm[i]=(short)source[i];
8313         break;
8314       case C2LS:
8315         rs1[i]=(source[i]>>21)&0x1F;
8316         rs2[i]=0;
8317         rt1[i]=0;
8318         rt2[i]=0;
8319         imm[i]=(short)source[i];
8320         break;
8321       case FLOAT:
8322       case FCONV:
8323         rs1[i]=0;
8324         rs2[i]=CSREG;
8325         rt1[i]=0;
8326         rt2[i]=0;
8327         break;
8328       case FCOMP:
8329         rs1[i]=FSREG;
8330         rs2[i]=CSREG;
8331         rt1[i]=FSREG;
8332         rt2[i]=0;
8333         break;
8334       case SYSCALL:
8335       case HLECALL:
8336         rs1[i]=CCREG;
8337         rs2[i]=0;
8338         rt1[i]=0;
8339         rt2[i]=0;
8340         break;
8341       default:
8342         rs1[i]=0;
8343         rs2[i]=0;
8344         rt1[i]=0;
8345         rt2[i]=0;
8346     }
8347     /* Calculate branch target addresses */
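         // J/JAL:    target = (delay-slot PC & 0xF0000000) | (low 26 bits << 2)
         // branches: target = delay-slot PC + (sign-extended 16-bit offset << 2)
         // never-taken branches simply fall through to PC+8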
8348     if(type==UJUMP)
8349       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8350     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8351       ba[i]=start+i*4+8; // Ignore never taken branch
8352     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8353       ba[i]=start+i*4+8; // Ignore never taken branch
8354     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8355       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8356     else ba[i]=-1;
8357     /* Is this the end of the block? */
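         // ((source[i-1]>>16)==0x1000 matches "beq $0,$0,...", i.e. an
         //  unconditional branch, which ends the block just like J/JR)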
8358     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8359       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8360         done=1;
8361         // Does the block continue due to a branch?
8362         for(j=i-1;j>=0;j--)
8363         {
8364           if(ba[j]==start+i*4+4) done=j=0;
8365           if(ba[j]==start+i*4+8) done=j=0;
8366         }
8367       }
8368       else {
8369         if(stop_after_jal) done=1;
8370         // Stop on BREAK
8371         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8372       }
8373       // Don't recompile stuff that's already compiled
8374       if(check_addr(start+i*4+4)) done=1;
8375       // Don't get too close to the limit
8376       if(i>MAXBLOCK/2) done=1;
8377     }
8378     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8379     if(itype[i]==HLECALL) done=1;
8380     //assert(i<MAXBLOCK-1);
8381     if(start+i*4==pagelimit-4) done=1;
8382     assert(start+i*4<pagelimit);
8383     if (i==MAXBLOCK-1) done=1;
8384     // Stop if we're compiling junk
8385     if(itype[i]==NI&&opcode[i]==0x11) {
8386       done=stop_after_jal=1;
8387       printf("Disabled speculative precompilation\n");
8388     }
8389   }
8390   slen=i;
8391   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8392     if(start+i*4==pagelimit) {
8393       itype[i-1]=SPAN;
8394     }
8395   }
8396   assert(slen>0);
8397
8398   /* Pass 2 - Register dependencies and branch targets */
8399
8400   unneeded_registers(0,slen-1,0);
8401   
8402   /* Pass 3 - Register allocation */
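       // Walk the block once, keeping the running allocation state in "current"
       // and saving a per-instruction snapshot in regs[] (branch_regs[] holds
       // the corresponding state for each branch, used on its taken path).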
8403
8404   struct regstat current; // Current register allocations/status
8405   current.is32=1;
8406   current.dirty=0;
8407   current.u=unneeded_reg[0];
8408   current.uu=unneeded_reg_upper[0];
8409   clear_all_regs(current.regmap);
8410   alloc_reg(&current,0,CCREG);
8411   dirty_reg(&current,CCREG);
8412   current.isconst=0;
8413   current.wasconst=0;
8414   int ds=0;
8415   int cc=0;
8416   int hr;
8417   
8418   provisional_32bit();
8419   
8420   if((u_int)addr&1) {
8421     // First instruction is delay slot
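         // (instructions are word-aligned, so bit 0 of addr is free to flag a
         //  block that begins in a delay slot; the branch target is expected
         //  to be held in BTREG on entry)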
8422     cc=-1;
8423     bt[1]=1;
8424     ds=1;
8425     unneeded_reg[0]=1;
8426     unneeded_reg_upper[0]=1;
8427     current.regmap[HOST_BTREG]=BTREG;
8428   }
8429   
8430   for(i=0;i<slen;i++)
8431   {
8432     if(bt[i])
8433     {
8434       int hr;
8435       for(hr=0;hr<HOST_REGS;hr++)
8436       {
8437         // Is this really necessary?
8438         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8439       }
8440       current.isconst=0;
8441     }
8442     if(i>1)
8443     {
8444       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8445       {
8446         if(rs1[i-2]==0||rs2[i-2]==0)
8447         {
8448           if(rs1[i-2]) {
8449             current.is32|=1LL<<rs1[i-2];
8450             int hr=get_reg(current.regmap,rs1[i-2]|64);
8451             if(hr>=0) current.regmap[hr]=-1;
8452           }
8453           if(rs2[i-2]) {
8454             current.is32|=1LL<<rs2[i-2];
8455             int hr=get_reg(current.regmap,rs2[i-2]|64);
8456             if(hr>=0) current.regmap[hr]=-1;
8457           }
8458         }
8459       }
8460     }
8461     // If something jumps here with 64-bit values
8462     // then promote those registers to 64 bits
8463     if(bt[i])
8464     {
8465       uint64_t temp_is32=current.is32;
8466       for(j=i-1;j>=0;j--)
8467       {
8468         if(ba[j]==start+i*4) 
8469           temp_is32&=branch_regs[j].is32;
8470       }
8471       for(j=i;j<slen;j++)
8472       {
8473         if(ba[j]==start+i*4) 
8474           //temp_is32=1;
8475           temp_is32&=p32[j];
8476       }
8477       if(temp_is32!=current.is32) {
8478         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8479         #ifdef DESTRUCTIVE_WRITEBACK
8480         for(hr=0;hr<HOST_REGS;hr++)
8481         {
8482           int r=current.regmap[hr];
8483           if(r>0&&r<64)
8484           {
8485             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8486               temp_is32|=1LL<<r;
8487               //printf("restore %d\n",r);
8488             }
8489           }
8490         }
8491         #endif
8492         current.is32=temp_is32;
8493       }
8494     }
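         // FORCE32 builds (32-bit-only CPU, e.g. the PSX's R3000) treat every
         // GPR as 32 bits wide, overriding the 64-bit tracking above.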
8495 #ifdef FORCE32
8496     memset(p32, 0xff, sizeof(p32));
8497     current.is32=-1LL;
8498 #endif
8499
8500     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8501     regs[i].wasconst=current.isconst;
8502     regs[i].was32=current.is32;
8503     regs[i].wasdirty=current.dirty;
8504     #ifdef DESTRUCTIVE_WRITEBACK
8505     // To change a dirty register from 32 to 64 bits, we must write
8506     // it out during the previous cycle (for branches, 2 cycles)
8507     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8508     {
8509       uint64_t temp_is32=current.is32;
8510       for(j=i-1;j>=0;j--)
8511       {
8512         if(ba[j]==start+i*4+4) 
8513           temp_is32&=branch_regs[j].is32;
8514       }
8515       for(j=i;j<slen;j++)
8516       {
8517         if(ba[j]==start+i*4+4) 
8518           //temp_is32=1;
8519           temp_is32&=p32[j];
8520       }
8521       if(temp_is32!=current.is32) {
8522         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8523         for(hr=0;hr<HOST_REGS;hr++)
8524         {
8525           int r=current.regmap[hr];
8526           if(r>0)
8527           {
8528             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8529               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8530               {
8531                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8532                 {
8533                   //printf("dump %d/r%d\n",hr,r);
8534                   current.regmap[hr]=-1;
8535                   if(get_reg(current.regmap,r|64)>=0) 
8536                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8537                 }
8538               }
8539             }
8540           }
8541         }
8542       }
8543     }
8544     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8545     {
8546       uint64_t temp_is32=current.is32;
8547       for(j=i-1;j>=0;j--)
8548       {
8549         if(ba[j]==start+i*4+8) 
8550           temp_is32&=branch_regs[j].is32;
8551       }
8552       for(j=i;j<slen;j++)
8553       {
8554         if(ba[j]==start+i*4+8) 
8555           //temp_is32=1;
8556           temp_is32&=p32[j];
8557       }
8558       if(temp_is32!=current.is32) {
8559         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8560         for(hr=0;hr<HOST_REGS;hr++)
8561         {
8562           int r=current.regmap[hr];
8563           if(r>0)
8564           {
8565             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8566               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8567               {
8568                 //printf("dump %d/r%d\n",hr,r);
8569                 current.regmap[hr]=-1;
8570                 if(get_reg(current.regmap,r|64)>=0) 
8571                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8572               }
8573             }
8574           }
8575         }
8576       }
8577     }
8578     #endif
8579     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8580       if(i+1<slen) {
8581         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8582         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8583         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8584         current.u|=1;
8585         current.uu|=1;
8586       } else {
8587         current.u=1;
8588         current.uu=1;
8589       }
8590     } else {
8591       if(i+1<slen) {
8592         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8593         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8594         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8595         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8596         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8597         current.u|=1;
8598         current.uu|=1;
8599       } else { printf("oops, branch at end of block with no delay slot\n"); exit(1); }
8600     }
8601     is_ds[i]=ds;
8602     if(ds) {
8603       ds=0; // Skip delay slot, already allocated as part of branch
8604       // ...but we need to alloc it in case something jumps here
8605       if(i+1<slen) {
8606         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8607         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8608       }else{
8609         current.u=branch_unneeded_reg[i-1];
8610         current.uu=branch_unneeded_reg_upper[i-1];
8611       }
8612       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8613       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8614       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8615       current.u|=1;
8616       current.uu|=1;
8617       struct regstat temp;
8618       memcpy(&temp,&current,sizeof(current));
8619       temp.wasdirty=temp.dirty;
8620       temp.was32=temp.is32;
8621       // TODO: Take into account unconditional branches, as below
8622       delayslot_alloc(&temp,i);
8623       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8624       regs[i].wasdirty=temp.wasdirty;
8625       regs[i].was32=temp.was32;
8626       regs[i].dirty=temp.dirty;
8627       regs[i].is32=temp.is32;
8628       regs[i].isconst=0;
8629       regs[i].wasconst=0;
8630       current.isconst=0;
8631       // Create entry (branch target) regmap
8632       for(hr=0;hr<HOST_REGS;hr++)
8633       {
8634         int r=temp.regmap[hr];
8635         if(r>=0) {
8636           if(r!=regmap_pre[i][hr]) {
8637             regs[i].regmap_entry[hr]=-1;
8638           }
8639           else
8640           {
8641             if(r<64){
8642               if((current.u>>r)&1) {
8643                 regs[i].regmap_entry[hr]=-1;
8644                 regs[i].regmap[hr]=-1;
8645                 // Don't clear regs in the delay slot as the branch might need them
8646                 //current.regmap[hr]=-1;
8647               }else
8648                 regs[i].regmap_entry[hr]=r;
8649             }
8650             else {
8651               if((current.uu>>(r&63))&1) {
8652                 regs[i].regmap_entry[hr]=-1;
8653                 regs[i].regmap[hr]=-1;
8654                 // Don't clear regs in the delay slot as the branch might need them
8655                 //current.regmap[hr]=-1;
8656               }else
8657                 regs[i].regmap_entry[hr]=r;
8658             }
8659           }
8660         } else {
8661           // First instruction expects CCREG to be allocated
8662           if(i==0&&hr==HOST_CCREG) 
8663             regs[i].regmap_entry[hr]=CCREG;
8664           else
8665             regs[i].regmap_entry[hr]=-1;
8666         }
8667       }
8668     }
8669     else { // Not delay slot
8670       switch(itype[i]) {
8671         case UJUMP:
8672           //current.isconst=0; // DEBUG
8673           //current.wasconst=0; // DEBUG
8674           //regs[i].wasconst=0; // DEBUG
8675           clear_const(&current,rt1[i]);
8676           alloc_cc(&current,i);
8677           dirty_reg(&current,CCREG);
8678           if (rt1[i]==31) {
8679             alloc_reg(&current,i,31);
8680             dirty_reg(&current,31);
8681             assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8682             #ifdef REG_PREFETCH
8683             alloc_reg(&current,i,PTEMP);
8684             #endif
8685             //current.is32|=1LL<<rt1[i];
8686           }
8687           delayslot_alloc(&current,i+1);
8688           //current.isconst=0; // DEBUG
8689           ds=1;
8690           //printf("i=%d, isconst=%x\n",i,current.isconst);
8691           break;
8692         case RJUMP:
8693           //current.isconst=0;
8694           //current.wasconst=0;
8695           //regs[i].wasconst=0;
8696           clear_const(&current,rs1[i]);
8697           clear_const(&current,rt1[i]);
8698           alloc_cc(&current,i);
8699           dirty_reg(&current,CCREG);
8700           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8701             alloc_reg(&current,i,rs1[i]);
8702             if (rt1[i]!=0) {
8703               alloc_reg(&current,i,rt1[i]);
8704               dirty_reg(&current,rt1[i]);
8705               assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8706               #ifdef REG_PREFETCH
8707               alloc_reg(&current,i,PTEMP);
8708               #endif
8709             }
8710             #ifdef USE_MINI_HT
8711             if(rs1[i]==31) { // JALR
8712               alloc_reg(&current,i,RHASH);
8713               #ifndef HOST_IMM_ADDR32
8714               alloc_reg(&current,i,RHTBL);
8715               #endif
8716             }
8717             #endif
8718             delayslot_alloc(&current,i+1);
8719           } else {
8720             // The delay slot overwrites our source register,
8721             // so allocate a temporary register to hold the old value.
8722             current.isconst=0;
8723             current.wasconst=0;
8724             regs[i].wasconst=0;
8725             delayslot_alloc(&current,i+1);
8726             current.isconst=0;
8727             alloc_reg(&current,i,RTEMP);
8728           }
8729           //current.isconst=0; // DEBUG
8730           ds=1;
8731           break;
8732         case CJUMP:
8733           //current.isconst=0;
8734           //current.wasconst=0;
8735           //regs[i].wasconst=0;
8736           clear_const(&current,rs1[i]);
8737           clear_const(&current,rs2[i]);
8738           if((opcode[i]&0x3E)==4) // BEQ/BNE
8739           {
8740             alloc_cc(&current,i);
8741             dirty_reg(&current,CCREG);
8742             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8743             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8744             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8745             {
8746               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8747               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8748             }
8749             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8750                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8751               // The delay slot overwrites one of our conditions.
8752               // Allocate the branch condition registers instead.
8753               // Note that such a sequence of instructions could
8754               // be considered a bug since the branch cannot be
8755               // re-executed if an exception occurs.
8756               current.isconst=0;
8757               current.wasconst=0;
8758               regs[i].wasconst=0;
8759               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8760               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8761               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8762               {
8763                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8764                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8765               }
8766             }
8767             else delayslot_alloc(&current,i+1);
8768           }
8769           else
8770           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8771           {
8772             alloc_cc(&current,i);
8773             dirty_reg(&current,CCREG);
8774             alloc_reg(&current,i,rs1[i]);
8775             if(!(current.is32>>rs1[i]&1))
8776             {
8777               alloc_reg64(&current,i,rs1[i]);
8778             }
8779             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8780               // The delay slot overwrites one of our conditions.
8781               // Allocate the branch condition registers instead.
8782               // Note that such a sequence of instructions could
8783               // be considered a bug since the branch cannot be
8784               // re-executed if an exception occurs.
8785               current.isconst=0;
8786               current.wasconst=0;
8787               regs[i].wasconst=0;
8788               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8789               if(!((current.is32>>rs1[i])&1))
8790               {
8791                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8792               }
8793             }
8794             else delayslot_alloc(&current,i+1);
8795           }
8796           else
8797           // Don't alloc the delay slot yet because we might not execute it
8798           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8799           {
8800             current.isconst=0;
8801             current.wasconst=0;
8802             regs[i].wasconst=0;
8803             alloc_cc(&current,i);
8804             dirty_reg(&current,CCREG);
8805             alloc_reg(&current,i,rs1[i]);
8806             alloc_reg(&current,i,rs2[i]);
8807             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8808             {
8809               alloc_reg64(&current,i,rs1[i]);
8810               alloc_reg64(&current,i,rs2[i]);
8811             }
8812           }
8813           else
8814           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8815           {
8816             current.isconst=0;
8817             current.wasconst=0;
8818             regs[i].wasconst=0;
8819             alloc_cc(&current,i);
8820             dirty_reg(&current,CCREG);
8821             alloc_reg(&current,i,rs1[i]);
8822             if(!(current.is32>>rs1[i]&1))
8823             {
8824               alloc_reg64(&current,i,rs1[i]);
8825             }
8826           }
8827           ds=1;
8828           //current.isconst=0;
8829           break;
8830         case SJUMP:
8831           //current.isconst=0;
8832           //current.wasconst=0;
8833           //regs[i].wasconst=0;
8834           clear_const(&current,rs1[i]);
8835           clear_const(&current,rt1[i]);
8836           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8837           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8838           {
8839             alloc_cc(&current,i);
8840             dirty_reg(&current,CCREG);
8841             alloc_reg(&current,i,rs1[i]);
8842             if(!(current.is32>>rs1[i]&1))
8843             {
8844               alloc_reg64(&current,i,rs1[i]);
8845             }
8846             if (rt1[i]==31) { // BLTZAL/BGEZAL
8847               alloc_reg(&current,i,31);
8848               dirty_reg(&current,31);
8849               assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8850               //#ifdef REG_PREFETCH
8851               //alloc_reg(&current,i,PTEMP);
8852               //#endif
8853               //current.is32|=1LL<<rt1[i];
8854             }
8855             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8856               // The delay slot overwrites the branch condition.
8857               // Allocate the branch condition registers instead.
8858               // Note that such a sequence of instructions could
8859               // be considered a bug since the branch cannot be
8860               // re-executed if an exception occurs.
8861               current.isconst=0;
8862               current.wasconst=0;
8863               regs[i].wasconst=0;
8864               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8865               if(!((current.is32>>rs1[i])&1))
8866               {
8867                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8868               }
8869             }
8870             else delayslot_alloc(&current,i+1);
8871           }
8872           else
8873           // Don't alloc the delay slot yet because we might not execute it
8874           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8875           {
8876             current.isconst=0;
8877             current.wasconst=0;
8878             regs[i].wasconst=0;
8879             alloc_cc(&current,i);
8880             dirty_reg(&current,CCREG);
8881             alloc_reg(&current,i,rs1[i]);
8882             if(!(current.is32>>rs1[i]&1))
8883             {
8884               alloc_reg64(&current,i,rs1[i]);
8885             }
8886           }
8887           ds=1;
8888           //current.isconst=0;
8889           break;
8890         case FJUMP:
8891           current.isconst=0;
8892           current.wasconst=0;
8893           regs[i].wasconst=0;
8894           if(likely[i]==0) // BC1F/BC1T
8895           {
8896             // TODO: Theoretically we can run out of registers here on x86.
8897             // The delay slot can allocate up to six, and we need to check
8898             // CSREG before executing the delay slot.  Possibly we could drop
8899             // the cycle count and reload it after checking that the FPU is
8900             // in a usable state, or else avoid out-of-order execution here.
8901             alloc_cc(&current,i);
8902             dirty_reg(&current,CCREG);
8903             alloc_reg(&current,i,FSREG);
8904             alloc_reg(&current,i,CSREG);
8905             if(itype[i+1]==FCOMP) {
8906               // The delay slot overwrites the branch condition.
8907               // Allocate the branch condition registers instead.
8908               // Note that such a sequence of instructions could
8909               // be considered a bug since the branch cannot be
8910               // re-executed if an exception occurs.
8911               alloc_cc(&current,i);
8912               dirty_reg(&current,CCREG);
8913               alloc_reg(&current,i,CSREG);
8914               alloc_reg(&current,i,FSREG);
8915             }
8916             else {
8917               delayslot_alloc(&current,i+1);
8918               alloc_reg(&current,i+1,CSREG);
8919             }
8920           }
8921           else
8922           // Don't alloc the delay slot yet because we might not execute it
8923           if(likely[i]) // BC1FL/BC1TL
8924           {
8925             alloc_cc(&current,i);
8926             dirty_reg(&current,CCREG);
8927             alloc_reg(&current,i,CSREG);
8928             alloc_reg(&current,i,FSREG);
8929           }
8930           ds=1;
8931           current.isconst=0;
8932           break;
8933         case IMM16:
8934           imm16_alloc(&current,i);
8935           break;
8936         case LOAD:
8937         case LOADLR:
8938           load_alloc(&current,i);
8939           break;
8940         case STORE:
8941         case STORELR:
8942           store_alloc(&current,i);
8943           break;
8944         case ALU:
8945           alu_alloc(&current,i);
8946           break;
8947         case SHIFT:
8948           shift_alloc(&current,i);
8949           break;
8950         case MULTDIV:
8951           multdiv_alloc(&current,i);
8952           break;
8953         case SHIFTIMM:
8954           shiftimm_alloc(&current,i);
8955           break;
8956         case MOV:
8957           mov_alloc(&current,i);
8958           break;
8959         case COP0:
8960           cop0_alloc(&current,i);
8961           break;
8962         case COP1:
8963         case COP2:
8964           cop1_alloc(&current,i);
8965           break;
8966         case C1LS:
8967           c1ls_alloc(&current,i);
8968           break;
8969         case C2LS:
8970           c2ls_alloc(&current,i);
8971           break;
8972         case C2OP:
8973           c2op_alloc(&current,i);
8974           break;
8975         case FCONV:
8976           fconv_alloc(&current,i);
8977           break;
8978         case FLOAT:
8979           float_alloc(&current,i);
8980           break;
8981         case FCOMP:
8982           fcomp_alloc(&current,i);
8983           break;
8984         case SYSCALL:
8985         case HLECALL:
8986           syscall_alloc(&current,i);
8987           break;
8988         case SPAN:
8989           pagespan_alloc(&current,i);
8990           break;
8991       }
8992       
8993       // Drop the upper half of registers that have become 32-bit
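           // (uu marks registers whose upper 32 bits are unneeded; destinations
           //  that are 32-bit here don't need their upper halves preserved)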
8994       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8995       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8996         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8997         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8998         current.uu|=1;
8999       } else {
9000         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9001         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9002         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9003         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9004         current.uu|=1;
9005       }
9006
9007       // Create entry (branch target) regmap
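           // regmap_entry records which guest register each host register is
           // expected to hold on entry to this instruction, i.e. what a branch
           // jumping here must provide; -1 means nothing in particular.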
9008       for(hr=0;hr<HOST_REGS;hr++)
9009       {
9010         int r,or,er;
9011         r=current.regmap[hr];
9012         if(r>=0) {
9013           if(r!=regmap_pre[i][hr]) {
9014             // TODO: delay slot (?)
9015             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9016             if(or<0||(r&63)>=TEMPREG){
9017               regs[i].regmap_entry[hr]=-1;
9018             }
9019             else
9020             {
9021               // Just move it to a different register
9022               regs[i].regmap_entry[hr]=r;
9023               // If it was dirty before, it's still dirty
9024               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9025             }
9026           }
9027           else
9028           {
9029             // Unneeded
9030             if(r==0){
9031               regs[i].regmap_entry[hr]=0;
9032             }
9033             else
9034             if(r<64){
9035               if((current.u>>r)&1) {
9036                 regs[i].regmap_entry[hr]=-1;
9037                 //regs[i].regmap[hr]=-1;
9038                 current.regmap[hr]=-1;
9039               }else
9040                 regs[i].regmap_entry[hr]=r;
9041             }
9042             else {
9043               if((current.uu>>(r&63))&1) {
9044                 regs[i].regmap_entry[hr]=-1;
9045                 //regs[i].regmap[hr]=-1;
9046                 current.regmap[hr]=-1;
9047               }else
9048                 regs[i].regmap_entry[hr]=r;
9049             }
9050           }
9051         } else {
9052           // Branches expect CCREG to be allocated at the target
9053           if(regmap_pre[i][hr]==CCREG) 
9054             regs[i].regmap_entry[hr]=CCREG;
9055           else
9056             regs[i].regmap_entry[hr]=-1;
9057         }
9058       }
9059       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9060     }
9061     /* Branch post-alloc */
9062     if(i>0)
9063     {
9064       current.was32=current.is32;
9065       current.wasdirty=current.dirty;
9066       switch(itype[i-1]) {
9067         case UJUMP:
9068           memcpy(&branch_regs[i-1],&current,sizeof(current));
9069           branch_regs[i-1].isconst=0;
9070           branch_regs[i-1].wasconst=0;
9071           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9072           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9073           alloc_cc(&branch_regs[i-1],i-1);
9074           dirty_reg(&branch_regs[i-1],CCREG);
9075           if(rt1[i-1]==31) { // JAL
9076             alloc_reg(&branch_regs[i-1],i-1,31);
9077             dirty_reg(&branch_regs[i-1],31);
9078             branch_regs[i-1].is32|=1LL<<31;
9079           }
9080           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9081           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9082           break;
9083         case RJUMP:
9084           memcpy(&branch_regs[i-1],&current,sizeof(current));
9085           branch_regs[i-1].isconst=0;
9086           branch_regs[i-1].wasconst=0;
9087           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9088           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9089           alloc_cc(&branch_regs[i-1],i-1);
9090           dirty_reg(&branch_regs[i-1],CCREG);
9091           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9092           if(rt1[i-1]!=0) { // JALR
9093             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9094             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9095             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9096           }
9097           #ifdef USE_MINI_HT
9098           if(rs1[i-1]==31) { // JALR
9099             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9100             #ifndef HOST_IMM_ADDR32
9101             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9102             #endif
9103           }
9104           #endif
9105           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9106           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9107           break;
9108         case CJUMP:
9109           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9110           {
9111             alloc_cc(&current,i-1);
9112             dirty_reg(&current,CCREG);
9113             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9114                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9115               // The delay slot overwrote one of our conditions
9116               // Delay slot goes after the test (in order)
9117               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9118               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9119               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9120               current.u|=1;
9121               current.uu|=1;
9122               delayslot_alloc(&current,i);
9123               current.isconst=0;
9124             }
9125             else
9126             {
9127               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9128               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9129               // Alloc the branch condition registers
9130               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9131               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9132               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9133               {
9134                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9135                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9136               }
9137             }
9138             memcpy(&branch_regs[i-1],&current,sizeof(current));
9139             branch_regs[i-1].isconst=0;
9140             branch_regs[i-1].wasconst=0;
9141             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9142             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9143           }
9144           else
9145           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9146           {
9147             alloc_cc(&current,i-1);
9148             dirty_reg(&current,CCREG);
9149             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9150               // The delay slot overwrote the branch condition
9151               // Delay slot goes after the test (in order)
9152               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9153               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9154               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9155               current.u|=1;
9156               current.uu|=1;
9157               delayslot_alloc(&current,i);
9158               current.isconst=0;
9159             }
9160             else
9161             {
9162               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9163               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9164               // Alloc the branch condition register
9165               alloc_reg(&current,i-1,rs1[i-1]);
9166               if(!(current.is32>>rs1[i-1]&1))
9167               {
9168                 alloc_reg64(&current,i-1,rs1[i-1]);
9169               }
9170             }
9171             memcpy(&branch_regs[i-1],&current,sizeof(current));
9172             branch_regs[i-1].isconst=0;
9173             branch_regs[i-1].wasconst=0;
9174             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9175             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9176           }
9177           else
9178           // Alloc the delay slot in case the branch is taken
9179           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9180           {
9181             memcpy(&branch_regs[i-1],&current,sizeof(current));
9182             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9183             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9184             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9185             alloc_cc(&branch_regs[i-1],i);
9186             dirty_reg(&branch_regs[i-1],CCREG);
9187             delayslot_alloc(&branch_regs[i-1],i);
9188             branch_regs[i-1].isconst=0;
9189             alloc_reg(&current,i,CCREG); // Not taken path
9190             dirty_reg(&current,CCREG);
9191             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9192           }
9193           else
9194           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9195           {
9196             memcpy(&branch_regs[i-1],&current,sizeof(current));
9197             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9198             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9199             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9200             alloc_cc(&branch_regs[i-1],i);
9201             dirty_reg(&branch_regs[i-1],CCREG);
9202             delayslot_alloc(&branch_regs[i-1],i);
9203             branch_regs[i-1].isconst=0;
9204             alloc_reg(&current,i,CCREG); // Not taken path
9205             dirty_reg(&current,CCREG);
9206             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9207           }
9208           break;
9209         case SJUMP:
9210           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9211           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9212           {
9213             alloc_cc(&current,i-1);
9214             dirty_reg(&current,CCREG);
9215             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9216               // The delay slot overwrote the branch condition
9217               // Delay slot goes after the test (in order)
9218               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9219               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9220               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9221               current.u|=1;
9222               current.uu|=1;
9223               delayslot_alloc(&current,i);
9224               current.isconst=0;
9225             }
9226             else
9227             {
9228               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9229               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9230               // Alloc the branch condition register
9231               alloc_reg(&current,i-1,rs1[i-1]);
9232               if(!(current.is32>>rs1[i-1]&1))
9233               {
9234                 alloc_reg64(&current,i-1,rs1[i-1]);
9235               }
9236             }
9237             memcpy(&branch_regs[i-1],&current,sizeof(current));
9238             branch_regs[i-1].isconst=0;
9239             branch_regs[i-1].wasconst=0;
9240             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9241             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9242           }
9243           else
9244           // Alloc the delay slot in case the branch is taken
9245           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9246           {
9247             memcpy(&branch_regs[i-1],&current,sizeof(current));
9248             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9249             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9250             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9251             alloc_cc(&branch_regs[i-1],i);
9252             dirty_reg(&branch_regs[i-1],CCREG);
9253             delayslot_alloc(&branch_regs[i-1],i);
9254             branch_regs[i-1].isconst=0;
9255             alloc_reg(&current,i,CCREG); // Not taken path
9256             dirty_reg(&current,CCREG);
9257             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9258           }
9259           // FIXME: BLTZAL/BGEZAL
9260           if(opcode2[i-1]&0x10) { // BxxZAL
9261             alloc_reg(&branch_regs[i-1],i-1,31);
9262             dirty_reg(&branch_regs[i-1],31);
9263             branch_regs[i-1].is32|=1LL<<31;
9264           }
9265           break;
9266         case FJUMP:
9267           if(likely[i-1]==0) // BC1F/BC1T
9268           {
9269             alloc_cc(&current,i-1);
9270             dirty_reg(&current,CCREG);
9271             if(itype[i]==FCOMP) {
9272               // The delay slot overwrote the branch condition
9273               // Delay slot goes after the test (in order)
9274               delayslot_alloc(&current,i);
9275               current.isconst=0;
9276             }
9277             else
9278             {
9279               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9280               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9281               // Alloc the branch condition register
9282               alloc_reg(&current,i-1,FSREG);
9283             }
9284             memcpy(&branch_regs[i-1],&current,sizeof(current));
9285             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9286           }
9287           else // BC1FL/BC1TL
9288           {
9289             // Alloc the delay slot in case the branch is taken
9290             memcpy(&branch_regs[i-1],&current,sizeof(current));
9291             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9292             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9293             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9294             alloc_cc(&branch_regs[i-1],i);
9295             dirty_reg(&branch_regs[i-1],CCREG);
9296             delayslot_alloc(&branch_regs[i-1],i);
9297             branch_regs[i-1].isconst=0;
9298             alloc_reg(&current,i,CCREG); // Not taken path
9299             dirty_reg(&current,CCREG);
9300             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9301           }
9302           break;
9303       }
9304
9305       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9306       {
9307         if(rt1[i-1]==31) // JAL/JALR
9308         {
9309           // Subroutine call will return here, don't alloc any registers
9310           current.is32=1;
9311           current.dirty=0;
9312           clear_all_regs(current.regmap);
9313           alloc_reg(&current,i,CCREG);
9314           dirty_reg(&current,CCREG);
9315         }
9316         else if(i+1<slen)
9317         {
9318           // Internal branch will jump here, match registers to caller
9319           current.is32=0x3FFFFFFFFLL;
9320           current.dirty=0;
9321           clear_all_regs(current.regmap);
9322           alloc_reg(&current,i,CCREG);
9323           dirty_reg(&current,CCREG);
9324           for(j=i-1;j>=0;j--)
9325           {
9326             if(ba[j]==start+i*4+4) {
9327               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9328               current.is32=branch_regs[j].is32;
9329               current.dirty=branch_regs[j].dirty;
9330               break;
9331             }
9332           }
9333           while(j>=0) {
9334             if(ba[j]==start+i*4+4) {
9335               for(hr=0;hr<HOST_REGS;hr++) {
9336                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9337                   current.regmap[hr]=-1;
9338                 }
9339                 current.is32&=branch_regs[j].is32;
9340                 current.dirty&=branch_regs[j].dirty;
9341               }
9342             }
9343             j--;
9344           }
9345         }
9346       }
9347     }
9348
9349     // Count cycles in between branches
9350     ccadj[i]=cc;
9351     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9352     {
9353       cc=0;
9354     }
9355     else
9356     {
9357       cc++;
9358     }
9359
9360     flush_dirty_uppers(&current);
9361     if(!is_ds[i]) {
9362       regs[i].is32=current.is32;
9363       regs[i].dirty=current.dirty;
9364       regs[i].isconst=current.isconst;
9365       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9366     }
9367     for(hr=0;hr<HOST_REGS;hr++) {
9368       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9369         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9370           regs[i].wasconst&=~(1<<hr);
9371         }
9372       }
9373     }
9374     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9375   }
9376   
9377   /* Pass 4 - Cull unused host registers */
9378   
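  // Pass 4 walks the block backwards keeping a bitmask 'nr' of host
  // registers whose contents are still needed.  Bits are set for host regs
  // holding source operands of the current (and, across branches, the
  // delay-slot) instruction, and cleared for regs about to be overwritten;
  // the result is saved in needed_reg[i] and unneeded mappings are dropped.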
9379   uint64_t nr=0;
9380   
9381   for (i=slen-1;i>=0;i--)
9382   {
9383     int hr;
9384     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9385     {
9386       if(ba[i]<start || ba[i]>=(start+slen*4))
9387       {
9388         // Branch out of this block, don't need anything
9389         nr=0;
9390       }
9391       else
9392       {
9393         // Internal branch
9394         // Need whatever matches the target
9395         nr=0;
9396         int t=(ba[i]-start)>>2;
9397         for(hr=0;hr<HOST_REGS;hr++)
9398         {
9399           if(regs[i].regmap_entry[hr]>=0) {
9400             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9401           }
9402         }
9403       }
9404       // Conditional branch may need registers for following instructions
9405       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9406       {
9407         if(i<slen-2) {
9408           nr|=needed_reg[i+2];
9409           for(hr=0;hr<HOST_REGS;hr++)
9410           {
9411             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9412             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9413           }
9414         }
9415       }
9416           // Don't need stuff which is overwritten (check every host reg)
9417           for(hr=0;hr<HOST_REGS;hr++)
9418             if(regs[i].regmap[hr]<0||regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9419       // Merge in delay slot
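      // The instruction at i+1 is the delay slot of the branch at i: its
      // sources must stay live across the branch, while its destinations
      // only count as overwritten when the branch is not "likely".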
9420       for(hr=0;hr<HOST_REGS;hr++)
9421       {
9422         if(!likely[i]) {
9423           // These are overwritten unless the branch is "likely"
9424           // and the delay slot is nullified if not taken
9425           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9426           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9427         }
9428         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9429         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9430         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9431         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9432         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9433         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9434         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9435         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9436         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9437           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9438           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9439         }
9440         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9441           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9442           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9443         }
9444         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9445           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9446           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9447         }
9448       }
9449     }
9450     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
9451     {
9452       // SYSCALL instruction (software interrupt)
9453       nr=0;
9454     }
9455     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9456     {
9457       // ERET instruction (return from interrupt)
9458       nr=0;
9459     }
9460     else // Non-branch
9461     {
9462       if(i<slen-1) {
9463         for(hr=0;hr<HOST_REGS;hr++) {
9464           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9465           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9466           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9467           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9468         }
9469       }
9470     }
9471     for(hr=0;hr<HOST_REGS;hr++)
9472     {
9473       // Overwritten registers are not needed
9474       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9475       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9476       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9477       // Source registers are needed
9478       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9479       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9480       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9481       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9482       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9483       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9484       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9485       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9486       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9487         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9488         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9489       }
9490       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9491         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9492         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9493       }
9494       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9495         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9496         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9497       }
9498       // Don't store a register immediately after writing it,
9499       // as that may prevent dual-issue.
9500       // But do so if this is a branch target, otherwise we
9501       // might have to load the register before the branch.
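      // (For illustration: on a dual-issue core, a store of a value emitted
      // directly after the instruction that produced it typically cannot be
      // paired with it, so the writeback is deferred unless bt[i] forces it.)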
9502       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9503         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9504            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9505           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9506           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9507         }
9508         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9509            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9510           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9511           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9512         }
9513       }
9514     }
9515     // Cycle count is needed at branches.  Assume it is needed at the target too.
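    // HOST_CCREG caches the cycle counter (CCREG); keep it live here so the
    // count can be updated and tested at branch/target boundaries.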
9516     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9517       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9518       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9519     }
9520     // Save it
9521     needed_reg[i]=nr;
9522     
9523     // Deallocate unneeded registers
9524     for(hr=0;hr<HOST_REGS;hr++)
9525     {
9526       if(!((nr>>hr)&1)) {
9527         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9528         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9529            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9530            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9531         {
9532           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9533           {
9534             if(likely[i]) {
9535               regs[i].regmap[hr]=-1;
9536               regs[i].isconst&=~(1<<hr);
9537               if(i<slen-2) regmap_pre[i+2][hr]=-1;
9538             }
9539           }
9540         }
9541         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9542         {
9543           int d1=0,d2=0,map=0,temp=0;
9544           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9545           {
9546             d1=dep1[i+1];
9547             d2=dep2[i+1];
9548           }
9549           if(using_tlb) {
9550             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9551                itype[i+1]==STORE || itype[i+1]==STORELR ||
9552                itype[i+1]==C1LS || itype[i+1]==C2LS)
9553             map=TLREG;
9554           } else
9555           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9556              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9557             map=INVCP;
9558           }
9559           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9560              itype[i+1]==C1LS || itype[i+1]==C2LS)
9561             temp=FTEMP;
9562           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9563              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9564              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9565              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9566              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9567              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9568              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9569              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9570              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9571              regs[i].regmap[hr]!=map )
9572           {
9573             regs[i].regmap[hr]=-1;
9574             regs[i].isconst&=~(1<<hr);
9575             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9576                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9577                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9578                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9579                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9580                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9581                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9582                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9583                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9584                branch_regs[i].regmap[hr]!=map)
9585             {
9586               branch_regs[i].regmap[hr]=-1;
9587               branch_regs[i].regmap_entry[hr]=-1;
9588               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9589               {
9590                 if(!likely[i]&&i<slen-2) {
9591                   regmap_pre[i+2][hr]=-1;
9592                 }
9593               }
9594             }
9595           }
9596         }
9597         else
9598         {
9599           // Non-branch
9600           if(i>0)
9601           {
9602             int d1=0,d2=0,map=-1,temp=-1;
9603             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9604             {
9605               d1=dep1[i];
9606               d2=dep2[i];
9607             }
9608             if(using_tlb) {
9609               if(itype[i]==LOAD || itype[i]==LOADLR ||
9610                  itype[i]==STORE || itype[i]==STORELR ||
9611                  itype[i]==C1LS || itype[i]==C2LS)
9612               map=TLREG;
9613             } else if(itype[i]==STORE || itype[i]==STORELR ||
9614                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9615               map=INVCP;
9616             }
9617             if(itype[i]==LOADLR || itype[i]==STORELR ||
9618                itype[i]==C1LS || itype[i]==C2LS)
9619               temp=FTEMP;
9620             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9621                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9622                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9623                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9624                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9625                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9626             {
9627               if(i<slen-1&&!is_ds[i]) {
9628                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9629                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9630                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9631                 {
9632                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9633                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9634                 }
9635                 regmap_pre[i+1][hr]=-1;
9636                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9637               }
9638               regs[i].regmap[hr]=-1;
9639               regs[i].isconst&=~(1<<hr);
9640             }
9641           }
9642         }
9643       }
9644     }
9645   }
9646   
9647   /* Pass 5 - Pre-allocate registers */
9648   
9649   // If a register is allocated during a loop, try to allocate it for the
9650   // entire loop, if possible.  This avoids loading/storing registers
9651   // inside of the loop.
9652
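  // f_regmap[] records, per host register, the guest register we would like
  // to keep resident across a loop.  When a backward branch at i targets t,
  // the code below scans t..i for conflicts and, if none are found, fills in
  // the regmap/regmap_entry/regmap_pre chain so the value stays allocated
  // for the whole loop body instead of being reloaded each iteration.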
9653   signed char f_regmap[HOST_REGS];
9654   clear_all_regs(f_regmap);
9655   for(i=0;i<slen-1;i++)
9656   {
9657     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9658     {
9659       if(ba[i]>=start && ba[i]<(start+i*4)) 
9660       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9661       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9662       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9663       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9664       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9665       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9666       {
9667         int t=(ba[i]-start)>>2;
9668         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9669         if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9670         for(hr=0;hr<HOST_REGS;hr++)
9671         {
9672           if(regs[i].regmap[hr]>64) {
9673             if(!((regs[i].dirty>>hr)&1))
9674               f_regmap[hr]=regs[i].regmap[hr];
9675             else f_regmap[hr]=-1;
9676           }
9677           else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9678           if(branch_regs[i].regmap[hr]>64) {
9679             if(!((branch_regs[i].dirty>>hr)&1))
9680               f_regmap[hr]=branch_regs[i].regmap[hr];
9681             else f_regmap[hr]=-1;
9682           }
9683           else if(branch_regs[i].regmap[hr]>=0) f_regmap[hr]=branch_regs[i].regmap[hr];
9684           if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9685           ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9686           ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9687           ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9688           {
9689             // Test both in case the delay slot is assembled out of order;
9690             // could be done better...
9691             if(count_free_regs(branch_regs[i].regmap)<2
9692              ||count_free_regs(regs[i].regmap)<2) 
9693               f_regmap[hr]=branch_regs[i].regmap[hr];
9694           }
9695           // Avoid dirty->clean transition
9696           // #ifdef DESTRUCTIVE_WRITEBACK here?
9697           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9698           if(f_regmap[hr]>0) {
9699             if(regs[t].regmap_entry[hr]<0) {
9700               int r=f_regmap[hr];
9701               for(j=t;j<=i;j++)
9702               {
9703                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9704                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9705                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9706                 if(r>63) {
9707                   // NB This can exclude the case where the upper-half
9708                   // register is lower numbered than the lower-half
9709                   // register.  Not sure if it's worth fixing...
9710                   if(get_reg(regs[j].regmap,r&63)<0) break;
9711                   if(regs[j].is32&(1LL<<(r&63))) break;
9712                 }
9713                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9714                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9715                   int k;
9716                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9717                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9718                     if(r>63) {
9719                       if(get_reg(regs[i].regmap,r&63)<0) break;
9720                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9721                     }
9722                     k=i;
9723                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9724                       if(itype[k-1]==STORE||itype[k-1]==STORELR
9725                       ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
9726                       ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9727                       ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9728                         if(count_free_regs(regs[k-1].regmap)<2) {
9729                           //printf("no free regs for store %x\n",start+(k-1)*4);
9730                           break;
9731                         }
9732                       }
9733                       else
9734                       if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9735                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9736                         //printf("no-match due to different register\n");
9737                         break;
9738                       }
9739                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9740                         //printf("no-match due to branch\n");
9741                         break;
9742                       }
9743                       // call/ret fast path assumes no registers allocated
9744                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9745                         break;
9746                       }
9747                       if(r>63) {
9748                         // NB This can exclude the case where the upper-half
9749                         // register is lower numbered than the lower-half
9750                         // register.  Not sure if it's worth fixing...
9751                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9752                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9753                       }
9754                       k--;
9755                     }
9756                     if(i<slen-1) {
9757                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9758                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9759                         //printf("bad match after branch\n");
9760                         break;
9761                       }
9762                     }
9763                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9764                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9765                       while(k<i) {
9766                         regs[k].regmap_entry[hr]=f_regmap[hr];
9767                         regs[k].regmap[hr]=f_regmap[hr];
9768                         regmap_pre[k+1][hr]=f_regmap[hr];
9769                         regs[k].wasdirty&=~(1<<hr);
9770                         regs[k].dirty&=~(1<<hr);
9771                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9772                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9773                         regs[k].wasconst&=~(1<<hr);
9774                         regs[k].isconst&=~(1<<hr);
9775                         k++;
9776                       }
9777                     }
9778                     else {
9779                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9780                       break;
9781                     }
9782                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9783                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9784                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9785                       regs[i].regmap_entry[hr]=f_regmap[hr];
9786                       regs[i].regmap[hr]=f_regmap[hr];
9787                       regs[i].wasdirty&=~(1<<hr);
9788                       regs[i].dirty&=~(1<<hr);
9789                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9790                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9791                       regs[i].wasconst&=~(1<<hr);
9792                       regs[i].isconst&=~(1<<hr);
9793                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9794                       branch_regs[i].wasdirty&=~(1<<hr);
9795                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9796                       branch_regs[i].regmap[hr]=f_regmap[hr];
9797                       branch_regs[i].dirty&=~(1<<hr);
9798                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9799                       branch_regs[i].wasconst&=~(1<<hr);
9800                       branch_regs[i].isconst&=~(1<<hr);
9801                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9802                         regmap_pre[i+2][hr]=f_regmap[hr];
9803                         regs[i+2].wasdirty&=~(1<<hr);
9804                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9805                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9806                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9807                       }
9808                     }
9809                   }
9810                   for(k=t;k<j;k++) {
9811                     regs[k].regmap_entry[hr]=f_regmap[hr];
9812                     regs[k].regmap[hr]=f_regmap[hr];
9813                     regmap_pre[k+1][hr]=f_regmap[hr];
9814                     regs[k+1].wasdirty&=~(1<<hr);
9815                     regs[k].dirty&=~(1<<hr);
9816                     regs[k].wasconst&=~(1<<hr);
9817                     regs[k].isconst&=~(1<<hr);
9818                   }
9819                   if(regs[j].regmap[hr]==f_regmap[hr])
9820                     regs[j].regmap_entry[hr]=f_regmap[hr];
9821                   break;
9822                 }
9823                 if(j==i) break;
9824                 if(regs[j].regmap[hr]>=0)
9825                   break;
9826                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9827                   //printf("no-match due to different register\n");
9828                   break;
9829                 }
9830                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9831                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9832                   break;
9833                 }
9834                 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9835                 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9836                 ||itype[j]==FCOMP||itype[j]==FCONV
9837                 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9838                   if(count_free_regs(regs[j].regmap)<2) {
9839                     //printf("No free regs for store %x\n",start+j*4);
9840                     break;
9841                   }
9842                 }
9843                 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9844                 if(f_regmap[hr]>=64) {
9845                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9846                     break;
9847                   }
9848                   else
9849                   {
9850                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9851                       break;
9852                     }
9853                   }
9854                 }
9855               }
9856             }
9857           }
9858         }
9859       }
9860     }else{
9861       int count=0;
9862       for(hr=0;hr<HOST_REGS;hr++)
9863       {
9864         if(hr!=EXCLUDE_REG) {
9865           if(regs[i].regmap[hr]>64) {
9866             if(!((regs[i].dirty>>hr)&1))
9867               f_regmap[hr]=regs[i].regmap[hr];
9868           }
9869           else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9870           else if(regs[i].regmap[hr]<0) count++;
9871         }
9872       }
9873       // Try to restore cycle count at branch targets
9874       if(bt[i]) {
9875         for(j=i;j<slen-1;j++) {
9876           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9877           if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9878           ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9879           ||itype[j]==FCOMP||itype[j]==FCONV
9880           ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9881             if(count_free_regs(regs[j].regmap)<2) {
9882               //printf("no free regs for store %x\n",start+j*4);
9883               break;
9884             }
9885           }
9886           else
9887           if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9888         }
9889         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9890           int k=i;
9891           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9892           while(k<j) {
9893             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9894             regs[k].regmap[HOST_CCREG]=CCREG;
9895             regmap_pre[k+1][HOST_CCREG]=CCREG;
9896             regs[k+1].wasdirty|=1<<HOST_CCREG;
9897             regs[k].dirty|=1<<HOST_CCREG;
9898             regs[k].wasconst&=~(1<<HOST_CCREG);
9899             regs[k].isconst&=~(1<<HOST_CCREG);
9900             k++;
9901           }
9902           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
9903         }
9904         // Work backwards from the branch target
9905         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9906         {
9907           //printf("Extend backwards\n");
9908           int k;
9909           k=i;
9910           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9911             if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
9912             ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
9913             ||itype[k-1]==FCONV||itype[k-1]==FCOMP
9914             ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9915               if(count_free_regs(regs[k-1].regmap)<2) {
9916                 //printf("no free regs for store %x\n",start+(k-1)*4);
9917                 break;
9918               }
9919             }
9920             else
9921             if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9922             k--;
9923           }
9924           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9925             //printf("Extend CC, %x ->\n",start+k*4);
9926             while(k<=i) {
9927               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9928               regs[k].regmap[HOST_CCREG]=CCREG;
9929               regmap_pre[k+1][HOST_CCREG]=CCREG;
9930               regs[k+1].wasdirty|=1<<HOST_CCREG;
9931               regs[k].dirty|=1<<HOST_CCREG;
9932               regs[k].wasconst&=~(1<<HOST_CCREG);
9933               regs[k].isconst&=~(1<<HOST_CCREG);
9934               k++;
9935             }
9936           }
9937           else {
9938             //printf("Fail Extend CC, %x ->\n",start+k*4);
9939           }
9940         }
9941       }
9942       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9943          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9944          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9945          itype[i]!=FCONV&&itype[i]!=FCOMP&&
9946          itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
9947       {
9948         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9949       }
9950     }
9951   }
9952   
9953   // This allocates registers (if possible) one instruction prior
9954   // to use, which can avoid a load-use penalty on certain CPUs.
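  // A sketch of the idea: if instruction i+1 reads a guest register that is
  // not yet resident, its host-register mapping is copied back into slot i,
  // so the load is emitted one instruction earlier and the consumer no
  // longer sits directly behind it (the classic load-use hazard).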
9955   for(i=0;i<slen-1;i++)
9956   {
9957     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9958     {
9959       if(!bt[i+1])
9960       {
9961         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9962            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9963         {
9964           if(rs1[i+1]) {
9965             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9966             {
9967               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9968               {
9969                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9970                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9971                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9972                 regs[i].isconst&=~(1<<hr);
9973                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9974                 constmap[i][hr]=constmap[i+1][hr];
9975                 regs[i+1].wasdirty&=~(1<<hr);
9976                 regs[i].dirty&=~(1<<hr);
9977               }
9978             }
9979           }
9980           if(rs2[i+1]) {
9981             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9982             {
9983               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9984               {
9985                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9986                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9987                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9988                 regs[i].isconst&=~(1<<hr);
9989                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9990                 constmap[i][hr]=constmap[i+1][hr];
9991                 regs[i+1].wasdirty&=~(1<<hr);
9992                 regs[i].dirty&=~(1<<hr);
9993               }
9994             }
9995           }
9996           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9997             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9998             {
9999               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10000               {
10001                 regs[i].regmap[hr]=rs1[i+1];
10002                 regmap_pre[i+1][hr]=rs1[i+1];
10003                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10004                 regs[i].isconst&=~(1<<hr);
10005                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10006                 constmap[i][hr]=constmap[i+1][hr];
10007                 regs[i+1].wasdirty&=~(1<<hr);
10008                 regs[i].dirty&=~(1<<hr);
10009               }
10010             }
10011           }
10012           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10013             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10014             {
10015               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10016               {
10017                 regs[i].regmap[hr]=rs1[i+1];
10018                 regmap_pre[i+1][hr]=rs1[i+1];
10019                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10020                 regs[i].isconst&=~(1<<hr);
10021                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10022                 constmap[i][hr]=constmap[i+1][hr];
10023                 regs[i+1].wasdirty&=~(1<<hr);
10024                 regs[i].dirty&=~(1<<hr);
10025               }
10026             }
10027           }
10028           #ifndef HOST_IMM_ADDR32
10029           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10030             hr=get_reg(regs[i+1].regmap,TLREG);
10031             if(hr>=0) {
10032               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10033               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10034                 int nr;
10035                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10036                 {
10037                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10038                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10039                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10040                   regs[i].isconst&=~(1<<hr);
10041                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10042                   constmap[i][hr]=constmap[i+1][hr];
10043                   regs[i+1].wasdirty&=~(1<<hr);
10044                   regs[i].dirty&=~(1<<hr);
10045                 }
10046                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10047                 {
10048                   // move it to another register
10049                   regs[i+1].regmap[hr]=-1;
10050                   regmap_pre[i+2][hr]=-1;
10051                   regs[i+1].regmap[nr]=TLREG;
10052                   regmap_pre[i+2][nr]=TLREG;
10053                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10054                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10055                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10056                   regs[i].isconst&=~(1<<nr);
10057                   regs[i+1].isconst&=~(1<<nr);
10058                   regs[i].dirty&=~(1<<nr);
10059                   regs[i+1].wasdirty&=~(1<<nr);
10060                   regs[i+1].dirty&=~(1<<nr);
10061                   regs[i+2].wasdirty&=~(1<<nr);
10062                 }
10063               }
10064             }
10065           }
10066           #endif
10067           if(itype[i+1]==STORE||itype[i+1]==STORELR
10068              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10069             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10070               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10071               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10072               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10073               assert(hr>=0);
10074               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10075               {
10076                 regs[i].regmap[hr]=rs1[i+1];
10077                 regmap_pre[i+1][hr]=rs1[i+1];
10078                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10079                 regs[i].isconst&=~(1<<hr);
10080                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10081                 constmap[i][hr]=constmap[i+1][hr];
10082                 regs[i+1].wasdirty&=~(1<<hr);
10083                 regs[i].dirty&=~(1<<hr);
10084               }
10085             }
10086           }
10087           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10088             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10089               int nr;
10090               hr=get_reg(regs[i+1].regmap,FTEMP);
10091               assert(hr>=0);
10092               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10093               {
10094                 regs[i].regmap[hr]=rs1[i+1];
10095                 regmap_pre[i+1][hr]=rs1[i+1];
10096                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10097                 regs[i].isconst&=~(1<<hr);
10098                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10099                 constmap[i][hr]=constmap[i+1][hr];
10100                 regs[i+1].wasdirty&=~(1<<hr);
10101                 regs[i].dirty&=~(1<<hr);
10102               }
10103               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10104               {
10105                 // move it to another register
10106                 regs[i+1].regmap[hr]=-1;
10107                 regmap_pre[i+2][hr]=-1;
10108                 regs[i+1].regmap[nr]=FTEMP;
10109                 regmap_pre[i+2][nr]=FTEMP;
10110                 regs[i].regmap[nr]=rs1[i+1];
10111                 regmap_pre[i+1][nr]=rs1[i+1];
10112                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10113                 regs[i].isconst&=~(1<<nr);
10114                 regs[i+1].isconst&=~(1<<nr);
10115                 regs[i].dirty&=~(1<<nr);
10116                 regs[i+1].wasdirty&=~(1<<nr);
10117                 regs[i+1].dirty&=~(1<<nr);
10118                 regs[i+2].wasdirty&=~(1<<nr);
10119               }
10120             }
10121           }
10122           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10123             if(itype[i+1]==LOAD) 
10124               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10125             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10126               hr=get_reg(regs[i+1].regmap,FTEMP);
10127             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10128               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10129               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10130             }
10131             if(hr>=0&&regs[i].regmap[hr]<0) {
10132               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10133               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10134                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10135                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10136                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10137                 regs[i].isconst&=~(1<<hr);
10138                 regs[i+1].wasdirty&=~(1<<hr);
10139                 regs[i].dirty&=~(1<<hr);
10140               }
10141             }
10142           }
10143         }
10144       }
10145     }
10146   }
10147   
10148   /* Pass 6 - Optimize clean/dirty state */
10149   clean_registers(0,slen-1,1);
10150   
10151   /* Pass 7 - Identify 32-bit registers */
10152   
10153   provisional_r32();
10154
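  // requires_32bit[i] collects, again scanning backwards, the guest registers
  // that must be in 32-bit (sign-extended) form at instruction i; branches
  // merge in the requirements of their target and delay slot, and writes
  // (rt1/rt2) clear the corresponding bits.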
10155   uint64_t r32=0; // needs 64 bits: HI/LO/CC map to bits >= 32
10156   
10157   for (i=slen-1;i>=0;i--)
10158   {
10159     int hr;
10160     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10161     {
10162       if(ba[i]<start || ba[i]>=(start+slen*4))
10163       {
10164         // Branch out of this block, don't need anything
10165         r32=0;
10166       }
10167       else
10168       {
10169         // Internal branch
10170         // Need whatever matches the target
10171         // (and doesn't get overwritten by the delay slot instruction)
10172         r32=0;
10173         int t=(ba[i]-start)>>2;
10174         if(ba[i]>start+i*4) {
10175           // Forward branch
10176           if(!(requires_32bit[t]&~regs[i].was32))
10177             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10178         }else{
10179           // Backward branch
10180           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10181           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10182           if(!(pr32[t]&~regs[i].was32))
10183             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10184         }
10185       }
10186       // Conditional branch may need registers for following instructions
10187       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10188       {
10189         if(i<slen-2) {
10190           r32|=requires_32bit[i+2];
10191           r32&=regs[i].was32;
10192           // Mark this address as a branch target since it may be called
10193           // upon return from interrupt
10194           bt[i+2]=1;
10195         }
10196       }
10197       // Merge in delay slot
10198       if(!likely[i]) {
10199         // These are overwritten unless the branch is "likely"
10200         // and the delay slot is nullified if not taken
10201         r32&=~(1LL<<rt1[i+1]);
10202         r32&=~(1LL<<rt2[i+1]);
10203       }
10204       // Assume these are needed (delay slot)
10205       if(us1[i+1]>0)
10206       {
10207         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10208       }
10209       if(us2[i+1]>0)
10210       {
10211         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10212       }
10213       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10214       {
10215         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10216       }
10217       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10218       {
10219         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10220       }
10221     }
10222     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
10223     {
10224       // SYSCALL instruction (software interrupt)
10225       r32=0;
10226     }
10227     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10228     {
10229       // ERET instruction (return from interrupt)
10230       r32=0;
10231     }
10232     // Check 32 bits
10233     r32&=~(1LL<<rt1[i]);
10234     r32&=~(1LL<<rt2[i]);
10235     if(us1[i]>0)
10236     {
10237       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10238     }
10239     if(us2[i]>0)
10240     {
10241       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10242     }
10243     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10244     {
10245       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10246     }
10247     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10248     {
10249       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10250     }
10251     requires_32bit[i]=r32;
10252     
10253     // Dirty registers which are 32-bit require 32-bit input,
10254     // as they will be written as 32-bit values
10255     for(hr=0;hr<HOST_REGS;hr++)
10256     {
10257       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10258         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10259           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10260           requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10261         }
10262       }
10263     }
10264     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10265   }
10266
10267   if(itype[slen-1]==SPAN) {
10268     bt[slen-1]=1; // Mark as a branch target so the instruction can restart after an exception
10269   }
10270   
10271   /* Debug/disassembly */
10272   if((void*)assem_debug==(void*)printf) 
10273   for(i=0;i<slen;i++)
10274   {
10275     printf("U:");
10276     int r;
10277     for(r=1;r<=CCREG;r++) {
10278       if((unneeded_reg[i]>>r)&1) {
10279         if(r==HIREG) printf(" HI");
10280         else if(r==LOREG) printf(" LO");
10281         else printf(" r%d",r);
10282       }
10283     }
10284 #ifndef FORCE32
10285     printf(" UU:");
10286     for(r=1;r<=CCREG;r++) {
10287       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10288         if(r==HIREG) printf(" HI");
10289         else if(r==LOREG) printf(" LO");
10290         else printf(" r%d",r);
10291       }
10292     }
10293     printf(" 32:");
10294     for(r=0;r<=CCREG;r++) {
10295       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10296       if((regs[i].was32>>r)&1) {
10297         if(r==CCREG) printf(" CC");
10298         else if(r==HIREG) printf(" HI");
10299         else if(r==LOREG) printf(" LO");
10300         else printf(" r%d",r);
10301       }
10302     }
10303 #endif
10304     printf("\n");
10305     #if defined(__i386__) || defined(__x86_64__)
10306     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10307     #endif
10308     #ifdef __arm__
10309     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10310     #endif
10311     printf("needs: ");
10312     if(needed_reg[i]&1) printf("eax ");
10313     if((needed_reg[i]>>1)&1) printf("ecx ");
10314     if((needed_reg[i]>>2)&1) printf("edx ");
10315     if((needed_reg[i]>>3)&1) printf("ebx ");
10316     if((needed_reg[i]>>5)&1) printf("ebp ");
10317     if((needed_reg[i]>>6)&1) printf("esi ");
10318     if((needed_reg[i]>>7)&1) printf("edi ");
10319     printf("r:");
10320     for(r=0;r<=CCREG;r++) {
10321       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10322       if((requires_32bit[i]>>r)&1) {
10323         if(r==CCREG) printf(" CC");
10324         else if(r==HIREG) printf(" HI");
10325         else if(r==LOREG) printf(" LO");
10326         else printf(" r%d",r);
10327       }
10328     }
10329     printf("\n");
10330     /*printf("pr:");
10331     for(r=0;r<=CCREG;r++) {
10332       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10333       if((pr32[i]>>r)&1) {
10334         if(r==CCREG) printf(" CC");
10335         else if(r==HIREG) printf(" HI");
10336         else if(r==LOREG) printf(" LO");
10337         else printf(" r%d",r);
10338       }
10339     }
10340     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10341     printf("\n");*/
10342     #if defined(__i386__) || defined(__x86_64__)
10343     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10344     printf("dirty: ");
10345     if(regs[i].wasdirty&1) printf("eax ");
10346     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10347     if((regs[i].wasdirty>>2)&1) printf("edx ");
10348     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10349     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10350     if((regs[i].wasdirty>>6)&1) printf("esi ");
10351     if((regs[i].wasdirty>>7)&1) printf("edi ");
10352     #endif
10353     #ifdef __arm__
10354     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10355     printf("dirty: ");
10356     if(regs[i].wasdirty&1) printf("r0 ");
10357     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10358     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10359     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10360     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10361     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10362     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10363     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10364     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10365     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10366     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10367     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10368     #endif
10369     printf("\n");
10370     disassemble_inst(i);
10371     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10372     #if defined(__i386__) || defined(__x86_64__)
10373     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10374     if(regs[i].dirty&1) printf("eax ");
10375     if((regs[i].dirty>>1)&1) printf("ecx ");
10376     if((regs[i].dirty>>2)&1) printf("edx ");
10377     if((regs[i].dirty>>3)&1) printf("ebx ");
10378     if((regs[i].dirty>>5)&1) printf("ebp ");
10379     if((regs[i].dirty>>6)&1) printf("esi ");
10380     if((regs[i].dirty>>7)&1) printf("edi ");
10381     #endif
10382     #ifdef __arm__
10383     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10384     if(regs[i].dirty&1) printf("r0 ");
10385     if((regs[i].dirty>>1)&1) printf("r1 ");
10386     if((regs[i].dirty>>2)&1) printf("r2 ");
10387     if((regs[i].dirty>>3)&1) printf("r3 ");
10388     if((regs[i].dirty>>4)&1) printf("r4 ");
10389     if((regs[i].dirty>>5)&1) printf("r5 ");
10390     if((regs[i].dirty>>6)&1) printf("r6 ");
10391     if((regs[i].dirty>>7)&1) printf("r7 ");
10392     if((regs[i].dirty>>8)&1) printf("r8 ");
10393     if((regs[i].dirty>>9)&1) printf("r9 ");
10394     if((regs[i].dirty>>10)&1) printf("r10 ");
10395     if((regs[i].dirty>>12)&1) printf("r12 ");
10396     #endif
10397     printf("\n");
10398     if(regs[i].isconst) {
10399       printf("constants: ");
10400       #if defined(__i386__) || defined(__x86_64__)
10401       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10402       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10403       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10404       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10405       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10406       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10407       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10408       #endif
10409       #ifdef __arm__
10410       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10411       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10412       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10413       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10414       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10415       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10416       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10417       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10418       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10419       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10420       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10421       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10422       #endif
10423       printf("\n");
10424     }
10425 #ifndef FORCE32
10426     printf(" 32:");
10427     for(r=0;r<=CCREG;r++) {
10428       if((regs[i].is32>>r)&1) {
10429         if(r==CCREG) printf(" CC");
10430         else if(r==HIREG) printf(" HI");
10431         else if(r==LOREG) printf(" LO");
10432         else printf(" r%d",r);
10433       }
10434     }
10435     printf("\n");
10436 #endif
10437     /*printf(" p32:");
10438     for(r=0;r<=CCREG;r++) {
10439       if((p32[i]>>r)&1) {
10440         if(r==CCREG) printf(" CC");
10441         else if(r==HIREG) printf(" HI");
10442         else if(r==LOREG) printf(" LO");
10443         else printf(" r%d",r);
10444       }
10445     }
10446     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10447     else printf("\n");*/
10448     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10449       #if defined(__i386__) || defined(__x86_64__)
10450       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10451       if(branch_regs[i].dirty&1) printf("eax ");
10452       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10453       if((branch_regs[i].dirty>>2)&1) printf("edx ");
10454       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10455       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10456       if((branch_regs[i].dirty>>6)&1) printf("esi ");
10457       if((branch_regs[i].dirty>>7)&1) printf("edi ");
10458       #endif
10459       #ifdef __arm__
10460       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10461       if(branch_regs[i].dirty&1) printf("r0 ");
10462       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10463       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10464       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10465       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10466       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10467       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10468       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10469       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10470       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10471       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10472       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10473       #endif
10474 #ifndef FORCE32
10475       printf(" 32:");
10476       for(r=0;r<=CCREG;r++) {
10477         if((branch_regs[i].is32>>r)&1) {
10478           if(r==CCREG) printf(" CC");
10479           else if(r==HIREG) printf(" HI");
10480           else if(r==LOREG) printf(" LO");
10481           else printf(" r%d",r);
10482         }
10483       }
10484       printf("\n");
10485 #endif
10486     }
10487   }
10488
10489   /* Pass 8 - Assembly */
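  // This pass walks the block in order and emits native code for each
  // instruction: write back whatever the previous mapping held dirty,
  // load the sources needed here, then dispatch to the per-type
  // assembler (*_assemble).  Branch instructions set ds so the
  // following delay slot is assembled as part of the branch instead of
  // getting its own entry point.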
10490   linkcount=0;stubcount=0;
10491   ds=0;is_delayslot=0;
10492   cop1_usable=0;
10493   uint64_t is32_pre=0;
10494   u_int dirty_pre=0;
10495   u_int beginning=(u_int)out;
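  // A start address with the low bit set presumably marks the delay
  // slot of a branch that spans a page boundary; pagespan_ds() below
  // emits the special entry for that case before the main loop.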
10496   if((u_int)addr&1) {
10497     ds=1;
10498     pagespan_ds();
10499   }
10500   for(i=0;i<slen;i++)
10501   {
10502     //if(ds) printf("ds: ");
10503     if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10504     if(ds) {
10505       ds=0; // Skip delay slot
10506       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10507       instr_addr[i]=0;
10508     } else {
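      // If the instruction two back was an unconditional jump, control
      // cannot fall through to this point (it is only reachable as a
      // branch target), so there appears to be nothing from the previous
      // mapping left to flush.  (source[i-2]>>16)==0x1000 matches the
      // always-taken "beq $zero,$zero" idiom.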
10509       #ifndef DESTRUCTIVE_WRITEBACK
10510       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10511       {
10512         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10513               unneeded_reg[i],unneeded_reg_upper[i]);
10514         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10515               unneeded_reg[i],unneeded_reg_upper[i]);
10516       }
10517       is32_pre=regs[i].is32;
10518       dirty_pre=regs[i].dirty;
10519       #endif
10520       // Write back dirty registers that are dropped from this instruction's entry mapping
10521       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10522       {
10523         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10524                       unneeded_reg[i],unneeded_reg_upper[i]);
10525         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10526       }
10527       // branch target entry point
10528       instr_addr[i]=(u_int)out;
10529       assem_debug("<->\n");
10530       // Load the source registers this instruction reads
10531       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10532         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10533       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10534       address_generation(i,&regs[i],regs[i].regmap_entry);
10535       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
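      // For branches, also preload what the delay slot will read so it
      // can be assembled inline without extra loads between the compare
      // and the conditional jump.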
10536       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10537       {
10538         // Load the delay slot registers if necessary
10539         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10540           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10541         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10542           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10543         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10544           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10545       }
10546       else if(i+1<slen)
10547       {
10548         // Preload registers for following instruction
10549         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10550           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10551             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10552         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10553           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10554             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10555       }
10556       // TODO: if(is_ooo(i)) address_generation(i+1);
10557       if(itype[i]==CJUMP||itype[i]==FJUMP)
10558         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
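      // Stores (including SWC1/SDC1 and SWC2/SDC2, matched by the masked
      // opcode test below) also get INVCP, presumably a pointer to the
      // invalid_code table, so the store path can detect writes into
      // pages that hold compiled code.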
10559       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10560         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10561       if(bt[i]) cop1_usable=0;
10562       // assemble
10563       switch(itype[i]) {
10564         case ALU:
10565           alu_assemble(i,&regs[i]);break;
10566         case IMM16:
10567           imm16_assemble(i,&regs[i]);break;
10568         case SHIFT:
10569           shift_assemble(i,&regs[i]);break;
10570         case SHIFTIMM:
10571           shiftimm_assemble(i,&regs[i]);break;
10572         case LOAD:
10573           load_assemble(i,&regs[i]);break;
10574         case LOADLR:
10575           loadlr_assemble(i,&regs[i]);break;
10576         case STORE:
10577           store_assemble(i,&regs[i]);break;
10578         case STORELR:
10579           storelr_assemble(i,&regs[i]);break;
10580         case COP0:
10581           cop0_assemble(i,&regs[i]);break;
10582         case COP1:
10583           cop1_assemble(i,&regs[i]);break;
10584         case C1LS:
10585           c1ls_assemble(i,&regs[i]);break;
10586         case COP2:
10587           cop2_assemble(i,&regs[i]);break;
10588         case C2LS:
10589           c2ls_assemble(i,&regs[i]);break;
10590         case C2OP:
10591           c2op_assemble(i,&regs[i]);break;
10592         case FCONV:
10593           fconv_assemble(i,&regs[i]);break;
10594         case FLOAT:
10595           float_assemble(i,&regs[i]);break;
10596         case FCOMP:
10597           fcomp_assemble(i,&regs[i]);break;
10598         case MULTDIV:
10599           multdiv_assemble(i,&regs[i]);break;
10600         case MOV:
10601           mov_assemble(i,&regs[i]);break;
10602         case SYSCALL:
10603           syscall_assemble(i,&regs[i]);break;
10604         case HLECALL:
10605           hlecall_assemble(i,&regs[i]);break;
10606         case UJUMP:
10607           ujump_assemble(i,&regs[i]);ds=1;break;
10608         case RJUMP:
10609           rjump_assemble(i,&regs[i]);ds=1;break;
10610         case CJUMP:
10611           cjump_assemble(i,&regs[i]);ds=1;break;
10612         case SJUMP:
10613           sjump_assemble(i,&regs[i]);ds=1;break;
10614         case FJUMP:
10615           fjump_assemble(i,&regs[i]);ds=1;break;
10616         case SPAN:
10617           pagespan_assemble(i,&regs[i]);break;
10618       }
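      // On ARM the constants referenced above live in literal pools in
      // the code stream.  After an unconditional transfer the pool can
      // simply be dumped here; otherwise, if it is close to the limited
      // reach of pc-relative loads, a branch is emitted around it first.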
10619       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10620         literal_pool(1024);
10621       else
10622         literal_pool_jumpover(256);
10623     }
10624   }
10625   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10626   // If the block did not end with an unconditional branch,
10627   // add a jump to the next instruction.
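  // Register state is stored for the continuation address, the cycle
  // counter is advanced (CLOCK_DIVIDER*(ccadj[i-1]+1)) where the
  // preceding code has not already done so, and the jump is recorded
  // with add_to_linker() so pass 9 can patch it to the next block or to
  // an exit stub if that block is not compiled yet.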
10628   if(i>1) {
10629     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10630       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10631       assert(i==slen);
10632       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10633         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10634         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10635           emit_loadreg(CCREG,HOST_CCREG);
10636         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10637       }
10638       else if(!likely[i-2])
10639       {
10640         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10641         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10642       }
10643       else
10644       {
10645         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10646         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10647       }
10648       add_to_linker((int)out,start+i*4,0);
10649       emit_jmp(0);
10650     }
10651   }
10652   else
10653   {
10654     assert(i>0);
10655     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10656     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10657     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10658       emit_loadreg(CCREG,HOST_CCREG);
10659     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10660     add_to_linker((int)out,start+i*4,0);
10661     emit_jmp(0);
10662   }
10663
10664   // TODO: delay slot stubs?
10665   // Stubs
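  // Out-of-line slow paths recorded during assembly: loads/stores that
  // miss the fast path, cycle-count interrupt checks, invalid-code
  // checks for stores, coprocessor-unusable traps and unaligned writes.
  // Emitting them after the block keeps the hot path straight-line.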
10666   for(i=0;i<stubcount;i++)
10667   {
10668     switch(stubs[i][0])
10669     {
10670       case LOADB_STUB:
10671       case LOADH_STUB:
10672       case LOADW_STUB:
10673       case LOADD_STUB:
10674       case LOADBU_STUB:
10675       case LOADHU_STUB:
10676         do_readstub(i);break;
10677       case STOREB_STUB:
10678       case STOREH_STUB:
10679       case STOREW_STUB:
10680       case STORED_STUB:
10681         do_writestub(i);break;
10682       case CC_STUB:
10683         do_ccstub(i);break;
10684       case INVCODE_STUB:
10685         do_invstub(i);break;
10686       case FP_STUB:
10687         do_cop1stub(i);break;
10688       case STORELR_STUB:
10689         do_unalignedwritestub(i);break;
10690     }
10691   }
10692
10693   /* Pass 9 - Linker */
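  // Resolve the branches recorded by add_to_linker().  External targets
  // are patched directly when the target block already exists
  // (check_addr), otherwise they go through an emit_extjump stub that
  // looks the target up at run time.  Internal branches are patched to
  // the address recorded in instr_addr[].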
10694   for(i=0;i<linkcount;i++)
10695   {
10696     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10697     literal_pool(64);
10698     if(!link_addr[i][2])
10699     {
10700       void *stub=out;
10701       void *addr=check_addr(link_addr[i][1]);
10702       emit_extjump(link_addr[i][0],link_addr[i][1]);
10703       if(addr) {
10704         set_jump_target(link_addr[i][0],(int)addr);
10705         add_link(link_addr[i][1],stub);
10706       }
10707       else set_jump_target(link_addr[i][0],(int)stub);
10708     }
10709     else
10710     {
10711       // Internal branch
10712       int target=(link_addr[i][1]-start)>>2;
10713       assert(target>=0&&target<slen);
10714       assert(instr_addr[target]);
10715       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10716       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10717       //#else
10718       set_jump_target(link_addr[i][0],instr_addr[target]);
10719       //#endif
10720     }
10721   }
10722   // External Branch Targets (jump_in)
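  // "copy" points into the shadow buffer holding a copy of the original
  // MIPS code for this block; the dirty stubs emitted below compare
  // against it to check whether the source has since been overwritten.
  // Wrap to the start of the buffer if this block would run off the end.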
10723   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10724   for(i=0;i<slen;i++)
10725   {
10726     if(bt[i]||i==0)
10727     {
10728       if(instr_addr[i]) // TODO - delay slots (=null)
10729       {
10730         u_int vaddr=start+i*4;
10731         u_int page=get_page(vaddr);
10732         u_int vpage=get_vpage(vaddr);
10733         literal_pool(256);
10734         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10735         if(!requires_32bit[i])
10736         {
10737           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10738           assem_debug("jump_in: %x\n",start+i*4);
10739           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10740           int entry_point=do_dirty_stub(i);
10741           ll_add(jump_in+page,vaddr,(void *)entry_point);
10742           // If there was an existing entry in the hash table,
10743           // replace it with the new address.
10744           // Don't add new entries.  We'll insert the
10745           // ones that actually get used in check_addr().
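          // Each hash bin holds two (vaddr, entry point) pairs:
          //   ht_bin[0]/ht_bin[1] = most recently used,
          //   ht_bin[2]/ht_bin[3] = previous occupant.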
10746           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10747           if(ht_bin[0]==vaddr) {
10748             ht_bin[1]=entry_point;
10749           }
10750           if(ht_bin[2]==vaddr) {
10751             ht_bin[3]=entry_point;
10752           }
10753         }
10754         else
10755         {
10756           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10757           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10758           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10759           //int entry_point=(int)out;
10760           ////assem_debug("entry_point: %x\n",entry_point);
10761           //load_regs_entry(i);
10762           //if(entry_point==(int)out)
10763           //  entry_point=instr_addr[i];
10764           //else
10765           //  emit_jmp(instr_addr[i]);
10766           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10767           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10768           int entry_point=do_dirty_stub(i);
10769           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10770         }
10771       }
10772     }
10773   }
10774   // Write out the literal pool if necessary
10775   literal_pool(0);
10776   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10777   // Align code to an 8-byte boundary (Cortex-A8 branch predictor workaround)
10778   if(((u_int)out)&7) emit_addnop(13);
10779   #endif
10780   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10781   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10782   memcpy(copy,source,slen*4);
10783   copy+=slen*4;
10784   
10785   #ifdef __arm__
10786   __clear_cache((void *)beginning,out);
10787   #endif
10788   
10789   // If we're within 256K of the end of the buffer,
10790   // start over from the beginning. (Is 256K enough?)
10791   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10792   
10793   // Trap writes to any of the pages we compiled
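  // invalid_code[page]==0 marks the page as containing compiled code,
  // so the store paths (checked through INVCP) will invalidate the
  // affected blocks if the game writes over them; the 0x40000000 bit in
  // memory_map presumably serves the same purpose on the TLB path.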
10794   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10795     invalid_code[i]=0;
10796 #ifndef DISABLE_TLB
10797     memory_map[i]|=0x40000000;
10798     if((signed int)start>=(signed int)0xC0000000) {
10799       assert(using_tlb);
10800       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
10801       invalid_code[j]=0;
10802       memory_map[j]|=0x40000000;
10803       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
10804     }
10805 #endif
10806   }
10807   
10808   /* Pass 10 - Free memory by expiring oldest blocks */
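  // The translation cache is used as a ring.  expirep is a 16-bit sweep
  // position kept ahead of the write pointer; "end" below maps the
  // current output offset into the same 16-bit space, offset by 16384
  // (a quarter of the ring) so expiry stays ahead of where new code is
  // emitted.  Each step clears one kind of bookkeeping, selected by
  // bits 11-12 of expirep, for one eighth-of-cache slice.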
10809   
10810   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10811   while(expirep!=end)
10812   {
10813     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10814     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10815     inv_debug("EXP: Phase %d\n",expirep);
10816     switch((expirep>>11)&3)
10817     {
10818       case 0:
10819         // Clear jump_in and jump_dirty
10820         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10821         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10822         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10823         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10824         break;
10825       case 1:
10826         // Unlink branches recorded in jump_out that point into the expiring region
10827         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10828         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10829         break;
10830       case 2:
10831         // Clear hash table
10832         for(i=0;i<32;i++) {
10833           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10834           if((ht_bin[3]>>shift)==(base>>shift) ||
10835              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10836             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10837             ht_bin[2]=ht_bin[3]=-1;
10838           }
10839           if((ht_bin[1]>>shift)==(base>>shift) ||
10840              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10841             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10842             ht_bin[0]=ht_bin[2];
10843             ht_bin[1]=ht_bin[3];
10844             ht_bin[2]=ht_bin[3]=-1;
10845           }
10846         }
10847         break;
10848       case 3:
10849         // Clear jump_out
10850         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10851         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10852         break;
10853     }
10854     expirep=(expirep+1)&65535;
10855   }
10856   return 0;
10857 }
10858
10859 // vim:shiftwidth=2:expandtab