drc: use correct RAM size
libpcsxcore/new_dynarec/new_dynarec.c
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2010 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24
25 #include "emu_if.h" //emulator interface
26
27 #include <sys/mman.h>
28
29 #ifdef __i386__
30 #include "assem_x86.h"
31 #endif
32 #ifdef __x86_64__
33 #include "assem_x64.h"
34 #endif
35 #ifdef __arm__
36 #include "assem_arm.h"
37 #endif
38
39 #define MAXBLOCK 4096
40 #define MAX_OUTPUT_BLOCK_SIZE 262144
41 #define CLOCK_DIVIDER 2
42
43 struct regstat
44 {
45   signed char regmap_entry[HOST_REGS];
46   signed char regmap[HOST_REGS];
47   uint64_t was32;
48   uint64_t is32;
49   uint64_t wasdirty;
50   uint64_t dirty;
51   uint64_t u;
52   uint64_t uu;
53   u_int wasconst;
54   u_int isconst;
55   uint64_t constmap[HOST_REGS];
56 };
57
58 struct ll_entry
59 {
60   u_int vaddr;
61   u_int reg32;
62   void *addr;
63   struct ll_entry *next;
64 };
65
66   u_int start;
67   u_int *source;
68   u_int pagelimit;
69   char insn[MAXBLOCK][10];
70   u_char itype[MAXBLOCK];
71   u_char opcode[MAXBLOCK];
72   u_char opcode2[MAXBLOCK];
73   u_char bt[MAXBLOCK];
74   u_char rs1[MAXBLOCK];
75   u_char rs2[MAXBLOCK];
76   u_char rt1[MAXBLOCK];
77   u_char rt2[MAXBLOCK];
78   u_char us1[MAXBLOCK];
79   u_char us2[MAXBLOCK];
80   u_char dep1[MAXBLOCK];
81   u_char dep2[MAXBLOCK];
82   u_char lt1[MAXBLOCK];
83   int imm[MAXBLOCK];
84   u_int ba[MAXBLOCK];
85   char likely[MAXBLOCK];
86   char is_ds[MAXBLOCK];
87   uint64_t unneeded_reg[MAXBLOCK];
88   uint64_t unneeded_reg_upper[MAXBLOCK];
89   uint64_t branch_unneeded_reg[MAXBLOCK];
90   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91   uint64_t p32[MAXBLOCK];
92   uint64_t pr32[MAXBLOCK];
93   signed char regmap_pre[MAXBLOCK][HOST_REGS];
94   signed char regmap[MAXBLOCK][HOST_REGS];
95   signed char regmap_entry[MAXBLOCK][HOST_REGS];
96   uint64_t constmap[MAXBLOCK][HOST_REGS];
97   uint64_t known_value[HOST_REGS];
98   u_int known_reg;
99   struct regstat regs[MAXBLOCK];
100   struct regstat branch_regs[MAXBLOCK];
101   u_int needed_reg[MAXBLOCK];
102   uint64_t requires_32bit[MAXBLOCK];
103   u_int wont_dirty[MAXBLOCK];
104   u_int will_dirty[MAXBLOCK];
105   int ccadj[MAXBLOCK];
106   int slen;
107   u_int instr_addr[MAXBLOCK];
108   u_int link_addr[MAXBLOCK][3];
109   int linkcount;
110   u_int stubs[MAXBLOCK*3][8];
111   int stubcount;
112   u_int literals[1024][2];
113   int literalcount;
114   int is_delayslot;
115   int cop1_usable;
116   u_char *out;
117   struct ll_entry *jump_in[4096];
118   struct ll_entry *jump_out[4096];
119   struct ll_entry *jump_dirty[4096];
120   u_int hash_table[65536][4]  __attribute__((aligned(16)));
121   char shadow[1048576]  __attribute__((aligned(16)));
122   void *copy;
123   int expirep;
124   u_int using_tlb;
125   u_int stop_after_jal;
126   extern u_char restore_candidate[512];
127   extern int cycle_count;
128
129   /* registers that may be allocated */
130   /* 1-31 gpr */
131 #define HIREG 32 // hi
132 #define LOREG 33 // lo
133 #define FSREG 34 // FPU status (FCSR)
134 #define CSREG 35 // Coprocessor status
135 #define CCREG 36 // Cycle count
136 #define INVCP 37 // Pointer to invalid_code
137 #define TEMPREG 38
138 #define FTEMP 38 // FPU/LDL/LDR temporary register
139 #define PTEMP 39 // Prefetch temporary register
140 #define TLREG 40 // TLB mapping offset
141 #define RHASH 41 // Return address hash
142 #define RHTBL 42 // Return address hash table address
143 #define RTEMP 43 // JR/JALR address register
144 #define MAXREG 43
145 #define AGEN1 44 // Address generation temporary register
146 #define AGEN2 45 // Address generation temporary register
147 #define MGEN1 46 // Maptable address generation temporary register
148 #define MGEN2 47 // Maptable address generation temporary register
149 #define BTREG 48 // Branch target temporary register
150
151   /* instruction types */
152 #define NOP 0     // No operation
153 #define LOAD 1    // Load
154 #define STORE 2   // Store
155 #define LOADLR 3  // Unaligned load
156 #define STORELR 4 // Unaligned store
157 #define MOV 5     // Move 
158 #define ALU 6     // Arithmetic/logic
159 #define MULTDIV 7 // Multiply/divide
160 #define SHIFT 8   // Shift by register
161 #define SHIFTIMM 9// Shift by immediate
162 #define IMM16 10  // 16-bit immediate
163 #define RJUMP 11  // Unconditional jump to register
164 #define UJUMP 12  // Unconditional jump
165 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166 #define SJUMP 14  // Conditional branch (regimm format)
167 #define COP0 15   // Coprocessor 0
168 #define COP1 16   // Coprocessor 1
169 #define C1LS 17   // Coprocessor 1 load/store
170 #define FJUMP 18  // Conditional branch (floating point)
171 #define FLOAT 19  // Floating point unit
172 #define FCONV 20  // Convert integer to float
173 #define FCOMP 21  // Floating point compare (sets FSREG)
174 #define SYSCALL 22// SYSCALL
175 #define OTHER 23  // Other
176 #define SPAN 24   // Branch/delay slot spans 2 pages
177 #define NI 25     // Not implemented
178 #define HLECALL 26// PCSX fake opcodes for HLE
179 #define COP2 27   // Coprocessor 2 move
180 #define C2LS 28   // Coprocessor 2 load/store
181 #define C2OP 29   // Coprocessor 2 operation
182
183   /* stubs */
184 #define CC_STUB 1
185 #define FP_STUB 2
186 #define LOADB_STUB 3
187 #define LOADH_STUB 4
188 #define LOADW_STUB 5
189 #define LOADD_STUB 6
190 #define LOADBU_STUB 7
191 #define LOADHU_STUB 8
192 #define STOREB_STUB 9
193 #define STOREH_STUB 10
194 #define STOREW_STUB 11
195 #define STORED_STUB 12
196 #define STORELR_STUB 13
197 #define INVCODE_STUB 14
198
199   /* branch codes */
200 #define TAKEN 1
201 #define NOTTAKEN 2
202 #define NULLDS 3
203
204 // asm linkage
205 int new_recompile_block(int addr);
206 void *get_addr_ht(u_int vaddr);
207 void invalidate_block(u_int block);
208 void invalidate_addr(u_int addr);
209 void remove_hash(int vaddr);
210 void jump_vaddr();
211 void dyna_linker();
212 void dyna_linker_ds();
213 void verify_code();
214 void verify_code_vm();
215 void verify_code_ds();
216 void cc_interrupt();
217 void fp_exception();
218 void fp_exception_ds();
219 void jump_syscall();
220 void jump_syscall_hle();
221 void jump_eret();
222 void jump_hlecall();
223 void new_dyna_leave();
224
225 // TLB
226 void TLBWI_new();
227 void TLBWR_new();
228 void read_nomem_new();
229 void read_nomemb_new();
230 void read_nomemh_new();
231 void read_nomemd_new();
232 void write_nomem_new();
233 void write_nomemb_new();
234 void write_nomemh_new();
235 void write_nomemd_new();
236 void write_rdram_new();
237 void write_rdramb_new();
238 void write_rdramh_new();
239 void write_rdramd_new();
240 extern u_int memory_map[1048576];
241
242 // Needed by assembler
243 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
244 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
245 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
246 void load_all_regs(signed char i_regmap[]);
247 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
248 void load_regs_entry(int t);
249 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
250
251 int tracedebug=0;
252
253 //#define DEBUG_CYCLE_COUNT 1
254
255 void nullf() {}
256 //#define assem_debug printf
257 //#define inv_debug printf
258 #define assem_debug nullf
259 #define inv_debug nullf
260
261 static void tlb_hacks()
262 {
263 #ifndef DISABLE_TLB
264   // Goldeneye hack
265   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
266   {
267     u_int addr;
268     int n;
269     switch (ROM_HEADER->Country_code&0xFF) 
270     {
271       case 0x45: // U
272         addr=0x34b30;
273         break;                   
274       case 0x4A: // J 
275         addr=0x34b70;    
276         break;    
277       case 0x50: // E 
278         addr=0x329f0;
279         break;                        
280       default: 
281         // Unknown country code
282         addr=0;
283         break;
284     }
285     u_int rom_addr=(u_int)rom;
286     #ifdef ROM_COPY
287     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
288     // in the lower 4G of memory to use this hack.  Copy it if necessary.
289     if((void *)rom>(void *)0xffffffff) {
290       munmap(ROM_COPY, 67108864);
291       if(mmap(ROM_COPY, 12582912,
292               PROT_READ | PROT_WRITE,
293               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
294               -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
295       memcpy(ROM_COPY,rom,12582912);
296       rom_addr=(u_int)ROM_COPY;
297     }
298     #endif
299     if(addr) {
300       for(n=0x7F000;n<0x80000;n++) {
301         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
302       }
303     }
304   }
305 #endif
306 }
307
308 static u_int get_page(u_int vaddr)
309 {
310   u_int page=(vaddr^0x80000000)>>12;
311 #ifndef DISABLE_TLB
312   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
313 #endif
314   if(page>2048) page=2048+(page&2047);
315   return page;
316 }
317
318 static u_int get_vpage(u_int vaddr)
319 {
320   u_int vpage=(vaddr^0x80000000)>>12;
321 #ifndef DISABLE_TLB
322   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
323 #endif
324   if(vpage>2048) vpage=2048+(vpage&2047);
325   return vpage;
326 }
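
/* Editor note (worked example, not original source): ignoring the TLB remap
 * above, get_page() folds a virtual address onto a 0..4095 page index.
 * KSEG0 addresses map directly, e.g. get_page(0x80001234) = 0x00001234>>12 = 1,
 * while addresses whose folded index exceeds 2048 are hashed into the upper
 * half, e.g. (0x00001234^0x80000000)>>12 = 0x80001 gives 2048+(0x80001&2047)
 * = 2049.  get_vpage() applies the same folding for the jump_dirty lists. */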
327
328 // Get address from virtual address
329 // This is called from the recompiled JR/JALR instructions
330 void *get_addr(u_int vaddr)
331 {
332   u_int page=get_page(vaddr);
333   u_int vpage=get_vpage(vaddr);
334   struct ll_entry *head;
335   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
336   head=jump_in[page];
337   while(head!=NULL) {
338     if(head->vaddr==vaddr&&head->reg32==0) {
339   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
340       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
341       ht_bin[3]=ht_bin[1];
342       ht_bin[2]=ht_bin[0];
343       ht_bin[1]=(int)head->addr;
344       ht_bin[0]=vaddr;
345       return head->addr;
346     }
347     head=head->next;
348   }
349   head=jump_dirty[vpage];
350   while(head!=NULL) {
351     if(head->vaddr==vaddr&&head->reg32==0) {
352       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
353       // Don't restore blocks which are about to expire from the cache
354       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
355       if(verify_dirty(head->addr)) {
356         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
357         invalid_code[vaddr>>12]=0;
358         memory_map[vaddr>>12]|=0x40000000;
359         if(vpage<2048) {
360 #ifndef DISABLE_TLB
361           if(tlb_LUT_r[vaddr>>12]) {
362             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
363             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
364           }
365 #endif
366           restore_candidate[vpage>>3]|=1<<(vpage&7);
367         }
368         else restore_candidate[page>>3]|=1<<(page&7);
369         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
370         if(ht_bin[0]==vaddr) {
371           ht_bin[1]=(int)head->addr; // Replace existing entry
372         }
373         else
374         {
375           ht_bin[3]=ht_bin[1];
376           ht_bin[2]=ht_bin[0];
377           ht_bin[1]=(int)head->addr;
378           ht_bin[0]=vaddr;
379         }
380         return head->addr;
381       }
382     }
383     head=head->next;
384   }
385   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
386   int r=new_recompile_block(vaddr);
387   if(r==0) return get_addr(vaddr);
388   // Execute in unmapped page, generate pagefault exception
389   Status|=2;
390   Cause=(vaddr<<31)|0x8;
391   EPC=(vaddr&1)?vaddr-5:vaddr;
392   BadVAddr=(vaddr&~1);
393   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
394   EntryHi=BadVAddr&0xFFFFE000;
395   return get_addr_ht(0x80000000);
396 }
397 // Look up address in hash table first
398 void *get_addr_ht(u_int vaddr)
399 {
400   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
401   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
402   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
403   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
404   return get_addr(vaddr);
405 }
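
/* Editor sketch (hypothetical helpers, kept out of the build): each
 * hash_table bin holds two {vaddr, compiled address} pairs in ht_bin[0..3].
 * Lookups probe slot 0 first, then slot 1; when get_addr() resolves a miss
 * it demotes the old slot-0 pair to slot 1 and installs the new pair, which
 * is the same update sequence used in the functions above. */
#if 0
static void *ht_lookup_sketch(u_int vaddr)
{
  int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  if(ht_bin[0]==vaddr) return (void *)ht_bin[1]; // most recent entry
  if(ht_bin[2]==vaddr) return (void *)ht_bin[3]; // older entry
  return NULL; // caller falls back to the jump_in lists / recompiler
}
static void ht_insert_sketch(u_int vaddr,void *addr)
{
  int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  ht_bin[3]=ht_bin[1]; // demote the previous entry to the second slot
  ht_bin[2]=ht_bin[0];
  ht_bin[1]=(int)addr; // new entry takes the first slot
  ht_bin[0]=vaddr;
}
#endif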
406
407 void *get_addr_32(u_int vaddr,u_int flags)
408 {
409 #ifdef FORCE32
410   return get_addr(vaddr);
411 #endif
412   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
413   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
414   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
415   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
416   u_int page=get_page(vaddr);
417   u_int vpage=get_vpage(vaddr);
418   struct ll_entry *head;
419   head=jump_in[page];
420   while(head!=NULL) {
421     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
422       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
423       if(head->reg32==0) {
424         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
425         if(ht_bin[0]==-1) {
426           ht_bin[1]=(int)head->addr;
427           ht_bin[0]=vaddr;
428         }else if(ht_bin[2]==-1) {
429           ht_bin[3]=(int)head->addr;
430           ht_bin[2]=vaddr;
431         }
432         //ht_bin[3]=ht_bin[1];
433         //ht_bin[2]=ht_bin[0];
434         //ht_bin[1]=(int)head->addr;
435         //ht_bin[0]=vaddr;
436       }
437       return head->addr;
438     }
439     head=head->next;
440   }
441   head=jump_dirty[vpage];
442   while(head!=NULL) {
443     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
444       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
445       // Don't restore blocks which are about to expire from the cache
446       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
447       if(verify_dirty(head->addr)) {
448         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
449         invalid_code[vaddr>>12]=0;
450         memory_map[vaddr>>12]|=0x40000000;
451         if(vpage<2048) {
452 #ifndef DISABLE_TLB
453           if(tlb_LUT_r[vaddr>>12]) {
454             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
455             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
456           }
457 #endif
458           restore_candidate[vpage>>3]|=1<<(vpage&7);
459         }
460         else restore_candidate[page>>3]|=1<<(page&7);
461         if(head->reg32==0) {
462           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
463           if(ht_bin[0]==-1) {
464             ht_bin[1]=(int)head->addr;
465             ht_bin[0]=vaddr;
466           }else if(ht_bin[2]==-1) {
467             ht_bin[3]=(int)head->addr;
468             ht_bin[2]=vaddr;
469           }
470           //ht_bin[3]=ht_bin[1];
471           //ht_bin[2]=ht_bin[0];
472           //ht_bin[1]=(int)head->addr;
473           //ht_bin[0]=vaddr;
474         }
475         return head->addr;
476       }
477     }
478     head=head->next;
479   }
480   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
481   int r=new_recompile_block(vaddr);
482   if(r==0) return get_addr(vaddr);
483   // Execute in unmapped page, generate pagefault exception
484   Status|=2;
485   Cause=(vaddr<<31)|0x8;
486   EPC=(vaddr&1)?vaddr-5:vaddr;
487   BadVAddr=(vaddr&~1);
488   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
489   EntryHi=BadVAddr&0xFFFFE000;
490   return get_addr_ht(0x80000000);
491 }
492
493 void clear_all_regs(signed char regmap[])
494 {
495   int hr;
496   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
497 }
498
499 signed char get_reg(signed char regmap[],int r)
500 {
501   int hr;
502   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
503   return -1;
504 }
505
506 // Find a register that is available for two consecutive cycles
507 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
508 {
509   int hr;
510   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
511   return -1;
512 }
513
514 int count_free_regs(signed char regmap[])
515 {
516   int count=0;
517   int hr;
518   for(hr=0;hr<HOST_REGS;hr++)
519   {
520     if(hr!=EXCLUDE_REG) {
521       if(regmap[hr]<0) count++;
522     }
523   }
524   return count;
525 }
526
527 void dirty_reg(struct regstat *cur,signed char reg)
528 {
529   int hr;
530   if(!reg) return;
531   for (hr=0;hr<HOST_REGS;hr++) {
532     if((cur->regmap[hr]&63)==reg) {
533       cur->dirty|=1<<hr;
534     }
535   }
536 }
537
538 // If we dirty the lower half of a 64 bit register which is now being
539 // sign-extended, we need to dump the upper half.
540 // Note: Do this only after completion of the instruction, because
541 // some instructions may need to read the full 64-bit value even if
542 // overwriting it (eg SLTI, DSRA32).
543 static void flush_dirty_uppers(struct regstat *cur)
544 {
545   int hr,reg;
546   for (hr=0;hr<HOST_REGS;hr++) {
547     if((cur->dirty>>hr)&1) {
548       reg=cur->regmap[hr];
549       if(reg>=64) 
550         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
551     }
552   }
553 }
554
555 void set_const(struct regstat *cur,signed char reg,uint64_t value)
556 {
557   int hr;
558   if(!reg) return;
559   for (hr=0;hr<HOST_REGS;hr++) {
560     if(cur->regmap[hr]==reg) {
561       cur->isconst|=1<<hr;
562       cur->constmap[hr]=value;
563     }
564     else if((cur->regmap[hr]^64)==reg) {
565       cur->isconst|=1<<hr;
566       cur->constmap[hr]=value>>32;
567     }
568   }
569 }
570
571 void clear_const(struct regstat *cur,signed char reg)
572 {
573   int hr;
574   if(!reg) return;
575   for (hr=0;hr<HOST_REGS;hr++) {
576     if((cur->regmap[hr]&63)==reg) {
577       cur->isconst&=~(1<<hr);
578     }
579   }
580 }
581
582 int is_const(struct regstat *cur,signed char reg)
583 {
584   int hr;
585   if(!reg) return 1;
586   for (hr=0;hr<HOST_REGS;hr++) {
587     if((cur->regmap[hr]&63)==reg) {
588       return (cur->isconst>>hr)&1;
589     }
590   }
591   return 0;
592 }
593 uint64_t get_const(struct regstat *cur,signed char reg)
594 {
595   int hr;
596   if(!reg) return 0;
597   for (hr=0;hr<HOST_REGS;hr++) {
598     if(cur->regmap[hr]==reg) {
599       return cur->constmap[hr];
600     }
601   }
602   printf("Unknown constant in r%d\n",reg);
603   exit(1);
604 }
605
606 // Least soon needed registers
607 // Look at the next ten instructions and see which registers
608 // will be used.  Try not to reallocate these.
609 void lsn(u_char hsn[], int i, int *preferred_reg)
610 {
611   int j;
612   int b=-1;
613   for(j=0;j<9;j++)
614   {
615     if(i+j>=slen) {
616       j=slen-i-1;
617       break;
618     }
619     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
620     {
621       // Don't go past an unconditional jump
622       j++;
623       break;
624     }
625   }
626   for(;j>=0;j--)
627   {
628     if(rs1[i+j]) hsn[rs1[i+j]]=j;
629     if(rs2[i+j]) hsn[rs2[i+j]]=j;
630     if(rt1[i+j]) hsn[rt1[i+j]]=j;
631     if(rt2[i+j]) hsn[rt2[i+j]]=j;
632     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
633       // Stores can allocate zero
634       hsn[rs1[i+j]]=j;
635       hsn[rs2[i+j]]=j;
636     }
637     // On some architectures stores need invc_ptr
638     #if defined(HOST_IMM8)
639     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
640       hsn[INVCP]=j;
641     }
642     #endif
643     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
644     {
645       hsn[CCREG]=j;
646       b=j;
647     }
648   }
649   if(b>=0)
650   {
651     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
652     {
653       // Follow first branch
654       int t=(ba[i+b]-start)>>2;
655       j=7-b;if(t+j>=slen) j=slen-t-1;
656       for(;j>=0;j--)
657       {
658         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
659         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
660         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
661         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
662       }
663     }
664     // TODO: preferred register based on backward branch
665   }
666   // Delay slot should preferably not overwrite branch conditions or cycle count
667   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
668     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
669     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
670     hsn[CCREG]=1;
671     // ...or hash tables
672     hsn[RHASH]=1;
673     hsn[RHTBL]=1;
674   }
675   // Coprocessor load/store needs FTEMP, even if not declared
676   if(itype[i]==C1LS||itype[i]==C2LS) {
677     hsn[FTEMP]=0;
678   }
679   // Load L/R also uses FTEMP as a temporary register
680   if(itype[i]==LOADLR) {
681     hsn[FTEMP]=0;
682   }
683   // Also SWL/SWR/SDL/SDR
684   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
685     hsn[FTEMP]=0;
686   }
687   // Don't remove the TLB registers either
688   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
689     hsn[TLREG]=0;
690   }
691   // Don't remove the miniht registers
692   if(itype[i]==UJUMP||itype[i]==RJUMP)
693   {
694     hsn[RHASH]=0;
695     hsn[RHTBL]=0;
696   }
697 }
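
/* Usage note (editor addition): callers such as needed_again() below first
 * fill hsn[] with 10 ("not used within the lookahead window") and then call
 * lsn(); afterwards hsn[r] is the distance in instructions to the next use
 * of register r, so smaller values mean the register should stay allocated. */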
698
699 // We only want to allocate registers if we're going to use them again soon
700 int needed_again(int r, int i)
701 {
702   int j;
703   int b=-1;
704   int rn=10;
705   int hr;
706   u_char hsn[MAXREG+1];
707   int preferred_reg;
708   
709   memset(hsn,10,sizeof(hsn));
710   lsn(hsn,i,&preferred_reg);
711   
712   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
713   {
714     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
715       return 0; // Don't need any registers if exiting the block
716   }
717   for(j=0;j<9;j++)
718   {
719     if(i+j>=slen) {
720       j=slen-i-1;
721       break;
722     }
723     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
724     {
725       // Don't go past an unconditional jump
726       j++;
727       break;
728     }
729     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||((source[i+j]&0xfc00003f)==0x0d))
730     {
731       break;
732     }
733   }
734   for(;j>=1;j--)
735   {
736     if(rs1[i+j]==r) rn=j;
737     if(rs2[i+j]==r) rn=j;
738     if((unneeded_reg[i+j]>>r)&1) rn=10;
739     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
740     {
741       b=j;
742     }
743   }
744   /*
745   if(b>=0)
746   {
747     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
748     {
749       // Follow first branch
750       int o=rn;
751       int t=(ba[i+b]-start)>>2;
752       j=7-b;if(t+j>=slen) j=slen-t-1;
753       for(;j>=0;j--)
754       {
755         if(!((unneeded_reg[t+j]>>r)&1)) {
756           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
757           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
758         }
759         else rn=o;
760       }
761     }
762   }*/
763   for(hr=0;hr<HOST_REGS;hr++) {
764     if(hr!=EXCLUDE_REG) {
765       if(rn<hsn[hr]) return 1;
766     }
767   }
768   return 0;
769 }
770
771 // Try to match register allocations at the end of a loop with those
772 // at the beginning
773 int loop_reg(int i, int r, int hr)
774 {
775   int j,k;
776   for(j=0;j<9;j++)
777   {
778     if(i+j>=slen) {
779       j=slen-i-1;
780       break;
781     }
782     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
783     {
784       // Don't go past an unconditional jump
785       j++;
786       break;
787     }
788   }
789   k=0;
790   if(i>0){
791     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
792       k--;
793   }
794   for(;k<j;k++)
795   {
796     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
797     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
798     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
799     {
800       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
801       {
802         int t=(ba[i+k]-start)>>2;
803         int reg=get_reg(regs[t].regmap_entry,r);
804         if(reg>=0) return reg;
805         //reg=get_reg(regs[t+1].regmap_entry,r);
806         //if(reg>=0) return reg;
807       }
808     }
809   }
810   return hr;
811 }
812
813
814 // Allocate every register, preserving source/target regs
815 void alloc_all(struct regstat *cur,int i)
816 {
817   int hr;
818   
819   for(hr=0;hr<HOST_REGS;hr++) {
820     if(hr!=EXCLUDE_REG) {
821       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
822          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
823       {
824         cur->regmap[hr]=-1;
825         cur->dirty&=~(1<<hr);
826       }
827       // Don't need zeros
828       if((cur->regmap[hr]&63)==0)
829       {
830         cur->regmap[hr]=-1;
831         cur->dirty&=~(1<<hr);
832       }
833     }
834   }
835 }
836
837
838 void div64(int64_t dividend,int64_t divisor)
839 {
840   lo=dividend/divisor;
841   hi=dividend%divisor;
842   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
843   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
844 }
845 void divu64(uint64_t dividend,uint64_t divisor)
846 {
847   lo=dividend/divisor;
848   hi=dividend%divisor;
849   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
850   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
851 }
852
853 void mult64(uint64_t m1,uint64_t m2)
854 {
855    unsigned long long int op1, op2, op3, op4;
856    unsigned long long int result1, result2, result3, result4;
857    unsigned long long int temp1, temp2, temp3, temp4;
858    int sign = 0;
859    
860    if ((int64_t)m1 < 0)
861      {
862     op2 = -m1;
863     sign = 1 - sign;
864      }
865    else op2 = m1;
866    if ((int64_t)m2 < 0)
867      {
868     op4 = -m2;
869     sign = 1 - sign;
870      }
871    else op4 = m2;
872    
873    op1 = op2 & 0xFFFFFFFF;
874    op2 = (op2 >> 32) & 0xFFFFFFFF;
875    op3 = op4 & 0xFFFFFFFF;
876    op4 = (op4 >> 32) & 0xFFFFFFFF;
877    
878    temp1 = op1 * op3;
879    temp2 = (temp1 >> 32) + op1 * op4;
880    temp3 = op2 * op3;
881    temp4 = (temp3 >> 32) + op2 * op4;
882    
883    result1 = temp1 & 0xFFFFFFFF;
884    result2 = temp2 + (temp3 & 0xFFFFFFFF);
885    result3 = (result2 >> 32) + temp4;
886    result4 = (result3 >> 32);
887    
888    lo = result1 | (result2 << 32);
889    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
890    if (sign)
891      {
892     hi = ~hi;
893     if (!lo) hi++;
894     else lo = ~lo + 1;
895      }
896 }
897
898 void multu64(uint64_t m1,uint64_t m2)
899 {
900    unsigned long long int op1, op2, op3, op4;
901    unsigned long long int result1, result2, result3, result4;
902    unsigned long long int temp1, temp2, temp3, temp4;
903    
904    op1 = m1 & 0xFFFFFFFF;
905    op2 = (m1 >> 32) & 0xFFFFFFFF;
906    op3 = m2 & 0xFFFFFFFF;
907    op4 = (m2 >> 32) & 0xFFFFFFFF;
908    
909    temp1 = op1 * op3;
910    temp2 = (temp1 >> 32) + op1 * op4;
911    temp3 = op2 * op3;
912    temp4 = (temp3 >> 32) + op2 * op4;
913    
914    result1 = temp1 & 0xFFFFFFFF;
915    result2 = temp2 + (temp3 & 0xFFFFFFFF);
916    result3 = (result2 >> 32) + temp4;
917    result4 = (result3 >> 32);
918    
919    lo = result1 | (result2 << 32);
920    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
921    
922   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
923   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
924 }
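
/* Editor sketch (assumes a host compiler with unsigned __int128; not built):
 * the 32x32-bit partial products above implement a full 64x64->128 multiply,
 * so multu64() should leave the same values in lo/hi as the native product. */
#if 0
static void multu64_check(uint64_t m1,uint64_t m2)
{
  unsigned __int128 p=(unsigned __int128)m1*m2;
  multu64(m1,m2);
  assert(lo==(uint64_t)p);       // low 64 bits of the product
  assert(hi==(uint64_t)(p>>64)); // high 64 bits of the product
}
#endif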
925
926 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
927 {
928   if(bits) {
929     original<<=64-bits;
930     original>>=64-bits;
931     loaded<<=bits;
932     original|=loaded;
933   }
934   else original=loaded;
935   return original;
936 }
937 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
938 {
939   if(bits^56) {
940     original>>=64-(bits^56);
941     original<<=64-(bits^56);
942     loaded>>=bits^56;
943     original|=loaded;
944   }
945   else original=loaded;
946   return original;
947 }
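
/* Worked example (editor note): ldl_merge() keeps the low `bits` bits of the
 * value already in the register and fills the remaining upper bits from the
 * newly loaded doubleword, e.g.
 *   ldl_merge(0x1111111122222222ULL, 0xAAAAAAAABBBBBBBBULL, 32)
 *     == 0xBBBBBBBB22222222ULL
 * ldr_merge() is the mirror image: it keeps the upper bits of the original
 * value and fills the low end from the loaded value. */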
948
949 #ifdef __i386__
950 #include "assem_x86.c"
951 #endif
952 #ifdef __x86_64__
953 #include "assem_x64.c"
954 #endif
955 #ifdef __arm__
956 #include "assem_arm.c"
957 #endif
958
959 // Add virtual address mapping to linked list
960 void ll_add(struct ll_entry **head,int vaddr,void *addr)
961 {
962   struct ll_entry *new_entry;
963   new_entry=malloc(sizeof(struct ll_entry));
964   assert(new_entry!=NULL);
965   new_entry->vaddr=vaddr;
966   new_entry->reg32=0;
967   new_entry->addr=addr;
968   new_entry->next=*head;
969   *head=new_entry;
970 }
971
972 // Add virtual address mapping for 32-bit compiled block
973 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
974 {
975   ll_add(head,vaddr,addr);
976 #ifndef FORCE32
977   (*head)->reg32=reg32;
978 #endif
979 }
980
981 // Check if an address is already compiled
982 // but don't return addresses which are about to expire from the cache
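// (Editor note) The "<<(32-TARGET_SIZE_2)" comparisons below, and the same
// test in get_addr()/clean_blocks(), roughly ask whether the compiled block
// sits far enough ahead of the current output pointer in the circular
// translation cache that the expiry sweep will not reclaim it soon.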
983 void *check_addr(u_int vaddr)
984 {
985   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
986   if(ht_bin[0]==vaddr) {
987     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
988       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
989   }
990   if(ht_bin[2]==vaddr) {
991     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
992       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
993   }
994   u_int page=get_page(vaddr);
995   struct ll_entry *head;
996   head=jump_in[page];
997   while(head!=NULL) {
998     if(head->vaddr==vaddr&&head->reg32==0) {
999       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1000         // Update existing entry with current address
1001         if(ht_bin[0]==vaddr) {
1002           ht_bin[1]=(int)head->addr;
1003           return head->addr;
1004         }
1005         if(ht_bin[2]==vaddr) {
1006           ht_bin[3]=(int)head->addr;
1007           return head->addr;
1008         }
1009         // Insert into hash table with low priority.
1010         // Don't evict existing entries, as they are probably
1011         // addresses that are being accessed frequently.
1012         if(ht_bin[0]==-1) {
1013           ht_bin[1]=(int)head->addr;
1014           ht_bin[0]=vaddr;
1015         }else if(ht_bin[2]==-1) {
1016           ht_bin[3]=(int)head->addr;
1017           ht_bin[2]=vaddr;
1018         }
1019         return head->addr;
1020       }
1021     }
1022     head=head->next;
1023   }
1024   return 0;
1025 }
1026
1027 void remove_hash(int vaddr)
1028 {
1029   //printf("remove hash: %x\n",vaddr);
1030   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1031   if(ht_bin[2]==vaddr) {
1032     ht_bin[2]=ht_bin[3]=-1;
1033   }
1034   if(ht_bin[0]==vaddr) {
1035     ht_bin[0]=ht_bin[2];
1036     ht_bin[1]=ht_bin[3];
1037     ht_bin[2]=ht_bin[3]=-1;
1038   }
1039 }
1040
1041 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1042 {
1043   struct ll_entry *next;
1044   while(*head) {
1045     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1046        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1047     {
1048       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1049       remove_hash((*head)->vaddr);
1050       next=(*head)->next;
1051       free(*head);
1052       *head=next;
1053     }
1054     else
1055     {
1056       head=&((*head)->next);
1057     }
1058   }
1059 }
1060
1061 // Remove all entries from linked list
1062 void ll_clear(struct ll_entry **head)
1063 {
1064   struct ll_entry *cur;
1065   struct ll_entry *next;
1066   if(cur=*head) {
1067     *head=0;
1068     while(cur) {
1069       next=cur->next;
1070       free(cur);
1071       cur=next;
1072     }
1073   }
1074 }
1075
1076 // Dereference the pointers and remove if it matches
1077 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1078 {
1079   u_int old_host_addr=0;
1080   while(head) {
1081     int ptr=get_pointer(head->addr);
1082     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1083     if(((ptr>>shift)==(addr>>shift)) ||
1084        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1085     {
1086       printf("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1087       u_int host_addr=(u_int)kill_pointer(head->addr);
1088
1089       if((host_addr>>12)!=(old_host_addr>>12)) {
1090         #ifdef __arm__
1091         __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1092         #endif
1093         old_host_addr=host_addr;
1094       }
1095     }
1096     head=head->next;
1097   }
1098   #ifdef __arm__
1099   if (old_host_addr)
1100     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1101   #endif
1102 }
1103
1104 // This is called when we write to a compiled block (see do_invstub)
1105 void invalidate_page(u_int page)
1106 {
1107   struct ll_entry *head;
1108   struct ll_entry *next;
1109   u_int old_host_addr=0;
1110   head=jump_in[page];
1111   jump_in[page]=0;
1112   while(head!=NULL) {
1113     inv_debug("INVALIDATE: %x\n",head->vaddr);
1114     remove_hash(head->vaddr);
1115     next=head->next;
1116     free(head);
1117     head=next;
1118   }
1119   head=jump_out[page];
1120   jump_out[page]=0;
1121   while(head!=NULL) {
1122     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1123     u_int host_addr=(u_int)kill_pointer(head->addr);
1124
1125     if((host_addr>>12)!=(old_host_addr>>12)) {
1126       #ifdef __arm__
1127       __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1128       #endif
1129       old_host_addr=host_addr;
1130     }
1131     next=head->next;
1132     free(head);
1133     head=next;
1134   }
1135   #ifdef __arm__
1136   if (old_host_addr)
1137     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1138   #endif
1139 }
1140 void invalidate_block(u_int block)
1141 {
1142   u_int page=get_page(block<<12);
1143   u_int vpage=get_vpage(block<<12);
1144   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1145   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1146   u_int first,last;
1147   first=last=page;
1148   struct ll_entry *head;
1149   head=jump_dirty[vpage];
1150   //printf("page=%d vpage=%d\n",page,vpage);
1151   while(head!=NULL) {
1152     u_int start,end;
1153     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1154       get_bounds((int)head->addr,&start,&end);
1155       //printf("start: %x end: %x\n",start,end);
1156       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1157         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1158           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1159           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1160         }
1161       }
1162 #ifndef DISABLE_TLB
1163       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1164         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1165           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1166           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1167         }
1168       }
1169 #endif
1170     }
1171     head=head->next;
1172   }
1173   //printf("first=%d last=%d\n",first,last);
1174   invalidate_page(page);
1175   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1176   assert(last<page+5);
1177   // Invalidate the adjacent pages if a block crosses a 4K boundary
1178   while(first<page) {
1179     invalidate_page(first);
1180     first++;
1181   }
1182   for(first=page+1;first<last;first++) {
1183     invalidate_page(first);
1184   }
1185   
1186   // Don't trap writes
1187   invalid_code[block]=1;
1188 #ifndef DISABLE_TLB
1189   // If there is a valid TLB entry for this page, remove write protect
1190   if(tlb_LUT_w[block]) {
1191     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1192     // CHECK: Is this right?
1193     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1194     u_int real_block=tlb_LUT_w[block]>>12;
1195     invalid_code[real_block]=1;
1196     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1197   }
1198   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1199 #endif
1200
1201   #ifdef USE_MINI_HT
1202   memset(mini_ht,-1,sizeof(mini_ht));
1203   #endif
1204 }
1205 void invalidate_addr(u_int addr)
1206 {
1207   invalidate_block(addr>>12);
1208 }
1209 void invalidate_all_pages()
1210 {
1211   u_int page,n;
1212   for(page=0;page<4096;page++)
1213     invalidate_page(page);
1214   for(page=0;page<1048576;page++)
1215     if(!invalid_code[page]) {
1216       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1217       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1218     }
1219   #ifdef __arm__
1220   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1221   #endif
1222   #ifdef USE_MINI_HT
1223   memset(mini_ht,-1,sizeof(mini_ht));
1224   #endif
1225   #ifndef DISABLE_TLB
1226   // TLB
1227   for(page=0;page<0x100000;page++) {
1228     if(tlb_LUT_r[page]) {
1229       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1230       if(!tlb_LUT_w[page]||!invalid_code[page])
1231         memory_map[page]|=0x40000000; // Write protect
1232     }
1233     else memory_map[page]=-1;
1234     if(page==0x80000) page=0xC0000;
1235   }
1236   tlb_hacks();
1237   #endif
1238 }
1239
1240 // Add an entry to jump_out after making a link
1241 void add_link(u_int vaddr,void *src)
1242 {
1243   u_int page=get_page(vaddr);
1244   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1245   ll_add(jump_out+page,vaddr,src);
1246   //int ptr=get_pointer(src);
1247   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1248 }
1249
1250 // If a code block was found to be unmodified (bit was set in
1251 // restore_candidate) and it remains unmodified (bit is clear
1252 // in invalid_code) then move the entries for that 4K page from
1253 // the dirty list to the clean list.
1254 void clean_blocks(u_int page)
1255 {
1256   struct ll_entry *head;
1257   inv_debug("INV: clean_blocks page=%d\n",page);
1258   head=jump_dirty[page];
1259   while(head!=NULL) {
1260     if(!invalid_code[head->vaddr>>12]) {
1261       // Don't restore blocks which are about to expire from the cache
1262       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1263         u_int start,end;
1264         if(verify_dirty((int)head->addr)) {
1265           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1266           u_int i;
1267           u_int inv=0;
1268           get_bounds((int)head->addr,&start,&end);
1269           if(start-(u_int)rdram<RAM_SIZE) {
1270             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1271               inv|=invalid_code[i];
1272             }
1273           }
1274           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1275             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1276             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1277             if(addr<start||addr>=end) inv=1;
1278           }
1279           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1280             inv=1;
1281           }
1282           if(!inv) {
1283             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1284             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1285               u_int ppage=page;
1286 #ifndef DISABLE_TLB
1287               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1288 #endif
1289               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1290               //printf("page=%x, addr=%x\n",page,head->vaddr);
1291               //assert(head->vaddr>>12==(page|0x80000));
1292               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1293               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1294               if(!head->reg32) {
1295                 if(ht_bin[0]==head->vaddr) {
1296                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1297                 }
1298                 if(ht_bin[2]==head->vaddr) {
1299                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1300                 }
1301               }
1302             }
1303           }
1304         }
1305       }
1306     }
1307     head=head->next;
1308   }
1309 }
1310
1311
1312 void mov_alloc(struct regstat *current,int i)
1313 {
1314   // Note: Don't need to actually alloc the source registers
1315   if((~current->is32>>rs1[i])&1) {
1316     //alloc_reg64(current,i,rs1[i]);
1317     alloc_reg64(current,i,rt1[i]);
1318     current->is32&=~(1LL<<rt1[i]);
1319   } else {
1320     //alloc_reg(current,i,rs1[i]);
1321     alloc_reg(current,i,rt1[i]);
1322     current->is32|=(1LL<<rt1[i]);
1323   }
1324   clear_const(current,rs1[i]);
1325   clear_const(current,rt1[i]);
1326   dirty_reg(current,rt1[i]);
1327 }
1328
1329 void shiftimm_alloc(struct regstat *current,int i)
1330 {
1331   clear_const(current,rs1[i]);
1332   clear_const(current,rt1[i]);
1333   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1334   {
1335     if(rt1[i]) {
1336       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1337       else lt1[i]=rs1[i];
1338       alloc_reg(current,i,rt1[i]);
1339       current->is32|=1LL<<rt1[i];
1340       dirty_reg(current,rt1[i]);
1341     }
1342   }
1343   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1344   {
1345     if(rt1[i]) {
1346       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1347       alloc_reg64(current,i,rt1[i]);
1348       current->is32&=~(1LL<<rt1[i]);
1349       dirty_reg(current,rt1[i]);
1350     }
1351   }
1352   if(opcode2[i]==0x3c) // DSLL32
1353   {
1354     if(rt1[i]) {
1355       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1356       alloc_reg64(current,i,rt1[i]);
1357       current->is32&=~(1LL<<rt1[i]);
1358       dirty_reg(current,rt1[i]);
1359     }
1360   }
1361   if(opcode2[i]==0x3e) // DSRL32
1362   {
1363     if(rt1[i]) {
1364       alloc_reg64(current,i,rs1[i]);
1365       if(imm[i]==32) {
1366         alloc_reg64(current,i,rt1[i]);
1367         current->is32&=~(1LL<<rt1[i]);
1368       } else {
1369         alloc_reg(current,i,rt1[i]);
1370         current->is32|=1LL<<rt1[i];
1371       }
1372       dirty_reg(current,rt1[i]);
1373     }
1374   }
1375   if(opcode2[i]==0x3f) // DSRA32
1376   {
1377     if(rt1[i]) {
1378       alloc_reg64(current,i,rs1[i]);
1379       alloc_reg(current,i,rt1[i]);
1380       current->is32|=1LL<<rt1[i];
1381       dirty_reg(current,rt1[i]);
1382     }
1383   }
1384 }
1385
1386 void shift_alloc(struct regstat *current,int i)
1387 {
1388   if(rt1[i]) {
1389     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1390     {
1391       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1392       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1393       alloc_reg(current,i,rt1[i]);
1394       if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1395       current->is32|=1LL<<rt1[i];
1396     } else { // DSLLV/DSRLV/DSRAV
1397       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1398       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1399       alloc_reg64(current,i,rt1[i]);
1400       current->is32&=~(1LL<<rt1[i]);
1401       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1402         alloc_reg_temp(current,i,-1);
1403     }
1404     clear_const(current,rs1[i]);
1405     clear_const(current,rs2[i]);
1406     clear_const(current,rt1[i]);
1407     dirty_reg(current,rt1[i]);
1408   }
1409 }
1410
1411 void alu_alloc(struct regstat *current,int i)
1412 {
1413   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1414     if(rt1[i]) {
1415       if(rs1[i]&&rs2[i]) {
1416         alloc_reg(current,i,rs1[i]);
1417         alloc_reg(current,i,rs2[i]);
1418       }
1419       else {
1420         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1421         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1422       }
1423       alloc_reg(current,i,rt1[i]);
1424     }
1425     current->is32|=1LL<<rt1[i];
1426   }
1427   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1428     if(rt1[i]) {
1429       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1430       {
1431         alloc_reg64(current,i,rs1[i]);
1432         alloc_reg64(current,i,rs2[i]);
1433         alloc_reg(current,i,rt1[i]);
1434       } else {
1435         alloc_reg(current,i,rs1[i]);
1436         alloc_reg(current,i,rs2[i]);
1437         alloc_reg(current,i,rt1[i]);
1438       }
1439     }
1440     current->is32|=1LL<<rt1[i];
1441   }
1442   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1443     if(rt1[i]) {
1444       if(rs1[i]&&rs2[i]) {
1445         alloc_reg(current,i,rs1[i]);
1446         alloc_reg(current,i,rs2[i]);
1447       }
1448       else
1449       {
1450         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1451         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1452       }
1453       alloc_reg(current,i,rt1[i]);
1454       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1455       {
1456         if(!((current->uu>>rt1[i])&1)) {
1457           alloc_reg64(current,i,rt1[i]);
1458         }
1459         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1460           if(rs1[i]&&rs2[i]) {
1461             alloc_reg64(current,i,rs1[i]);
1462             alloc_reg64(current,i,rs2[i]);
1463           }
1464           else
1465           {
1466             // Is it really worth it to keep 64-bit values in registers?
1467             #ifdef NATIVE_64BIT
1468             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1469             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1470             #endif
1471           }
1472         }
1473         current->is32&=~(1LL<<rt1[i]);
1474       } else {
1475         current->is32|=1LL<<rt1[i];
1476       }
1477     }
1478   }
1479   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1480     if(rt1[i]) {
1481       if(rs1[i]&&rs2[i]) {
1482         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1483           alloc_reg64(current,i,rs1[i]);
1484           alloc_reg64(current,i,rs2[i]);
1485           alloc_reg64(current,i,rt1[i]);
1486         } else {
1487           alloc_reg(current,i,rs1[i]);
1488           alloc_reg(current,i,rs2[i]);
1489           alloc_reg(current,i,rt1[i]);
1490         }
1491       }
1492       else {
1493         alloc_reg(current,i,rt1[i]);
1494         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1495           // DADD used as move, or zeroing
1496           // If we have a 64-bit source, then make the target 64 bits too
1497           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1498             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1499             alloc_reg64(current,i,rt1[i]);
1500           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1501             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1502             alloc_reg64(current,i,rt1[i]);
1503           }
1504           if(opcode2[i]>=0x2e&&rs2[i]) {
1505             // DSUB used as negation - 64-bit result
1506             // If we have a 32-bit register, extend it to 64 bits
1507             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1508             alloc_reg64(current,i,rt1[i]);
1509           }
1510         }
1511       }
1512       if(rs1[i]&&rs2[i]) {
1513         current->is32&=~(1LL<<rt1[i]);
1514       } else if(rs1[i]) {
1515         current->is32&=~(1LL<<rt1[i]);
1516         if((current->is32>>rs1[i])&1)
1517           current->is32|=1LL<<rt1[i];
1518       } else if(rs2[i]) {
1519         current->is32&=~(1LL<<rt1[i]);
1520         if((current->is32>>rs2[i])&1)
1521           current->is32|=1LL<<rt1[i];
1522       } else {
1523         current->is32|=1LL<<rt1[i];
1524       }
1525     }
1526   }
1527   clear_const(current,rs1[i]);
1528   clear_const(current,rs2[i]);
1529   clear_const(current,rt1[i]);
1530   dirty_reg(current,rt1[i]);
1531 }
1532
1533 void imm16_alloc(struct regstat *current,int i)
1534 {
1535   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1536   else lt1[i]=rs1[i];
1537   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1538   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1539     current->is32&=~(1LL<<rt1[i]);
1540     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1541       // TODO: Could preserve the 32-bit flag if the immediate is zero
1542       alloc_reg64(current,i,rt1[i]);
1543       alloc_reg64(current,i,rs1[i]);
1544     }
1545     clear_const(current,rs1[i]);
1546     clear_const(current,rt1[i]);
1547   }
1548   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1549     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1550     current->is32|=1LL<<rt1[i];
1551     clear_const(current,rs1[i]);
1552     clear_const(current,rt1[i]);
1553   }
1554   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1555     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1556       if(rs1[i]!=rt1[i]) {
1557         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1558         alloc_reg64(current,i,rt1[i]);
1559         current->is32&=~(1LL<<rt1[i]);
1560       }
1561     }
1562     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1563     if(is_const(current,rs1[i])) {
1564       int v=get_const(current,rs1[i]);
1565       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1566       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1567       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1568     }
1569     else clear_const(current,rt1[i]);
1570   }
1571   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1572     if(is_const(current,rs1[i])) {
1573       int v=get_const(current,rs1[i]);
1574       set_const(current,rt1[i],v+imm[i]);
1575     }
1576     else clear_const(current,rt1[i]);
1577     current->is32|=1LL<<rt1[i];
1578   }
1579   else {
1580     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1581     current->is32|=1LL<<rt1[i];
1582   }
1583   dirty_reg(current,rt1[i]);
1584 }
1585
1586 void load_alloc(struct regstat *current,int i)
1587 {
1588   clear_const(current,rt1[i]);
1589   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1590   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1591   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1592   if(rt1[i]) {
1593     alloc_reg(current,i,rt1[i]);
1594     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1595     {
1596       current->is32&=~(1LL<<rt1[i]);
1597       alloc_reg64(current,i,rt1[i]);
1598     }
1599     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1600     {
1601       current->is32&=~(1LL<<rt1[i]);
1602       alloc_reg64(current,i,rt1[i]);
1603       alloc_all(current,i);
1604       alloc_reg64(current,i,FTEMP);
1605     }
1606     else current->is32|=1LL<<rt1[i];
1607     dirty_reg(current,rt1[i]);
1608     // If using TLB, need a register for pointer to the mapping table
1609     if(using_tlb) alloc_reg(current,i,TLREG);
1610     // LWL/LWR need a temporary register for the old value
1611     if(opcode[i]==0x22||opcode[i]==0x26)
1612     {
1613       alloc_reg(current,i,FTEMP);
1614       alloc_reg_temp(current,i,-1);
1615     }
1616   }
1617   else
1618   {
1619     // Load to r0 (dummy load)
1620     // but we still need a register to calculate the address
1621     alloc_reg_temp(current,i,-1);
1622   }
1623 }
1624
1625 void store_alloc(struct regstat *current,int i)
1626 {
1627   clear_const(current,rs2[i]);
1628   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1629   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1630   alloc_reg(current,i,rs2[i]);
1631   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1632     alloc_reg64(current,i,rs2[i]);
1633     if(rs2[i]) alloc_reg(current,i,FTEMP);
1634   }
1635   // If using TLB, need a register for pointer to the mapping table
1636   if(using_tlb) alloc_reg(current,i,TLREG);
1637   #if defined(HOST_IMM8)
1638   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1639   else alloc_reg(current,i,INVCP);
1640   #endif
1641   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1642     alloc_reg(current,i,FTEMP);
1643   }
1644   // We need a temporary register for address generation
1645   alloc_reg_temp(current,i,-1);
1646 }
1647
1648 void c1ls_alloc(struct regstat *current,int i)
1649 {
1650   //clear_const(current,rs1[i]); // FIXME
1651   clear_const(current,rt1[i]);
1652   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1653   alloc_reg(current,i,CSREG); // Status
1654   alloc_reg(current,i,FTEMP);
1655   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1656     alloc_reg64(current,i,FTEMP);
1657   }
1658   // If using TLB, need a register for pointer to the mapping table
1659   if(using_tlb) alloc_reg(current,i,TLREG);
1660   #if defined(HOST_IMM8)
1661   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1662   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1663     alloc_reg(current,i,INVCP);
1664   #endif
1665   // We need a temporary register for address generation
1666   alloc_reg_temp(current,i,-1);
1667 }
1668
1669 void c2ls_alloc(struct regstat *current,int i)
1670 {
1671   clear_const(current,rt1[i]);
1672   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1673   alloc_reg(current,i,FTEMP);
1674   // If using TLB, need a register for pointer to the mapping table
1675   if(using_tlb) alloc_reg(current,i,TLREG);
1676   #if defined(HOST_IMM8)
1677   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1678   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1679     alloc_reg(current,i,INVCP);
1680   #endif
1681   // We need a temporary register for address generation
1682   alloc_reg_temp(current,i,-1);
1683 }
1684
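// Default register allocation for the multiply/divide group (MULT/MULTU/
// DIV/DIVU and the 64-bit D* forms): HI and LO become live and dirty.  An
// architecture-specific assembler may provide its own multdiv_alloc, hence
// the #ifndef guard.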
1685 #ifndef multdiv_alloc
1686 void multdiv_alloc(struct regstat *current,int i)
1687 {
1688   //  case 0x18: MULT
1689   //  case 0x19: MULTU
1690   //  case 0x1A: DIV
1691   //  case 0x1B: DIVU
1692   //  case 0x1C: DMULT
1693   //  case 0x1D: DMULTU
1694   //  case 0x1E: DDIV
1695   //  case 0x1F: DDIVU
1696   clear_const(current,rs1[i]);
1697   clear_const(current,rs2[i]);
1698   if(rs1[i]&&rs2[i])
1699   {
1700     if((opcode2[i]&4)==0) // 32-bit
1701     {
1702       current->u&=~(1LL<<HIREG);
1703       current->u&=~(1LL<<LOREG);
1704       alloc_reg(current,i,HIREG);
1705       alloc_reg(current,i,LOREG);
1706       alloc_reg(current,i,rs1[i]);
1707       alloc_reg(current,i,rs2[i]);
1708       current->is32|=1LL<<HIREG;
1709       current->is32|=1LL<<LOREG;
1710       dirty_reg(current,HIREG);
1711       dirty_reg(current,LOREG);
1712     }
1713     else // 64-bit
1714     {
1715       current->u&=~(1LL<<HIREG);
1716       current->u&=~(1LL<<LOREG);
1717       current->uu&=~(1LL<<HIREG);
1718       current->uu&=~(1LL<<LOREG);
1719       alloc_reg64(current,i,HIREG);
1720       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1721       alloc_reg64(current,i,rs1[i]);
1722       alloc_reg64(current,i,rs2[i]);
1723       alloc_all(current,i);
1724       current->is32&=~(1LL<<HIREG);
1725       current->is32&=~(1LL<<LOREG);
1726       dirty_reg(current,HIREG);
1727       dirty_reg(current,LOREG);
1728     }
1729   }
1730   else
1731   {
1732     // Multiply by zero is zero.
1733     // MIPS does not have a divide by zero exception.
1734     // The result is undefined, so we return zero.
1735     alloc_reg(current,i,HIREG);
1736     alloc_reg(current,i,LOREG);
1737     current->is32|=1LL<<HIREG;
1738     current->is32|=1LL<<LOREG;
1739     dirty_reg(current,HIREG);
1740     dirty_reg(current,LOREG);
1741   }
1742 }
1743 #endif
1744
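// Register allocation for COP0 instructions: MFC0, MTC0 and the
// TLBR/TLBWI/TLBWR/TLBP/ERET group, all of which use alloc_all.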
1745 void cop0_alloc(struct regstat *current,int i)
1746 {
1747   if(opcode2[i]==0) // MFC0
1748   {
1749     if(rt1[i]) {
1750       clear_const(current,rt1[i]);
1751       alloc_all(current,i);
1752       alloc_reg(current,i,rt1[i]);
1753       current->is32|=1LL<<rt1[i];
1754       dirty_reg(current,rt1[i]);
1755     }
1756   }
1757   else if(opcode2[i]==4) // MTC0
1758   {
1759     if(rs1[i]){
1760       clear_const(current,rs1[i]);
1761       alloc_reg(current,i,rs1[i]);
1762       alloc_all(current,i);
1763     }
1764     else {
1765       alloc_all(current,i); // FIXME: Keep r0
1766       current->u&=~1LL;
1767       alloc_reg(current,i,0);
1768     }
1769   }
1770   else
1771   {
1772     // TLBR/TLBWI/TLBWR/TLBP/ERET
1773     assert(opcode2[i]==0x10);
1774     alloc_all(current,i);
1775   }
1776 }
1777
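// Register allocation for COP1 register moves: MFC1/DMFC1/CFC1 (opcode2<3)
// and MTC1/DMTC1/CTC1 (opcode2>3).  CSREG is loaded for the status check.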
1778 void cop1_alloc(struct regstat *current,int i)
1779 {
1780   alloc_reg(current,i,CSREG); // Load status
1781   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1782   {
1783     assert(rt1[i]);
1784     clear_const(current,rt1[i]);
1785     if(opcode2[i]==1) {
1786       alloc_reg64(current,i,rt1[i]); // DMFC1
1787       current->is32&=~(1LL<<rt1[i]);
1788     }else{
1789       alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1790       current->is32|=1LL<<rt1[i];
1791     }
1792     dirty_reg(current,rt1[i]);
1793     alloc_reg_temp(current,i,-1);
1794   }
1795   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1796   {
1797     if(rs1[i]){
1798       clear_const(current,rs1[i]);
1799       if(opcode2[i]==5)
1800         alloc_reg64(current,i,rs1[i]); // DMTC1
1801       else
1802         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1803       alloc_reg_temp(current,i,-1);
1804     }
1805     else {
1806       current->u&=~1LL;
1807       alloc_reg(current,i,0);
1808       alloc_reg_temp(current,i,-1);
1809     }
1810   }
1811 }
1812 void fconv_alloc(struct regstat *current,int i)
1813 {
1814   alloc_reg(current,i,CSREG); // Load status
1815   alloc_reg_temp(current,i,-1);
1816 }
1817 void float_alloc(struct regstat *current,int i)
1818 {
1819   alloc_reg(current,i,CSREG); // Load status
1820   alloc_reg_temp(current,i,-1);
1821 }
1822 void c2op_alloc(struct regstat *current,int i)
1823 {
1824   alloc_reg_temp(current,i,-1);
1825 }
1826 void fcomp_alloc(struct regstat *current,int i)
1827 {
1828   alloc_reg(current,i,CSREG); // Load status
1829   alloc_reg(current,i,FSREG); // Load flags
1830   dirty_reg(current,FSREG); // Flag will be modified
1831   alloc_reg_temp(current,i,-1);
1832 }
1833
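// SYSCALL: allocate the cycle counter (CCREG), write everything back via
// alloc_all, and drop any constant tracking.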
1834 void syscall_alloc(struct regstat *current,int i)
1835 {
1836   alloc_cc(current,i);
1837   dirty_reg(current,CCREG);
1838   alloc_all(current,i);
1839   current->isconst=0;
1840 }
1841
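// Register allocation for the instruction in a branch delay slot, dispatched
// on its decoded itype.  A branch in the delay slot is not supported and
// disables further speculative precompilation.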
1842 void delayslot_alloc(struct regstat *current,int i)
1843 {
1844   switch(itype[i]) {
1845     case UJUMP:
1846     case CJUMP:
1847     case SJUMP:
1848     case RJUMP:
1849     case FJUMP:
1850     case SYSCALL:
1851     case HLECALL:
1852     case SPAN:
1853       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1854       printf("Disabled speculative precompilation\n");
1855       stop_after_jal=1;
1856       break;
1857     case IMM16:
1858       imm16_alloc(current,i);
1859       break;
1860     case LOAD:
1861     case LOADLR:
1862       load_alloc(current,i);
1863       break;
1864     case STORE:
1865     case STORELR:
1866       store_alloc(current,i);
1867       break;
1868     case ALU:
1869       alu_alloc(current,i);
1870       break;
1871     case SHIFT:
1872       shift_alloc(current,i);
1873       break;
1874     case MULTDIV:
1875       multdiv_alloc(current,i);
1876       break;
1877     case SHIFTIMM:
1878       shiftimm_alloc(current,i);
1879       break;
1880     case MOV:
1881       mov_alloc(current,i);
1882       break;
1883     case COP0:
1884       cop0_alloc(current,i);
1885       break;
1886     case COP1:
1887     case COP2:
1888       cop1_alloc(current,i);
1889       break;
1890     case C1LS:
1891       c1ls_alloc(current,i);
1892       break;
1893     case C2LS:
1894       c2ls_alloc(current,i);
1895       break;
1896     case FCONV:
1897       fconv_alloc(current,i);
1898       break;
1899     case FLOAT:
1900       float_alloc(current,i);
1901       break;
1902     case FCOMP:
1903       fcomp_alloc(current,i);
1904       break;
1905     case C2OP:
1906       c2op_alloc(current,i);
1907       break;
1908   }
1909 }
1910
1911 // Special case where a branch and delay slot span two pages in virtual memory
1912 static void pagespan_alloc(struct regstat *current,int i)
1913 {
1914   current->isconst=0;
1915   current->wasconst=0;
1916   regs[i].wasconst=0;
1917   alloc_all(current,i);
1918   alloc_cc(current,i);
1919   dirty_reg(current,CCREG);
1920   if(opcode[i]==3) // JAL
1921   {
1922     alloc_reg(current,i,31);
1923     dirty_reg(current,31);
1924   }
1925   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1926   {
1927     alloc_reg(current,i,rs1[i]);
1928     if (rt1[i]!=0) {
1929       alloc_reg(current,i,rt1[i]);
1930       dirty_reg(current,rt1[i]);
1931     }
1932   }
1933   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1934   {
1935     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1936     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1937     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1938     {
1939       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1940       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1941     }
1942   }
1943   else
1944   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1945   {
1946     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1947     if(!((current->is32>>rs1[i])&1))
1948     {
1949       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1950     }
1951   }
1952   else
1953   if(opcode[i]==0x11) // BC1
1954   {
1955     alloc_reg(current,i,FSREG);
1956     alloc_reg(current,i,CSREG);
1957   }
1958   //else ...
1959 }
1960
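// Queue an entry in the stub table: the stub type, the address of the
// branch to be patched (addr), the return address, and up to five
// type-specific arguments.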
1961 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1962 {
1963   stubs[stubcount][0]=type;
1964   stubs[stubcount][1]=addr;
1965   stubs[stubcount][2]=retaddr;
1966   stubs[stubcount][3]=a;
1967   stubs[stubcount][4]=b;
1968   stubs[stubcount][5]=c;
1969   stubs[stubcount][6]=d;
1970   stubs[stubcount][7]=e;
1971   stubcount++;
1972 }
1973
1974 // Write out a single register
1975 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1976 {
1977   int hr;
1978   for(hr=0;hr<HOST_REGS;hr++) {
1979     if(hr!=EXCLUDE_REG) {
1980       if((regmap[hr]&63)==r) {
1981         if((dirty>>hr)&1) {
1982           if(regmap[hr]<64) {
1983             emit_storereg(r,hr);
1984 #ifndef FORCE32
1985             if((is32>>regmap[hr])&1) {
1986               emit_sarimm(hr,31,hr);
1987               emit_storereg(r|64,hr);
1988             }
1989 #endif
1990           }else{
1991             emit_storereg(r|64,hr);
1992           }
1993         }
1994       }
1995     }
1996   }
1997 }
1998
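// Debug helpers: mchecksum/rchecksum compute simple checksums of RAM and the
// register file, and rlist dumps the GPRs (plus FPRs when COP1 is enabled).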
1999 int mchecksum()
2000 {
2001   //if(!tracedebug) return 0;
2002   int i;
2003   int sum=0;
2004   for(i=0;i<RAM_SIZE/4;i++) { // scan RAM_SIZE bytes as 32-bit words
2005     unsigned int temp=sum;
2006     sum<<=1;
2007     sum|=(~temp)>>31;
2008     sum^=((u_int *)rdram)[i];
2009   }
2010   return sum;
2011 }
2012 int rchecksum()
2013 {
2014   int i;
2015   int sum=0;
2016   for(i=0;i<64;i++)
2017     sum^=((u_int *)reg)[i];
2018   return sum;
2019 }
2020 void rlist()
2021 {
2022   int i;
2023   printf("TRACE: ");
2024   for(i=0;i<32;i++)
2025     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2026   printf("\n");
2027 #ifndef DISABLE_COP1
2028   printf("TRACE: ");
2029   for(i=0;i<32;i++)
2030     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2031   printf("\n");
2032 #endif
2033 }
2034
2035 void enabletrace()
2036 {
2037   tracedebug=1;
2038 }
2039
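// memdebug() can be called from generated code (see the commented-out blocks
// in load_assemble/store_assemble) to trace cycle counts and checksums over
// a chosen Count window.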
2040 void memdebug(int i)
2041 {
2042   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2043   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2044   //rlist();
2045   //if(tracedebug) {
2046   //if(Count>=-2084597794) {
2047   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2048   //if(0) {
2049     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2050     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2051     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2052     rlist();
2053     #ifdef __i386__
2054     printf("TRACE: %x\n",(&i)[-1]);
2055     #endif
2056     #ifdef __arm__
2057     int j;
2058     printf("TRACE: %x \n",(&j)[10]);
2059     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2060     #endif
2061     //fflush(stdout);
2062   }
2063   //printf("TRACE: %x\n",(&i)[-1]);
2064 }
2065
2066 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2067 {
2068   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2069 }
2070
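// Emit host code for the ALU group: ADD/ADDU/SUB/SUBU, DADD/DADDU/DSUB/DSUBU,
// SLT/SLTU and AND/OR/XOR/NOR, using the register mapping in i_regs.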
2071 void alu_assemble(int i,struct regstat *i_regs)
2072 {
2073   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2074     if(rt1[i]) {
2075       signed char s1,s2,t;
2076       t=get_reg(i_regs->regmap,rt1[i]);
2077       if(t>=0) {
2078         s1=get_reg(i_regs->regmap,rs1[i]);
2079         s2=get_reg(i_regs->regmap,rs2[i]);
2080         if(rs1[i]&&rs2[i]) {
2081           assert(s1>=0);
2082           assert(s2>=0);
2083           if(opcode2[i]&2) emit_sub(s1,s2,t);
2084           else emit_add(s1,s2,t);
2085         }
2086         else if(rs1[i]) {
2087           if(s1>=0) emit_mov(s1,t);
2088           else emit_loadreg(rs1[i],t);
2089         }
2090         else if(rs2[i]) {
2091           if(s2>=0) {
2092             if(opcode2[i]&2) emit_neg(s2,t);
2093             else emit_mov(s2,t);
2094           }
2095           else {
2096             emit_loadreg(rs2[i],t);
2097             if(opcode2[i]&2) emit_neg(t,t);
2098           }
2099         }
2100         else emit_zeroreg(t);
2101       }
2102     }
2103   }
2104   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2105     if(rt1[i]) {
2106       signed char s1l,s2l,s1h,s2h,tl,th;
2107       tl=get_reg(i_regs->regmap,rt1[i]);
2108       th=get_reg(i_regs->regmap,rt1[i]|64);
2109       if(tl>=0) {
2110         s1l=get_reg(i_regs->regmap,rs1[i]);
2111         s2l=get_reg(i_regs->regmap,rs2[i]);
2112         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2113         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2114         if(rs1[i]&&rs2[i]) {
2115           assert(s1l>=0);
2116           assert(s2l>=0);
2117           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2118           else emit_adds(s1l,s2l,tl);
2119           if(th>=0) {
2120             #ifdef INVERTED_CARRY
2121             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2122             #else
2123             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2124             #endif
2125             else emit_add(s1h,s2h,th);
2126           }
2127         }
2128         else if(rs1[i]) {
2129           if(s1l>=0) emit_mov(s1l,tl);
2130           else emit_loadreg(rs1[i],tl);
2131           if(th>=0) {
2132             if(s1h>=0) emit_mov(s1h,th);
2133             else emit_loadreg(rs1[i]|64,th);
2134           }
2135         }
2136         else if(rs2[i]) {
2137           if(s2l>=0) {
2138             if(opcode2[i]&2) emit_negs(s2l,tl);
2139             else emit_mov(s2l,tl);
2140           }
2141           else {
2142             emit_loadreg(rs2[i],tl);
2143             if(opcode2[i]&2) emit_negs(tl,tl);
2144           }
2145           if(th>=0) {
2146             #ifdef INVERTED_CARRY
2147             if(s2h>=0) emit_mov(s2h,th);
2148             else emit_loadreg(rs2[i]|64,th);
2149             if(opcode2[i]&2) {
2150               emit_adcimm(-1,th); // x86 has inverted carry flag
2151               emit_not(th,th);
2152             }
2153             #else
2154             if(opcode2[i]&2) {
2155               if(s2h>=0) emit_rscimm(s2h,0,th);
2156               else {
2157                 emit_loadreg(rs2[i]|64,th);
2158                 emit_rscimm(th,0,th);
2159               }
2160             }else{
2161               if(s2h>=0) emit_mov(s2h,th);
2162               else emit_loadreg(rs2[i]|64,th);
2163             }
2164             #endif
2165           }
2166         }
2167         else {
2168           emit_zeroreg(tl);
2169           if(th>=0) emit_zeroreg(th);
2170         }
2171       }
2172     }
2173   }
2174   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2175     if(rt1[i]) {
2176       signed char s1l,s1h,s2l,s2h,t;
2177       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2178       {
2179         t=get_reg(i_regs->regmap,rt1[i]);
2180         //assert(t>=0);
2181         if(t>=0) {
2182           s1l=get_reg(i_regs->regmap,rs1[i]);
2183           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2184           s2l=get_reg(i_regs->regmap,rs2[i]);
2185           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2186           if(rs2[i]==0) // rx<r0
2187           {
2188             assert(s1h>=0);
2189             if(opcode2[i]==0x2a) // SLT
2190               emit_shrimm(s1h,31,t);
2191             else // SLTU (unsigned cannot be less than zero)
2192               emit_zeroreg(t);
2193           }
2194           else if(rs1[i]==0) // r0<rx
2195           {
2196             assert(s2h>=0);
2197             if(opcode2[i]==0x2a) // SLT
2198               emit_set_gz64_32(s2h,s2l,t);
2199             else // SLTU (set if not zero)
2200               emit_set_nz64_32(s2h,s2l,t);
2201           }
2202           else {
2203             assert(s1l>=0);assert(s1h>=0);
2204             assert(s2l>=0);assert(s2h>=0);
2205             if(opcode2[i]==0x2a) // SLT
2206               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2207             else // SLTU
2208               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2209           }
2210         }
2211       } else {
2212         t=get_reg(i_regs->regmap,rt1[i]);
2213         //assert(t>=0);
2214         if(t>=0) {
2215           s1l=get_reg(i_regs->regmap,rs1[i]);
2216           s2l=get_reg(i_regs->regmap,rs2[i]);
2217           if(rs2[i]==0) // rx<r0
2218           {
2219             assert(s1l>=0);
2220             if(opcode2[i]==0x2a) // SLT
2221               emit_shrimm(s1l,31,t);
2222             else // SLTU (unsigned cannot be less than zero)
2223               emit_zeroreg(t);
2224           }
2225           else if(rs1[i]==0) // r0<rx
2226           {
2227             assert(s2l>=0);
2228             if(opcode2[i]==0x2a) // SLT
2229               emit_set_gz32(s2l,t);
2230             else // SLTU (set if not zero)
2231               emit_set_nz32(s2l,t);
2232           }
2233           else{
2234             assert(s1l>=0);assert(s2l>=0);
2235             if(opcode2[i]==0x2a) // SLT
2236               emit_set_if_less32(s1l,s2l,t);
2237             else // SLTU
2238               emit_set_if_carry32(s1l,s2l,t);
2239           }
2240         }
2241       }
2242     }
2243   }
2244   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2245     if(rt1[i]) {
2246       signed char s1l,s1h,s2l,s2h,th,tl;
2247       tl=get_reg(i_regs->regmap,rt1[i]);
2248       th=get_reg(i_regs->regmap,rt1[i]|64);
2249       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2250       {
2251         assert(tl>=0);
2252         if(tl>=0) {
2253           s1l=get_reg(i_regs->regmap,rs1[i]);
2254           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2255           s2l=get_reg(i_regs->regmap,rs2[i]);
2256           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2257           if(rs1[i]&&rs2[i]) {
2258             assert(s1l>=0);assert(s1h>=0);
2259             assert(s2l>=0);assert(s2h>=0);
2260             if(opcode2[i]==0x24) { // AND
2261               emit_and(s1l,s2l,tl);
2262               emit_and(s1h,s2h,th);
2263             } else
2264             if(opcode2[i]==0x25) { // OR
2265               emit_or(s1l,s2l,tl);
2266               emit_or(s1h,s2h,th);
2267             } else
2268             if(opcode2[i]==0x26) { // XOR
2269               emit_xor(s1l,s2l,tl);
2270               emit_xor(s1h,s2h,th);
2271             } else
2272             if(opcode2[i]==0x27) { // NOR
2273               emit_or(s1l,s2l,tl);
2274               emit_or(s1h,s2h,th);
2275               emit_not(tl,tl);
2276               emit_not(th,th);
2277             }
2278           }
2279           else
2280           {
2281             if(opcode2[i]==0x24) { // AND
2282               emit_zeroreg(tl);
2283               emit_zeroreg(th);
2284             } else
2285             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2286               if(rs1[i]){
2287                 if(s1l>=0) emit_mov(s1l,tl);
2288                 else emit_loadreg(rs1[i],tl);
2289                 if(s1h>=0) emit_mov(s1h,th);
2290                 else emit_loadreg(rs1[i]|64,th);
2291               }
2292               else
2293               if(rs2[i]){
2294                 if(s2l>=0) emit_mov(s2l,tl);
2295                 else emit_loadreg(rs2[i],tl);
2296                 if(s2h>=0) emit_mov(s2h,th);
2297                 else emit_loadreg(rs2[i]|64,th);
2298               }
2299               else{
2300                 emit_zeroreg(tl);
2301                 emit_zeroreg(th);
2302               }
2303             } else
2304             if(opcode2[i]==0x27) { // NOR
2305               if(rs1[i]){
2306                 if(s1l>=0) emit_not(s1l,tl);
2307                 else{
2308                   emit_loadreg(rs1[i],tl);
2309                   emit_not(tl,tl);
2310                 }
2311                 if(s1h>=0) emit_not(s1h,th);
2312                 else{
2313                   emit_loadreg(rs1[i]|64,th);
2314                   emit_not(th,th);
2315                 }
2316               }
2317               else
2318               if(rs2[i]){
2319                 if(s2l>=0) emit_not(s2l,tl);
2320                 else{
2321                   emit_loadreg(rs2[i],tl);
2322                   emit_not(tl,tl);
2323                 }
2324                 if(s2h>=0) emit_not(s2h,th);
2325                 else{
2326                   emit_loadreg(rs2[i]|64,th);
2327                   emit_not(th,th);
2328                 }
2329               }
2330               else {
2331                 emit_movimm(-1,tl);
2332                 emit_movimm(-1,th);
2333               }
2334             }
2335           }
2336         }
2337       }
2338       else
2339       {
2340         // 32 bit
2341         if(tl>=0) {
2342           s1l=get_reg(i_regs->regmap,rs1[i]);
2343           s2l=get_reg(i_regs->regmap,rs2[i]);
2344           if(rs1[i]&&rs2[i]) {
2345             assert(s1l>=0);
2346             assert(s2l>=0);
2347             if(opcode2[i]==0x24) { // AND
2348               emit_and(s1l,s2l,tl);
2349             } else
2350             if(opcode2[i]==0x25) { // OR
2351               emit_or(s1l,s2l,tl);
2352             } else
2353             if(opcode2[i]==0x26) { // XOR
2354               emit_xor(s1l,s2l,tl);
2355             } else
2356             if(opcode2[i]==0x27) { // NOR
2357               emit_or(s1l,s2l,tl);
2358               emit_not(tl,tl);
2359             }
2360           }
2361           else
2362           {
2363             if(opcode2[i]==0x24) { // AND
2364               emit_zeroreg(tl);
2365             } else
2366             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2367               if(rs1[i]){
2368                 if(s1l>=0) emit_mov(s1l,tl);
2369                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2370               }
2371               else
2372               if(rs2[i]){
2373                 if(s2l>=0) emit_mov(s2l,tl);
2374                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2375               }
2376               else emit_zeroreg(tl);
2377             } else
2378             if(opcode2[i]==0x27) { // NOR
2379               if(rs1[i]){
2380                 if(s1l>=0) emit_not(s1l,tl);
2381                 else {
2382                   emit_loadreg(rs1[i],tl);
2383                   emit_not(tl,tl);
2384                 }
2385               }
2386               else
2387               if(rs2[i]){
2388                 if(s2l>=0) emit_not(s2l,tl);
2389                 else {
2390                   emit_loadreg(rs2[i],tl);
2391                   emit_not(tl,tl);
2392                 }
2393               }
2394               else emit_movimm(-1,tl);
2395             }
2396           }
2397         }
2398       }
2399     }
2400   }
2401 }
2402
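// Emit host code for the immediate group: LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU and ANDI/ORI/XORI.  Sources known to be constant are folded
// into an immediate move at compile time.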
2403 void imm16_assemble(int i,struct regstat *i_regs)
2404 {
2405   if (opcode[i]==0x0f) { // LUI
2406     if(rt1[i]) {
2407       signed char t;
2408       t=get_reg(i_regs->regmap,rt1[i]);
2409       //assert(t>=0);
2410       if(t>=0) {
2411         if(!((i_regs->isconst>>t)&1))
2412           emit_movimm(imm[i]<<16,t);
2413       }
2414     }
2415   }
2416   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2417     if(rt1[i]) {
2418       signed char s,t;
2419       t=get_reg(i_regs->regmap,rt1[i]);
2420       s=get_reg(i_regs->regmap,rs1[i]);
2421       if(rs1[i]) {
2422         //assert(t>=0);
2423         //assert(s>=0);
2424         if(t>=0) {
2425           if(!((i_regs->isconst>>t)&1)) {
2426             if(s<0) {
2427               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2428               emit_addimm(t,imm[i],t);
2429             }else{
2430               if(!((i_regs->wasconst>>s)&1))
2431                 emit_addimm(s,imm[i],t);
2432               else
2433                 emit_movimm(constmap[i][s]+imm[i],t);
2434             }
2435           }
2436         }
2437       } else {
2438         if(t>=0) {
2439           if(!((i_regs->isconst>>t)&1))
2440             emit_movimm(imm[i],t);
2441         }
2442       }
2443     }
2444   }
2445   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2446     if(rt1[i]) {
2447       signed char sh,sl,th,tl;
2448       th=get_reg(i_regs->regmap,rt1[i]|64);
2449       tl=get_reg(i_regs->regmap,rt1[i]);
2450       sh=get_reg(i_regs->regmap,rs1[i]|64);
2451       sl=get_reg(i_regs->regmap,rs1[i]);
2452       if(tl>=0) {
2453         if(rs1[i]) {
2454           assert(sh>=0);
2455           assert(sl>=0);
2456           if(th>=0) {
2457             emit_addimm64_32(sh,sl,imm[i],th,tl);
2458           }
2459           else {
2460             emit_addimm(sl,imm[i],tl);
2461           }
2462         } else {
2463           emit_movimm(imm[i],tl);
2464           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2465         }
2466       }
2467     }
2468   }
2469   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2470     if(rt1[i]) {
2471       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2472       signed char sh,sl,t;
2473       t=get_reg(i_regs->regmap,rt1[i]);
2474       sh=get_reg(i_regs->regmap,rs1[i]|64);
2475       sl=get_reg(i_regs->regmap,rs1[i]);
2476       //assert(t>=0);
2477       if(t>=0) {
2478         if(rs1[i]>0) {
2479           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2480           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2481             if(opcode[i]==0x0a) { // SLTI
2482               if(sl<0) {
2483                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2484                 emit_slti32(t,imm[i],t);
2485               }else{
2486                 emit_slti32(sl,imm[i],t);
2487               }
2488             }
2489             else { // SLTIU
2490               if(sl<0) {
2491                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2492                 emit_sltiu32(t,imm[i],t);
2493               }else{
2494                 emit_sltiu32(sl,imm[i],t);
2495               }
2496             }
2497           }else{ // 64-bit
2498             assert(sl>=0);
2499             if(opcode[i]==0x0a) // SLTI
2500               emit_slti64_32(sh,sl,imm[i],t);
2501             else // SLTIU
2502               emit_sltiu64_32(sh,sl,imm[i],t);
2503           }
2504         }else{
2505           // SLTI(U) with r0 is just stupid,
2506           // nonetheless examples can be found
2507           if(opcode[i]==0x0a) // SLTI
2508             if(0<imm[i]) emit_movimm(1,t);
2509             else emit_zeroreg(t);
2510           else // SLTIU
2511           {
2512             if(imm[i]) emit_movimm(1,t);
2513             else emit_zeroreg(t);
2514           }
2515         }
2516       }
2517     }
2518   }
2519   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2520     if(rt1[i]) {
2521       signed char sh,sl,th,tl;
2522       th=get_reg(i_regs->regmap,rt1[i]|64);
2523       tl=get_reg(i_regs->regmap,rt1[i]);
2524       sh=get_reg(i_regs->regmap,rs1[i]|64);
2525       sl=get_reg(i_regs->regmap,rs1[i]);
2526       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2527         if(opcode[i]==0x0c) //ANDI
2528         {
2529           if(rs1[i]) {
2530             if(sl<0) {
2531               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2532               emit_andimm(tl,imm[i],tl);
2533             }else{
2534               if(!((i_regs->wasconst>>sl)&1))
2535                 emit_andimm(sl,imm[i],tl);
2536               else
2537                 emit_movimm(constmap[i][sl]&imm[i],tl);
2538             }
2539           }
2540           else
2541             emit_zeroreg(tl);
2542           if(th>=0) emit_zeroreg(th);
2543         }
2544         else
2545         {
2546           if(rs1[i]) {
2547             if(sl<0) {
2548               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2549             }
2550             if(th>=0) {
2551               if(sh<0) {
2552                 emit_loadreg(rs1[i]|64,th);
2553               }else{
2554                 emit_mov(sh,th);
2555               }
2556             }
2557             if(opcode[i]==0x0d) //ORI
2558             if(sl<0) {
2559               emit_orimm(tl,imm[i],tl);
2560             }else{
2561               if(!((i_regs->wasconst>>sl)&1))
2562                 emit_orimm(sl,imm[i],tl);
2563               else
2564                 emit_movimm(constmap[i][sl]|imm[i],tl);
2565             }
2566             if(opcode[i]==0x0e) //XORI
2567             if(sl<0) {
2568               emit_xorimm(tl,imm[i],tl);
2569             }else{
2570               if(!((i_regs->wasconst>>sl)&1))
2571                 emit_xorimm(sl,imm[i],tl);
2572               else
2573                 emit_movimm(constmap[i][sl]^imm[i],tl);
2574             }
2575           }
2576           else {
2577             emit_movimm(imm[i],tl);
2578             if(th>=0) emit_zeroreg(th);
2579           }
2580         }
2581       }
2582     }
2583   }
2584 }
2585
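// Emit host code for shift-by-immediate instructions: SLL/SRL/SRA and the
// 64-bit DSLL/DSRL/DSRA variants, including the DSLL32/DSRL32/DSRA32 forms.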
2586 void shiftimm_assemble(int i,struct regstat *i_regs)
2587 {
2588   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2589   {
2590     if(rt1[i]) {
2591       signed char s,t;
2592       t=get_reg(i_regs->regmap,rt1[i]);
2593       s=get_reg(i_regs->regmap,rs1[i]);
2594       //assert(t>=0);
2595       if(t>=0){
2596         if(rs1[i]==0)
2597         {
2598           emit_zeroreg(t);
2599         }
2600         else
2601         {
2602           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2603           if(imm[i]) {
2604             if(opcode2[i]==0) // SLL
2605             {
2606               emit_shlimm(s<0?t:s,imm[i],t);
2607             }
2608             if(opcode2[i]==2) // SRL
2609             {
2610               emit_shrimm(s<0?t:s,imm[i],t);
2611             }
2612             if(opcode2[i]==3) // SRA
2613             {
2614               emit_sarimm(s<0?t:s,imm[i],t);
2615             }
2616           }else{
2617             // Shift by zero
2618             if(s>=0 && s!=t) emit_mov(s,t);
2619           }
2620         }
2621       }
2622       //emit_storereg(rt1[i],t); //DEBUG
2623     }
2624   }
2625   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2626   {
2627     if(rt1[i]) {
2628       signed char sh,sl,th,tl;
2629       th=get_reg(i_regs->regmap,rt1[i]|64);
2630       tl=get_reg(i_regs->regmap,rt1[i]);
2631       sh=get_reg(i_regs->regmap,rs1[i]|64);
2632       sl=get_reg(i_regs->regmap,rs1[i]);
2633       if(tl>=0) {
2634         if(rs1[i]==0)
2635         {
2636           emit_zeroreg(tl);
2637           if(th>=0) emit_zeroreg(th);
2638         }
2639         else
2640         {
2641           assert(sl>=0);
2642           assert(sh>=0);
2643           if(imm[i]) {
2644             if(opcode2[i]==0x38) // DSLL
2645             {
2646               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2647               emit_shlimm(sl,imm[i],tl);
2648             }
2649             if(opcode2[i]==0x3a) // DSRL
2650             {
2651               emit_shrdimm(sl,sh,imm[i],tl);
2652               if(th>=0) emit_shrimm(sh,imm[i],th);
2653             }
2654             if(opcode2[i]==0x3b) // DSRA
2655             {
2656               emit_shrdimm(sl,sh,imm[i],tl);
2657               if(th>=0) emit_sarimm(sh,imm[i],th);
2658             }
2659           }else{
2660             // Shift by zero
2661             if(sl!=tl) emit_mov(sl,tl);
2662             if(th>=0&&sh!=th) emit_mov(sh,th);
2663           }
2664         }
2665       }
2666     }
2667   }
2668   if(opcode2[i]==0x3c) // DSLL32
2669   {
2670     if(rt1[i]) {
2671       signed char sl,tl,th;
2672       tl=get_reg(i_regs->regmap,rt1[i]);
2673       th=get_reg(i_regs->regmap,rt1[i]|64);
2674       sl=get_reg(i_regs->regmap,rs1[i]);
2675       if(th>=0||tl>=0){
2676         assert(tl>=0);
2677         assert(th>=0);
2678         assert(sl>=0);
2679         emit_mov(sl,th);
2680         emit_zeroreg(tl);
2681         if(imm[i]>32)
2682         {
2683           emit_shlimm(th,imm[i]&31,th);
2684         }
2685       }
2686     }
2687   }
2688   if(opcode2[i]==0x3e) // DSRL32
2689   {
2690     if(rt1[i]) {
2691       signed char sh,tl,th;
2692       tl=get_reg(i_regs->regmap,rt1[i]);
2693       th=get_reg(i_regs->regmap,rt1[i]|64);
2694       sh=get_reg(i_regs->regmap,rs1[i]|64);
2695       if(tl>=0){
2696         assert(sh>=0);
2697         emit_mov(sh,tl);
2698         if(th>=0) emit_zeroreg(th);
2699         if(imm[i]>32)
2700         {
2701           emit_shrimm(tl,imm[i]&31,tl);
2702         }
2703       }
2704     }
2705   }
2706   if(opcode2[i]==0x3f) // DSRA32
2707   {
2708     if(rt1[i]) {
2709       signed char sh,tl;
2710       tl=get_reg(i_regs->regmap,rt1[i]);
2711       sh=get_reg(i_regs->regmap,rs1[i]|64);
2712       if(tl>=0){
2713         assert(sh>=0);
2714         emit_mov(sh,tl);
2715         if(imm[i]>32)
2716         {
2717           emit_sarimm(tl,imm[i]&31,tl);
2718         }
2719       }
2720     }
2721   }
2722 }
2723
2724 #ifndef shift_assemble
2725 void shift_assemble(int i,struct regstat *i_regs)
2726 {
2727   printf("Need shift_assemble for this architecture.\n");
2728   exit(1);
2729 }
2730 #endif
2731
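// Emit host code for loads.  Addresses known from constant propagation to
// hit RAM are accessed inline; otherwise a compare against RAM_SIZE (or a
// TLB lookup) is emitted and a LOAD*_STUB covers the out-of-range path.
// Constant addresses known to be outside RAM go through inline_readstub.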
2732 void load_assemble(int i,struct regstat *i_regs)
2733 {
2734   int s,th,tl,addr,map=-1;
2735   int offset;
2736   int jaddr=0;
2737   int memtarget=0,c=0;
2738   u_int hr,reglist=0;
2739   th=get_reg(i_regs->regmap,rt1[i]|64);
2740   tl=get_reg(i_regs->regmap,rt1[i]);
2741   s=get_reg(i_regs->regmap,rs1[i]);
2742   offset=imm[i];
2743   for(hr=0;hr<HOST_REGS;hr++) {
2744     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2745   }
2746   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2747   if(s>=0) {
2748     c=(i_regs->wasconst>>s)&1;
2749     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2750     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2751   }
2752   //printf("load_assemble: c=%d\n",c);
2753   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2754   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2755 #ifdef PCSX
2756   if(tl<0) {
2757     if(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80) {
2758       // could be FIFO, must perform the read
2759       assem_debug("(forced read)\n");
2760       tl=get_reg(i_regs->regmap,-1);
2761       assert(tl>=0);
2762     }
2763   }
2764   if(offset||s<0||c) addr=tl;
2765   else addr=s;
2766 #endif
2767   if(tl>=0) {
2768     //assert(tl>=0);
2769     //assert(rt1[i]);
2770     reglist&=~(1<<tl);
2771     if(th>=0) reglist&=~(1<<th);
2772     if(!using_tlb) {
2773       if(!c) {
2774 //#define R29_HACK 1
2775         #ifdef R29_HACK
2776         // Strmnnrmn's speed hack
2777         if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2778         #endif
2779         {
2780           emit_cmpimm(addr,RAM_SIZE);
2781           jaddr=(int)out;
2782           #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2783           // Hint to branch predictor that the branch is unlikely to be taken
2784           if(rs1[i]>=28)
2785             emit_jno_unlikely(0);
2786           else
2787           #endif
2788           emit_jno(0);
2789         }
2790       }
2791     }else{ // using tlb
2792       int x=0;
2793       if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2794       if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2795       map=get_reg(i_regs->regmap,TLREG);
2796       assert(map>=0);
2797       map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2798       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2799     }
2800     if (opcode[i]==0x20) { // LB
2801       if(!c||memtarget) {
2802         #ifdef HOST_IMM_ADDR32
2803         if(c)
2804           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2805         else
2806         #endif
2807         {
2808           //emit_xorimm(addr,3,tl);
2809           //gen_tlb_addr_r(tl,map);
2810           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2811           int x=0;
2812 #ifdef BIG_ENDIAN_MIPS
2813           if(!c) emit_xorimm(addr,3,tl);
2814           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2815 #else
2816           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2817           else if (tl!=addr) emit_mov(addr,tl);
2818 #endif
2819           emit_movsbl_indexed_tlb(x,tl,map,tl);
2820         }
2821         if(jaddr)
2822           add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2823       }
2824       else
2825         inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2826     }
2827     if (opcode[i]==0x21) { // LH
2828       if(!c||memtarget) {
2829         #ifdef HOST_IMM_ADDR32
2830         if(c)
2831           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2832         else
2833         #endif
2834         {
2835           int x=0;
2836 #ifdef BIG_ENDIAN_MIPS
2837           if(!c) emit_xorimm(addr,2,tl);
2838           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2839 #else
2840           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2841           else if (tl!=addr) emit_mov(addr,tl);
2842 #endif
2843           //#ifdef
2844           //emit_movswl_indexed_tlb(x,tl,map,tl);
2845           //else
2846           if(map>=0) {
2847             gen_tlb_addr_r(tl,map);
2848             emit_movswl_indexed(x,tl,tl);
2849           }else
2850             emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
2851         }
2852         if(jaddr)
2853           add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2854       }
2855       else
2856         inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2857     }
2858     if (opcode[i]==0x23) { // LW
2859       if(!c||memtarget) {
2860         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2861         #ifdef HOST_IMM_ADDR32
2862         if(c)
2863           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2864         else
2865         #endif
2866         emit_readword_indexed_tlb(0,addr,map,tl);
2867         if(jaddr)
2868           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2869       }
2870       else
2871         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2872     }
2873     if (opcode[i]==0x24) { // LBU
2874       if(!c||memtarget) {
2875         #ifdef HOST_IMM_ADDR32
2876         if(c)
2877           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2878         else
2879         #endif
2880         {
2881           //emit_xorimm(addr,3,tl);
2882           //gen_tlb_addr_r(tl,map);
2883           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2884           int x=0;
2885 #ifdef BIG_ENDIAN_MIPS
2886           if(!c) emit_xorimm(addr,3,tl);
2887           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2888 #else
2889           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2890           else if (tl!=addr) emit_mov(addr,tl);
2891 #endif
2892           emit_movzbl_indexed_tlb(x,tl,map,tl);
2893         }
2894         if(jaddr)
2895           add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2896       }
2897       else
2898         inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2899     }
2900     if (opcode[i]==0x25) { // LHU
2901       if(!c||memtarget) {
2902         #ifdef HOST_IMM_ADDR32
2903         if(c)
2904           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2905         else
2906         #endif
2907         {
2908           int x=0;
2909 #ifdef BIG_ENDIAN_MIPS
2910           if(!c) emit_xorimm(addr,2,tl);
2911           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2912 #else
2913           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2914           else if (tl!=addr) emit_mov(addr,tl);
2915 #endif
2916           //#ifdef
2917           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2918           //#else
2919           if(map>=0) {
2920             gen_tlb_addr_r(tl,map);
2921             emit_movzwl_indexed(x,tl,tl);
2922           }else
2923             emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
2924           if(jaddr)
2925             add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2926         }
2927       }
2928       else
2929         inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2930     }
2931     if (opcode[i]==0x27) { // LWU
2932       assert(th>=0);
2933       if(!c||memtarget) {
2934         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2935         #ifdef HOST_IMM_ADDR32
2936         if(c)
2937           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2938         else
2939         #endif
2940         emit_readword_indexed_tlb(0,addr,map,tl);
2941         if(jaddr)
2942           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2943       }
2944       else {
2945         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2946       }
2947       emit_zeroreg(th);
2948     }
2949     if (opcode[i]==0x37) { // LD
2950       if(!c||memtarget) {
2951         //gen_tlb_addr_r(tl,map);
2952         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2953         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2954         #ifdef HOST_IMM_ADDR32
2955         if(c)
2956           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2957         else
2958         #endif
2959         emit_readdword_indexed_tlb(0,addr,map,th,tl);
2960         if(jaddr)
2961           add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2962       }
2963       else
2964         inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2965     }
2966     //emit_storereg(rt1[i],tl); // DEBUG
2967   }
2968   //if(opcode[i]==0x23)
2969   //if(opcode[i]==0x24)
2970   //if(opcode[i]==0x23||opcode[i]==0x24)
2971   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2972   {
2973     //emit_pusha();
2974     save_regs(0x100f);
2975         emit_readword((int)&last_count,ECX);
2976         #ifdef __i386__
2977         if(get_reg(i_regs->regmap,CCREG)<0)
2978           emit_loadreg(CCREG,HOST_CCREG);
2979         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2980         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2981         emit_writeword(HOST_CCREG,(int)&Count);
2982         #endif
2983         #ifdef __arm__
2984         if(get_reg(i_regs->regmap,CCREG)<0)
2985           emit_loadreg(CCREG,0);
2986         else
2987           emit_mov(HOST_CCREG,0);
2988         emit_add(0,ECX,0);
2989         emit_addimm(0,2*ccadj[i],0);
2990         emit_writeword(0,(int)&Count);
2991         #endif
2992     emit_call((int)memdebug);
2993     //emit_popa();
2994     restore_regs(0x100f);
2995   }/**/
2996 }
2997
2998 #ifndef loadlr_assemble
2999 void loadlr_assemble(int i,struct regstat *i_regs)
3000 {
3001   printf("Need loadlr_assemble for this architecture.\n");
3002   exit(1);
3003 }
3004 #endif
3005
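// Emit host code for SB/SH/SW/SD: RAM range check (or TLB write lookup),
// the inline write, a STORE*_STUB for the out-of-range path, and an
// invalid_code check (INVCODE_STUB) so that writes over already-compiled
// code are detected.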
3006 void store_assemble(int i,struct regstat *i_regs)
3007 {
3008   int s,th,tl,map=-1;
3009   int addr,temp;
3010   int offset;
3011   int jaddr=0,jaddr2,type;
3012   int memtarget=0,c=0;
3013   int agr=AGEN1+(i&1);
3014   u_int hr,reglist=0;
3015   th=get_reg(i_regs->regmap,rs2[i]|64);
3016   tl=get_reg(i_regs->regmap,rs2[i]);
3017   s=get_reg(i_regs->regmap,rs1[i]);
3018   temp=get_reg(i_regs->regmap,agr);
3019   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3020   offset=imm[i];
3021   if(s>=0) {
3022     c=(i_regs->wasconst>>s)&1;
3023     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3024     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3025   }
3026   assert(tl>=0);
3027   assert(temp>=0);
3028   for(hr=0;hr<HOST_REGS;hr++) {
3029     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3030   }
3031   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3032   if(offset||s<0||c) addr=temp;
3033   else addr=s;
3034   if(!using_tlb) {
3035     if(!c) {
3036       #ifdef R29_HACK
3037       // Strmnnrmn's speed hack
3038       memtarget=1;
3039       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3040       #endif
3041       emit_cmpimm(addr,RAM_SIZE);
3042       #ifdef DESTRUCTIVE_SHIFT
3043       if(s==addr) emit_mov(s,temp);
3044       #endif
3045       #ifdef R29_HACK
3046       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3047       #endif
3048       {
3049         jaddr=(int)out;
3050         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3051         // Hint to branch predictor that the branch is unlikely to be taken
3052         if(rs1[i]>=28)
3053           emit_jno_unlikely(0);
3054         else
3055         #endif
3056         emit_jno(0);
3057       }
3058     }
3059   }else{ // using tlb
3060     int x=0;
3061     if (opcode[i]==0x28) x=3; // SB
3062     if (opcode[i]==0x29) x=2; // SH
3063     map=get_reg(i_regs->regmap,TLREG);
3064     assert(map>=0);
3065     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3066     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3067   }
3068
3069   if (opcode[i]==0x28) { // SB
3070     if(!c||memtarget) {
3071       int x=0;
3072 #ifdef BIG_ENDIAN_MIPS
3073       if(!c) emit_xorimm(addr,3,temp);
3074       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3075 #else
3076       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3077       else if (addr!=temp) emit_mov(addr,temp);
3078 #endif
3079       //gen_tlb_addr_w(temp,map);
3080       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3081       emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3082     }
3083     type=STOREB_STUB;
3084   }
3085   if (opcode[i]==0x29) { // SH
3086     if(!c||memtarget) {
3087       int x=0;
3088 #ifdef BIG_ENDIAN_MIPS
3089       if(!c) emit_xorimm(addr,2,temp);
3090       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3091 #else
3092       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3093       else if (addr!=temp) emit_mov(addr,temp);
3094 #endif
3095       //#ifdef
3096       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3097       //#else
3098       if(map>=0) {
3099         gen_tlb_addr_w(temp,map);
3100         emit_writehword_indexed(tl,x,temp);
3101       }else
3102         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3103     }
3104     type=STOREH_STUB;
3105   }
3106   if (opcode[i]==0x2B) { // SW
3107     if(!c||memtarget)
3108       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3109       emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3110     type=STOREW_STUB;
3111   }
3112   if (opcode[i]==0x3F) { // SD
3113     if(!c||memtarget) {
3114       if(rs2[i]) {
3115         assert(th>=0);
3116         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3117         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3118         emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3119       }else{
3120         // Store zero
3121         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3122         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3123         emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3124       }
3125     }
3126     type=STORED_STUB;
3127   }
3128   if(!using_tlb&&(!c||memtarget))
3129     // addr could be a temp, make sure it survives STORE*_STUB
3130     reglist|=1<<addr;
3131   if(jaddr) {
3132     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3133   } else if(!memtarget) {
3134     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3135   }
3136   if(!using_tlb) {
3137     if(!c||memtarget) {
3138       #ifdef DESTRUCTIVE_SHIFT
3139       // The x86 shift operation is 'destructive'; it overwrites the
3140       // source register, so we need to make a copy first and use that.
3141       addr=temp;
3142       #endif
3143       #if defined(HOST_IMM8)
3144       int ir=get_reg(i_regs->regmap,INVCP);
3145       assert(ir>=0);
3146       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3147       #else
3148       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3149       #endif
3150       jaddr2=(int)out;
3151       emit_jne(0);
3152       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3153     }
3154   }
3155   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3156   //if(opcode[i]==0x2B || opcode[i]==0x28)
3157   //if(opcode[i]==0x2B || opcode[i]==0x29)
3158   //if(opcode[i]==0x2B)
3159   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3160   {
3161     //emit_pusha();
3162     save_regs(0x100f);
3163         emit_readword((int)&last_count,ECX);
3164         #ifdef __i386__
3165         if(get_reg(i_regs->regmap,CCREG)<0)
3166           emit_loadreg(CCREG,HOST_CCREG);
3167         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3168         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3169         emit_writeword(HOST_CCREG,(int)&Count);
3170         #endif
3171         #ifdef __arm__
3172         if(get_reg(i_regs->regmap,CCREG)<0)
3173           emit_loadreg(CCREG,0);
3174         else
3175           emit_mov(HOST_CCREG,0);
3176         emit_add(0,ECX,0);
3177         emit_addimm(0,2*ccadj[i],0);
3178         emit_writeword(0,(int)&Count);
3179         #endif
3180     emit_call((int)memdebug);
3181     //emit_popa();
3182     restore_regs(0x100f);
3183   }/**/
3184 }
3185
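// Emit host code for the unaligned stores SWL/SWR/SDL/SDR: the low address
// bits select one of four alignment cases, each writing the appropriate
// byte/halfword/word pieces; SDL/SDR additionally write the second word.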
3186 void storelr_assemble(int i,struct regstat *i_regs)
3187 {
3188   int s,th,tl;
3189   int temp;
3190   int temp2;
3191   int offset;
3192   int jaddr=0,jaddr2;
3193   int case1,case2,case3;
3194   int done0,done1,done2;
3195   int memtarget=0,c=0;
3196   int agr=AGEN1+(i&1);
3197   u_int hr,reglist=0;
3198   th=get_reg(i_regs->regmap,rs2[i]|64);
3199   tl=get_reg(i_regs->regmap,rs2[i]);
3200   s=get_reg(i_regs->regmap,rs1[i]);
3201   temp=get_reg(i_regs->regmap,agr);
3202   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3203   offset=imm[i];
3204   if(s>=0) {
3205     c=(i_regs->isconst>>s)&1;
3206     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3207     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3208   }
3209   assert(tl>=0);
3210   for(hr=0;hr<HOST_REGS;hr++) {
3211     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3212   }
3213   if(tl>=0) {
3214     assert(temp>=0);
3215     if(!using_tlb) {
3216       if(!c) {
3217         emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3218         if(!offset&&s!=temp) emit_mov(s,temp);
3219         jaddr=(int)out;
3220         emit_jno(0);
3221       }
3222       else
3223       {
3224         if(!memtarget||!rs1[i]) {
3225           jaddr=(int)out;
3226           emit_jmp(0);
3227         }
3228       }
3229       if((u_int)rdram!=0x80000000) 
3230         emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3231     }else{ // using tlb
3232       int map=get_reg(i_regs->regmap,TLREG);
3233       assert(map>=0);
3234       map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3235       if(!c&&!offset&&s>=0) emit_mov(s,temp);
3236       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3237       if(!jaddr&&!memtarget) {
3238         jaddr=(int)out;
3239         emit_jmp(0);
3240       }
3241       gen_tlb_addr_w(temp,map);
3242     }
3243
3244     if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3245       temp2=get_reg(i_regs->regmap,FTEMP);
3246       if(!rs2[i]) temp2=th=tl;
3247     }
3248
3249 #ifndef BIG_ENDIAN_MIPS
3250     emit_xorimm(temp,3,temp);
3251 #endif
3252     emit_testimm(temp,2);
3253     case2=(int)out;
3254     emit_jne(0);
3255     emit_testimm(temp,1);
3256     case1=(int)out;
3257     emit_jne(0);
3258     // 0
3259     if (opcode[i]==0x2A) { // SWL
3260       emit_writeword_indexed(tl,0,temp);
3261     }
3262     if (opcode[i]==0x2E) { // SWR
3263       emit_writebyte_indexed(tl,3,temp);
3264     }
3265     if (opcode[i]==0x2C) { // SDL
3266       emit_writeword_indexed(th,0,temp);
3267       if(rs2[i]) emit_mov(tl,temp2);
3268     }
3269     if (opcode[i]==0x2D) { // SDR
3270       emit_writebyte_indexed(tl,3,temp);
3271       if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3272     }
3273     done0=(int)out;
3274     emit_jmp(0);
3275     // 1
3276     set_jump_target(case1,(int)out);
3277     if (opcode[i]==0x2A) { // SWL
3278       // Write 3 msb into three least significant bytes
3279       if(rs2[i]) emit_rorimm(tl,8,tl);
3280       emit_writehword_indexed(tl,-1,temp);
3281       if(rs2[i]) emit_rorimm(tl,16,tl);
3282       emit_writebyte_indexed(tl,1,temp);
3283       if(rs2[i]) emit_rorimm(tl,8,tl);
3284     }
3285     if (opcode[i]==0x2E) { // SWR
3286       // Write two lsb into two most significant bytes
3287       emit_writehword_indexed(tl,1,temp);
3288     }
3289     if (opcode[i]==0x2C) { // SDL
3290       if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3291       // Write 3 msb into three least significant bytes
3292       if(rs2[i]) emit_rorimm(th,8,th);
3293       emit_writehword_indexed(th,-1,temp);
3294       if(rs2[i]) emit_rorimm(th,16,th);
3295       emit_writebyte_indexed(th,1,temp);
3296       if(rs2[i]) emit_rorimm(th,8,th);
3297     }
3298     if (opcode[i]==0x2D) { // SDR
3299       if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3300       // Write two lsb into two most significant bytes
3301       emit_writehword_indexed(tl,1,temp);
3302     }
3303     done1=(int)out;
3304     emit_jmp(0);
3305     // 2
3306     set_jump_target(case2,(int)out);
3307     emit_testimm(temp,1);
3308     case3=(int)out;
3309     emit_jne(0);
3310     if (opcode[i]==0x2A) { // SWL
3311       // Write two msb into two least significant bytes
3312       if(rs2[i]) emit_rorimm(tl,16,tl);
3313       emit_writehword_indexed(tl,-2,temp);
3314       if(rs2[i]) emit_rorimm(tl,16,tl);
3315     }
3316     if (opcode[i]==0x2E) { // SWR
3317       // Write 3 lsb into three most significant bytes
3318       emit_writebyte_indexed(tl,-1,temp);
3319       if(rs2[i]) emit_rorimm(tl,8,tl);
3320       emit_writehword_indexed(tl,0,temp);
3321       if(rs2[i]) emit_rorimm(tl,24,tl);
3322     }
3323     if (opcode[i]==0x2C) { // SDL
3324       if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3325       // Write two msb into two least significant bytes
3326       if(rs2[i]) emit_rorimm(th,16,th);
3327       emit_writehword_indexed(th,-2,temp);
3328       if(rs2[i]) emit_rorimm(th,16,th);
3329     }
3330     if (opcode[i]==0x2D) { // SDR
3331       if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3332       // Write 3 lsb into three most significant bytes
3333       emit_writebyte_indexed(tl,-1,temp);
3334       if(rs2[i]) emit_rorimm(tl,8,tl);
3335       emit_writehword_indexed(tl,0,temp);
3336       if(rs2[i]) emit_rorimm(tl,24,tl);
3337     }
3338     done2=(int)out;
3339     emit_jmp(0);
3340     // 3
3341     set_jump_target(case3,(int)out);
3342     if (opcode[i]==0x2A) { // SWL
3343       // Write msb into least significant byte
3344       if(rs2[i]) emit_rorimm(tl,24,tl);
3345       emit_writebyte_indexed(tl,-3,temp);
3346       if(rs2[i]) emit_rorimm(tl,8,tl);
3347     }
3348     if (opcode[i]==0x2E) { // SWR
3349       // Write entire word
3350       emit_writeword_indexed(tl,-3,temp);
3351     }
3352     if (opcode[i]==0x2C) { // SDL
3353       if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3354       // Write msb into least significant byte
3355       if(rs2[i]) emit_rorimm(th,24,th);
3356       emit_writebyte_indexed(th,-3,temp);
3357       if(rs2[i]) emit_rorimm(th,8,th);
3358     }
3359     if (opcode[i]==0x2D) { // SDR
3360       if(rs2[i]) emit_mov(th,temp2);
3361       // Write entire word
3362       emit_writeword_indexed(tl,-3,temp);
3363     }
3364     set_jump_target(done0,(int)out);
3365     set_jump_target(done1,(int)out);
3366     set_jump_target(done2,(int)out);
3367     if (opcode[i]==0x2C) { // SDL
3368       emit_testimm(temp,4);
3369       done0=(int)out;
3370       emit_jne(0);
3371       emit_andimm(temp,~3,temp);
3372       emit_writeword_indexed(temp2,4,temp);
3373       set_jump_target(done0,(int)out);
3374     }
3375     if (opcode[i]==0x2D) { // SDR
3376       emit_testimm(temp,4);
3377       done0=(int)out;
3378       emit_jeq(0);
3379       emit_andimm(temp,~3,temp);
3380       emit_writeword_indexed(temp2,-4,temp);
3381       set_jump_target(done0,(int)out);
3382     }
3383     if(!c||!memtarget)
3384       add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3385   }
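  // The SWL/SWR (and SDL/SDR) cases above touch only the bytes the unaligned
  // store is supposed to modify for each value of addr&3; the rotates line
  // the source register up with the byte/halfword stores.  Below, stores are
  // also checked against invalid_code[] so that a write into a page holding
  // already-compiled blocks goes through the invalidation stub
  // (self-modifying code detection).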
3386   if(!using_tlb) {
3387     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3388     #if defined(HOST_IMM8)
3389     int ir=get_reg(i_regs->regmap,INVCP);
3390     assert(ir>=0);
3391     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3392     #else
3393     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3394     #endif
3395     jaddr2=(int)out;
3396     emit_jne(0);
3397     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3398   }
3399   /*
3400     emit_pusha();
3401     //save_regs(0x100f);
3402         emit_readword((int)&last_count,ECX);
3403         if(get_reg(i_regs->regmap,CCREG)<0)
3404           emit_loadreg(CCREG,HOST_CCREG);
3405         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3406         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3407         emit_writeword(HOST_CCREG,(int)&Count);
3408     emit_call((int)memdebug);
3409     emit_popa();
3410     //restore_regs(0x100f);
3411   /**/
3412 }
3413
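// Assemble COP1 loads/stores: LWC1 (0x31), LDC1 (0x35), SWC1 (0x39) and
// SDC1 (0x3D).  The CU1 bit of the status register (0x20000000) is tested
// first and an FP_STUB is taken when the FPU is unusable; the access itself
// goes straight to RAM, with a LOAD*/STORE* stub as the fall-back for
// addresses outside it.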
3414 void c1ls_assemble(int i,struct regstat *i_regs)
3415 {
3416 #ifndef DISABLE_COP1
3417   int s,th,tl;
3418   int temp,ar;
3419   int map=-1;
3420   int offset;
3421   int c=0;
3422   int jaddr,jaddr2=0,jaddr3,type;
3423   int agr=AGEN1+(i&1);
3424   u_int hr,reglist=0;
3425   th=get_reg(i_regs->regmap,FTEMP|64);
3426   tl=get_reg(i_regs->regmap,FTEMP);
3427   s=get_reg(i_regs->regmap,rs1[i]);
3428   temp=get_reg(i_regs->regmap,agr);
3429   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3430   offset=imm[i];
3431   assert(tl>=0);
3432   assert(rs1[i]>0);
3433   assert(temp>=0);
3434   for(hr=0;hr<HOST_REGS;hr++) {
3435     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3436   }
3437   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3438   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3439   {
3440     // Loads use a temporary register which we need to save
3441     reglist|=1<<temp;
3442   }
3443   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3444     ar=temp;
3445   else // LWC1/LDC1
3446     ar=tl;
3447   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3448   //else c=(i_regs->wasconst>>s)&1;
3449   if(s>=0) c=(i_regs->wasconst>>s)&1;
3450   // Check cop1 unusable
3451   if(!cop1_usable) {
3452     signed char rs=get_reg(i_regs->regmap,CSREG);
3453     assert(rs>=0);
3454     emit_testimm(rs,0x20000000);
3455     jaddr=(int)out;
3456     emit_jeq(0);
3457     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3458     cop1_usable=1;
3459   }
3460   if (opcode[i]==0x39) { // SWC1 (get float address)
3461     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3462   }
3463   if (opcode[i]==0x3D) { // SDC1 (get double address)
3464     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3465   }
3466   // Generate address + offset
3467   if(!using_tlb) {
3468     if(!c)
3469       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3470   }
3471   else
3472   {
3473     map=get_reg(i_regs->regmap,TLREG);
3474     assert(map>=0);
3475     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3476       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3477     }
3478     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3479       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3480     }
3481   }
3482   if (opcode[i]==0x39) { // SWC1 (read float)
3483     emit_readword_indexed(0,tl,tl);
3484   }
3485   if (opcode[i]==0x3D) { // SDC1 (read double)
3486     emit_readword_indexed(4,tl,th);
3487     emit_readword_indexed(0,tl,tl);
3488   }
3489   if (opcode[i]==0x31) { // LWC1 (get target address)
3490     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3491   }
3492   if (opcode[i]==0x35) { // LDC1 (get target address)
3493     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3494   }
3495   if(!using_tlb) {
3496     if(!c) {
3497       jaddr2=(int)out;
3498       emit_jno(0);
3499     }
3500     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3501       jaddr2=(int)out;
3502       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3503     }
3504     #ifdef DESTRUCTIVE_SHIFT
3505     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3506       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3507     }
3508     #endif
3509   }else{
3510     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3511       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3512     }
3513     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3514       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3515     }
3516   }
3517   if (opcode[i]==0x31) { // LWC1
3518     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3519     //gen_tlb_addr_r(ar,map);
3520     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3521     #ifdef HOST_IMM_ADDR32
3522     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3523     else
3524     #endif
3525     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3526     type=LOADW_STUB;
3527   }
3528   if (opcode[i]==0x35) { // LDC1
3529     assert(th>=0);
3530     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3531     //gen_tlb_addr_r(ar,map);
3532     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3533     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3534     #ifdef HOST_IMM_ADDR32
3535     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3536     else
3537     #endif
3538     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3539     type=LOADD_STUB;
3540   }
3541   if (opcode[i]==0x39) { // SWC1
3542     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3543     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3544     type=STOREW_STUB;
3545   }
3546   if (opcode[i]==0x3D) { // SDC1
3547     assert(th>=0);
3548     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3549     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3550     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3551     type=STORED_STUB;
3552   }
3553   if(!using_tlb) {
3554     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3555       #ifndef DESTRUCTIVE_SHIFT
3556       temp=offset||c||s<0?ar:s;
3557       #endif
3558       #if defined(HOST_IMM8)
3559       int ir=get_reg(i_regs->regmap,INVCP);
3560       assert(ir>=0);
3561       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3562       #else
3563       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3564       #endif
3565       jaddr3=(int)out;
3566       emit_jne(0);
3567       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3568     }
3569   }
3570   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3571   if (opcode[i]==0x31) { // LWC1 (write float)
3572     emit_writeword_indexed(tl,0,temp);
3573   }
3574   if (opcode[i]==0x35) { // LDC1 (write double)
3575     emit_writeword_indexed(th,4,temp);
3576     emit_writeword_indexed(tl,0,temp);
3577   }
3578   //if(opcode[i]==0x39)
3579   /*if(opcode[i]==0x39||opcode[i]==0x31)
3580   {
3581     emit_pusha();
3582         emit_readword((int)&last_count,ECX);
3583         if(get_reg(i_regs->regmap,CCREG)<0)
3584           emit_loadreg(CCREG,HOST_CCREG);
3585         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3586         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3587         emit_writeword(HOST_CCREG,(int)&Count);
3588     emit_call((int)memdebug);
3589     emit_popa();
3590   }/**/
3591 #else
3592   cop1_unusable(i, i_regs);
3593 #endif
3594 }
3595
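// Assemble COP2 (GTE) loads/stores: LWC2 (0x32) and SWC2 (0x3A).  The value
// is moved between memory and the GTE data register selected by the rt
// field using cop2_get_dreg/cop2_put_dreg; the RAM-range check and the
// invalid_code[] test mirror the ordinary load/store paths above.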
3596 void c2ls_assemble(int i,struct regstat *i_regs)
3597 {
3598   int s,tl;
3599   int ar;
3600   int offset;
3601   int c=0;
3602   int jaddr,jaddr2=0,jaddr3,type;
3603   int agr=AGEN1+(i&1);
3604   u_int hr,reglist=0;
3605   u_int copr=(source[i]>>16)&0x1f;
3606   s=get_reg(i_regs->regmap,rs1[i]);
3607   tl=get_reg(i_regs->regmap,FTEMP);
3608   offset=imm[i];
3609   assert(rs1[i]>0);
3610   assert(tl>=0);
3611   assert(!using_tlb);
3612
3613   for(hr=0;hr<HOST_REGS;hr++) {
3614     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3615   }
3616   if(i_regs->regmap[HOST_CCREG]==CCREG)
3617     reglist&=~(1<<HOST_CCREG);
3618
3619   // get the address
3620   if (opcode[i]==0x3a) { // SWC2
3621     ar=get_reg(i_regs->regmap,agr);
3622     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3623     reglist|=1<<ar;
3624   } else { // LWC2
3625     ar=tl;
3626   }
3627   if (!offset&&!c&&s>=0) ar=s;
3628   assert(ar>=0);
3629
3630   if (opcode[i]==0x3a) { // SWC2
3631     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3632   }
3633   if(s>=0) c=(i_regs->wasconst>>s)&1;
3634   if(!c) {
3635     emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3636     jaddr2=(int)out;
3637     emit_jno(0);
3638   }
3639   else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3640     jaddr2=(int)out;
3641     emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3642   }
3643   if (opcode[i]==0x32) { // LWC2
3644     #ifdef HOST_IMM_ADDR32
3645     if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3646     else
3647     #endif
3648     emit_readword_indexed(0,ar,tl);
3649     type=LOADW_STUB;
3650   }
3651   if (opcode[i]==0x3a) { // SWC2
3652 #ifdef DESTRUCTIVE_SHIFT
3653     if(!offset&&!c&&s>=0) emit_mov(s,ar);
3654 #endif
3655     emit_writeword_indexed(tl,0,ar);
3656     type=STOREW_STUB;
3657   }
3658   if(jaddr2)
3659     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3660   if (opcode[i]==0x3a) { // SWC2
3661 #if defined(HOST_IMM8)
3662     int ir=get_reg(i_regs->regmap,INVCP);
3663     assert(ir>=0);
3664     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3665 #else
3666     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3667 #endif
3668     jaddr3=(int)out;
3669     emit_jne(0);
3670     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3671   }
3672   if (opcode[i]==0x32) { // LWC2
3673     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3674   }
3675 }
3676
3677 #ifndef multdiv_assemble
3678 void multdiv_assemble(int i,struct regstat *i_regs)
3679 {
3680   printf("Need multdiv_assemble for this architecture.\n");
3681   exit(1);
3682 }
3683 #endif
3684
3685 void mov_assemble(int i,struct regstat *i_regs)
3686 {
3687   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3688   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3689   assert(rt1[i]>0);
3690   if(rt1[i]) {
3691     signed char sh,sl,th,tl;
3692     th=get_reg(i_regs->regmap,rt1[i]|64);
3693     tl=get_reg(i_regs->regmap,rt1[i]);
3694     //assert(tl>=0);
3695     if(tl>=0) {
3696       sh=get_reg(i_regs->regmap,rs1[i]|64);
3697       sl=get_reg(i_regs->regmap,rs1[i]);
3698       if(sl>=0) emit_mov(sl,tl);
3699       else emit_loadreg(rs1[i],tl);
3700       if(th>=0) {
3701         if(sh>=0) emit_mov(sh,th);
3702         else emit_loadreg(rs1[i]|64,th);
3703       }
3704     }
3705   }
3706 }
3707
3708 #ifndef fconv_assemble
3709 void fconv_assemble(int i,struct regstat *i_regs)
3710 {
3711   printf("Need fconv_assemble for this architecture.\n");
3712   exit(1);
3713 }
3714 #endif
3715
3716 #if 0
3717 void float_assemble(int i,struct regstat *i_regs)
3718 {
3719   printf("Need float_assemble for this architecture.\n");
3720   exit(1);
3721 }
3722 #endif
3723
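// SYSCALL and the HLE BIOS calls leave the translated code: the PC is
// loaded, the pending cycles are added to the cycle counter, and control
// jumps to jump_syscall_hle / jump_hlecall.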
3724 void syscall_assemble(int i,struct regstat *i_regs)
3725 {
3726   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3727   assert(ccreg==HOST_CCREG);
3728   assert(!is_delayslot);
3729   emit_movimm(start+i*4,EAX); // Get PC
3730   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3731   emit_jmp((int)jump_syscall_hle); // XXX
3732 }
3733
3734 void hlecall_assemble(int i,struct regstat *i_regs)
3735 {
3736   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3737   assert(ccreg==HOST_CCREG);
3738   assert(!is_delayslot);
3739   emit_movimm(start+i*4+4,0); // Get PC
3740   emit_movimm((int)psxHLEt[source[i]&7],1);
3741   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3742   emit_jmp((int)jump_hlecall);
3743 }
3744
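// Assemble the instruction sitting in a branch delay slot.  is_delayslot is
// set for the duration so that anything generated here (e.g. the FP_STUB in
// c1ls_assemble) knows it came from a delay slot.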
3745 void ds_assemble(int i,struct regstat *i_regs)
3746 {
3747   is_delayslot=1;
3748   switch(itype[i]) {
3749     case ALU:
3750       alu_assemble(i,i_regs);break;
3751     case IMM16:
3752       imm16_assemble(i,i_regs);break;
3753     case SHIFT:
3754       shift_assemble(i,i_regs);break;
3755     case SHIFTIMM:
3756       shiftimm_assemble(i,i_regs);break;
3757     case LOAD:
3758       load_assemble(i,i_regs);break;
3759     case LOADLR:
3760       loadlr_assemble(i,i_regs);break;
3761     case STORE:
3762       store_assemble(i,i_regs);break;
3763     case STORELR:
3764       storelr_assemble(i,i_regs);break;
3765     case COP0:
3766       cop0_assemble(i,i_regs);break;
3767     case COP1:
3768       cop1_assemble(i,i_regs);break;
3769     case C1LS:
3770       c1ls_assemble(i,i_regs);break;
3771     case COP2:
3772       cop2_assemble(i,i_regs);break;
3773     case C2LS:
3774       c2ls_assemble(i,i_regs);break;
3775     case C2OP:
3776       c2op_assemble(i,i_regs);break;
3777     case FCONV:
3778       fconv_assemble(i,i_regs);break;
3779     case FLOAT:
3780       float_assemble(i,i_regs);break;
3781     case FCOMP:
3782       fcomp_assemble(i,i_regs);break;
3783     case MULTDIV:
3784       multdiv_assemble(i,i_regs);break;
3785     case MOV:
3786       mov_assemble(i,i_regs);break;
3787     case SYSCALL:
3788     case HLECALL:
3789     case SPAN:
3790     case UJUMP:
3791     case RJUMP:
3792     case CJUMP:
3793     case SJUMP:
3794     case FJUMP:
3795       printf("Jump in the delay slot.  This is probably a bug.\n");
3796   }
3797   is_delayslot=0;
3798 }
3799
3800 // Is the branch target a valid internal jump?
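// (e.g. with start=0x80030000 and slen=4, the targets 0x80030000,
// 0x80030004 and 0x80030008 count as internal; odd addresses mark
// register-indirect jumps and never match.)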
3801 int internal_branch(uint64_t i_is32,int addr)
3802 {
3803   if(addr&1) return 0; // Indirect (register) jump
3804   if(addr>=start && addr<start+slen*4-4)
3805   {
3806     int t=(addr-start)>>2;
3807     // Delay slots are not valid branch targets
3808     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3809     // 64 -> 32 bit transition requires a recompile
3810     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3811     {
3812       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3813       else printf("optimizable: yes\n");
3814     }*/
3815     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3816     if(requires_32bit[t]&~i_is32) return 0;
3817     else return 1;
3818   }
3819   return 0;
3820 }
3821
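// Write back or move cached guest registers whose host-register assignment
// changes between the old map (pre) and the new one (entry): dirty values
// that would otherwise be lost are stored unless they are unneeded, and
// values that merely move to a different host register are copied without
// a writeback.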
3822 #ifndef wb_invalidate
3823 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3824   uint64_t u,uint64_t uu)
3825 {
3826   int hr;
3827   for(hr=0;hr<HOST_REGS;hr++) {
3828     if(hr!=EXCLUDE_REG) {
3829       if(pre[hr]!=entry[hr]) {
3830         if(pre[hr]>=0) {
3831           if((dirty>>hr)&1) {
3832             if(get_reg(entry,pre[hr])<0) {
3833               if(pre[hr]<64) {
3834                 if(!((u>>pre[hr])&1)) {
3835                   emit_storereg(pre[hr],hr);
3836                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3837                     emit_sarimm(hr,31,hr);
3838                     emit_storereg(pre[hr]|64,hr);
3839                   }
3840                 }
3841               }else{
3842                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3843                   emit_storereg(pre[hr],hr);
3844                 }
3845               }
3846             }
3847           }
3848         }
3849       }
3850     }
3851   }
3852   // Move from one register to another (no writeback)
3853   for(hr=0;hr<HOST_REGS;hr++) {
3854     if(hr!=EXCLUDE_REG) {
3855       if(pre[hr]!=entry[hr]) {
3856         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3857           int nr;
3858           if((nr=get_reg(entry,pre[hr]))>=0) {
3859             emit_mov(hr,nr);
3860           }
3861         }
3862       }
3863     }
3864   }
3865 }
3866 #endif
3867
3868 // Load the specified registers
3869 // This only loads the registers given as arguments because
3870 // we don't want to load things that will be overwritten
3871 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3872 {
3873   int hr;
3874   // Load 32-bit regs
3875   for(hr=0;hr<HOST_REGS;hr++) {
3876     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3877       if(entry[hr]!=regmap[hr]) {
3878         if(regmap[hr]==rs1||regmap[hr]==rs2)
3879         {
3880           if(regmap[hr]==0) {
3881             emit_zeroreg(hr);
3882           }
3883           else
3884           {
3885             emit_loadreg(regmap[hr],hr);
3886           }
3887         }
3888       }
3889     }
3890   }
3891   // Load 64-bit regs
3892   for(hr=0;hr<HOST_REGS;hr++) {
3893     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3894       if(entry[hr]!=regmap[hr]) {
3895         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3896         {
3897           assert(regmap[hr]!=64);
3898           if((is32>>(regmap[hr]&63))&1) {
3899             int lr=get_reg(regmap,regmap[hr]-64);
3900             if(lr>=0)
3901               emit_sarimm(lr,31,hr);
3902             else
3903               emit_loadreg(regmap[hr],hr);
3904           }
3905           else
3906           {
3907             emit_loadreg(regmap[hr],hr);
3908           }
3909         }
3910       }
3911     }
3912   }
3913 }
3914
3915 // Load registers prior to the start of a loop
3916 // so that they are not loaded within the loop
3917 static void loop_preload(signed char pre[],signed char entry[])
3918 {
3919   int hr;
3920   for(hr=0;hr<HOST_REGS;hr++) {
3921     if(hr!=EXCLUDE_REG) {
3922       if(pre[hr]!=entry[hr]) {
3923         if(entry[hr]>=0) {
3924           if(get_reg(pre,entry[hr])<0) {
3925             assem_debug("loop preload:\n");
3926             //printf("loop preload: %d\n",hr);
3927             if(entry[hr]==0) {
3928               emit_zeroreg(hr);
3929             }
3930             else if(entry[hr]<TEMPREG)
3931             {
3932               emit_loadreg(entry[hr],hr);
3933             }
3934             else if(entry[hr]-64<TEMPREG)
3935             {
3936               emit_loadreg(entry[hr],hr);
3937             }
3938           }
3939         }
3940       }
3941     }
3942   }
3943 }
3944
3945 // Generate address for load/store instruction
3946 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
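// The AGEN/MGEN register index alternates with (i&1) so that, in the second
// half of this function, the address and mapper constants for instruction
// i+1 can be preloaded while instruction i still owns its own pair.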
3947 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3948 {
3949   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3950     int ra;
3951     int agr=AGEN1+(i&1);
3952     int mgr=MGEN1+(i&1);
3953     if(itype[i]==LOAD) {
3954       ra=get_reg(i_regs->regmap,rt1[i]);
3955       //if(rt1[i]) assert(ra>=0);
3956     }
3957     if(itype[i]==LOADLR) {
3958       ra=get_reg(i_regs->regmap,FTEMP);
3959     }
3960     if(itype[i]==STORE||itype[i]==STORELR) {
3961       ra=get_reg(i_regs->regmap,agr);
3962       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3963     }
3964     if(itype[i]==C1LS||itype[i]==C2LS) {
3965       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3966         ra=get_reg(i_regs->regmap,FTEMP);
3967       else { // SWC1/SDC1
3968         ra=get_reg(i_regs->regmap,agr);
3969         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3970       }
3971     }
3972     int rs=get_reg(i_regs->regmap,rs1[i]);
3973     int rm=get_reg(i_regs->regmap,TLREG);
3974     if(ra>=0) {
3975       int offset=imm[i];
3976       int c=(i_regs->wasconst>>rs)&1;
3977       if(rs1[i]==0) {
3978         // Using r0 as a base address
3979         /*if(rm>=0) {
3980           if(!entry||entry[rm]!=mgr) {
3981             generate_map_const(offset,rm);
3982           } // else did it in the previous cycle
3983         }*/
3984         if(!entry||entry[ra]!=agr) {
3985           if (opcode[i]==0x22||opcode[i]==0x26) {
3986             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3987           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3988             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3989           }else{
3990             emit_movimm(offset,ra);
3991           }
3992         } // else did it in the previous cycle
3993       }
3994       else if(rs<0) {
3995         if(!entry||entry[ra]!=rs1[i])
3996           emit_loadreg(rs1[i],ra);
3997         //if(!entry||entry[ra]!=rs1[i])
3998         //  printf("poor load scheduling!\n");
3999       }
4000       else if(c) {
4001         if(rm>=0) {
4002           if(!entry||entry[rm]!=mgr) {
4003             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4004               // Stores to memory go thru the mapper to detect self-modifying
4005               // code, loads don't.
4006               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4007                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4008                 generate_map_const(constmap[i][rs]+offset,rm);
4009             }else{
4010               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4011                 generate_map_const(constmap[i][rs]+offset,rm);
4012             }
4013           }
4014         }
4015         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4016           if(!entry||entry[ra]!=agr) {
4017             if (opcode[i]==0x22||opcode[i]==0x26) {
4018               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4019             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4020               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4021             }else{
4022               #ifdef HOST_IMM_ADDR32
4023               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4024                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4025               #endif
4026               emit_movimm(constmap[i][rs]+offset,ra);
4027             }
4028           } // else did it in the previous cycle
4029         } // else load_consts already did it
4030       }
4031       if(offset&&!c&&rs1[i]) {
4032         if(rs>=0) {
4033           emit_addimm(rs,offset,ra);
4034         }else{
4035           emit_addimm(ra,offset,ra);
4036         }
4037       }
4038     }
4039   }
4040   // Preload constants for next instruction
4041   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4042     int agr,ra;
4043     #ifndef HOST_IMM_ADDR32
4044     // Mapper entry
4045     agr=MGEN1+((i+1)&1);
4046     ra=get_reg(i_regs->regmap,agr);
4047     if(ra>=0) {
4048       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4049       int offset=imm[i+1];
4050       int c=(regs[i+1].wasconst>>rs)&1;
4051       if(c) {
4052         if(itype[i+1]==STORE||itype[i+1]==STORELR
4053            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4054           // Stores to memory go thru the mapper to detect self-modifying
4055           // code, loads don't.
4056           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4057              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4058             generate_map_const(constmap[i+1][rs]+offset,ra);
4059         }else{
4060           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4061             generate_map_const(constmap[i+1][rs]+offset,ra);
4062         }
4063       }
4064       /*else if(rs1[i]==0) {
4065         generate_map_const(offset,ra);
4066       }*/
4067     }
4068     #endif
4069     // Actual address
4070     agr=AGEN1+((i+1)&1);
4071     ra=get_reg(i_regs->regmap,agr);
4072     if(ra>=0) {
4073       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4074       int offset=imm[i+1];
4075       int c=(regs[i+1].wasconst>>rs)&1;
4076       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4077         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4078           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4079         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4080           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4081         }else{
4082           #ifdef HOST_IMM_ADDR32
4083           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4084              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4085           #endif
4086           emit_movimm(constmap[i+1][rs]+offset,ra);
4087         }
4088       }
4089       else if(rs1[i+1]==0) {
4090         // Using r0 as a base address
4091         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4092           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4093         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4094           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4095         }else{
4096           emit_movimm(offset,ra);
4097         }
4098       }
4099     }
4100   }
4101 }
4102
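// Follow a constant host register forward while it keeps the same guest
// register, stays constant and hits no branch target, and return the last
// value so a single movimm suffices.  If the next instruction is a load
// through that register (rs1==rt1), its immediate is folded in: e.g. a
// known 0x80100000 followed by LW r4,8(r4) gives 0x80100008 as the
// precomputed load address.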
4103 int get_final_value(int hr, int i, int *value)
4104 {
4105   int reg=regs[i].regmap[hr];
4106   while(i<slen-1) {
4107     if(regs[i+1].regmap[hr]!=reg) break;
4108     if(!((regs[i+1].isconst>>hr)&1)) break;
4109     if(bt[i+1]) break;
4110     i++;
4111   }
4112   if(i<slen-1) {
4113     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4114       *value=constmap[i][hr];
4115       return 1;
4116     }
4117     if(!bt[i+1]) {
4118       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4119         // Load in delay slot, out-of-order execution
4120         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4121         {
4122           #ifdef HOST_IMM_ADDR32
4123           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4124           #endif
4125           // Precompute load address
4126           *value=constmap[i][hr]+imm[i+2];
4127           return 1;
4128         }
4129       }
4130       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4131       {
4132         #ifdef HOST_IMM_ADDR32
4133         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4134         #endif
4135         // Precompute load address
4136         *value=constmap[i][hr]+imm[i+1];
4137         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4138         return 1;
4139       }
4140     }
4141   }
4142   *value=constmap[i][hr];
4143   //printf("c=%x\n",(int)constmap[i][hr]);
4144   if(i==slen-1) return 1;
4145   if(reg<64) {
4146     return !((unneeded_reg[i+1]>>reg)&1);
4147   }else{
4148     return !((unneeded_reg_upper[i+1]>>reg)&1);
4149   }
4150 }
4151
4152 // Load registers with known constants
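// (get_final_value is used so that, when a register keeps being assigned
// new constants, only the value it finally holds is materialized.)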
4153 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4154 {
4155   int hr;
4156   // Load 32-bit regs
4157   for(hr=0;hr<HOST_REGS;hr++) {
4158     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4159       //if(entry[hr]!=regmap[hr]) {
4160       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4161         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4162           int value;
4163           if(get_final_value(hr,i,&value)) {
4164             if(value==0) {
4165               emit_zeroreg(hr);
4166             }
4167             else {
4168               emit_movimm(value,hr);
4169             }
4170           }
4171         }
4172       }
4173     }
4174   }
4175   // Load 64-bit regs
4176   for(hr=0;hr<HOST_REGS;hr++) {
4177     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4178       //if(entry[hr]!=regmap[hr]) {
4179       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4180         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4181           if((is32>>(regmap[hr]&63))&1) {
4182             int lr=get_reg(regmap,regmap[hr]-64);
4183             assert(lr>=0);
4184             emit_sarimm(lr,31,hr);
4185           }
4186           else
4187           {
4188             int value;
4189             if(get_final_value(hr,i,&value)) {
4190               if(value==0) {
4191                 emit_zeroreg(hr);
4192               }
4193               else {
4194                 emit_movimm(value,hr);
4195               }
4196             }
4197           }
4198         }
4199       }
4200     }
4201   }
4202 }
4203 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4204 {
4205   int hr;
4206   // Load 32-bit regs
4207   for(hr=0;hr<HOST_REGS;hr++) {
4208     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4209       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4210         int value=constmap[i][hr];
4211         if(value==0) {
4212           emit_zeroreg(hr);
4213         }
4214         else {
4215           emit_movimm(value,hr);
4216         }
4217       }
4218     }
4219   }
4220   // Load 64-bit regs
4221   for(hr=0;hr<HOST_REGS;hr++) {
4222     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4223       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4224         if((is32>>(regmap[hr]&63))&1) {
4225           int lr=get_reg(regmap,regmap[hr]-64);
4226           assert(lr>=0);
4227           emit_sarimm(lr,31,hr);
4228         }
4229         else
4230         {
4231           int value=constmap[i][hr];
4232           if(value==0) {
4233             emit_zeroreg(hr);
4234           }
4235           else {
4236             emit_movimm(value,hr);
4237           }
4238         }
4239       }
4240     }
4241   }
4242 }
4243
4244 // Write out all dirty registers (except cycle count)
4245 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4246 {
4247   int hr;
4248   for(hr=0;hr<HOST_REGS;hr++) {
4249     if(hr!=EXCLUDE_REG) {
4250       if(i_regmap[hr]>0) {
4251         if(i_regmap[hr]!=CCREG) {
4252           if((i_dirty>>hr)&1) {
4253             if(i_regmap[hr]<64) {
4254               emit_storereg(i_regmap[hr],hr);
4255 #ifndef FORCE32
4256               if( ((i_is32>>i_regmap[hr])&1) ) {
4257                 #ifdef DESTRUCTIVE_WRITEBACK
4258                 emit_sarimm(hr,31,hr);
4259                 emit_storereg(i_regmap[hr]|64,hr);
4260                 #else
4261                 emit_sarimm(hr,31,HOST_TEMPREG);
4262                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4263                 #endif
4264               }
4265 #endif
4266             }else{
4267               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4268                 emit_storereg(i_regmap[hr],hr);
4269               }
4270             }
4271           }
4272         }
4273       }
4274     }
4275   }
4276 }
4277 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4278 // This writes the registers not written by store_regs_bt
4279 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4280 {
4281   int hr;
4282   int t=(addr-start)>>2;
4283   for(hr=0;hr<HOST_REGS;hr++) {
4284     if(hr!=EXCLUDE_REG) {
4285       if(i_regmap[hr]>0) {
4286         if(i_regmap[hr]!=CCREG) {
4287           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4288             if((i_dirty>>hr)&1) {
4289               if(i_regmap[hr]<64) {
4290                 emit_storereg(i_regmap[hr],hr);
4291 #ifndef FORCE32
4292                 if( ((i_is32>>i_regmap[hr])&1) ) {
4293                   #ifdef DESTRUCTIVE_WRITEBACK
4294                   emit_sarimm(hr,31,hr);
4295                   emit_storereg(i_regmap[hr]|64,hr);
4296                   #else
4297                   emit_sarimm(hr,31,HOST_TEMPREG);
4298                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4299                   #endif
4300                 }
4301 #endif
4302               }else{
4303                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4304                   emit_storereg(i_regmap[hr],hr);
4305                 }
4306               }
4307             }
4308           }
4309         }
4310       }
4311     }
4312   }
4313 }
4314
4315 // Load all registers (except cycle count)
4316 void load_all_regs(signed char i_regmap[])
4317 {
4318   int hr;
4319   for(hr=0;hr<HOST_REGS;hr++) {
4320     if(hr!=EXCLUDE_REG) {
4321       if(i_regmap[hr]==0) {
4322         emit_zeroreg(hr);
4323       }
4324       else
4325       if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4326       {
4327         emit_loadreg(i_regmap[hr],hr);
4328       }
4329     }
4330   }
4331 }
4332
4333 // Load all current registers also needed by next instruction
4334 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4335 {
4336   int hr;
4337   for(hr=0;hr<HOST_REGS;hr++) {
4338     if(hr!=EXCLUDE_REG) {
4339       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4340         if(i_regmap[hr]==0) {
4341           emit_zeroreg(hr);
4342         }
4343         else
4344         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4345         {
4346           emit_loadreg(i_regmap[hr],hr);
4347         }
4348       }
4349     }
4350   }
4351 }
4352
4353 // Load all regs, storing cycle count if necessary
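// Entry at instruction t first adjusts the cycle counter (entering at a
// delay slot costs one extra cycle), then loads the registers listed in
// regmap_entry[t]; the 64-bit half of a value known to be 32-bit is
// recreated with a sign-extend instead of a load.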
4354 void load_regs_entry(int t)
4355 {
4356   int hr;
4357   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4358   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4359   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4360     emit_storereg(CCREG,HOST_CCREG);
4361   }
4362   // Load 32-bit regs
4363   for(hr=0;hr<HOST_REGS;hr++) {
4364     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4365       if(regs[t].regmap_entry[hr]==0) {
4366         emit_zeroreg(hr);
4367       }
4368       else if(regs[t].regmap_entry[hr]!=CCREG)
4369       {
4370         emit_loadreg(regs[t].regmap_entry[hr],hr);
4371       }
4372     }
4373   }
4374   // Load 64-bit regs
4375   for(hr=0;hr<HOST_REGS;hr++) {
4376     if(regs[t].regmap_entry[hr]>=64) {
4377       assert(regs[t].regmap_entry[hr]!=64);
4378       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4379         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4380         if(lr<0) {
4381           emit_loadreg(regs[t].regmap_entry[hr],hr);
4382         }
4383         else
4384         {
4385           emit_sarimm(lr,31,hr);
4386         }
4387       }
4388       else
4389       {
4390         emit_loadreg(regs[t].regmap_entry[hr],hr);
4391       }
4392     }
4393   }
4394 }
4395
4396 // Store dirty registers prior to branch
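// For an internal branch, only dirty registers that the target entry does
// not already expect in the same still-dirty host register are written
// back; branches that leave the block flush everything via wb_dirtys.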
4397 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4398 {
4399   if(internal_branch(i_is32,addr))
4400   {
4401     int t=(addr-start)>>2;
4402     int hr;
4403     for(hr=0;hr<HOST_REGS;hr++) {
4404       if(hr!=EXCLUDE_REG) {
4405         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4406           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4407             if((i_dirty>>hr)&1) {
4408               if(i_regmap[hr]<64) {
4409                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4410                   emit_storereg(i_regmap[hr],hr);
4411                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4412                     #ifdef DESTRUCTIVE_WRITEBACK
4413                     emit_sarimm(hr,31,hr);
4414                     emit_storereg(i_regmap[hr]|64,hr);
4415                     #else
4416                     emit_sarimm(hr,31,HOST_TEMPREG);
4417                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4418                     #endif
4419                   }
4420                 }
4421               }else{
4422                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4423                   emit_storereg(i_regmap[hr],hr);
4424                 }
4425               }
4426             }
4427           }
4428         }
4429       }
4430     }
4431   }
4432   else
4433   {
4434     // Branch out of this block, write out all dirty regs
4435     wb_dirtys(i_regmap,i_is32,i_dirty);
4436   }
4437 }
4438
4439 // Load all needed registers for branch target
4440 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4441 {
4442   //if(addr>=start && addr<(start+slen*4))
4443   if(internal_branch(i_is32,addr))
4444   {
4445     int t=(addr-start)>>2;
4446     int hr;
4447     // Store the cycle count before loading something else
4448     if(i_regmap[HOST_CCREG]!=CCREG) {
4449       assert(i_regmap[HOST_CCREG]==-1);
4450     }
4451     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4452       emit_storereg(CCREG,HOST_CCREG);
4453     }
4454     // Load 32-bit regs
4455     for(hr=0;hr<HOST_REGS;hr++) {
4456       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4457         #ifdef DESTRUCTIVE_WRITEBACK
4458         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4459         #else
4460         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4461         #endif
4462           if(regs[t].regmap_entry[hr]==0) {
4463             emit_zeroreg(hr);
4464           }
4465           else if(regs[t].regmap_entry[hr]!=CCREG)
4466           {
4467             emit_loadreg(regs[t].regmap_entry[hr],hr);
4468           }
4469         }
4470       }
4471     }
4472     // Load 64-bit regs
4473     for(hr=0;hr<HOST_REGS;hr++) {
4474       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4475         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4476           assert(regs[t].regmap_entry[hr]!=64);
4477           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4478             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4479             if(lr<0) {
4480               emit_loadreg(regs[t].regmap_entry[hr],hr);
4481             }
4482             else
4483             {
4484               emit_sarimm(lr,31,hr);
4485             }
4486           }
4487           else
4488           {
4489             emit_loadreg(regs[t].regmap_entry[hr],hr);
4490           }
4491         }
4492         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4493           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4494           assert(lr>=0);
4495           emit_sarimm(lr,31,hr);
4496         }
4497       }
4498     }
4499   }
4500 }
4501
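// Can execution fall straight into the already-compiled code at addr with
// the current register allocation, dirty bits and 32/64-bit state, or does
// the branch need writeback/reload fix-up code first?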
4502 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4503 {
4504   if(addr>=start && addr<start+slen*4-4)
4505   {
4506     int t=(addr-start)>>2;
4507     int hr;
4508     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4509     for(hr=0;hr<HOST_REGS;hr++)
4510     {
4511       if(hr!=EXCLUDE_REG)
4512       {
4513         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4514         {
4515           if(regs[t].regmap_entry[hr]!=-1)
4516           {
4517             return 0;
4518           }
4519           else 
4520           if((i_dirty>>hr)&1)
4521           {
4522             if(i_regmap[hr]<64)
4523             {
4524               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4525                 return 0;
4526             }
4527             else
4528             {
4529               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4530                 return 0;
4531             }
4532           }
4533         }
4534         else // Same register but is it 32-bit or dirty?
4535         if(i_regmap[hr]>=0)
4536         {
4537           if(!((regs[t].dirty>>hr)&1))
4538           {
4539             if((i_dirty>>hr)&1)
4540             {
4541               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4542               {
4543                 //printf("%x: dirty no match\n",addr);
4544                 return 0;
4545               }
4546             }
4547           }
4548           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4549           {
4550             //printf("%x: is32 no match\n",addr);
4551             return 0;
4552           }
4553         }
4554       }
4555     }
4556     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4557     if(requires_32bit[t]&~i_is32) return 0;
4558     // Delay slots are not valid branch targets
4559     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4560     // Delay slots require additional processing, so do not match
4561     if(is_ds[t]) return 0;
4562   }
4563   else
4564   {
4565     int hr;
4566     for(hr=0;hr<HOST_REGS;hr++)
4567     {
4568       if(hr!=EXCLUDE_REG)
4569       {
4570         if(i_regmap[hr]>=0)
4571         {
4572           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4573           {
4574             if((i_dirty>>hr)&1)
4575             {
4576               return 0;
4577             }
4578           }
4579         }
4580       }
4581     }
4582   }
4583   return 1;
4584 }
4585
4586 // Used when a branch jumps into the delay slot of another branch
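// The delay-slot instruction at ba[i] gets its own entry point here and
// then branches on to ba[i]+4, which must be internal to the current block.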
4587 void ds_assemble_entry(int i)
4588 {
4589   int t=(ba[i]-start)>>2;
4590   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4591   assem_debug("Assemble delay slot at %x\n",ba[i]);
4592   assem_debug("<->\n");
4593   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4594     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4595   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4596   address_generation(t,&regs[t],regs[t].regmap_entry);
4597   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4598     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4599   cop1_usable=0;
4600   is_delayslot=0;
4601   switch(itype[t]) {
4602     case ALU:
4603       alu_assemble(t,&regs[t]);break;
4604     case IMM16:
4605       imm16_assemble(t,&regs[t]);break;
4606     case SHIFT:
4607       shift_assemble(t,&regs[t]);break;
4608     case SHIFTIMM:
4609       shiftimm_assemble(t,&regs[t]);break;
4610     case LOAD:
4611       load_assemble(t,&regs[t]);break;
4612     case LOADLR:
4613       loadlr_assemble(t,&regs[t]);break;
4614     case STORE:
4615       store_assemble(t,&regs[t]);break;
4616     case STORELR:
4617       storelr_assemble(t,&regs[t]);break;
4618     case COP0:
4619       cop0_assemble(t,&regs[t]);break;
4620     case COP1:
4621       cop1_assemble(t,&regs[t]);break;
4622     case C1LS:
4623       c1ls_assemble(t,&regs[t]);break;
4624     case COP2:
4625       cop2_assemble(t,&regs[t]);break;
4626     case C2LS:
4627       c2ls_assemble(t,&regs[t]);break;
4628     case C2OP:
4629       c2op_assemble(t,&regs[t]);break;
4630     case FCONV:
4631       fconv_assemble(t,&regs[t]);break;
4632     case FLOAT:
4633       float_assemble(t,&regs[t]);break;
4634     case FCOMP:
4635       fcomp_assemble(t,&regs[t]);break;
4636     case MULTDIV:
4637       multdiv_assemble(t,&regs[t]);break;
4638     case MOV:
4639       mov_assemble(t,&regs[t]);break;
4640     case SYSCALL:
4641     case HLECALL:
4642     case SPAN:
4643     case UJUMP:
4644     case RJUMP:
4645     case CJUMP:
4646     case SJUMP:
4647     case FJUMP:
4648       printf("Jump in the delay slot.  This is probably a bug.\n");
4649   }
4650   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4651   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4652   if(internal_branch(regs[t].is32,ba[i]+4))
4653     assem_debug("branch: internal\n");
4654   else
4655     assem_debug("branch: external\n");
4656   assert(internal_branch(regs[t].is32,ba[i]+4));
4657   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4658   emit_jmp(0);
4659 }
4660
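// Emit the cycle-count check for a branch: the (negative) counter in
// HOST_CCREG is advanced by the cycles consumed and a CC_STUB is taken once
// it becomes non-negative.  A branch to itself with a NOP in the delay slot
// is treated as an idle loop and its remaining cycles are consumed in one
// step rather than one iteration at a time.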
4661 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4662 {
4663   int count;
4664   int jaddr;
4665   int idle=0;
4666   if(itype[i]==RJUMP)
4667   {
4668     *adj=0;
4669   }
4670   //if(ba[i]>=start && ba[i]<(start+slen*4))
4671   if(internal_branch(branch_regs[i].is32,ba[i]))
4672   {
4673     int t=(ba[i]-start)>>2;
4674     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4675     else *adj=ccadj[t];
4676   }
4677   else
4678   {
4679     *adj=0;
4680   }
4681   count=ccadj[i];
4682   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4683     // Idle loop
4684     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4685     idle=(int)out;
4686     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4687     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4688     jaddr=(int)out;
4689     emit_jmp(0);
4690   }
4691   else if(*adj==0||invert) {
4692     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4693     jaddr=(int)out;
4694     emit_jns(0);
4695   }
4696   else
4697   {
4698     emit_cmpimm(HOST_CCREG,-2*(count+2));
4699     jaddr=(int)out;
4700     emit_jns(0);
4701   }
4702   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4703 }
4704
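// Out-of-line code for the cycle-count checks above: write back dirty
// registers, store the address execution should resume at (a known PC, or
// one recomputed from the branch condition with conditional moves), call
// cc_interrupt, then reload registers and jump back into the compiled code.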
4705 void do_ccstub(int n)
4706 {
4707   literal_pool(256);
4708   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4709   set_jump_target(stubs[n][1],(int)out);
4710   int i=stubs[n][4];
4711   if(stubs[n][6]==NULLDS) {
4712     // Delay slot instruction is nullified ("likely" branch)
4713     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4714   }
4715   else if(stubs[n][6]!=TAKEN) {
4716     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4717   }
4718   else {
4719     if(internal_branch(branch_regs[i].is32,ba[i]))
4720       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4721   }
4722   if(stubs[n][5]!=-1)
4723   {
4724     // Save PC as return address
4725     emit_movimm(stubs[n][5],EAX);
4726     emit_writeword(EAX,(int)&pcaddr);
4727   }
4728   else
4729   {
4730     // Return address depends on which way the branch goes
4731     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4732     {
4733       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4734       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4735       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4736       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4737       if(rs1[i]==0)
4738       {
4739         s1l=s2l;s1h=s2h;
4740         s2l=s2h=-1;
4741       }
4742       else if(rs2[i]==0)
4743       {
4744         s2l=s2h=-1;
4745       }
4746       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4747         s1h=s2h=-1;
4748       }
4749       assert(s1l>=0);
4750       #ifdef DESTRUCTIVE_WRITEBACK
4751       if(rs1[i]) {
4752         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4753           emit_loadreg(rs1[i],s1l);
4754       } 
4755       else {
4756         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4757           emit_loadreg(rs2[i],s1l);
4758       }
4759       if(s2l>=0)
4760         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4761           emit_loadreg(rs2[i],s2l);
4762       #endif
4763       int hr=0;
4764       int addr,alt,ntaddr;
4765       while(hr<HOST_REGS)
4766       {
4767         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4768            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4769            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4770         {
4771           addr=hr++;break;
4772         }
4773         hr++;
4774       }
4775       while(hr<HOST_REGS)
4776       {
4777         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4778            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4779            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4780         {
4781           alt=hr++;break;
4782         }
4783         hr++;
4784       }
4785       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4786       {
4787         while(hr<HOST_REGS)
4788         {
4789           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4790              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4791              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4792           {
4793             ntaddr=hr;break;
4794           }
4795           hr++;
4796         }
4797         assert(hr<HOST_REGS);
4798       }
4799       if((opcode[i]&0x2f)==4) // BEQ
4800       {
4801         #ifdef HAVE_CMOV_IMM
4802         if(s1h<0) {
4803           if(s2l>=0) emit_cmp(s1l,s2l);
4804           else emit_test(s1l,s1l);
4805           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4806         }
4807         else
4808         #endif
4809         {
4810           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4811           if(s1h>=0) {
4812             if(s2h>=0) emit_cmp(s1h,s2h);
4813             else emit_test(s1h,s1h);
4814             emit_cmovne_reg(alt,addr);
4815           }
4816           if(s2l>=0) emit_cmp(s1l,s2l);
4817           else emit_test(s1l,s1l);
4818           emit_cmovne_reg(alt,addr);
4819         }
4820       }
4821       if((opcode[i]&0x2f)==5) // BNE
4822       {
4823         #ifdef HAVE_CMOV_IMM
4824         if(s1h<0) {
4825           if(s2l>=0) emit_cmp(s1l,s2l);
4826           else emit_test(s1l,s1l);
4827           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4828         }
4829         else
4830         #endif
4831         {
4832           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4833           if(s1h>=0) {
4834             if(s2h>=0) emit_cmp(s1h,s2h);
4835             else emit_test(s1h,s1h);
4836             emit_cmovne_reg(alt,addr);
4837           }
4838           if(s2l>=0) emit_cmp(s1l,s2l);
4839           else emit_test(s1l,s1l);
4840           emit_cmovne_reg(alt,addr);
4841         }
4842       }
4843       if((opcode[i]&0x2f)==6) // BLEZ
4844       {
4845         //emit_movimm(ba[i],alt);
4846         //emit_movimm(start+i*4+8,addr);
4847         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4848         emit_cmpimm(s1l,1);
4849         if(s1h>=0) emit_mov(addr,ntaddr);
4850         emit_cmovl_reg(alt,addr);
4851         if(s1h>=0) {
4852           emit_test(s1h,s1h);
4853           emit_cmovne_reg(ntaddr,addr);
4854           emit_cmovs_reg(alt,addr);
4855         }
4856       }
4857       if((opcode[i]&0x2f)==7) // BGTZ
4858       {
4859         //emit_movimm(ba[i],addr);
4860         //emit_movimm(start+i*4+8,ntaddr);
4861         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4862         emit_cmpimm(s1l,1);
4863         if(s1h>=0) emit_mov(addr,alt);
4864         emit_cmovl_reg(ntaddr,addr);
4865         if(s1h>=0) {
4866           emit_test(s1h,s1h);
4867           emit_cmovne_reg(alt,addr);
4868           emit_cmovs_reg(ntaddr,addr);
4869         }
4870       }
4871       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4872       {
4873         //emit_movimm(ba[i],alt);
4874         //emit_movimm(start+i*4+8,addr);
4875         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4876         if(s1h>=0) emit_test(s1h,s1h);
4877         else emit_test(s1l,s1l);
4878         emit_cmovs_reg(alt,addr);
4879       }
4880       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4881       {
4882         //emit_movimm(ba[i],addr);
4883         //emit_movimm(start+i*4+8,alt);
4884         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4885         if(s1h>=0) emit_test(s1h,s1h);
4886         else emit_test(s1l,s1l);
4887         emit_cmovs_reg(alt,addr);
4888       }
4889       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4890         if(source[i]&0x10000) // BC1T
4891         {
4892           //emit_movimm(ba[i],alt);
4893           //emit_movimm(start+i*4+8,addr);
4894           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4895           emit_testimm(s1l,0x800000);
4896           emit_cmovne_reg(alt,addr);
4897         }
4898         else // BC1F
4899         {
4900           //emit_movimm(ba[i],addr);
4901           //emit_movimm(start+i*4+8,alt);
4902           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4903           emit_testimm(s1l,0x800000);
4904           emit_cmovne_reg(alt,addr);
4905         }
4906       }
4907       emit_writeword(addr,(int)&pcaddr);
4908     }
4909     else
4910     if(itype[i]==RJUMP)
4911     {
4912       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4913       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4914         r=get_reg(branch_regs[i].regmap,RTEMP);
4915       }
4916       emit_writeword(r,(int)&pcaddr);
4917     }
4918     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4919   }
4920   // Update cycle count
4921   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4922   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4923   emit_call((int)cc_interrupt);
4924   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4925   if(stubs[n][6]==TAKEN) {
4926     if(internal_branch(branch_regs[i].is32,ba[i]))
4927       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4928     else if(itype[i]==RJUMP) {
4929       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4930         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4931       else
4932         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4933     }
4934   }else if(stubs[n][6]==NOTTAKEN) {
4935     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4936     else load_all_regs(branch_regs[i].regmap);
4937   }else if(stubs[n][6]==NULLDS) {
4938     // Delay slot instruction is nullified ("likely" branch)
4939     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4940     else load_all_regs(regs[i].regmap);
4941   }else{
4942     load_all_regs(branch_regs[i].regmap);
4943   }
4944   emit_jmp(stubs[n][2]); // return address
4945   
4946   /* This works but uses a lot of memory...
4947   emit_readword((int)&last_count,ECX);
4948   emit_add(HOST_CCREG,ECX,EAX);
4949   emit_writeword(EAX,(int)&Count);
4950   emit_call((int)gen_interupt);
4951   emit_readword((int)&Count,HOST_CCREG);
4952   emit_readword((int)&next_interupt,EAX);
4953   emit_readword((int)&pending_exception,EBX);
4954   emit_writeword(EAX,(int)&last_count);
4955   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4956   emit_test(EBX,EBX);
4957   int jne_instr=(int)out;
4958   emit_jne(0);
4959   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4960   load_all_regs(branch_regs[i].regmap);
4961   emit_jmp(stubs[n][2]); // return address
4962   set_jump_target(jne_instr,(int)out);
4963   emit_readword((int)&pcaddr,EAX);
4964   // Call get_addr_ht instead of doing the hash table here.
4965   // This code is executed infrequently and takes up a lot of space
4966   // so smaller is better.
4967   emit_storereg(CCREG,HOST_CCREG);
4968   emit_pushreg(EAX);
4969   emit_call((int)get_addr_ht);
4970   emit_loadreg(CCREG,HOST_CCREG);
4971   emit_addimm(ESP,4,ESP);
4972   emit_jmpreg(EAX);*/
4973 }
4974
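// Remember a jump that still has to be patched once target addresses are
// known: where it was emitted, the PC it targets, and an internal/external
// flag (the callers pass the result of internal_branch()).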
4975 void add_to_linker(int addr,int target,int ext)
4976 {
4977   link_addr[linkcount][0]=addr;
4978   link_addr[linkcount][1]=target;
4979   link_addr[linkcount][2]=ext;  
4980   linkcount++;
4981 }
4982
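// Unconditional jump (J/JAL).  The delay slot is assembled first; for JAL
// (rt1==31) the return address start+i*4+8, i.e. the instruction after the
// delay slot, is written to $ra or, with USE_MINI_HT, inserted into the
// mini hash table.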
4983 void ujump_assemble(int i,struct regstat *i_regs)
4984 {
4985   signed char *i_regmap=i_regs->regmap;
4986   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4987   address_generation(i+1,i_regs,regs[i].regmap_entry);
4988   #ifdef REG_PREFETCH
4989   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4990   if(rt1[i]==31&&temp>=0) 
4991   {
4992     int return_address=start+i*4+8;
4993     if(get_reg(branch_regs[i].regmap,31)>0) 
4994     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4995   }
4996   #endif
4997   ds_assemble(i+1,i_regs);
4998   uint64_t bc_unneeded=branch_regs[i].u;
4999   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5000   bc_unneeded|=1|(1LL<<rt1[i]);
5001   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5002   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5003                 bc_unneeded,bc_unneeded_upper);
5004   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5005   if(rt1[i]==31) {
5006     int rt;
5007     unsigned int return_address;
5008     assert(rt1[i+1]!=31);
5009     assert(rt2[i+1]!=31);
5010     rt=get_reg(branch_regs[i].regmap,31);
5011     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5012     //assert(rt>=0);
5013     return_address=start+i*4+8;
5014     if(rt>=0) {
5015       #ifdef USE_MINI_HT
5016       if(internal_branch(branch_regs[i].is32,return_address)) {
5017         int temp=rt+1;
5018         if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5019            branch_regs[i].regmap[temp]>=0)
5020         {
5021           temp=get_reg(branch_regs[i].regmap,-1);
5022         }
5023         #ifdef HOST_TEMPREG
5024         if(temp<0) temp=HOST_TEMPREG;
5025         #endif
5026         if(temp>=0) do_miniht_insert(return_address,rt,temp);
5027         else emit_movimm(return_address,rt);
5028       }
5029       else
5030       #endif
5031       {
5032         #ifdef REG_PREFETCH
5033         if(temp>=0) 
5034         {
5035           if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5036         }
5037         #endif
5038         emit_movimm(return_address,rt); // PC into link register
5039         #ifdef IMM_PREFETCH
5040         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5041         #endif
5042       }
5043     }
5044   }
5045   int cc,adj;
5046   cc=get_reg(branch_regs[i].regmap,CCREG);
5047   assert(cc==HOST_CCREG);
5048   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5049   #ifdef REG_PREFETCH
5050   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5051   #endif
5052   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5053   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5054   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5055   if(internal_branch(branch_regs[i].is32,ba[i]))
5056     assem_debug("branch: internal\n");
5057   else
5058     assem_debug("branch: external\n");
5059   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5060     ds_assemble_entry(i);
5061   }
5062   else {
5063     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5064     emit_jmp(0);
5065   }
5066 }
5067
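/* Register-indirect jump (JR/JALR).  If the delay slot overwrites the branch
   address register, the address is copied to RTEMP first.  JALR writes the
   return address to its destination register, the cycle count is tested,
   and control leaves through jump_vaddr_reg[] (or the mini hash table when
   USE_MINI_HT is enabled and the source register is $ra). */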
5068 void rjump_assemble(int i,struct regstat *i_regs)
5069 {
5070   signed char *i_regmap=i_regs->regmap;
5071   int temp;
5072   int rs,cc,adj;
5073   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5074   assert(rs>=0);
5075   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5076     // Delay slot abuse, make a copy of the branch address register
5077     temp=get_reg(branch_regs[i].regmap,RTEMP);
5078     assert(temp>=0);
5079     assert(regs[i].regmap[temp]==RTEMP);
5080     emit_mov(rs,temp);
5081     rs=temp;
5082   }
5083   address_generation(i+1,i_regs,regs[i].regmap_entry);
5084   #ifdef REG_PREFETCH
5085   if(rt1[i]==31) 
5086   {
5087     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5088       int return_address=start+i*4+8;
5089       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5090     }
5091   }
5092   #endif
5093   #ifdef USE_MINI_HT
5094   if(rs1[i]==31) {
5095     int rh=get_reg(regs[i].regmap,RHASH);
5096     if(rh>=0) do_preload_rhash(rh);
5097   }
5098   #endif
5099   ds_assemble(i+1,i_regs);
5100   uint64_t bc_unneeded=branch_regs[i].u;
5101   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5102   bc_unneeded|=1|(1LL<<rt1[i]);
5103   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5104   bc_unneeded&=~(1LL<<rs1[i]);
5105   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5106                 bc_unneeded,bc_unneeded_upper);
5107   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5108   if(rt1[i]!=0) {
5109     int rt,return_address;
5110     assert(rt1[i+1]!=rt1[i]);
5111     assert(rt2[i+1]!=rt1[i]);
5112     rt=get_reg(branch_regs[i].regmap,rt1[i]);
5113     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5114     assert(rt>=0);
5115     return_address=start+i*4+8;
5116     #ifdef REG_PREFETCH
5117     if(temp>=0) 
5118     {
5119       if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5120     }
5121     #endif
5122     emit_movimm(return_address,rt); // PC into link register
5123     #ifdef IMM_PREFETCH
5124     emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5125     #endif
5126   }
5127   cc=get_reg(branch_regs[i].regmap,CCREG);
5128   assert(cc==HOST_CCREG);
5129   #ifdef USE_MINI_HT
5130   int rh=get_reg(branch_regs[i].regmap,RHASH);
5131   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5132   if(rs1[i]==31) {
5133     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5134     do_preload_rhtbl(ht);
5135     do_rhash(rs,rh);
5136   }
5137   #endif
5138   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5139   #ifdef DESTRUCTIVE_WRITEBACK
5140   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5141     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5142       emit_loadreg(rs1[i],rs);
5143     }
5144   }
5145   #endif
5146   #ifdef REG_PREFETCH
5147   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5148   #endif
5149   #ifdef USE_MINI_HT
5150   if(rs1[i]==31) {
5151     do_miniht_load(ht,rh);
5152   }
5153   #endif
5154   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5155   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5156   //assert(adj==0);
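  // Cycle-count check: HOST_CCREG appears to hold (Count - next_interupt),
  // so it stays negative while cycles remain.  Adding the scaled cost sets
  // the flags; once the counter reaches zero the jns below is taken into the
  // CC_STUB registered here, which calls cc_interrupt and then resumes at
  // the recorded return address (the jump_vaddr_reg dispatcher).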
5157   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5158   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5159   emit_jns(0);
5160   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5161   #ifdef USE_MINI_HT
5162   if(rs1[i]==31) {
5163     do_miniht_jump(rs,rh,ht);
5164   }
5165   else
5166   #endif
5167   {
5168     //if(rs!=EAX) emit_mov(rs,EAX);
5169     //emit_jmp((int)jump_vaddr_eax);
5170     emit_jmp(jump_vaddr_reg[rs]);
5171   }
5172   /* Check hash table
5173   temp=!rs;
5174   emit_mov(rs,temp);
5175   emit_shrimm(rs,16,rs);
5176   emit_xor(temp,rs,rs);
5177   emit_movzwl_reg(rs,rs);
5178   emit_shlimm(rs,4,rs);
5179   emit_cmpmem_indexed((int)hash_table,rs,temp);
5180   emit_jne((int)out+14);
5181   emit_readword_indexed((int)hash_table+4,rs,rs);
5182   emit_jmpreg(rs);
5183   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5184   emit_addimm_no_flags(8,rs);
5185   emit_jeq((int)out-17);
5186   // No hit on hash table, call compiler
5187   emit_pushreg(temp);
5188 //DEBUG >
5189 #ifdef DEBUG_CYCLE_COUNT
5190   emit_readword((int)&last_count,ECX);
5191   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5192   emit_readword((int)&next_interupt,ECX);
5193   emit_writeword(HOST_CCREG,(int)&Count);
5194   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5195   emit_writeword(ECX,(int)&last_count);
5196 #endif
5197 //DEBUG <
5198   emit_storereg(CCREG,HOST_CCREG);
5199   emit_call((int)get_addr);
5200   emit_loadreg(CCREG,HOST_CCREG);
5201   emit_addimm(ESP,4,ESP);
5202   emit_jmpreg(EAX);*/
5203   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5204   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5205   #endif
5206 }
5207
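/* Conditional branch on two registers: opcodes 4..7 are BEQ, BNE, BLEZ and
   BGTZ, with 0x14..0x17 as the "likely" variants.  The delay slot normally
   runs first (out-of-order path); a write-after-read dependency or a likely
   branch forces the in-order path, where the condition is tested before the
   delay slot is assembled. */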
5208 void cjump_assemble(int i,struct regstat *i_regs)
5209 {
5210   signed char *i_regmap=i_regs->regmap;
5211   int cc;
5212   int match;
5213   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5214   assem_debug("match=%d\n",match);
5215   int s1h,s1l,s2h,s2l;
5216   int prev_cop1_usable=cop1_usable;
5217   int unconditional=0,nop=0;
5218   int only32=0;
5219   int ooo=1;
5220   int invert=0;
5221   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5222   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5223   if(likely[i]) ooo=0;
5224   if(!match) invert=1;
5225   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5226   if(i>(ba[i]-start)>>2) invert=1;
5227   #endif
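  // "match" means the register mapping at the branch target agrees with the
  // current one, so the taken condition can jump straight to the target via
  // the linker.  Otherwise (or for backward branches with the Cortex-A8
  // hack) the condition is inverted: the not-taken case jumps over a fixup
  // block that writes back/reloads registers and then jumps to the target.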
5228     
5229   if(ooo)
5230     if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5231        (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5232   {
5233     // Write-after-read dependency prevents out of order execution
5234     // First test branch condition, then execute delay slot, then branch
5235     ooo=0;
5236   }
5237
5238   if(ooo) {
5239     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5240     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5241     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5242     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5243   }
5244   else {
5245     s1l=get_reg(i_regmap,rs1[i]);
5246     s1h=get_reg(i_regmap,rs1[i]|64);
5247     s2l=get_reg(i_regmap,rs2[i]);
5248     s2h=get_reg(i_regmap,rs2[i]|64);
5249   }
5250   if(rs1[i]==0&&rs2[i]==0)
5251   {
5252     if(opcode[i]&1) nop=1;
5253     else unconditional=1;
5254     //assert(opcode[i]!=5);
5255     //assert(opcode[i]!=7);
5256     //assert(opcode[i]!=0x15);
5257     //assert(opcode[i]!=0x17);
5258   }
5259   else if(rs1[i]==0)
5260   {
5261     s1l=s2l;s1h=s2h;
5262     s2l=s2h=-1;
5263     only32=(regs[i].was32>>rs2[i])&1;
5264   }
5265   else if(rs2[i]==0)
5266   {
5267     s2l=s2h=-1;
5268     only32=(regs[i].was32>>rs1[i])&1;
5269   }
5270   else {
5271     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5272   }
5273
5274   if(ooo) {
5275     // Out of order execution (delay slot first)
5276     //printf("OOOE\n");
5277     address_generation(i+1,i_regs,regs[i].regmap_entry);
5278     ds_assemble(i+1,i_regs);
5279     int adj;
5280     uint64_t bc_unneeded=branch_regs[i].u;
5281     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5282     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5283     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5284     bc_unneeded|=1;
5285     bc_unneeded_upper|=1;
5286     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5287                   bc_unneeded,bc_unneeded_upper);
5288     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5289     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5290     cc=get_reg(branch_regs[i].regmap,CCREG);
5291     assert(cc==HOST_CCREG);
5292     if(unconditional) 
5293       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5294     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5295     //assem_debug("cycle count (adj)\n");
5296     if(unconditional) {
5297       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5298       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5299         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5300         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5301         if(internal)
5302           assem_debug("branch: internal\n");
5303         else
5304           assem_debug("branch: external\n");
5305         if(internal&&is_ds[(ba[i]-start)>>2]) {
5306           ds_assemble_entry(i);
5307         }
5308         else {
5309           add_to_linker((int)out,ba[i],internal);
5310           emit_jmp(0);
5311         }
5312         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5313         if(((u_int)out)&7) emit_addnop(0);
5314         #endif
5315       }
5316     }
5317     else if(nop) {
5318       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5319       int jaddr=(int)out;
5320       emit_jns(0);
5321       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5322     }
5323     else {
5324       int taken=0,nottaken=0,nottaken1=0;
5325       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5326       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5327       if(!only32)
5328       {
5329         assert(s1h>=0);
5330         if(opcode[i]==4) // BEQ
5331         {
5332           if(s2h>=0) emit_cmp(s1h,s2h);
5333           else emit_test(s1h,s1h);
5334           nottaken1=(int)out;
5335           emit_jne(1);
5336         }
5337         if(opcode[i]==5) // BNE
5338         {
5339           if(s2h>=0) emit_cmp(s1h,s2h);
5340           else emit_test(s1h,s1h);
5341           if(invert) taken=(int)out;
5342           else add_to_linker((int)out,ba[i],internal);
5343           emit_jne(0);
5344         }
5345         if(opcode[i]==6) // BLEZ
5346         {
5347           emit_test(s1h,s1h);
5348           if(invert) taken=(int)out;
5349           else add_to_linker((int)out,ba[i],internal);
5350           emit_js(0);
5351           nottaken1=(int)out;
5352           emit_jne(1);
5353         }
5354         if(opcode[i]==7) // BGTZ
5355         {
5356           emit_test(s1h,s1h);
5357           nottaken1=(int)out;
5358           emit_js(1);
5359           if(invert) taken=(int)out;
5360           else add_to_linker((int)out,ba[i],internal);
5361           emit_jne(0);
5362         }
5363       } // if(!only32)
5364           
5365       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5366       assert(s1l>=0);
5367       if(opcode[i]==4) // BEQ
5368       {
5369         if(s2l>=0) emit_cmp(s1l,s2l);
5370         else emit_test(s1l,s1l);
5371         if(invert){
5372           nottaken=(int)out;
5373           emit_jne(1);
5374         }else{
5375           add_to_linker((int)out,ba[i],internal);
5376           emit_jeq(0);
5377         }
5378       }
5379       if(opcode[i]==5) // BNE
5380       {
5381         if(s2l>=0) emit_cmp(s1l,s2l);
5382         else emit_test(s1l,s1l);
5383         if(invert){
5384           nottaken=(int)out;
5385           emit_jeq(1);
5386         }else{
5387           add_to_linker((int)out,ba[i],internal);
5388           emit_jne(0);
5389         }
5390       }
5391       if(opcode[i]==6) // BLEZ
5392       {
5393         emit_cmpimm(s1l,1);
5394         if(invert){
5395           nottaken=(int)out;
5396           emit_jge(1);
5397         }else{
5398           add_to_linker((int)out,ba[i],internal);
5399           emit_jl(0);
5400         }
5401       }
5402       if(opcode[i]==7) // BGTZ
5403       {
5404         emit_cmpimm(s1l,1);
5405         if(invert){
5406           nottaken=(int)out;
5407           emit_jl(1);
5408         }else{
5409           add_to_linker((int)out,ba[i],internal);
5410           emit_jge(0);
5411         }
5412       }
5413       if(invert) {
5414         if(taken) set_jump_target(taken,(int)out);
5415         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5416         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5417           if(adj) {
5418             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5419             add_to_linker((int)out,ba[i],internal);
5420           }else{
5421             emit_addnop(13);
5422             add_to_linker((int)out,ba[i],internal*2);
5423           }
5424           emit_jmp(0);
5425         }else
5426         #endif
5427         {
5428           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5429           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5430           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5431           if(internal)
5432             assem_debug("branch: internal\n");
5433           else
5434             assem_debug("branch: external\n");
5435           if(internal&&is_ds[(ba[i]-start)>>2]) {
5436             ds_assemble_entry(i);
5437           }
5438           else {
5439             add_to_linker((int)out,ba[i],internal);
5440             emit_jmp(0);
5441           }
5442         }
5443         set_jump_target(nottaken,(int)out);
5444       }
5445
5446       if(nottaken1) set_jump_target(nottaken1,(int)out);
5447       if(adj) {
5448         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5449       }
5450     } // (!unconditional)
5451   } // if(ooo)
5452   else
5453   {
5454     // In-order execution (branch first)
5455     //if(likely[i]) printf("IOL\n");
5456     //else
5457     //printf("IOE\n");
5458     int taken=0,nottaken=0,nottaken1=0;
5459     if(!unconditional&&!nop) {
5460       if(!only32)
5461       {
5462         assert(s1h>=0);
5463         if((opcode[i]&0x2f)==4) // BEQ
5464         {
5465           if(s2h>=0) emit_cmp(s1h,s2h);
5466           else emit_test(s1h,s1h);
5467           nottaken1=(int)out;
5468           emit_jne(2);
5469         }
5470         if((opcode[i]&0x2f)==5) // BNE
5471         {
5472           if(s2h>=0) emit_cmp(s1h,s2h);
5473           else emit_test(s1h,s1h);
5474           taken=(int)out;
5475           emit_jne(1);
5476         }
5477         if((opcode[i]&0x2f)==6) // BLEZ
5478         {
5479           emit_test(s1h,s1h);
5480           taken=(int)out;
5481           emit_js(1);
5482           nottaken1=(int)out;
5483           emit_jne(2);
5484         }
5485         if((opcode[i]&0x2f)==7) // BGTZ
5486         {
5487           emit_test(s1h,s1h);
5488           nottaken1=(int)out;
5489           emit_js(2);
5490           taken=(int)out;
5491           emit_jne(1);
5492         }
5493       } // if(!only32)
5494           
5495       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5496       assert(s1l>=0);
5497       if((opcode[i]&0x2f)==4) // BEQ
5498       {
5499         if(s2l>=0) emit_cmp(s1l,s2l);
5500         else emit_test(s1l,s1l);
5501         nottaken=(int)out;
5502         emit_jne(2);
5503       }
5504       if((opcode[i]&0x2f)==5) // BNE
5505       {
5506         if(s2l>=0) emit_cmp(s1l,s2l);
5507         else emit_test(s1l,s1l);
5508         nottaken=(int)out;
5509         emit_jeq(2);
5510       }
5511       if((opcode[i]&0x2f)==6) // BLEZ
5512       {
5513         emit_cmpimm(s1l,1);
5514         nottaken=(int)out;
5515         emit_jge(2);
5516       }
5517       if((opcode[i]&0x2f)==7) // BGTZ
5518       {
5519         emit_cmpimm(s1l,1);
5520         nottaken=(int)out;
5521         emit_jl(2);
5522       }
5523     } // if(!unconditional)
5524     int adj;
5525     uint64_t ds_unneeded=branch_regs[i].u;
5526     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5527     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5528     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5529     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5530     ds_unneeded|=1;
5531     ds_unneeded_upper|=1;
5532     // branch taken
5533     if(!nop) {
5534       if(taken) set_jump_target(taken,(int)out);
5535       assem_debug("1:\n");
5536       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5537                     ds_unneeded,ds_unneeded_upper);
5538       // load regs
5539       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5540       address_generation(i+1,&branch_regs[i],0);
5541       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5542       ds_assemble(i+1,&branch_regs[i]);
5543       cc=get_reg(branch_regs[i].regmap,CCREG);
5544       if(cc==-1) {
5545         emit_loadreg(CCREG,cc=HOST_CCREG);
5546         // CHECK: Is the following instruction (fall thru) allocated ok?
5547       }
5548       assert(cc==HOST_CCREG);
5549       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5550       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5551       assem_debug("cycle count (adj)\n");
5552       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5553       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5554       if(internal)
5555         assem_debug("branch: internal\n");
5556       else
5557         assem_debug("branch: external\n");
5558       if(internal&&is_ds[(ba[i]-start)>>2]) {
5559         ds_assemble_entry(i);
5560       }
5561       else {
5562         add_to_linker((int)out,ba[i],internal);
5563         emit_jmp(0);
5564       }
5565     }
5566     // branch not taken
5567     cop1_usable=prev_cop1_usable;
5568     if(!unconditional) {
5569       if(nottaken1) set_jump_target(nottaken1,(int)out);
5570       set_jump_target(nottaken,(int)out);
5571       assem_debug("2:\n");
5572       if(!likely[i]) {
5573         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5574                       ds_unneeded,ds_unneeded_upper);
5575         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5576         address_generation(i+1,&branch_regs[i],0);
5577         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5578         ds_assemble(i+1,&branch_regs[i]);
5579       }
5580       cc=get_reg(branch_regs[i].regmap,CCREG);
5581       if(cc==-1&&!likely[i]) {
5582         // Cycle count isn't in a register, temporarily load it then write it out
5583         emit_loadreg(CCREG,HOST_CCREG);
5584         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5585         int jaddr=(int)out;
5586         emit_jns(0);
5587         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5588         emit_storereg(CCREG,HOST_CCREG);
5589       }
5590       else{
5591         cc=get_reg(i_regmap,CCREG);
5592         assert(cc==HOST_CCREG);
5593         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5594         int jaddr=(int)out;
5595         emit_jns(0);
5596         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5597       }
5598     }
5599   }
5600 }
5601
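/* REGIMM branch on the sign of a single register (BLTZ/BGEZ and their
   "likely"/"and link" variants, selected by opcode2).  The "and link" forms
   write the return address to r31 regardless of the branch outcome, and per
   the assert below are only handled here when rs1 is $zero.  The structure
   otherwise mirrors cjump_assemble above. */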
5602 void sjump_assemble(int i,struct regstat *i_regs)
5603 {
5604   signed char *i_regmap=i_regs->regmap;
5605   int cc;
5606   int match;
5607   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5608   assem_debug("smatch=%d\n",match);
5609   int s1h,s1l;
5610   int prev_cop1_usable=cop1_usable;
5611   int unconditional=0,nevertaken=0;
5612   int only32=0;
5613   int ooo=1;
5614   int invert=0;
5615   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5616   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5617   if(likely[i]) ooo=0;
5618   if(!match) invert=1;
5619   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5620   if(i>(ba[i]-start)>>2) invert=1;
5621   #endif
5622
5623   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5624   assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5625
5626   if(ooo)
5627     if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5628   {
5629     // Write-after-read dependency prevents out of order execution
5630     // First test branch condition, then execute delay slot, then branch
5631     ooo=0;
5632   }
5633   // TODO: Conditional branches w/link must execute in-order so that
5634   // condition test and write to r31 occur before cycle count test
5635
5636   if(ooo) {
5637     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5638     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5639   }
5640   else {
5641     s1l=get_reg(i_regmap,rs1[i]);
5642     s1h=get_reg(i_regmap,rs1[i]|64);
5643   }
5644   if(rs1[i]==0)
5645   {
5646     if(opcode2[i]&1) unconditional=1;
5647     else nevertaken=1;
5648     // These are never taken (r0 is never less than zero)
5649     //assert(opcode2[i]!=0);
5650     //assert(opcode2[i]!=2);
5651     //assert(opcode2[i]!=0x10);
5652     //assert(opcode2[i]!=0x12);
5653   }
5654   else {
5655     only32=(regs[i].was32>>rs1[i])&1;
5656   }
5657
5658   if(ooo) {
5659     // Out of order execution (delay slot first)
5660     //printf("OOOE\n");
5661     address_generation(i+1,i_regs,regs[i].regmap_entry);
5662     ds_assemble(i+1,i_regs);
5663     int adj;
5664     uint64_t bc_unneeded=branch_regs[i].u;
5665     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5666     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5667     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5668     bc_unneeded|=1;
5669     bc_unneeded_upper|=1;
5670     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5671                   bc_unneeded,bc_unneeded_upper);
5672     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5673     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5674     if(rt1[i]==31) {
5675       int rt,return_address;
5676       assert(rt1[i+1]!=31);
5677       assert(rt2[i+1]!=31);
5678       rt=get_reg(branch_regs[i].regmap,31);
5679       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5680       if(rt>=0) {
5681         // Save the PC even if the branch is not taken
5682         return_address=start+i*4+8;
5683         emit_movimm(return_address,rt); // PC into link register
5684         #ifdef IMM_PREFETCH
5685         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5686         #endif
5687       }
5688     }
5689     cc=get_reg(branch_regs[i].regmap,CCREG);
5690     assert(cc==HOST_CCREG);
5691     if(unconditional) 
5692       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5693     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5694     assem_debug("cycle count (adj)\n");
5695     if(unconditional) {
5696       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5697       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5698         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5699         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5700         if(internal)
5701           assem_debug("branch: internal\n");
5702         else
5703           assem_debug("branch: external\n");
5704         if(internal&&is_ds[(ba[i]-start)>>2]) {
5705           ds_assemble_entry(i);
5706         }
5707         else {
5708           add_to_linker((int)out,ba[i],internal);
5709           emit_jmp(0);
5710         }
5711         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5712         if(((u_int)out)&7) emit_addnop(0);
5713         #endif
5714       }
5715     }
5716     else if(nevertaken) {
5717       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5718       int jaddr=(int)out;
5719       emit_jns(0);
5720       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5721     }
5722     else {
5723       int nottaken=0;
5724       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5725       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5726       if(!only32)
5727       {
5728         assert(s1h>=0);
5729         if(opcode2[i]==0) // BLTZ
5730         {
5731           emit_test(s1h,s1h);
5732           if(invert){
5733             nottaken=(int)out;
5734             emit_jns(1);
5735           }else{
5736             add_to_linker((int)out,ba[i],internal);
5737             emit_js(0);
5738           }
5739         }
5740         if(opcode2[i]==1) // BGEZ
5741         {
5742           emit_test(s1h,s1h);
5743           if(invert){
5744             nottaken=(int)out;
5745             emit_js(1);
5746           }else{
5747             add_to_linker((int)out,ba[i],internal);
5748             emit_jns(0);
5749           }
5750         }
5751       } // if(!only32)
5752       else
5753       {
5754         assert(s1l>=0);
5755         if(opcode2[i]==0) // BLTZ
5756         {
5757           emit_test(s1l,s1l);
5758           if(invert){
5759             nottaken=(int)out;
5760             emit_jns(1);
5761           }else{
5762             add_to_linker((int)out,ba[i],internal);
5763             emit_js(0);
5764           }
5765         }
5766         if(opcode2[i]==1) // BGEZ
5767         {
5768           emit_test(s1l,s1l);
5769           if(invert){
5770             nottaken=(int)out;
5771             emit_js(1);
5772           }else{
5773             add_to_linker((int)out,ba[i],internal);
5774             emit_jns(0);
5775           }
5776         }
5777       } // if(!only32)
5778           
5779       if(invert) {
5780         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5781         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5782           if(adj) {
5783             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5784             add_to_linker((int)out,ba[i],internal);
5785           }else{
5786             emit_addnop(13);
5787             add_to_linker((int)out,ba[i],internal*2);
5788           }
5789           emit_jmp(0);
5790         }else
5791         #endif
5792         {
5793           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5794           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5795           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5796           if(internal)
5797             assem_debug("branch: internal\n");
5798           else
5799             assem_debug("branch: external\n");
5800           if(internal&&is_ds[(ba[i]-start)>>2]) {
5801             ds_assemble_entry(i);
5802           }
5803           else {
5804             add_to_linker((int)out,ba[i],internal);
5805             emit_jmp(0);
5806           }
5807         }
5808         set_jump_target(nottaken,(int)out);
5809       }
5810
5811       if(adj) {
5812         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5813       }
5814     } // (!unconditional)
5815   } // if(ooo)
5816   else
5817   {
5818     // In-order execution (branch first)
5819     //printf("IOE\n");
5820     int nottaken=0;
5821     if(!unconditional) {
5822       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5823       if(!only32)
5824       {
5825         assert(s1h>=0);
5826         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5827         {
5828           emit_test(s1h,s1h);
5829           nottaken=(int)out;
5830           emit_jns(1);
5831         }
5832         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5833         {
5834           emit_test(s1h,s1h);
5835           nottaken=(int)out;
5836           emit_js(1);
5837         }
5838       } // if(!only32)
5839       else
5840       {
5841         assert(s1l>=0);
5842         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5843         {
5844           emit_test(s1l,s1l);
5845           nottaken=(int)out;
5846           emit_jns(1);
5847         }
5848         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5849         {
5850           emit_test(s1l,s1l);
5851           nottaken=(int)out;
5852           emit_js(1);
5853         }
5854       }
5855     } // if(!unconditional)
5856     int adj;
5857     uint64_t ds_unneeded=branch_regs[i].u;
5858     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5859     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5860     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5861     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5862     ds_unneeded|=1;
5863     ds_unneeded_upper|=1;
5864     // branch taken
5865     if(!nevertaken) {
5866       //assem_debug("1:\n");
5867       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5868                     ds_unneeded,ds_unneeded_upper);
5869       // load regs
5870       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5871       address_generation(i+1,&branch_regs[i],0);
5872       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5873       ds_assemble(i+1,&branch_regs[i]);
5874       cc=get_reg(branch_regs[i].regmap,CCREG);
5875       if(cc==-1) {
5876         emit_loadreg(CCREG,cc=HOST_CCREG);
5877         // CHECK: Is the following instruction (fall thru) allocated ok?
5878       }
5879       assert(cc==HOST_CCREG);
5880       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5881       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5882       assem_debug("cycle count (adj)\n");
5883       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5884       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5885       if(internal)
5886         assem_debug("branch: internal\n");
5887       else
5888         assem_debug("branch: external\n");
5889       if(internal&&is_ds[(ba[i]-start)>>2]) {
5890         ds_assemble_entry(i);
5891       }
5892       else {
5893         add_to_linker((int)out,ba[i],internal);
5894         emit_jmp(0);
5895       }
5896     }
5897     // branch not taken
5898     cop1_usable=prev_cop1_usable;
5899     if(!unconditional) {
5900       set_jump_target(nottaken,(int)out);
5901       assem_debug("1:\n");
5902       if(!likely[i]) {
5903         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5904                       ds_unneeded,ds_unneeded_upper);
5905         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5906         address_generation(i+1,&branch_regs[i],0);
5907         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5908         ds_assemble(i+1,&branch_regs[i]);
5909       }
5910       cc=get_reg(branch_regs[i].regmap,CCREG);
5911       if(cc==-1&&!likely[i]) {
5912         // Cycle count isn't in a register, temporarily load it then write it out
5913         emit_loadreg(CCREG,HOST_CCREG);
5914         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5915         int jaddr=(int)out;
5916         emit_jns(0);
5917         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5918         emit_storereg(CCREG,HOST_CCREG);
5919       }
5920       else{
5921         cc=get_reg(i_regmap,CCREG);
5922         assert(cc==HOST_CCREG);
5923         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5924         int jaddr=(int)out;
5925         emit_jns(0);
5926         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5927       }
5928     }
5929   }
5930 }
5931
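/* Branch on the FPU condition flag (BC1F/BC1T), i.e. bit 23 of the cached
   FSREG.  If COP1 has not yet been checked usable, a test of the CU1 bit in
   the cached status register is emitted first, with an FP_STUB to take the
   coprocessor-unusable path. */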
5932 void fjump_assemble(int i,struct regstat *i_regs)
5933 {
5934   signed char *i_regmap=i_regs->regmap;
5935   int cc;
5936   int match;
5937   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5938   assem_debug("fmatch=%d\n",match);
5939   int fs,cs;
5940   int eaddr;
5941   int ooo=1;
5942   int invert=0;
5943   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5944   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5945   if(likely[i]) ooo=0;
5946   if(!match) invert=1;
5947   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5948   if(i>(ba[i]-start)>>2) invert=1;
5949   #endif
5950
5951   if(ooo)
5952     if(itype[i+1]==FCOMP)
5953   {
5954     // Write-after-read dependency prevents out of order execution
5955     // First test branch condition, then execute delay slot, then branch
5956     ooo=0;
5957   }
5958
5959   if(ooo) {
5960     fs=get_reg(branch_regs[i].regmap,FSREG);
5961     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5962   }
5963   else {
5964     fs=get_reg(i_regmap,FSREG);
5965   }
5966
5967   // Check cop1 unusable
5968   if(!cop1_usable) {
5969     cs=get_reg(i_regmap,CSREG);
5970     assert(cs>=0);
5971     emit_testimm(cs,0x20000000);
5972     eaddr=(int)out;
5973     emit_jeq(0);
5974     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5975     cop1_usable=1;
5976   }
5977
5978   if(ooo) {
5979     // Out of order execution (delay slot first)
5980     //printf("OOOE\n");
5981     ds_assemble(i+1,i_regs);
5982     int adj;
5983     uint64_t bc_unneeded=branch_regs[i].u;
5984     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5985     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5986     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5987     bc_unneeded|=1;
5988     bc_unneeded_upper|=1;
5989     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5990                   bc_unneeded,bc_unneeded_upper);
5991     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5992     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5993     cc=get_reg(branch_regs[i].regmap,CCREG);
5994     assert(cc==HOST_CCREG);
5995     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5996     assem_debug("cycle count (adj)\n");
5997     if(1) {
5998       int nottaken=0;
5999       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6000       if(1) {
6001         assert(fs>=0);
6002         emit_testimm(fs,0x800000);
6003         if(source[i]&0x10000) // BC1T
6004         {
6005           if(invert){
6006             nottaken=(int)out;
6007             emit_jeq(1);
6008           }else{
6009             add_to_linker((int)out,ba[i],internal);
6010             emit_jne(0);
6011           }
6012         }
6013         else // BC1F
6014         {
6015           if(invert){
6016             nottaken=(int)out;
6017             emit_jne(1);
6018           }else{
6019             add_to_linker((int)out,ba[i],internal);
6020             emit_jeq(0);
6021           }
6022         }
6023       } // if(1)
6024           
6025       if(invert) {
6026         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6027         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6028         else if(match) emit_addnop(13);
6029         #endif
6030         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6031         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6032         if(internal)
6033           assem_debug("branch: internal\n");
6034         else
6035           assem_debug("branch: external\n");
6036         if(internal&&is_ds[(ba[i]-start)>>2]) {
6037           ds_assemble_entry(i);
6038         }
6039         else {
6040           add_to_linker((int)out,ba[i],internal);
6041           emit_jmp(0);
6042         }
6043         set_jump_target(nottaken,(int)out);
6044       }
6045
6046       if(adj) {
6047         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6048       }
6049     } // if(1)
6050   } // if(ooo)
6051   else
6052   {
6053     // In-order execution (branch first)
6054     //printf("IOE\n");
6055     int nottaken=0;
6056     if(1) {
6057       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6058       if(1) {
6059         assert(fs>=0);
6060         emit_testimm(fs,0x800000);
6061         if(source[i]&0x10000) // BC1T
6062         {
6063           nottaken=(int)out;
6064           emit_jeq(1);
6065         }
6066         else // BC1F
6067         {
6068           nottaken=(int)out;
6069           emit_jne(1);
6070         }
6071       }
6072     } // if(1)
6073     int adj;
6074     uint64_t ds_unneeded=branch_regs[i].u;
6075     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6076     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6077     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6078     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6079     ds_unneeded|=1;
6080     ds_unneeded_upper|=1;
6081     // branch taken
6082     //assem_debug("1:\n");
6083     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6084                   ds_unneeded,ds_unneeded_upper);
6085     // load regs
6086     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6087     address_generation(i+1,&branch_regs[i],0);
6088     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6089     ds_assemble(i+1,&branch_regs[i]);
6090     cc=get_reg(branch_regs[i].regmap,CCREG);
6091     if(cc==-1) {
6092       emit_loadreg(CCREG,cc=HOST_CCREG);
6093       // CHECK: Is the following instruction (fall thru) allocated ok?
6094     }
6095     assert(cc==HOST_CCREG);
6096     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6097     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6098     assem_debug("cycle count (adj)\n");
6099     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6100     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6101     if(internal)
6102       assem_debug("branch: internal\n");
6103     else
6104       assem_debug("branch: external\n");
6105     if(internal&&is_ds[(ba[i]-start)>>2]) {
6106       ds_assemble_entry(i);
6107     }
6108     else {
6109       add_to_linker((int)out,ba[i],internal);
6110       emit_jmp(0);
6111     }
6112
6113     // branch not taken
6114     if(1) { // <- FIXME (don't need this)
6115       set_jump_target(nottaken,(int)out);
6116       assem_debug("1:\n");
6117       if(!likely[i]) {
6118         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6119                       ds_unneeded,ds_unneeded_upper);
6120         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6121         address_generation(i+1,&branch_regs[i],0);
6122         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6123         ds_assemble(i+1,&branch_regs[i]);
6124       }
6125       cc=get_reg(branch_regs[i].regmap,CCREG);
6126       if(cc==-1&&!likely[i]) {
6127         // Cycle count isn't in a register, temporarily load it then write it out
6128         emit_loadreg(CCREG,HOST_CCREG);
6129         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6130         int jaddr=(int)out;
6131         emit_jns(0);
6132         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6133         emit_storereg(CCREG,HOST_CCREG);
6134       }
6135       else{
6136         cc=get_reg(i_regmap,CCREG);
6137         assert(cc==HOST_CCREG);
6138         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6139         int jaddr=(int)out;
6140         emit_jns(0);
6141         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6142       }
6143     }
6144   }
6145 }
6146
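/* Branch that spans a page boundary, so its delay slot belongs to the next
   block.  The branch outcome is computed into HOST_BTREG with compares and
   conditional moves rather than branches, dirty registers are written back,
   and control leaves through a check_addr()/extjump stub; pagespan_ds()
   below assembles such a stranded delay slot at the start of a block. */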
6147 static void pagespan_assemble(int i,struct regstat *i_regs)
6148 {
6149   int s1l=get_reg(i_regs->regmap,rs1[i]);
6150   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6151   int s2l=get_reg(i_regs->regmap,rs2[i]);
6152   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6153   void *nt_branch=NULL;
6154   int taken=0;
6155   int nottaken=0;
6156   int unconditional=0;
6157   if(rs1[i]==0)
6158   {
6159     s1l=s2l;s1h=s2h;
6160     s2l=s2h=-1;
6161   }
6162   else if(rs2[i]==0)
6163   {
6164     s2l=s2h=-1;
6165   }
6166   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6167     s1h=s2h=-1;
6168   }
6169   int hr=0;
6170   int addr,alt,ntaddr;
6171   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6172   else {
6173     while(hr<HOST_REGS)
6174     {
6175       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6176          (i_regs->regmap[hr]&63)!=rs1[i] &&
6177          (i_regs->regmap[hr]&63)!=rs2[i] )
6178       {
6179         addr=hr++;break;
6180       }
6181       hr++;
6182     }
6183   }
6184   while(hr<HOST_REGS)
6185   {
6186     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6187        (i_regs->regmap[hr]&63)!=rs1[i] &&
6188        (i_regs->regmap[hr]&63)!=rs2[i] )
6189     {
6190       alt=hr++;break;
6191     }
6192     hr++;
6193   }
6194   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6195   {
6196     while(hr<HOST_REGS)
6197     {
6198       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6199          (i_regs->regmap[hr]&63)!=rs1[i] &&
6200          (i_regs->regmap[hr]&63)!=rs2[i] )
6201       {
6202         ntaddr=hr;break;
6203       }
6204       hr++;
6205     }
6206   }
6207   assert(hr<HOST_REGS);
6208   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6209     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6210   }
6211   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6212   if(opcode[i]==2) // J
6213   {
6214     unconditional=1;
6215   }
6216   if(opcode[i]==3) // JAL
6217   {
6218     // TODO: mini_ht
6219     int rt=get_reg(i_regs->regmap,31);
6220     emit_movimm(start+i*4+8,rt);
6221     unconditional=1;
6222   }
6223   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6224   {
6225     emit_mov(s1l,addr);
6226     if(opcode2[i]==9) // JALR
6227     {
6228       int rt=get_reg(i_regs->regmap,rt1[i]);
6229       emit_movimm(start+i*4+8,rt);
6230     }
6231   }
6232   if((opcode[i]&0x3f)==4) // BEQ
6233   {
6234     if(rs1[i]==rs2[i])
6235     {
6236       unconditional=1;
6237     }
6238     else
6239     #ifdef HAVE_CMOV_IMM
6240     if(s1h<0) {
6241       if(s2l>=0) emit_cmp(s1l,s2l);
6242       else emit_test(s1l,s1l);
6243       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6244     }
6245     else
6246     #endif
6247     {
6248       assert(s1l>=0);
6249       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6250       if(s1h>=0) {
6251         if(s2h>=0) emit_cmp(s1h,s2h);
6252         else emit_test(s1h,s1h);
6253         emit_cmovne_reg(alt,addr);
6254       }
6255       if(s2l>=0) emit_cmp(s1l,s2l);
6256       else emit_test(s1l,s1l);
6257       emit_cmovne_reg(alt,addr);
6258     }
6259   }
6260   if((opcode[i]&0x3f)==5) // BNE
6261   {
6262     #ifdef HAVE_CMOV_IMM
6263     if(s1h<0) {
6264       if(s2l>=0) emit_cmp(s1l,s2l);
6265       else emit_test(s1l,s1l);
6266       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6267     }
6268     else
6269     #endif
6270     {
6271       assert(s1l>=0);
6272       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6273       if(s1h>=0) {
6274         if(s2h>=0) emit_cmp(s1h,s2h);
6275         else emit_test(s1h,s1h);
6276         emit_cmovne_reg(alt,addr);
6277       }
6278       if(s2l>=0) emit_cmp(s1l,s2l);
6279       else emit_test(s1l,s1l);
6280       emit_cmovne_reg(alt,addr);
6281     }
6282   }
6283   if((opcode[i]&0x3f)==0x14) // BEQL
6284   {
6285     if(s1h>=0) {
6286       if(s2h>=0) emit_cmp(s1h,s2h);
6287       else emit_test(s1h,s1h);
6288       nottaken=(int)out;
6289       emit_jne(0);
6290     }
6291     if(s2l>=0) emit_cmp(s1l,s2l);
6292     else emit_test(s1l,s1l);
6293     if(nottaken) set_jump_target(nottaken,(int)out);
6294     nottaken=(int)out;
6295     emit_jne(0);
6296   }
6297   if((opcode[i]&0x3f)==0x15) // BNEL
6298   {
6299     if(s1h>=0) {
6300       if(s2h>=0) emit_cmp(s1h,s2h);
6301       else emit_test(s1h,s1h);
6302       taken=(int)out;
6303       emit_jne(0);
6304     }
6305     if(s2l>=0) emit_cmp(s1l,s2l);
6306     else emit_test(s1l,s1l);
6307     nottaken=(int)out;
6308     emit_jeq(0);
6309     if(taken) set_jump_target(taken,(int)out);
6310   }
6311   if((opcode[i]&0x3f)==6) // BLEZ
6312   {
6313     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6314     emit_cmpimm(s1l,1);
6315     if(s1h>=0) emit_mov(addr,ntaddr);
6316     emit_cmovl_reg(alt,addr);
6317     if(s1h>=0) {
6318       emit_test(s1h,s1h);
6319       emit_cmovne_reg(ntaddr,addr);
6320       emit_cmovs_reg(alt,addr);
6321     }
6322   }
6323   if((opcode[i]&0x3f)==7) // BGTZ
6324   {
6325     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6326     emit_cmpimm(s1l,1);
6327     if(s1h>=0) emit_mov(addr,alt);
6328     emit_cmovl_reg(ntaddr,addr);
6329     if(s1h>=0) {
6330       emit_test(s1h,s1h);
6331       emit_cmovne_reg(alt,addr);
6332       emit_cmovs_reg(ntaddr,addr);
6333     }
6334   }
6335   if((opcode[i]&0x3f)==0x16) // BLEZL
6336   {
6337     assert((opcode[i]&0x3f)!=0x16);
6338   }
6339   if((opcode[i]&0x3f)==0x17) // BGTZL
6340   {
6341     assert((opcode[i]&0x3f)!=0x17);
6342   }
6343   assert(opcode[i]!=1); // BLTZ/BGEZ
6344
6345   //FIXME: Check CSREG
6346   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6347     if((source[i]&0x30000)==0) // BC1F
6348     {
6349       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6350       emit_testimm(s1l,0x800000);
6351       emit_cmovne_reg(alt,addr);
6352     }
6353     if((source[i]&0x30000)==0x10000) // BC1T
6354     {
6355       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6356       emit_testimm(s1l,0x800000);
6357       emit_cmovne_reg(alt,addr);
6358     }
6359     if((source[i]&0x30000)==0x20000) // BC1FL
6360     {
6361       emit_testimm(s1l,0x800000);
6362       nottaken=(int)out;
6363       emit_jne(0);
6364     }
6365     if((source[i]&0x30000)==0x30000) // BC1TL
6366     {
6367       emit_testimm(s1l,0x800000);
6368       nottaken=(int)out;
6369       emit_jeq(0);
6370     }
6371   }
6372
6373   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6374   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6375   if(likely[i]||unconditional)
6376   {
6377     emit_movimm(ba[i],HOST_BTREG);
6378   }
6379   else if(addr!=HOST_BTREG)
6380   {
6381     emit_mov(addr,HOST_BTREG);
6382   }
6383   void *branch_addr=out;
6384   emit_jmp(0);
6385   int target_addr=start+i*4+5;
6386   void *stub=out;
6387   void *compiled_target_addr=check_addr(target_addr);
6388   emit_extjump_ds((int)branch_addr,target_addr);
6389   if(compiled_target_addr) {
6390     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6391     add_link(target_addr,stub);
6392   }
6393   else set_jump_target((int)branch_addr,(int)stub);
6394   if(likely[i]) {
6395     // Not-taken path
6396     set_jump_target((int)nottaken,(int)out);
6397     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6398     void *branch_addr=out;
6399     emit_jmp(0);
6400     int target_addr=start+i*4+8;
6401     void *stub=out;
6402     void *compiled_target_addr=check_addr(target_addr);
6403     emit_extjump_ds((int)branch_addr,target_addr);
6404     if(compiled_target_addr) {
6405       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6406       add_link(target_addr,stub);
6407     }
6408     else set_jump_target((int)branch_addr,(int)stub);
6409   }
6410 }
6411
6412 // Assemble the delay slot for the above
6413 static void pagespan_ds()
6414 {
6415   assem_debug("initial delay slot:\n");
6416   u_int vaddr=start+1;
6417   u_int page=get_page(vaddr);
6418   u_int vpage=get_vpage(vaddr);
6419   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6420   do_dirty_stub_ds();
6421   ll_add(jump_in+page,vaddr,(void *)out);
6422   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6423   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6424     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6425   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6426     emit_writeword(HOST_BTREG,(int)&branch_target);
6427   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6428   address_generation(0,&regs[0],regs[0].regmap_entry);
6429   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6430     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6431   cop1_usable=0;
6432   is_delayslot=0;
6433   switch(itype[0]) {
6434     case ALU:
6435       alu_assemble(0,&regs[0]);break;
6436     case IMM16:
6437       imm16_assemble(0,&regs[0]);break;
6438     case SHIFT:
6439       shift_assemble(0,&regs[0]);break;
6440     case SHIFTIMM:
6441       shiftimm_assemble(0,&regs[0]);break;
6442     case LOAD:
6443       load_assemble(0,&regs[0]);break;
6444     case LOADLR:
6445       loadlr_assemble(0,&regs[0]);break;
6446     case STORE:
6447       store_assemble(0,&regs[0]);break;
6448     case STORELR:
6449       storelr_assemble(0,&regs[0]);break;
6450     case COP0:
6451       cop0_assemble(0,&regs[0]);break;
6452     case COP1:
6453       cop1_assemble(0,&regs[0]);break;
6454     case C1LS:
6455       c1ls_assemble(0,&regs[0]);break;
6456     case COP2:
6457       cop2_assemble(0,&regs[0]);break;
6458     case C2LS:
6459       c2ls_assemble(0,&regs[0]);break;
6460     case C2OP:
6461       c2op_assemble(0,&regs[0]);break;
6462     case FCONV:
6463       fconv_assemble(0,&regs[0]);break;
6464     case FLOAT:
6465       float_assemble(0,&regs[0]);break;
6466     case FCOMP:
6467       fcomp_assemble(0,&regs[0]);break;
6468     case MULTDIV:
6469       multdiv_assemble(0,&regs[0]);break;
6470     case MOV:
6471       mov_assemble(0,&regs[0]);break;
6472     case SYSCALL:
6473     case HLECALL:
6474     case SPAN:
6475     case UJUMP:
6476     case RJUMP:
6477     case CJUMP:
6478     case SJUMP:
6479     case FJUMP:
6480       printf("Jump in the delay slot.  This is probably a bug.\n");
6481   }
6482   int btaddr=get_reg(regs[0].regmap,BTREG);
6483   if(btaddr<0) {
6484     btaddr=get_reg(regs[0].regmap,-1);
6485     emit_readword((int)&branch_target,btaddr);
6486   }
6487   assert(btaddr!=HOST_CCREG);
6488   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6489 #ifdef HOST_IMM8
6490   emit_movimm(start+4,HOST_TEMPREG);
6491   emit_cmp(btaddr,HOST_TEMPREG);
6492 #else
6493   emit_cmpimm(btaddr,start+4);
6494 #endif
6495   int branch=(int)out;
6496   emit_jeq(0);
6497   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6498   emit_jmp(jump_vaddr_reg[btaddr]);
6499   set_jump_target(branch,(int)out);
6500   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6501   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6502 }
6503
6504 // Basic liveness analysis for MIPS registers
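// unneeded_reg[i] is a bitmask over the MIPS register file: bit r set means
// that register r's current value is dead at instruction i (bit 0, $zero,
// is always set), and unneeded_reg_upper tracks the upper 32-bit halves
// separately.  Walking backwards, writing rt marks its old value unneeded
// (u|=1LL<<rt) while reading rs makes it needed again (u&=~(1LL<<rs)).
// For example, across "addu $t0,$a0,$a1" the old value of $t0 becomes
// unneeded while $a0 and $a1 become needed.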
6505 void unneeded_registers(int istart,int iend,int r)
6506 {
6507   int i;
6508   uint64_t u,uu,b,bu;
6509   uint64_t temp_u,temp_uu;
6510   uint64_t tdep;
6511   if(iend==slen-1) {
6512     u=1;uu=1;
6513   }else{
6514     u=unneeded_reg[iend+1];
6515     uu=unneeded_reg_upper[iend+1];
6516     u=1;uu=1;
6517   }
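  // Note: the values loaded from unneeded_reg[iend+1] above are immediately
  // overwritten with the conservative u=uu=1, so only $zero is treated as
  // dead at the block boundary.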
6518   for (i=iend;i>=istart;i--)
6519   {
6520     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6521     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6522     {
6523       // If subroutine call, flag return address as a possible branch target
6524       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6525       
6526       if(ba[i]<start || ba[i]>=(start+slen*4))
6527       {
6528         // Branch out of this block, flush all regs
6529         u=1;
6530         uu=1;
6531         /* Hexagon hack 
6532         if(itype[i]==UJUMP&&rt1[i]==31)
6533         {
6534           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6535         }
6536         if(itype[i]==RJUMP&&rs1[i]==31)
6537         {
6538           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6539         }
6540         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6541           if(itype[i]==UJUMP&&rt1[i]==31)
6542           {
6543             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6544             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6545           }
6546           if(itype[i]==RJUMP&&rs1[i]==31)
6547           {
6548             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6549             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6550           }
6551         }*/
6552         branch_unneeded_reg[i]=u;
6553         branch_unneeded_reg_upper[i]=uu;
6554         // Merge in delay slot
6555         tdep=(~uu>>rt1[i+1])&1;
6556         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6557         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6558         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6559         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6560         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6561         u|=1;uu|=1;
6562         // If branch is "likely" (and conditional)
6563         // then we skip the delay slot on the fall-thru path
6564         if(likely[i]) {
6565           if(i<slen-1) {
6566             u&=unneeded_reg[i+2];
6567             uu&=unneeded_reg_upper[i+2];
6568           }
6569           else
6570           {
6571             u=1;
6572             uu=1;
6573           }
6574         }
6575       }
6576       else
6577       {
6578         // Internal branch, flag target
6579         bt[(ba[i]-start)>>2]=1;
6580         if(ba[i]<=start+i*4) {
6581           // Backward branch
6582           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6583           {
6584             // Unconditional branch
6585             temp_u=1;temp_uu=1;
6586           } else {
6587             // Conditional branch (not taken case)
6588             temp_u=unneeded_reg[i+2];
6589             temp_uu=unneeded_reg_upper[i+2];
6590           }
6591           // Merge in delay slot
6592           tdep=(~temp_uu>>rt1[i+1])&1;
6593           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6594           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6595           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6596           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6597           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6598           temp_u|=1;temp_uu|=1;
6599           // If branch is "likely" (and conditional)
6600           // then we skip the delay slot on the fall-thru path
6601           if(likely[i]) {
6602             if(i<slen-1) {
6603               temp_u&=unneeded_reg[i+2];
6604               temp_uu&=unneeded_reg_upper[i+2];
6605             }
6606             else
6607             {
6608               temp_u=1;
6609               temp_uu=1;
6610             }
6611           }
6612           tdep=(~temp_uu>>rt1[i])&1;
6613           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6614           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6615           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6616           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6617           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6618           temp_u|=1;temp_uu|=1;
6619           unneeded_reg[i]=temp_u;
6620           unneeded_reg_upper[i]=temp_uu;
6621           // Only go three levels deep.  This recursion can take an
6622           // excessive amount of time if there are a lot of nested loops.
6623           if(r<2) {
6624             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6625           }else{
6626             unneeded_reg[(ba[i]-start)>>2]=1;
6627             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6628           }
6629         } /*else*/ if(1) {
6630           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6631           {
6632             // Unconditional branch
6633             u=unneeded_reg[(ba[i]-start)>>2];
6634             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6635             branch_unneeded_reg[i]=u;
6636             branch_unneeded_reg_upper[i]=uu;
6637         //u=1;
6638         //uu=1;
6639         //branch_unneeded_reg[i]=u;
6640         //branch_unneeded_reg_upper[i]=uu;
6641             // Merge in delay slot
6642             tdep=(~uu>>rt1[i+1])&1;
6643             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6644             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6645             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6646             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6647             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6648             u|=1;uu|=1;
6649           } else {
6650             // Conditional branch
6651             b=unneeded_reg[(ba[i]-start)>>2];
6652             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6653             branch_unneeded_reg[i]=b;
6654             branch_unneeded_reg_upper[i]=bu;
6655         //b=1;
6656         //bu=1;
6657         //branch_unneeded_reg[i]=b;
6658         //branch_unneeded_reg_upper[i]=bu;
6659             // Branch delay slot
6660             tdep=(~uu>>rt1[i+1])&1;
6661             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6662             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6663             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6664             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6665             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6666             b|=1;bu|=1;
6667             // If branch is "likely" then we skip the
6668             // delay slot on the fall-thru path
6669             if(likely[i]) {
6670               u=b;
6671               uu=bu;
6672               if(i<slen-1) {
6673                 u&=unneeded_reg[i+2];
6674                 uu&=unneeded_reg_upper[i+2];
6675         //u=1;
6676         //uu=1;
6677               }
6678             } else {
6679               u&=b;
6680               uu&=bu;
6681         //u=1;
6682         //uu=1;
6683             }
6684             if(i<slen-1) {
6685               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6686               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6687         //branch_unneeded_reg[i]=1;
6688         //branch_unneeded_reg_upper[i]=1;
6689             } else {
6690               branch_unneeded_reg[i]=1;
6691               branch_unneeded_reg_upper[i]=1;
6692             }
6693           }
6694         }
6695       }
6696     }
6697     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
6698     {
6699       // SYSCALL instruction (software interrupt)
6700       u=1;
6701       uu=1;
6702     }
6703     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6704     {
6705       // ERET instruction (return from interrupt)
6706       u=1;
6707       uu=1;
6708     }
6709     //u=uu=1; // DEBUG
6710     tdep=(~uu>>rt1[i])&1;
6711     // Written registers are unneeded
6712     u|=1LL<<rt1[i];
6713     u|=1LL<<rt2[i];
6714     uu|=1LL<<rt1[i];
6715     uu|=1LL<<rt2[i];
6716     // Accessed registers are needed
6717     u&=~(1LL<<rs1[i]);
6718     u&=~(1LL<<rs2[i]);
6719     uu&=~(1LL<<us1[i]);
6720     uu&=~(1LL<<us2[i]);
6721     // Source-target dependencies
6722     uu&=~(tdep<<dep1[i]);
6723     uu&=~(tdep<<dep2[i]);
6724     // R0 is always unneeded
6725     u|=1;uu|=1;
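    // Illustrative example: for "addu $t0,$a0,$a1" (rt1=8, rs1=4, rs2=5) the
    // lines above set bit 8 (t0's previous value is dead), clear bits 4 and 5
    // (a0/a1 must stay live), and finally force bit 0 since r0 never matters.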
6726     // Save it
6727     unneeded_reg[i]=u;
6728     unneeded_reg_upper[i]=uu;
6729     /*
6730     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6731     printf("U:");
6732     int r;
6733     for(r=1;r<=CCREG;r++) {
6734       if((unneeded_reg[i]>>r)&1) {
6735         if(r==HIREG) printf(" HI");
6736         else if(r==LOREG) printf(" LO");
6737         else printf(" r%d",r);
6738       }
6739     }
6740     printf(" UU:");
6741     for(r=1;r<=CCREG;r++) {
6742       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6743         if(r==HIREG) printf(" HI");
6744         else if(r==LOREG) printf(" LO");
6745         else printf(" r%d",r);
6746       }
6747     }
6748     printf("\n");*/
6749   }
6750 #ifdef FORCE32
6751   for (i=iend;i>=istart;i--)
6752   {
6753     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6754   }
6755 #endif
6756 }
6757
6758 // Identify registers which are likely to contain 32-bit values
6759 // This is used to predict whether any branches will jump to a
6760 // location with 64-bit values in registers.
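// is32 is a provisional bitmask: bit r set means register r is assumed to
// hold a sign-extended 32-bit value at this point.  The result after each
// instruction is saved in p32[], which is also consulted below whenever a
// branch target (bt[i]) is reached, to merge in what the jumps provide.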
6761 static void provisional_32bit()
6762 {
6763   int i,j;
6764   uint64_t is32=1;
6765   uint64_t lastbranch=1;
6766   
6767   for(i=0;i<slen;i++)
6768   {
6769     if(i>0) {
6770       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6771         if(i>1) is32=lastbranch;
6772         else is32=1;
6773       }
6774     }
6775     if(i>1)
6776     {
6777       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6778         if(likely[i-2]) {
6779           if(i>2) is32=lastbranch;
6780           else is32=1;
6781         }
6782       }
6783       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6784       {
6785         if(rs1[i-2]==0||rs2[i-2]==0)
6786         {
6787           if(rs1[i-2]) {
6788             is32|=1LL<<rs1[i-2];
6789           }
6790           if(rs2[i-2]) {
6791             is32|=1LL<<rs2[i-2];
6792           }
6793         }
6794       }
6795     }
6796     // If something jumps here with 64-bit values
6797     // then promote those registers to 64 bits
6798     if(bt[i])
6799     {
6800       uint64_t temp_is32=is32;
6801       for(j=i-1;j>=0;j--)
6802       {
6803         if(ba[j]==start+i*4) 
6804           //temp_is32&=branch_regs[j].is32;
6805           temp_is32&=p32[j];
6806       }
6807       for(j=i;j<slen;j++)
6808       {
6809         if(ba[j]==start+i*4) 
6810           temp_is32=1;
6811       }
6812       is32=temp_is32;
6813     }
6814     int type=itype[i];
6815     int op=opcode[i];
6816     int op2=opcode2[i];
6817     int rt=rt1[i];
6818     int s1=rs1[i];
6819     int s2=rs2[i];
6820     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6821       // Branches don't write registers, consider the delay slot instead.
6822       type=itype[i+1];
6823       op=opcode[i+1];
6824       op2=opcode2[i+1];
6825       rt=rt1[i+1];
6826       s1=rs1[i+1];
6827       s2=rs2[i+1];
6828       lastbranch=is32;
6829     }
6830     switch(type) {
6831       case LOAD:
6832         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6833            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6834           is32&=~(1LL<<rt);
6835         else
6836           is32|=1LL<<rt;
6837         break;
6838       case STORE:
6839       case STORELR:
6840         break;
6841       case LOADLR:
6842         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6843         if(op==0x22) is32|=1LL<<rt; // LWL
6844         break;
6845       case IMM16:
6846         if (op==0x08||op==0x09|| // ADDI/ADDIU
6847             op==0x0a||op==0x0b|| // SLTI/SLTIU
6848             op==0x0c|| // ANDI
6849             op==0x0f)  // LUI
6850         {
6851           is32|=1LL<<rt;
6852         }
6853         if(op==0x18||op==0x19) { // DADDI/DADDIU
6854           is32&=~(1LL<<rt);
6855           //if(imm[i]==0)
6856           //  is32|=((is32>>s1)&1LL)<<rt;
6857         }
6858         if(op==0x0d||op==0x0e) { // ORI/XORI
6859           uint64_t sr=((is32>>s1)&1LL);
6860           is32&=~(1LL<<rt);
6861           is32|=sr<<rt;
6862         }
6863         break;
6864       case UJUMP:
6865         break;
6866       case RJUMP:
6867         break;
6868       case CJUMP:
6869         break;
6870       case SJUMP:
6871         break;
6872       case FJUMP:
6873         break;
6874       case ALU:
6875         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6876           is32|=1LL<<rt;
6877         }
6878         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6879           is32|=1LL<<rt;
6880         }
6881         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6882           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6883           is32&=~(1LL<<rt);
6884           is32|=sr<<rt;
6885         }
6886         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6887           if(s1==0&&s2==0) {
6888             is32|=1LL<<rt;
6889           }
6890           else if(s2==0) {
6891             uint64_t sr=((is32>>s1)&1LL);
6892             is32&=~(1LL<<rt);
6893             is32|=sr<<rt;
6894           }
6895           else if(s1==0) {
6896             uint64_t sr=((is32>>s2)&1LL);
6897             is32&=~(1LL<<rt);
6898             is32|=sr<<rt;
6899           }
6900           else {
6901             is32&=~(1LL<<rt);
6902           }
6903         }
6904         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6905           if(s1==0&&s2==0) {
6906             is32|=1LL<<rt;
6907           }
6908           else if(s2==0) {
6909             uint64_t sr=((is32>>s1)&1LL);
6910             is32&=~(1LL<<rt);
6911             is32|=sr<<rt;
6912           }
6913           else {
6914             is32&=~(1LL<<rt);
6915           }
6916         }
6917         break;
6918       case MULTDIV:
6919         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6920           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
6921         }
6922         else {
6923           is32|=(1LL<<HIREG)|(1LL<<LOREG);
6924         }
6925         break;
6926       case MOV:
6927         {
6928           uint64_t sr=((is32>>s1)&1LL);
6929           is32&=~(1LL<<rt);
6930           is32|=sr<<rt;
6931         }
6932         break;
6933       case SHIFT:
6934         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
6935         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
6936         break;
6937       case SHIFTIMM:
6938         is32|=1LL<<rt;
6939         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
6940         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
6941         break;
6942       case COP0:
6943         if(op2==0) is32|=1LL<<rt; // MFC0
6944         break;
6945       case COP1:
6946       case COP2:
6947         if(op2==0) is32|=1LL<<rt; // MFC1
6948         if(op2==1) is32&=~(1LL<<rt); // DMFC1
6949         if(op2==2) is32|=1LL<<rt; // CFC1
6950         break;
6951       case C1LS:
6952       case C2LS:
6953         break;
6954       case FLOAT:
6955       case FCONV:
6956         break;
6957       case FCOMP:
6958         break;
6959       case C2OP:
6960       case SYSCALL:
6961       case HLECALL:
6962         break;
6963       default:
6964         break;
6965     }
6966     is32|=1;
6967     p32[i]=is32;
6968
6969     if(i>0)
6970     {
6971       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
6972       {
6973         if(rt1[i-1]==31) // JAL/JALR
6974         {
6975           // Subroutine call will return here, don't alloc any registers
6976           is32=1;
6977         }
6978         else if(i+1<slen)
6979         {
6980           // Internal branch will jump here, match registers to caller
6981           is32=0x3FFFFFFFFLL;
6982         }
6983       }
6984     }
6985   }
6986 }
6987
6988 // Identify registers which may be assumed to contain 32-bit values
6989 // and where optimizations will rely on this.
6990 // This is used to determine whether backward branches can safely
6991 // jump to a location with 64-bit values in registers.
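// r32 is a bitmask of registers that must actually contain 32-bit values at
// each point.  It is built walking backwards and stored per instruction in
// pr32[], mirroring the requires_32bit[] logic kept in the comments below.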
6992 static void provisional_r32()
6993 {
6994   u_int r32=0;
6995   int i;
6996   
6997   for (i=slen-1;i>=0;i--)
6998   {
6999     int hr;
7000     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7001     {
7002       if(ba[i]<start || ba[i]>=(start+slen*4))
7003       {
7004         // Branch out of this block, don't need anything
7005         r32=0;
7006       }
7007       else
7008       {
7009         // Internal branch
7010         // Need whatever matches the target
7011         // (and doesn't get overwritten by the delay slot instruction)
7012         r32=0;
7013         int t=(ba[i]-start)>>2;
7014         if(ba[i]>start+i*4) {
7015           // Forward branch
7016           //if(!(requires_32bit[t]&~regs[i].was32))
7017           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7018           if(!(pr32[t]&~regs[i].was32))
7019             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7020         }else{
7021           // Backward branch
7022           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7023             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7024         }
7025       }
7026       // Conditional branch may need registers for following instructions
7027       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7028       {
7029         if(i<slen-2) {
7030           //r32|=requires_32bit[i+2];
7031           r32|=pr32[i+2];
7032           r32&=regs[i].was32;
7033           // Mark this address as a branch target since it may be called
7034           // upon return from interrupt
7035           //bt[i+2]=1;
7036         }
7037       }
7038       // Merge in delay slot
7039       if(!likely[i]) {
7040         // These are overwritten unless the branch is "likely"
7041         // and the delay slot is nullified if not taken
7042         r32&=~(1LL<<rt1[i+1]);
7043         r32&=~(1LL<<rt2[i+1]);
7044       }
7045       // Assume these are needed (delay slot)
7046       if(us1[i+1]>0)
7047       {
7048         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7049       }
7050       if(us2[i+1]>0)
7051       {
7052         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7053       }
7054       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7055       {
7056         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7057       }
7058       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7059       {
7060         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7061       }
7062     }
7063     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7064     {
7065       // SYSCALL instruction (software interrupt)
7066       r32=0;
7067     }
7068     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7069     {
7070       // ERET instruction (return from interrupt)
7071       r32=0;
7072     }
7073     // Check 32 bits
7074     r32&=~(1LL<<rt1[i]);
7075     r32&=~(1LL<<rt2[i]);
7076     if(us1[i]>0)
7077     {
7078       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7079     }
7080     if(us2[i]>0)
7081     {
7082       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7083     }
7084     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7085     {
7086       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7087     }
7088     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7089     {
7090       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7091     }
7092     //requires_32bit[i]=r32;
7093     pr32[i]=r32;
7094     
7095     // Dirty registers which are 32-bit, require 32-bit input
7096     // as they will be written as 32-bit values
7097     for(hr=0;hr<HOST_REGS;hr++)
7098     {
7099       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7100         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7101           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7102             pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7103           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7104         }
7105       }
7106     }
7107   }
7108 }
7109
7110 // Write back dirty registers as soon as we will no longer modify them,
7111 // so that we don't end up with lots of writes at the branches.
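// will_dirty[]/wont_dirty[] are bitmasks over *host* registers, computed
// backwards like the liveness pass above.  Roughly: a register in will_dirty
// is going to be written again anyway, and a register outside wont_dirty no
// longer needs its dirty flag kept, so it can be written back earlier.  When
// wr is set the masks are applied to regs[]/branch_regs[].dirty; recursive
// calls into loop bodies pass wr=0 and only propagate the masks.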
7112 void clean_registers(int istart,int iend,int wr)
7113 {
7114   int i;
7115   int r;
7116   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7117   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7118   if(iend==slen-1) {
7119     will_dirty_i=will_dirty_next=0;
7120     wont_dirty_i=wont_dirty_next=0;
7121   }else{
7122     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7123     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7124   }
7125   for (i=iend;i>=istart;i--)
7126   {
7127     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7128     {
7129       if(ba[i]<start || ba[i]>=(start+slen*4))
7130       {
7131         // Branch out of this block, flush all regs
7132         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7133         {
7134           // Unconditional branch
7135           will_dirty_i=0;
7136           wont_dirty_i=0;
7137           // Merge in delay slot (will dirty)
7138           for(r=0;r<HOST_REGS;r++) {
7139             if(r!=EXCLUDE_REG) {
7140               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7141               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7142               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7143               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7144               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7145               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7146               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7147               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7148               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7149               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7150               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7151               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7152               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7153               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7154             }
7155           }
7156         }
7157         else
7158         {
7159           // Conditional branch
7160           will_dirty_i=0;
7161           wont_dirty_i=wont_dirty_next;
7162           // Merge in delay slot (will dirty)
7163           for(r=0;r<HOST_REGS;r++) {
7164             if(r!=EXCLUDE_REG) {
7165               if(!likely[i]) {
7166                 // Might not dirty if likely branch is not taken
7167                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7168                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7169                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7170                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7171                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7172                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7173                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7174                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7175                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7176                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7177                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7178                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7179                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7180                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7181               }
7182             }
7183           }
7184         }
7185         // Merge in delay slot (wont dirty)
7186         for(r=0;r<HOST_REGS;r++) {
7187           if(r!=EXCLUDE_REG) {
7188             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7189             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7190             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7191             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7192             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7193             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7194             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7195             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7196             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7197             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7198           }
7199         }
7200         if(wr) {
7201           #ifndef DESTRUCTIVE_WRITEBACK
7202           branch_regs[i].dirty&=wont_dirty_i;
7203           #endif
7204           branch_regs[i].dirty|=will_dirty_i;
7205         }
7206       }
7207       else
7208       {
7209         // Internal branch
7210         if(ba[i]<=start+i*4) {
7211           // Backward branch
7212           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7213           {
7214             // Unconditional branch
7215             temp_will_dirty=0;
7216             temp_wont_dirty=0;
7217             // Merge in delay slot (will dirty)
7218             for(r=0;r<HOST_REGS;r++) {
7219               if(r!=EXCLUDE_REG) {
7220                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7221                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7222                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7223                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7224                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7225                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7226                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7227                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7228                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7229                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7230                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7231                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7232                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7233                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7234               }
7235             }
7236           } else {
7237             // Conditional branch (not taken case)
7238             temp_will_dirty=will_dirty_next;
7239             temp_wont_dirty=wont_dirty_next;
7240             // Merge in delay slot (will dirty)
7241             for(r=0;r<HOST_REGS;r++) {
7242               if(r!=EXCLUDE_REG) {
7243                 if(!likely[i]) {
7244                   // Will not dirty if likely branch is not taken
7245                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7246                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7247                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7248                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7249                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7250                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7251                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7252                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7253                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7254                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7255                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7256                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7257                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7258                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7259                 }
7260               }
7261             }
7262           }
7263           // Merge in delay slot (wont dirty)
7264           for(r=0;r<HOST_REGS;r++) {
7265             if(r!=EXCLUDE_REG) {
7266               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7267               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7268               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7269               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7270               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7271               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7272               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7273               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7274               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7275               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7276             }
7277           }
7278           // Deal with changed mappings
7279           if(i<iend) {
7280             for(r=0;r<HOST_REGS;r++) {
7281               if(r!=EXCLUDE_REG) {
7282                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7283                   temp_will_dirty&=~(1<<r);
7284                   temp_wont_dirty&=~(1<<r);
7285                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7286                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7287                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7288                   } else {
7289                     temp_will_dirty|=1<<r;
7290                     temp_wont_dirty|=1<<r;
7291                   }
7292                 }
7293               }
7294             }
7295           }
7296           if(wr) {
7297             will_dirty[i]=temp_will_dirty;
7298             wont_dirty[i]=temp_wont_dirty;
7299             clean_registers((ba[i]-start)>>2,i-1,0);
7300           }else{
7301             // Limit recursion.  It can take an excessive amount
7302             // of time if there are a lot of nested loops.
7303             will_dirty[(ba[i]-start)>>2]=0;
7304             wont_dirty[(ba[i]-start)>>2]=-1;
7305           }
7306         }
7307         /*else*/ if(1)
7308         {
7309           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7310           {
7311             // Unconditional branch
7312             will_dirty_i=0;
7313             wont_dirty_i=0;
7314           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7315             for(r=0;r<HOST_REGS;r++) {
7316               if(r!=EXCLUDE_REG) {
7317                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7318                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7319                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7320                 }
7321               }
7322             }
7323           //}
7324             // Merge in delay slot
7325             for(r=0;r<HOST_REGS;r++) {
7326               if(r!=EXCLUDE_REG) {
7327                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7328                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7329                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7330                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7331                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7332                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7333                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7334                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7335                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7336                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7337                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7338                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7339                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7340                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7341               }
7342             }
7343           } else {
7344             // Conditional branch
7345             will_dirty_i=will_dirty_next;
7346             wont_dirty_i=wont_dirty_next;
7347           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7348             for(r=0;r<HOST_REGS;r++) {
7349               if(r!=EXCLUDE_REG) {
7350                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7351                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7352                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7353                 }
7354                 else
7355                 {
7356                   will_dirty_i&=~(1<<r);
7357                 }
7358                 // Treat delay slot as part of branch too
7359                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7360                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7361                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7362                 }
7363                 else
7364                 {
7365                   will_dirty[i+1]&=~(1<<r);
7366                 }*/
7367               }
7368             }
7369           //}
7370             // Merge in delay slot
7371             for(r=0;r<HOST_REGS;r++) {
7372               if(r!=EXCLUDE_REG) {
7373                 if(!likely[i]) {
7374                   // Might not dirty if likely branch is not taken
7375                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7376                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7377                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7378                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7379                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7380                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7381                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7382                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7383                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7384                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7385                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7386                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7387                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7388                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7389                 }
7390               }
7391             }
7392           }
7393           // Merge in delay slot
7394           for(r=0;r<HOST_REGS;r++) {
7395             if(r!=EXCLUDE_REG) {
7396               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7397               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7398               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7399               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7400               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7401               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7402               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7403               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7404               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7405               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7406             }
7407           }
7408           if(wr) {
7409             #ifndef DESTRUCTIVE_WRITEBACK
7410             branch_regs[i].dirty&=wont_dirty_i;
7411             #endif
7412             branch_regs[i].dirty|=will_dirty_i;
7413           }
7414         }
7415       }
7416     }
7417     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7418     {
7419       // SYSCALL instruction (software interrupt)
7420       will_dirty_i=0;
7421       wont_dirty_i=0;
7422     }
7423     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7424     {
7425       // ERET instruction (return from interrupt)
7426       will_dirty_i=0;
7427       wont_dirty_i=0;
7428     }
7429     will_dirty_next=will_dirty_i;
7430     wont_dirty_next=wont_dirty_i;
7431     for(r=0;r<HOST_REGS;r++) {
7432       if(r!=EXCLUDE_REG) {
7433         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7434         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7435         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7436         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7437         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7438         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7439         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7440         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7441         if(i>istart) {
7442           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7443           {
7444             // Don't store a register immediately after writing it,
7445             // may prevent dual-issue.
7446             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7447             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7448           }
7449         }
7450       }
7451     }
7452     // Save it
7453     will_dirty[i]=will_dirty_i;
7454     wont_dirty[i]=wont_dirty_i;
7455     // Mark registers that won't be dirtied as not dirty
7456     if(wr) {
7457       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7458       for(r=0;r<HOST_REGS;r++) {
7459         if((will_dirty_i>>r)&1) {
7460           printf(" r%d",r);
7461         }
7462       }
7463       printf("\n");*/
7464
7465       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7466         regs[i].dirty|=will_dirty_i;
7467         #ifndef DESTRUCTIVE_WRITEBACK
7468         regs[i].dirty&=wont_dirty_i;
7469         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7470         {
7471           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7472             for(r=0;r<HOST_REGS;r++) {
7473               if(r!=EXCLUDE_REG) {
7474                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7475                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7476                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7477               }
7478             }
7479           }
7480         }
7481         else
7482         {
7483           if(i<iend) {
7484             for(r=0;r<HOST_REGS;r++) {
7485               if(r!=EXCLUDE_REG) {
7486                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7487                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7488                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7489               }
7490             }
7491           }
7492         }
7493         #endif
7494       //}
7495     }
7496     // Deal with changed mappings
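    // regmap_pre[i] is the host register mapping on entry to instruction i,
    // regs[i].regmap the mapping it actually uses.  If a guest register moved
    // to a different host register its dirty bits follow it; if it was
    // dropped entirely the bits are cleared, unless the old value was
    // unneeded anyway.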
7497     temp_will_dirty=will_dirty_i;
7498     temp_wont_dirty=wont_dirty_i;
7499     for(r=0;r<HOST_REGS;r++) {
7500       if(r!=EXCLUDE_REG) {
7501         int nr;
7502         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7503           if(wr) {
7504             #ifndef DESTRUCTIVE_WRITEBACK
7505             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7506             #endif
7507             regs[i].wasdirty|=will_dirty_i&(1<<r);
7508           }
7509         }
7510         else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7511           // Register moved to a different register
7512           will_dirty_i&=~(1<<r);
7513           wont_dirty_i&=~(1<<r);
7514           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7515           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7516           if(wr) {
7517             #ifndef DESTRUCTIVE_WRITEBACK
7518             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7519             #endif
7520             regs[i].wasdirty|=will_dirty_i&(1<<r);
7521           }
7522         }
7523         else {
7524           will_dirty_i&=~(1<<r);
7525           wont_dirty_i&=~(1<<r);
7526           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7527             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7528             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7529           } else {
7530             wont_dirty_i|=1<<r;
7531             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7532           }
7533         }
7534       }
7535     }
7536   }
7537 }
7538
7539   /* disassembly */
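  /* Each line is printed as "<flag> <address>: <mnemonic> <operands>", where
     the flag column shows '*' for instructions that are branch targets. */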
7540 void disassemble_inst(int i)
7541 {
7542     if (bt[i]) printf("*"); else printf(" ");
7543     switch(itype[i]) {
7544       case UJUMP:
7545         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7546       case CJUMP:
7547         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7548       case SJUMP:
7549         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7550       case FJUMP:
7551         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7552       case RJUMP:
7553         if (rt1[i]!=31)
7554           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7555         else
7556           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7557         break;
7558       case SPAN:
7559         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7560       case IMM16:
7561         if(opcode[i]==0xf) //LUI
7562           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7563         else
7564           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7565         break;
7566       case LOAD:
7567       case LOADLR:
7568         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7569         break;
7570       case STORE:
7571       case STORELR:
7572         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7573         break;
7574       case ALU:
7575       case SHIFT:
7576         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7577         break;
7578       case MULTDIV:
7579         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7580         break;
7581       case SHIFTIMM:
7582         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7583         break;
7584       case MOV:
7585         if((opcode2[i]&0x1d)==0x10)
7586           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7587         else if((opcode2[i]&0x1d)==0x11)
7588           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7589         else
7590           printf (" %x: %s\n",start+i*4,insn[i]);
7591         break;
7592       case COP0:
7593         if(opcode2[i]==0)
7594           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7595         else if(opcode2[i]==4)
7596           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7597         else printf (" %x: %s\n",start+i*4,insn[i]);
7598         break;
7599       case COP1:
7600         if(opcode2[i]<3)
7601           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7602         else if(opcode2[i]>3)
7603           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7604         else printf (" %x: %s\n",start+i*4,insn[i]);
7605         break;
7606       case COP2:
7607         if(opcode2[i]<3)
7608           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7609         else if(opcode2[i]>3)
7610           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7611         else printf (" %x: %s\n",start+i*4,insn[i]);
7612         break;
7613       case C1LS:
7614         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7615         break;
7616       case C2LS:
7617         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7618         break;
7619       default:
7620         //printf (" %s %8x\n",insn[i],source[i]);
7621         printf (" %x: %s\n",start+i*4,insn[i]);
7622     }
7623 }
7624
7625 void new_dynarec_init()
7626 {
7627   printf("Init new dynarec\n");
7628   out=(u_char *)BASE_ADDR;
7629   if (mmap (out, 1<<TARGET_SIZE_2,
7630             PROT_READ | PROT_WRITE | PROT_EXEC,
7631             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7632             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7633 #ifdef MUPEN64
7634   rdword=&readmem_dword;
7635   fake_pc.f.r.rs=&readmem_dword;
7636   fake_pc.f.r.rt=&readmem_dword;
7637   fake_pc.f.r.rd=&readmem_dword;
7638 #endif
7639   int n;
7640   for(n=0x80000;n<0x80800;n++)
7641     invalid_code[n]=1;
7642   for(n=0;n<65536;n++)
7643     hash_table[n][0]=hash_table[n][2]=-1;
7644   memset(mini_ht,-1,sizeof(mini_ht));
7645   memset(restore_candidate,0,sizeof(restore_candidate));
7646   copy=shadow;
7647   expirep=16384; // Expiry pointer, +2 blocks
7648   pending_exception=0;
7649   literalcount=0;
7650 #ifdef HOST_IMM8
7651   // Copy this into local area so we don't have to put it in every literal pool
7652   invc_ptr=invalid_code;
7653 #endif
7654   stop_after_jal=0;
7655   // TLB
7656   using_tlb=0;
7657   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7658     memory_map[n]=-1;
7659   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7660     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7661   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7662     memory_map[n]=-1;
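  // memory_map[] has one entry per 4KB page of the guest address space: -1
  // marks an unmapped page, otherwise the entry holds (host_base-guest_base)>>2
  // so that guest_addr + (memory_map[page]<<2) gives the host address.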
7663 #ifdef MUPEN64
7664   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7665     writemem[n] = write_nomem_new;
7666     writememb[n] = write_nomemb_new;
7667     writememh[n] = write_nomemh_new;
7668 #ifndef FORCE32
7669     writememd[n] = write_nomemd_new;
7670 #endif
7671     readmem[n] = read_nomem_new;
7672     readmemb[n] = read_nomemb_new;
7673     readmemh[n] = read_nomemh_new;
7674 #ifndef FORCE32
7675     readmemd[n] = read_nomemd_new;
7676 #endif
7677   }
7678   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7679     writemem[n] = write_rdram_new;
7680     writememb[n] = write_rdramb_new;
7681     writememh[n] = write_rdramh_new;
7682 #ifndef FORCE32
7683     writememd[n] = write_rdramd_new;
7684 #endif
7685   }
7686   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7687     writemem[n] = write_nomem_new;
7688     writememb[n] = write_nomemb_new;
7689     writememh[n] = write_nomemh_new;
7690 #ifndef FORCE32
7691     writememd[n] = write_nomemd_new;
7692 #endif
7693     readmem[n] = read_nomem_new;
7694     readmemb[n] = read_nomemb_new;
7695     readmemh[n] = read_nomemh_new;
7696 #ifndef FORCE32
7697     readmemd[n] = read_nomemd_new;
7698 #endif
7699   }
7700 #endif
7701   tlb_hacks();
7702   arch_init();
7703 }
7704
7705 void new_dynarec_cleanup()
7706 {
7707   int n;
7708   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7709   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7710   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7711   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7712   #ifdef ROM_COPY
7713   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7714   #endif
7715 }
7716
7717 int new_recompile_block(int addr)
7718 {
7719 /*
7720   if(addr==0x800cd050) {
7721     int block;
7722     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7723     int n;
7724     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7725   }
7726 */
7727   //if(Count==365117028) tracedebug=1;
7728   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7729   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7730   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7731   //if(debug) 
7732   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7733   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7734   /*if(Count>=312978186) {
7735     rlist();
7736   }*/
7737   //rlist();
7738   start = (u_int)addr&~3;
7739   //assert(((u_int)addr&1)==0);
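  // Pick the host pointer ('source') that backs the guest code at 'start',
  // and set 'pagelimit' as an upper address bound used later to stop the
  // block from extending past the mapped region.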
7740 #ifdef PCSX
7741   if (Config.HLE && start == 0x80001000) {
7742     // XXX: is this enough? Maybe check hleSoftCall?
7743     u_int beginning=(u_int)out;
7744     u_int page=get_page(start);
7745     ll_add(jump_in+page,start,out);
7746     invalid_code[start>>12]=0;
7747     emit_movimm(start,0);
7748     emit_writeword(0,(int)&pcaddr);
7749     emit_jmp((int)new_dyna_leave);
7750 #ifdef __arm__
7751     __clear_cache((void *)beginning,out);
7752 #endif
7753     return 0;
7754   }
7755   else if ((u_int)addr < 0x00200000) {
7756     // used for BIOS calls mostly?
7757     source = (u_int *)((u_int)rdram+start-0);
7758     pagelimit = 0x00200000;
7759   }
7760   else
7761 #endif
7762 #ifdef MUPEN64
7763   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7764     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7765     pagelimit = 0xa4001000;
7766   }
7767   else
7768 #endif
7769   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
7770     source = (u_int *)((u_int)rdram+start-0x80000000);
7771     pagelimit = 0x80000000+RAM_SIZE;
7772   }
7773 #ifndef DISABLE_TLB
7774   else if ((signed int)addr >= (signed int)0xC0000000) {
7775     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7776     //if(tlb_LUT_r[start>>12])
7777       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7778     if((signed int)memory_map[start>>12]>=0) {
7779       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7780       pagelimit=(start+4096)&0xFFFFF000;
7781       int map=memory_map[start>>12];
7782       int i;
7783       for(i=0;i<5;i++) {
7784         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7785         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7786       }
7787       assem_debug("pagelimit=%x\n",pagelimit);
7788       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7789     }
7790     else {
7791       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7792       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
7793       return 1; // Caller will invoke exception handler
7794     }
7795     //printf("source= %x\n",(int)source);
7796   }
7797 #endif
7798   else {
7799     printf("Compile at bogus memory address: %x \n", (int)addr);
7800     exit(1);
7801   }
7802
7803   /* Pass 1: disassemble */
7804   /* Pass 2: register dependencies, branch targets */
7805   /* Pass 3: register allocation */
7806   /* Pass 4: branch dependencies */
7807   /* Pass 5: pre-alloc */
7808   /* Pass 6: optimize clean/dirty state */
7809   /* Pass 7: flag 32-bit registers */
7810   /* Pass 8: assembly */
7811   /* Pass 9: linker */
7812   /* Pass 10: garbage collection / free memory */
7813
7814   int i,j;
7815   int done=0;
7816   unsigned int type,op,op2;
7817
7818   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7819   
7820   /* Pass 1 disassembly */
7821
7822   for(i=0;!done;i++) {
7823     bt[i]=0;likely[i]=0;op2=0;
7824     opcode[i]=op=source[i]>>26;
7825     switch(op)
7826     {
7827       case 0x00: strcpy(insn[i],"special"); type=NI;
7828         op2=source[i]&0x3f;
7829         switch(op2)
7830         {
7831           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7832           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7833           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7834           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7835           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7836           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7837           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7838           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7839           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7840           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7841           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7842           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7843           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7844           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7845           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7846           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7847           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7848           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7849           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7850           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7851           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7852           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7853           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7854           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7855           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7856           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7857           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7858           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7859           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7860           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7861           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7862           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7863           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7864           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7865           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7866           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7867           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7868           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7869           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7870           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7871           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7872           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7873           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7874           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7875           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7876           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7877           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7878           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7879           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7880           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7881           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7882           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7883         }
7884         break;
7885       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7886         op2=(source[i]>>16)&0x1f;
7887         switch(op2)
7888         {
7889           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7890           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7891           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7892           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7893           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7894           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7895           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7896           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7897           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7898           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7899           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7900           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7901           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7902           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7903         }
7904         break;
7905       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7906       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7907       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7908       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7909       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7910       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7911       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7912       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7913       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7914       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7915       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7916       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7917       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7918       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7919       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7920         op2=(source[i]>>21)&0x1f;
7921         switch(op2)
7922         {
7923           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7924           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7925           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7926           switch(source[i]&0x3f)
7927           {
7928             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7929             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7930             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7931             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7932             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7933           }
7934         }
7935         break;
7936       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7937         op2=(source[i]>>21)&0x1f;
7938         switch(op2)
7939         {
7940           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7941           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7942           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7943           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7944           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7945           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7946           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7947           switch((source[i]>>16)&0x3)
7948           {
7949             case 0x00: strcpy(insn[i],"BC1F"); break;
7950             case 0x01: strcpy(insn[i],"BC1T"); break;
7951             case 0x02: strcpy(insn[i],"BC1FL"); break;
7952             case 0x03: strcpy(insn[i],"BC1TL"); break;
7953           }
7954           break;
7955           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7956           switch(source[i]&0x3f)
7957           {
7958             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7959             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7960             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7961             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7962             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7963             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7964             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7965             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7966             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7967             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7968             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7969             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7970             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7971             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7972             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7973             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7974             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7975             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7976             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7977             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7978             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7979             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7980             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7981             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7982             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7983             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7984             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7985             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7986             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7987             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7988             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7989             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7990             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7991             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7992             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7993           }
7994           break;
7995           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7996           switch(source[i]&0x3f)
7997           {
7998             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7999             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8000             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8001             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8002             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8003             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8004             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8005             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8006             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8007             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8008             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8009             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8010             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8011             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8012             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8013             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8014             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8015             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8016             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8017             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8018             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8019             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8020             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8021             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8022             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8023             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8024             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8025             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8026             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8027             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8028             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8029             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8030             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8031             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8032             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8033           }
8034           break;
8035           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8036           switch(source[i]&0x3f)
8037           {
8038             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8039             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8040           }
8041           break;
8042           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8043           switch(source[i]&0x3f)
8044           {
8045             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8046             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8047           }
8048           break;
8049         }
8050         break;
8051       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8052       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8053       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8054       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8055       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8056       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8057       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8058       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8059       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8060       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8061       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8062       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8063       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8064       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8065       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8066       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8067       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8068       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8069       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8070       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8071       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8072       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8073       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8074       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8075       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8076       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8077       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8078       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8079       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8080       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8081       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8082       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8083       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8084       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8085 #ifdef PCSX
8086       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8087         op2=(source[i]>>21)&0x1f;
8088         switch(op2)
8089         {
8090           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8091           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8092           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8093           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8094           default:
8095             if (gte_handlers[source[i]&0x3f]!=NULL) {
8096               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8097               type=C2OP;
8098             }
8099             break;
8100         }
8101         break;
8102       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8103       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8104       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8105 #endif
8106       default: strcpy(insn[i],"???"); type=NI;
8107         printf("NI %08x @%08x\n", source[i], addr + i*4);
8108         break;
8109     }
8110     itype[i]=type;
8111     opcode2[i]=op2;
8112     /* Get registers/immediates */
8113     lt1[i]=0;
8114     us1[i]=0;
8115     us2[i]=0;
8116     dep1[i]=0;
8117     dep2[i]=0;
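         // Meaning of the per-instruction tables filled in below (roughly):
         //   rs1/rs2 = guest source registers, rt1/rt2 = guest destinations,
         //   us1/us2 = sources whose full 64-bit value is read,
         //   dep1/dep2 = sources whose upper half feeds the result's upper half
         //               (used by the unneeded_reg_upper propagation),
         //   imm = immediate, sign- or zero-extended as the opcode requires.
         //   e.g. ADDIU rt,rs,imm16 ends up with rs1=rs, rt1=rt, imm=(short)imm16.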
8118     switch(type) {
8119       case LOAD:
8120         rs1[i]=(source[i]>>21)&0x1f;
8121         rs2[i]=0;
8122         rt1[i]=(source[i]>>16)&0x1f;
8123         rt2[i]=0;
8124         imm[i]=(short)source[i];
8125         break;
8126       case STORE:
8127       case STORELR:
8128         rs1[i]=(source[i]>>21)&0x1f;
8129         rs2[i]=(source[i]>>16)&0x1f;
8130         rt1[i]=0;
8131         rt2[i]=0;
8132         imm[i]=(short)source[i];
8133         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8134         break;
8135       case LOADLR:
8136         // LWL/LWR only load part of the register,
8137         // therefore the target register must be treated as a source too
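             // (e.g. LWR $t0,1($a0) replaces only some bytes of $t0, so the old
             //  value of $t0 flows into the result)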
8138         rs1[i]=(source[i]>>21)&0x1f;
8139         rs2[i]=(source[i]>>16)&0x1f;
8140         rt1[i]=(source[i]>>16)&0x1f;
8141         rt2[i]=0;
8142         imm[i]=(short)source[i];
        8143         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDL/LDR (64-bit)
8144         if(op==0x26) dep1[i]=rt1[i]; // LWR
8145         break;
8146       case IMM16:
8147         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8148         else rs1[i]=(source[i]>>21)&0x1f;
8149         rs2[i]=0;
8150         rt1[i]=(source[i]>>16)&0x1f;
8151         rt2[i]=0;
8152         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8153           imm[i]=(unsigned short)source[i];
8154         }else{
8155           imm[i]=(short)source[i];
8156         }
8157         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8158         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8159         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8160         break;
8161       case UJUMP:
8162         rs1[i]=0;
8163         rs2[i]=0;
8164         rt1[i]=0;
8165         rt2[i]=0;
8166         // The JAL instruction writes to r31.
8167         if (op&1) {
8168           rt1[i]=31;
8169         }
8170         rs2[i]=CCREG;
8171         break;
8172       case RJUMP:
8173         rs1[i]=(source[i]>>21)&0x1f;
8174         rs2[i]=0;
8175         rt1[i]=0;
8176         rt2[i]=0;
8177         // The JALR instruction writes to rd.
8178         if (op2&1) {
8179           rt1[i]=(source[i]>>11)&0x1f;
8180         }
8181         rs2[i]=CCREG;
8182         break;
8183       case CJUMP:
8184         rs1[i]=(source[i]>>21)&0x1f;
8185         rs2[i]=(source[i]>>16)&0x1f;
8186         rt1[i]=0;
8187         rt2[i]=0;
8188         if(op&2) { // BGTZ/BLEZ
8189           rs2[i]=0;
8190         }
8191         us1[i]=rs1[i];
8192         us2[i]=rs2[i];
8193         likely[i]=op>>4;
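             // (the 0x14-0x17 "likely" forms execute their delay slot only when taken)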
8194         break;
8195       case SJUMP:
8196         rs1[i]=(source[i]>>21)&0x1f;
8197         rs2[i]=CCREG;
8198         rt1[i]=0;
8199         rt2[i]=0;
8200         us1[i]=rs1[i];
8201         if(op2&0x10) { // BxxAL
8202           rt1[i]=31;
8203           // NOTE: If the branch is not taken, r31 is still overwritten
8204         }
8205         likely[i]=(op2&2)>>1;
8206         break;
8207       case FJUMP:
8208         rs1[i]=FSREG;
8209         rs2[i]=CSREG;
8210         rt1[i]=0;
8211         rt2[i]=0;
8212         likely[i]=((source[i])>>17)&1;
8213         break;
8214       case ALU:
8215         rs1[i]=(source[i]>>21)&0x1f; // source
        8216         rs2[i]=(source[i]>>16)&0x1f; // second source operand (rt)
8217         rt1[i]=(source[i]>>11)&0x1f; // destination
8218         rt2[i]=0;
8219         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8220           us1[i]=rs1[i];us2[i]=rs2[i];
8221         }
8222         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8223           dep1[i]=rs1[i];dep2[i]=rs2[i];
8224         }
8225         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8226           dep1[i]=rs1[i];dep2[i]=rs2[i];
8227         }
8228         break;
8229       case MULTDIV:
8230         rs1[i]=(source[i]>>21)&0x1f; // source
8231         rs2[i]=(source[i]>>16)&0x1f; // divisor
8232         rt1[i]=HIREG;
8233         rt2[i]=LOREG;
8234         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8235           us1[i]=rs1[i];us2[i]=rs2[i];
8236         }
8237         break;
8238       case MOV:
8239         rs1[i]=0;
8240         rs2[i]=0;
8241         rt1[i]=0;
8242         rt2[i]=0;
8243         if(op2==0x10) rs1[i]=HIREG; // MFHI
8244         if(op2==0x11) rt1[i]=HIREG; // MTHI
8245         if(op2==0x12) rs1[i]=LOREG; // MFLO
8246         if(op2==0x13) rt1[i]=LOREG; // MTLO
8247         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8248         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8249         dep1[i]=rs1[i];
8250         break;
8251       case SHIFT:
8252         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8253         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8254         rt1[i]=(source[i]>>11)&0x1f; // destination
8255         rt2[i]=0;
8256         // DSLLV/DSRLV/DSRAV are 64-bit
8257         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8258         break;
8259       case SHIFTIMM:
8260         rs1[i]=(source[i]>>16)&0x1f;
8261         rs2[i]=0;
8262         rt1[i]=(source[i]>>11)&0x1f;
8263         rt2[i]=0;
8264         imm[i]=(source[i]>>6)&0x1f;
8265         // DSxx32 instructions
8266         if(op2>=0x3c) imm[i]|=0x20;
8267         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8268         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8269         break;
8270       case COP0:
8271         rs1[i]=0;
8272         rs2[i]=0;
8273         rt1[i]=0;
8274         rt2[i]=0;
8275         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8276         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8277         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8278         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8279         break;
8280       case COP1:
8281       case COP2:
8282         rs1[i]=0;
8283         rs2[i]=0;
8284         rt1[i]=0;
8285         rt2[i]=0;
8286         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8287         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8288         if(op2==5) us1[i]=rs1[i]; // DMTC1
8289         rs2[i]=CSREG;
8290         break;
8291       case C1LS:
8292         rs1[i]=(source[i]>>21)&0x1F;
8293         rs2[i]=CSREG;
8294         rt1[i]=0;
8295         rt2[i]=0;
8296         imm[i]=(short)source[i];
8297         break;
8298       case C2LS:
8299         rs1[i]=(source[i]>>21)&0x1F;
8300         rs2[i]=0;
8301         rt1[i]=0;
8302         rt2[i]=0;
8303         imm[i]=(short)source[i];
8304         break;
8305       case FLOAT:
8306       case FCONV:
8307         rs1[i]=0;
8308         rs2[i]=CSREG;
8309         rt1[i]=0;
8310         rt2[i]=0;
8311         break;
8312       case FCOMP:
8313         rs1[i]=FSREG;
8314         rs2[i]=CSREG;
8315         rt1[i]=FSREG;
8316         rt2[i]=0;
8317         break;
8318       case SYSCALL:
8319       case HLECALL:
8320         rs1[i]=CCREG;
8321         rs2[i]=0;
8322         rt1[i]=0;
8323         rt2[i]=0;
8324         break;
8325       default:
8326         rs1[i]=0;
8327         rs2[i]=0;
8328         rt1[i]=0;
8329         rt2[i]=0;
8330     }
8331     /* Calculate branch target addresses */
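         // For reference: J/JAL take the top 4 address bits from the delay slot and
         // append instr_index<<2, i.e. (source<<6)>>4 == (source&0x03FFFFFF)<<2;
         // conditional branches add a sign-extended offset*4 relative to the delay
         // slot, i.e. ((signed)(source<<16))>>14 == ((short)source)*4.
         // Anything that is not a direct branch keeps ba[i]=-1.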
8332     if(type==UJUMP)
8333       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8334     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8335       ba[i]=start+i*4+8; // Ignore never taken branch
8336     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8337       ba[i]=start+i*4+8; // Ignore never taken branch
8338     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8339       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8340     else ba[i]=-1;
8341     /* Is this the end of the block? */
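         // (source[i-1]>>16)==0x1000 matches the unconditional "beq $0,$0,off" idiom.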
8342     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
      8343       if(rt1[i-1]==0) { // No link register, so this is not a call: normally end the block here
8344         done=1;
8345         // Does the block continue due to a branch?
8346         for(j=i-1;j>=0;j--)
8347         {
8348           if(ba[j]==start+i*4+4) done=j=0;
8349           if(ba[j]==start+i*4+8) done=j=0;
8350         }
8351       }
8352       else {
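             // JAL/JALR: keep compiling past the call so the return point ends up in this block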
8353         if(stop_after_jal) done=1;
8354         // Stop on BREAK
8355         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8356       }
8357       // Don't recompile stuff that's already compiled
8358       if(check_addr(start+i*4+4)) done=1;
8359       // Don't get too close to the limit
8360       if(i>MAXBLOCK/2) done=1;
8361     }
8362     if(i>0&&itype[i-1]==SYSCALL&&stop_after_jal) done=1;
    8363     if(i>0&&itype[i-1]==HLECALL) done=1; // i>0 guards the itype[i-1] read on the first iteration
8364     assert(i<MAXBLOCK-1);
8365     if(start+i*4==pagelimit-4) done=1;
8366     assert(start+i*4<pagelimit);
8367     if (i==MAXBLOCK-1) done=1;
8368     // Stop if we're compiling junk
8369     if(itype[i]==NI&&opcode[i]==0x11) {
8370       done=stop_after_jal=1;
8371       printf("Disabled speculative precompilation\n");
8372     }
8373   }
8374   slen=i;
8375   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8376     if(start+i*4==pagelimit) {
8377       itype[i-1]=SPAN;
8378     }
8379   }
8380   assert(slen>0);
8381
8382   /* Pass 2 - Register dependencies and branch targets */
8383
8384   unneeded_registers(0,slen-1,0);
8385   
8386   /* Pass 3 - Register allocation */
8387
8388   struct regstat current; // Current register allocations/status
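       // regmap maps each host register to the guest register it holds (-1 = free),
       // dirty marks values that must be written back, is32 marks guest registers
       // known to hold 32-bit values, u/uu are the unneeded-lower/upper-half masks,
       // and isconst/constmap track registers holding known constants.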
8389   current.is32=1;
8390   current.dirty=0;
8391   current.u=unneeded_reg[0];
8392   current.uu=unneeded_reg_upper[0];
8393   clear_all_regs(current.regmap);
8394   alloc_reg(&current,0,CCREG);
8395   dirty_reg(&current,CCREG);
8396   current.isconst=0;
8397   current.wasconst=0;
8398   int ds=0;
8399   int cc=0;
8400   int hr;
8401   
8402   provisional_32bit();
8403   
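       // Bit 0 of the requested start address is used as a flag: when set, the
       // block begins in a branch delay slot and is set up accordingly below.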
8404   if((u_int)addr&1) {
8405     // First instruction is delay slot
8406     cc=-1;
8407     bt[1]=1;
8408     ds=1;
8409     unneeded_reg[0]=1;
8410     unneeded_reg_upper[0]=1;
8411     current.regmap[HOST_BTREG]=BTREG;
8412   }
8413   
8414   for(i=0;i<slen;i++)
8415   {
8416     if(bt[i])
8417     {
8418       int hr;
8419       for(hr=0;hr<HOST_REGS;hr++)
8420       {
8421         // Is this really necessary?
8422         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8423       }
8424       current.isconst=0;
8425     }
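         // If two instructions back there was a BNE/BNEL of a register against
         // $zero and we fell through, that register must be zero here, so it can
         // be treated as 32-bit and its upper-half mapping dropped (the branch
         // target promotion below re-widens it if a jump brings in a 64-bit value).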
8426     if(i>1)
8427     {
8428       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8429       {
8430         if(rs1[i-2]==0||rs2[i-2]==0)
8431         {
8432           if(rs1[i-2]) {
8433             current.is32|=1LL<<rs1[i-2];
8434             int hr=get_reg(current.regmap,rs1[i-2]|64);
8435             if(hr>=0) current.regmap[hr]=-1;
8436           }
8437           if(rs2[i-2]) {
8438             current.is32|=1LL<<rs2[i-2];
8439             int hr=get_reg(current.regmap,rs2[i-2]|64);
8440             if(hr>=0) current.regmap[hr]=-1;
8441           }
8442         }
8443       }
8444     }
8445     // If something jumps here with 64-bit values
8446     // then promote those registers to 64 bits
8447     if(bt[i])
8448     {
8449       uint64_t temp_is32=current.is32;
8450       for(j=i-1;j>=0;j--)
8451       {
8452         if(ba[j]==start+i*4) 
8453           temp_is32&=branch_regs[j].is32;
8454       }
8455       for(j=i;j<slen;j++)
8456       {
8457         if(ba[j]==start+i*4) 
8458           //temp_is32=1;
8459           temp_is32&=p32[j];
8460       }
8461       if(temp_is32!=current.is32) {
8462         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8463         #ifdef DESTRUCTIVE_WRITEBACK
8464         for(hr=0;hr<HOST_REGS;hr++)
8465         {
8466           int r=current.regmap[hr];
8467           if(r>0&&r<64)
8468           {
8469             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8470               temp_is32|=1LL<<r;
8471               //printf("restore %d\n",r);
8472             }
8473           }
8474         }
8475         #endif
8476         current.is32=temp_is32;
8477       }
8478     }
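         // On 32-bit-only targets (the PSX R3000A has no 64-bit GPRs) FORCE32
         // marks every register as 32-bit and fills p32, so the 64-bit tracking
         // above effectively becomes a no-op.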
8479 #ifdef FORCE32
8480     memset(p32, 0xff, sizeof(p32));
8481     current.is32=-1LL;
8482 #endif
8483
8484     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8485     regs[i].wasconst=current.isconst;
8486     regs[i].was32=current.is32;
8487     regs[i].wasdirty=current.dirty;
8488     #ifdef DESTRUCTIVE_WRITEBACK
8489     // To change a dirty register from 32 to 64 bits, we must write
8490     // it out during the previous cycle (for branches, 2 cycles)
8491     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8492     {
8493       uint64_t temp_is32=current.is32;
8494       for(j=i-1;j>=0;j--)
8495       {
8496         if(ba[j]==start+i*4+4) 
8497           temp_is32&=branch_regs[j].is32;
8498       }
8499       for(j=i;j<slen;j++)
8500       {
8501         if(ba[j]==start+i*4+4) 
8502           //temp_is32=1;
8503           temp_is32&=p32[j];
8504       }
8505       if(temp_is32!=current.is32) {
8506         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8507         for(hr=0;hr<HOST_REGS;hr++)
8508         {
8509           int r=current.regmap[hr];
8510           if(r>0)
8511           {
8512             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8513               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8514               {
8515                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8516                 {
8517                   //printf("dump %d/r%d\n",hr,r);
8518                   current.regmap[hr]=-1;
8519                   if(get_reg(current.regmap,r|64)>=0) 
8520                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8521                 }
8522               }
8523             }
8524           }
8525         }
8526       }
8527     }
8528     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8529     {
8530       uint64_t temp_is32=current.is32;
8531       for(j=i-1;j>=0;j--)
8532       {
8533         if(ba[j]==start+i*4+8) 
8534           temp_is32&=branch_regs[j].is32;
8535       }
8536       for(j=i;j<slen;j++)
8537       {
8538         if(ba[j]==start+i*4+8) 
8539           //temp_is32=1;
8540           temp_is32&=p32[j];
8541       }
8542       if(temp_is32!=current.is32) {
8543         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8544         for(hr=0;hr<HOST_REGS;hr++)
8545         {
8546           int r=current.regmap[hr];
8547           if(r>0)
8548           {
8549             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8550               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8551               {
8552                 //printf("dump %d/r%d\n",hr,r);
8553                 current.regmap[hr]=-1;
8554                 if(get_reg(current.regmap,r|64)>=0) 
8555                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8556               }
8557             }
8558           }
8559         }
8560       }
8561     }
8562     #endif
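         // Refine the liveness info for this instruction: a register is unneeded
         // if pass 2 marked it unneeded from the next instruction on and neither
         // this instruction nor (for branches) its delay slot reads it; bit 0
         // ($zero) is always unneeded.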
8563     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8564       if(i+1<slen) {
8565         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8566         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8567         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8568         current.u|=1;
8569         current.uu|=1;
8570       } else {
8571         current.u=1;
8572         current.uu=1;
8573       }
8574     } else {
8575       if(i+1<slen) {
8576         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8577         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8578         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8579         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8580         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8581         current.u|=1;
8582         current.uu|=1;
8583       } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8584     }
8585     is_ds[i]=ds;
8586     if(ds) {
8587       ds=0; // Skip delay slot, already allocated as part of branch
8588       // ...but we need to alloc it in case something jumps here
8589       if(i+1<slen) {
8590         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8591         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8592       }else{
8593         current.u=branch_unneeded_reg[i-1];
8594         current.uu=branch_unneeded_reg_upper[i-1];
8595       }
8596       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8597       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8598       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8599       current.u|=1;
8600       current.uu|=1;
8601       struct regstat temp;
8602       memcpy(&temp,&current,sizeof(current));
8603       temp.wasdirty=temp.dirty;
8604       temp.was32=temp.is32;
8605       // TODO: Take into account unconditional branches, as below
8606       delayslot_alloc(&temp,i);
8607       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8608       regs[i].wasdirty=temp.wasdirty;
8609       regs[i].was32=temp.was32;
8610       regs[i].dirty=temp.dirty;
8611       regs[i].is32=temp.is32;
8612       regs[i].isconst=0;
8613       regs[i].wasconst=0;
8614       current.isconst=0;
8615       // Create entry (branch target) regmap
8616       for(hr=0;hr<HOST_REGS;hr++)
8617       {
8618         int r=temp.regmap[hr];
8619         if(r>=0) {
8620           if(r!=regmap_pre[i][hr]) {
8621             regs[i].regmap_entry[hr]=-1;
8622           }
8623           else
8624           {
8625             if(r<64){
8626               if((current.u>>r)&1) {
8627                 regs[i].regmap_entry[hr]=-1;
8628                 regs[i].regmap[hr]=-1;
8629                 //Don't clear regs in the delay slot as the branch might need them
8630                 //current.regmap[hr]=-1;
8631               }else
8632                 regs[i].regmap_entry[hr]=r;
8633             }
8634             else {
8635               if((current.uu>>(r&63))&1) {
8636                 regs[i].regmap_entry[hr]=-1;
8637                 regs[i].regmap[hr]=-1;
8638                 //Don't clear regs in the delay slot as the branch might need them
8639                 //current.regmap[hr]=-1;
8640               }else
8641                 regs[i].regmap_entry[hr]=r;
8642             }
8643           }
8644         } else {
8645           // First instruction expects CCREG to be allocated
8646           if(i==0&&hr==HOST_CCREG) 
8647             regs[i].regmap_entry[hr]=CCREG;
8648           else
8649             regs[i].regmap_entry[hr]=-1;
8650         }
8651       }
8652     }
8653     else { // Not delay slot
8654       switch(itype[i]) {
8655         case UJUMP:
8656           //current.isconst=0; // DEBUG
8657           //current.wasconst=0; // DEBUG
8658           //regs[i].wasconst=0; // DEBUG
8659           clear_const(&current,rt1[i]);
8660           alloc_cc(&current,i);
8661           dirty_reg(&current,CCREG);
8662           if (rt1[i]==31) {
8663             alloc_reg(&current,i,31);
8664             dirty_reg(&current,31);
8665             assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8666             #ifdef REG_PREFETCH
8667             alloc_reg(&current,i,PTEMP);
8668             #endif
8669             //current.is32|=1LL<<rt1[i];
8670           }
8671           delayslot_alloc(&current,i+1);
8672           //current.isconst=0; // DEBUG
8673           ds=1;
8674           //printf("i=%d, isconst=%x\n",i,current.isconst);
8675           break;
8676         case RJUMP:
8677           //current.isconst=0;
8678           //current.wasconst=0;
8679           //regs[i].wasconst=0;
8680           clear_const(&current,rs1[i]);
8681           clear_const(&current,rt1[i]);
8682           alloc_cc(&current,i);
8683           dirty_reg(&current,CCREG);
8684           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8685             alloc_reg(&current,i,rs1[i]);
8686             if (rt1[i]!=0) {
8687               alloc_reg(&current,i,rt1[i]);
8688               dirty_reg(&current,rt1[i]);
8689               assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8690               #ifdef REG_PREFETCH
8691               alloc_reg(&current,i,PTEMP);
8692               #endif
8693             }
8694             #ifdef USE_MINI_HT
8695             if(rs1[i]==31) { // JALR
8696               alloc_reg(&current,i,RHASH);
8697               #ifndef HOST_IMM_ADDR32
8698               alloc_reg(&current,i,RHTBL);
8699               #endif
8700             }
8701             #endif
8702             delayslot_alloc(&current,i+1);
8703           } else {
8704             // The delay slot overwrites our source register,
8705             // allocate a temporary register to hold the old value.
8706             current.isconst=0;
8707             current.wasconst=0;
8708             regs[i].wasconst=0;
8709             delayslot_alloc(&current,i+1);
8710             current.isconst=0;
8711             alloc_reg(&current,i,RTEMP);
8712           }
8713           //current.isconst=0; // DEBUG
8714           ds=1;
8715           break;
8716         case CJUMP:
8717           //current.isconst=0;
8718           //current.wasconst=0;
8719           //regs[i].wasconst=0;
8720           clear_const(&current,rs1[i]);
8721           clear_const(&current,rs2[i]);
8722           if((opcode[i]&0x3E)==4) // BEQ/BNE
8723           {
8724             alloc_cc(&current,i);
8725             dirty_reg(&current,CCREG);
8726             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8727             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8728             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8729             {
8730               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8731               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8732             }
8733             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8734                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8735               // The delay slot overwrites one of our conditions.
8736               // Allocate the branch condition registers instead.
8737               // Note that such a sequence of instructions could
              8738               // be considered a bug since the branch cannot be
8739               // re-executed if an exception occurs.
8740               current.isconst=0;
8741               current.wasconst=0;
8742               regs[i].wasconst=0;
8743               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8744               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8745               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8746               {
8747                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8748                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8749               }
8750             }
8751             else delayslot_alloc(&current,i+1);
8752           }
8753           else
8754           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8755           {
8756             alloc_cc(&current,i);
8757             dirty_reg(&current,CCREG);
8758             alloc_reg(&current,i,rs1[i]);
8759             if(!(current.is32>>rs1[i]&1))
8760             {
8761               alloc_reg64(&current,i,rs1[i]);
8762             }
8763             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8764               // The delay slot overwrites one of our conditions.
8765               // Allocate the branch condition registers instead.
8766               // Note that such a sequence of instructions could
              8767               // be considered a bug since the branch cannot be
8768               // re-executed if an exception occurs.
8769               current.isconst=0;
8770               current.wasconst=0;
8771               regs[i].wasconst=0;
8772               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8773               if(!((current.is32>>rs1[i])&1))
8774               {
8775                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8776               }
8777             }
8778             else delayslot_alloc(&current,i+1);
8779           }
8780           else
8781           // Don't alloc the delay slot yet because we might not execute it
8782           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8783           {
8784             current.isconst=0;
8785             current.wasconst=0;
8786             regs[i].wasconst=0;
8787             alloc_cc(&current,i);
8788             dirty_reg(&current,CCREG);
8789             alloc_reg(&current,i,rs1[i]);
8790             alloc_reg(&current,i,rs2[i]);
8791             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8792             {
8793               alloc_reg64(&current,i,rs1[i]);
8794               alloc_reg64(&current,i,rs2[i]);
8795             }
8796           }
8797           else
8798           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8799           {
8800             current.isconst=0;
8801             current.wasconst=0;
8802             regs[i].wasconst=0;
8803             alloc_cc(&current,i);
8804             dirty_reg(&current,CCREG);
8805             alloc_reg(&current,i,rs1[i]);
8806             if(!(current.is32>>rs1[i]&1))
8807             {
8808               alloc_reg64(&current,i,rs1[i]);
8809             }
8810           }
8811           ds=1;
8812           //current.isconst=0;
8813           break;
8814         case SJUMP:
8815           //current.isconst=0;
8816           //current.wasconst=0;
8817           //regs[i].wasconst=0;
8818           clear_const(&current,rs1[i]);
8819           clear_const(&current,rt1[i]);
8820           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8821           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8822           {
8823             alloc_cc(&current,i);
8824             dirty_reg(&current,CCREG);
8825             alloc_reg(&current,i,rs1[i]);
8826             if(!(current.is32>>rs1[i]&1))
8827             {
8828               alloc_reg64(&current,i,rs1[i]);
8829             }
8830             if (rt1[i]==31) { // BLTZAL/BGEZAL
8831               alloc_reg(&current,i,31);
8832               dirty_reg(&current,31);
8833               assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8834               //#ifdef REG_PREFETCH
8835               //alloc_reg(&current,i,PTEMP);
8836               //#endif
8837               //current.is32|=1LL<<rt1[i];
8838             }
8839             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8840               // The delay slot overwrites the branch condition.
8841               // Allocate the branch condition registers instead.
8842               // Note that such a sequence of instructions could
              8843               // be considered a bug since the branch cannot be
8844               // re-executed if an exception occurs.
8845               current.isconst=0;
8846               current.wasconst=0;
8847               regs[i].wasconst=0;
8848               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8849               if(!((current.is32>>rs1[i])&1))
8850               {
8851                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8852               }
8853             }
8854             else delayslot_alloc(&current,i+1);
8855           }
8856           else
8857           // Don't alloc the delay slot yet because we might not execute it
8858           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8859           {
8860             current.isconst=0;
8861             current.wasconst=0;
8862             regs[i].wasconst=0;
8863             alloc_cc(&current,i);
8864             dirty_reg(&current,CCREG);
8865             alloc_reg(&current,i,rs1[i]);
8866             if(!(current.is32>>rs1[i]&1))
8867             {
8868               alloc_reg64(&current,i,rs1[i]);
8869             }
8870           }
8871           ds=1;
8872           //current.isconst=0;
8873           break;
8874         case FJUMP:
8875           current.isconst=0;
8876           current.wasconst=0;
8877           regs[i].wasconst=0;
8878           if(likely[i]==0) // BC1F/BC1T
8879           {
8880             // TODO: Theoretically we can run out of registers here on x86.
8881             // The delay slot can allocate up to six, and we need to check
8882             // CSREG before executing the delay slot.  Possibly we can drop
8883             // the cycle count and then reload it after checking that the
8884             // FPU is in a usable state, or don't do out-of-order execution.
8885             alloc_cc(&current,i);
8886             dirty_reg(&current,CCREG);
8887             alloc_reg(&current,i,FSREG);
8888             alloc_reg(&current,i,CSREG);
8889             if(itype[i+1]==FCOMP) {
8890               // The delay slot overwrites the branch condition.
8891               // Allocate the branch condition registers instead.
8892               // Note that such a sequence of instructions could
              8893               // be considered a bug since the branch cannot be
8894               // re-executed if an exception occurs.
8895               alloc_cc(&current,i);
8896               dirty_reg(&current,CCREG);
8897               alloc_reg(&current,i,CSREG);
8898               alloc_reg(&current,i,FSREG);
8899             }
8900             else {
8901               delayslot_alloc(&current,i+1);
8902               alloc_reg(&current,i+1,CSREG);
8903             }
8904           }
8905           else
8906           // Don't alloc the delay slot yet because we might not execute it
8907           if(likely[i]) // BC1FL/BC1TL
8908           {
8909             alloc_cc(&current,i);
8910             dirty_reg(&current,CCREG);
8911             alloc_reg(&current,i,CSREG);
8912             alloc_reg(&current,i,FSREG);
8913           }
8914           ds=1;
8915           current.isconst=0;
8916           break;
8917         case IMM16:
8918           imm16_alloc(&current,i);
8919           break;
8920         case LOAD:
8921         case LOADLR:
8922           load_alloc(&current,i);
8923           break;
8924         case STORE:
8925         case STORELR:
8926           store_alloc(&current,i);
8927           break;
8928         case ALU:
8929           alu_alloc(&current,i);
8930           break;
8931         case SHIFT:
8932           shift_alloc(&current,i);
8933           break;
8934         case MULTDIV:
8935           multdiv_alloc(&current,i);
8936           break;
8937         case SHIFTIMM:
8938           shiftimm_alloc(&current,i);
8939           break;
8940         case MOV:
8941           mov_alloc(&current,i);
8942           break;
8943         case COP0:
8944           cop0_alloc(&current,i);
8945           break;
8946         case COP1:
8947         case COP2:
8948           cop1_alloc(&current,i);
8949           break;
8950         case C1LS:
8951           c1ls_alloc(&current,i);
8952           break;
8953         case C2LS:
8954           c2ls_alloc(&current,i);
8955           break;
8956         case C2OP:
8957           c2op_alloc(&current,i);
8958           break;
8959         case FCONV:
8960           fconv_alloc(&current,i);
8961           break;
8962         case FLOAT:
8963           float_alloc(&current,i);
8964           break;
8965         case FCOMP:
8966           fcomp_alloc(&current,i);
8967           break;
8968         case SYSCALL:
8969         case HLECALL:
8970           syscall_alloc(&current,i);
8971           break;
8972         case SPAN:
8973           pagespan_alloc(&current,i);
8974           break;
8975       }
8976       
8977       // Drop the upper half of registers that have become 32-bit
8978       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8979       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8980         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8981         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8982         current.uu|=1;
8983       } else {
8984         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8985         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8986         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8987         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8988         current.uu|=1;
8989       }
8990
8991       // Create entry (branch target) regmap
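           // regmap_entry records which guest register each host register is
           // expected to hold when this instruction is entered from a branch;
           // -1 means no requirement (the value is reloaded or ignored here).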
8992       for(hr=0;hr<HOST_REGS;hr++)
8993       {
8994         int r,or,er;
8995         r=current.regmap[hr];
8996         if(r>=0) {
8997           if(r!=regmap_pre[i][hr]) {
8998             // TODO: delay slot (?)
8999             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9000             if(or<0||(r&63)>=TEMPREG){
9001               regs[i].regmap_entry[hr]=-1;
9002             }
9003             else
9004             {
9005               // Just move it to a different register
9006               regs[i].regmap_entry[hr]=r;
9007               // If it was dirty before, it's still dirty
9008               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9009             }
9010           }
9011           else
9012           {
9013             // Unneeded
9014             if(r==0){
9015               regs[i].regmap_entry[hr]=0;
9016             }
9017             else
9018             if(r<64){
9019               if((current.u>>r)&1) {
9020                 regs[i].regmap_entry[hr]=-1;
9021                 //regs[i].regmap[hr]=-1;
9022                 current.regmap[hr]=-1;
9023               }else
9024                 regs[i].regmap_entry[hr]=r;
9025             }
9026             else {
9027               if((current.uu>>(r&63))&1) {
9028                 regs[i].regmap_entry[hr]=-1;
9029                 //regs[i].regmap[hr]=-1;
9030                 current.regmap[hr]=-1;
9031               }else
9032                 regs[i].regmap_entry[hr]=r;
9033             }
9034           }
9035         } else {
9036           // Branches expect CCREG to be allocated at the target
9037           if(regmap_pre[i][hr]==CCREG) 
9038             regs[i].regmap_entry[hr]=CCREG;
9039           else
9040             regs[i].regmap_entry[hr]=-1;
9041         }
9042       }
9043       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9044     }
9045     /* Branch post-alloc */
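         // branch_regs[i-1] records the register state on the taken path of the
         // branch at i-1 (after its delay slot); internal branch targets are later
         // matched against it, while 'current' carries the fall-through state.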
9046     if(i>0)
9047     {
9048       current.was32=current.is32;
9049       current.wasdirty=current.dirty;
9050       switch(itype[i-1]) {
9051         case UJUMP:
9052           memcpy(&branch_regs[i-1],&current,sizeof(current));
9053           branch_regs[i-1].isconst=0;
9054           branch_regs[i-1].wasconst=0;
9055           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9056           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9057           alloc_cc(&branch_regs[i-1],i-1);
9058           dirty_reg(&branch_regs[i-1],CCREG);
9059           if(rt1[i-1]==31) { // JAL
9060             alloc_reg(&branch_regs[i-1],i-1,31);
9061             dirty_reg(&branch_regs[i-1],31);
9062             branch_regs[i-1].is32|=1LL<<31;
9063           }
9064           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9065           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9066           break;
9067         case RJUMP:
9068           memcpy(&branch_regs[i-1],&current,sizeof(current));
9069           branch_regs[i-1].isconst=0;
9070           branch_regs[i-1].wasconst=0;
9071           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9072           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9073           alloc_cc(&branch_regs[i-1],i-1);
9074           dirty_reg(&branch_regs[i-1],CCREG);
9075           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9076           if(rt1[i-1]!=0) { // JALR
9077             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9078             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9079             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9080           }
9081           #ifdef USE_MINI_HT
9082           if(rs1[i-1]==31) { // JALR
9083             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9084             #ifndef HOST_IMM_ADDR32
9085             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9086             #endif
9087           }
9088           #endif
9089           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9090           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9091           break;
9092         case CJUMP:
9093           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9094           {
9095             alloc_cc(&current,i-1);
9096             dirty_reg(&current,CCREG);
9097             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9098                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9099               // The delay slot overwrote one of our conditions
9100               // Delay slot goes after the test (in order)
9101               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9102               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9103               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9104               current.u|=1;
9105               current.uu|=1;
9106               delayslot_alloc(&current,i);
9107               current.isconst=0;
9108             }
9109             else
9110             {
9111               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9112               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9113               // Alloc the branch condition registers
9114               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9115               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9116               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9117               {
9118                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9119                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9120               }
9121             }
9122             memcpy(&branch_regs[i-1],&current,sizeof(current));
9123             branch_regs[i-1].isconst=0;
9124             branch_regs[i-1].wasconst=0;
9125             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9126             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9127           }
9128           else
9129           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9130           {
9131             alloc_cc(&current,i-1);
9132             dirty_reg(&current,CCREG);
9133             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9134               // The delay slot overwrote the branch condition
9135               // Delay slot goes after the test (in order)
9136               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9137               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9138               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9139               current.u|=1;
9140               current.uu|=1;
9141               delayslot_alloc(&current,i);
9142               current.isconst=0;
9143             }
9144             else
9145             {
9146               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9147               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9148               // Alloc the branch condition register
9149               alloc_reg(&current,i-1,rs1[i-1]);
9150               if(!(current.is32>>rs1[i-1]&1))
9151               {
9152                 alloc_reg64(&current,i-1,rs1[i-1]);
9153               }
9154             }
9155             memcpy(&branch_regs[i-1],&current,sizeof(current));
9156             branch_regs[i-1].isconst=0;
9157             branch_regs[i-1].wasconst=0;
9158             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9159             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9160           }
9161           else
9162           // Alloc the delay slot in case the branch is taken
9163           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9164           {
9165             memcpy(&branch_regs[i-1],&current,sizeof(current));
9166             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9167             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9168             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9169             alloc_cc(&branch_regs[i-1],i);
9170             dirty_reg(&branch_regs[i-1],CCREG);
9171             delayslot_alloc(&branch_regs[i-1],i);
9172             branch_regs[i-1].isconst=0;
9173             alloc_reg(&current,i,CCREG); // Not taken path
9174             dirty_reg(&current,CCREG);
9175             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9176           }
9177           else
9178           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9179           {
9180             memcpy(&branch_regs[i-1],&current,sizeof(current));
9181             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9182             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9183             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9184             alloc_cc(&branch_regs[i-1],i);
9185             dirty_reg(&branch_regs[i-1],CCREG);
9186             delayslot_alloc(&branch_regs[i-1],i);
9187             branch_regs[i-1].isconst=0;
9188             alloc_reg(&current,i,CCREG); // Not taken path
9189             dirty_reg(&current,CCREG);
9190             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9191           }
9192           break;
9193         case SJUMP:
9194           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9195           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9196           {
9197             alloc_cc(&current,i-1);
9198             dirty_reg(&current,CCREG);
9199             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9200               // The delay slot overwrote the branch condition
9201               // Delay slot goes after the test (in order)
9202               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9203               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9204               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9205               current.u|=1;
9206               current.uu|=1;
9207               delayslot_alloc(&current,i);
9208               current.isconst=0;
9209             }
9210             else
9211             {
9212               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9213               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9214               // Alloc the branch condition register
9215               alloc_reg(&current,i-1,rs1[i-1]);
9216               if(!(current.is32>>rs1[i-1]&1))
9217               {
9218                 alloc_reg64(&current,i-1,rs1[i-1]);
9219               }
9220             }
9221             memcpy(&branch_regs[i-1],&current,sizeof(current));
9222             branch_regs[i-1].isconst=0;
9223             branch_regs[i-1].wasconst=0;
9224             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9225             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9226           }
9227           else
9228           // Alloc the delay slot in case the branch is taken
9229           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9230           {
9231             memcpy(&branch_regs[i-1],&current,sizeof(current));
9232             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9233             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9234             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9235             alloc_cc(&branch_regs[i-1],i);
9236             dirty_reg(&branch_regs[i-1],CCREG);
9237             delayslot_alloc(&branch_regs[i-1],i);
9238             branch_regs[i-1].isconst=0;
9239             alloc_reg(&current,i,CCREG); // Not taken path
9240             dirty_reg(&current,CCREG);
9241             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9242           }
9243           // FIXME: BLTZAL/BGEZAL
9244           if(opcode2[i-1]&0x10) { // BxxZAL
9245             alloc_reg(&branch_regs[i-1],i-1,31);
9246             dirty_reg(&branch_regs[i-1],31);
9247             branch_regs[i-1].is32|=1LL<<31;
9248           }
9249           break;
9250         case FJUMP:
9251           if(likely[i-1]==0) // BC1F/BC1T
9252           {
9253             alloc_cc(&current,i-1);
9254             dirty_reg(&current,CCREG);
9255             if(itype[i]==FCOMP) {
9256               // The delay slot overwrote the branch condition
9257               // Delay slot goes after the test (in order)
9258               delayslot_alloc(&current,i);
9259               current.isconst=0;
9260             }
9261             else
9262             {
9263               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9264               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9265               // Alloc the branch condition register
9266               alloc_reg(&current,i-1,FSREG);
9267             }
9268             memcpy(&branch_regs[i-1],&current,sizeof(current));
9269             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9270           }
9271           else // BC1FL/BC1TL
9272           {
9273             // Alloc the delay slot in case the branch is taken
9274             memcpy(&branch_regs[i-1],&current,sizeof(current));
9275             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9276             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9277             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9278             alloc_cc(&branch_regs[i-1],i);
9279             dirty_reg(&branch_regs[i-1],CCREG);
9280             delayslot_alloc(&branch_regs[i-1],i);
9281             branch_regs[i-1].isconst=0;
9282             alloc_reg(&current,i,CCREG); // Not taken path
9283             dirty_reg(&current,CCREG);
9284             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9285           }
9286           break;
9287       }
9288
9289       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9290       {
9291         if(rt1[i-1]==31) // JAL/JALR
9292         {
9293           // Subroutine call will return here, don't alloc any registers
9294           current.is32=1;
9295           current.dirty=0;
9296           clear_all_regs(current.regmap);
9297           alloc_reg(&current,i,CCREG);
9298           dirty_reg(&current,CCREG);
9299         }
9300         else if(i+1<slen)
9301         {
9302           // Internal branch will jump here, match registers to caller
9303           current.is32=0x3FFFFFFFFLL;
9304           current.dirty=0;
9305           clear_all_regs(current.regmap);
9306           alloc_reg(&current,i,CCREG);
9307           dirty_reg(&current,CCREG);
9308           for(j=i-1;j>=0;j--)
9309           {
9310             if(ba[j]==start+i*4+4) {
9311               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9312               current.is32=branch_regs[j].is32;
9313               current.dirty=branch_regs[j].dirty;
9314               break;
9315             }
9316           }
9317           while(j>=0) {
9318             if(ba[j]==start+i*4+4) {
9319               for(hr=0;hr<HOST_REGS;hr++) {
9320                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9321                   current.regmap[hr]=-1;
9322                 }
9323                 current.is32&=branch_regs[j].is32;
9324                 current.dirty&=branch_regs[j].dirty;
9325               }
9326             }
9327             j--;
9328           }
9329         }
9330       }
9331     }
9332
9333     // Count cycles in between branches
9334     ccadj[i]=cc;
9335     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9336     {
9337       cc=0;
9338     }
9339     else
9340     {
9341       cc++;
9342     }
9343
9344     flush_dirty_uppers(&current);
9345     if(!is_ds[i]) {
9346       regs[i].is32=current.is32;
9347       regs[i].dirty=current.dirty;
9348       regs[i].isconst=current.isconst;
9349       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9350     }
9351     for(hr=0;hr<HOST_REGS;hr++) {
9352       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9353         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9354           regs[i].wasconst&=~(1<<hr);
9355         }
9356       }
9357     }
9358     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9359   }
9360   
9361   /* Pass 4 - Cull unused host registers */
9362   
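  // Pass 4 works backwards through the block.  For each instruction it builds
  // a bitmask 'nr' of host registers whose contents are still needed: source
  // registers (plus the sources of a branch's delay slot), the cycle counter
  // and INVCP for stores get their bits set, while registers that are about
  // to be overwritten get theirs cleared.  Host registers whose bit ends up
  // clear are then deallocated at the bottom of the loop.  Roughly, for
  // "addu $t0,$t1,$t2" the host regs mapped to $t1/$t2 are marked needed and
  // the one holding the old $t0 is not.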
9363   uint64_t nr=0;
9364   
9365   for (i=slen-1;i>=0;i--)
9366   {
9367     int hr;
9368     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9369     {
9370       if(ba[i]<start || ba[i]>=(start+slen*4))
9371       {
9372         // Branch out of this block, don't need anything
9373         nr=0;
9374       }
9375       else
9376       {
9377         // Internal branch
9378         // Need whatever matches the target
9379         nr=0;
9380         int t=(ba[i]-start)>>2;
9381         for(hr=0;hr<HOST_REGS;hr++)
9382         {
9383           if(regs[i].regmap_entry[hr]>=0) {
9384             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9385           }
9386         }
9387       }
9388       // Conditional branch may need registers for following instructions
9389       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9390       {
9391         if(i<slen-2) {
9392           nr|=needed_reg[i+2];
9393           for(hr=0;hr<HOST_REGS;hr++)
9394           {
9395             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9396             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9397           }
9398         }
9399       }
9400       // Don't need stuff which is overwritten (note: hr is stale here, left over from the loop above)
9401       if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9402       if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9403       // Merge in delay slot
9404       for(hr=0;hr<HOST_REGS;hr++)
9405       {
9406         if(!likely[i]) {
9407           // These are overwritten unless the branch is "likely"
9408           // and the delay slot is nullified if not taken
9409           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9410           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9411         }
9412         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9413         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9414         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9415         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9416         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9417         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9418         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9419         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9420         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9421           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9422           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9423         }
9424         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9425           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9426           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9427         }
9428         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9429           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9430           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9431         }
9432       }
9433     }
9434     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
9435     {
9436       // SYSCALL instruction (software interrupt)
9437       nr=0;
9438     }
9439     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9440     {
9441       // ERET instruction (return from interrupt)
9442       nr=0;
9443     }
9444     else // Non-branch
9445     {
9446       if(i<slen-1) {
9447         for(hr=0;hr<HOST_REGS;hr++) {
9448           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9449           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9450           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9451           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9452         }
9453       }
9454     }
9455     for(hr=0;hr<HOST_REGS;hr++)
9456     {
9457       // Overwritten registers are not needed
9458       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9459       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9460       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9461       // Source registers are needed
9462       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9463       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9464       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9465       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9466       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9467       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9468       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9469       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9470       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9471         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9472         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9473       }
9474       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9475         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9476         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9477       }
9478       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9479         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9480         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9481       }
9482       // Don't store a register immediately after writing it,
9483       // as doing so may prevent dual-issue.
9484       // But do store it if this is a branch target, otherwise we
9485       // might have to load the register before the branch.
9486       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9487         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9488            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9489           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9490           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9491         }
9492         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9493            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9494           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9495           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9496         }
9497       }
9498     }
9499     // Cycle count is needed at branches.  Assume it is needed at the target too.
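    // (i.e. keep HOST_CCREG allocated to CCREG at the block entry, at branch
    // targets and at conditional branches - exactly the cases tested below.)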
9500     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9501       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9502       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9503     }
9504     // Save it
9505     needed_reg[i]=nr;
9506     
9507     // Deallocate unneeded registers
9508     for(hr=0;hr<HOST_REGS;hr++)
9509     {
9510       if(!((nr>>hr)&1)) {
9511         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9512         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9513            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9514            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9515         {
9516           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9517           {
9518             if(likely[i]) {
9519               regs[i].regmap[hr]=-1;
9520               regs[i].isconst&=~(1<<hr);
9521               if(i<slen-2) regmap_pre[i+2][hr]=-1;
9522             }
9523           }
9524         }
9525         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9526         {
9527           int d1=0,d2=0,map=0,temp=0;
9528           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9529           {
9530             d1=dep1[i+1];
9531             d2=dep2[i+1];
9532           }
9533           if(using_tlb) {
9534             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9535                itype[i+1]==STORE || itype[i+1]==STORELR ||
9536                itype[i+1]==C1LS || itype[i+1]==C2LS)
9537             map=TLREG;
9538           } else
9539           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9540              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9541             map=INVCP;
9542           }
9543           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9544              itype[i+1]==C1LS || itype[i+1]==C2LS)
9545             temp=FTEMP;
9546           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9547              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9548              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9549              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9550              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9551              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9552              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9553              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9554              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9555              regs[i].regmap[hr]!=map )
9556           {
9557             regs[i].regmap[hr]=-1;
9558             regs[i].isconst&=~(1<<hr);
9559             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9560                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9561                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9562                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9563                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9564                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9565                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9566                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9567                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9568                branch_regs[i].regmap[hr]!=map)
9569             {
9570               branch_regs[i].regmap[hr]=-1;
9571               branch_regs[i].regmap_entry[hr]=-1;
9572               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9573               {
9574                 if(!likely[i]&&i<slen-2) {
9575                   regmap_pre[i+2][hr]=-1;
9576                 }
9577               }
9578             }
9579           }
9580         }
9581         else
9582         {
9583           // Non-branch
9584           if(i>0)
9585           {
9586             int d1=0,d2=0,map=-1,temp=-1;
9587             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9588             {
9589               d1=dep1[i];
9590               d2=dep2[i];
9591             }
9592             if(using_tlb) {
9593               if(itype[i]==LOAD || itype[i]==LOADLR ||
9594                  itype[i]==STORE || itype[i]==STORELR ||
9595                  itype[i]==C1LS || itype[i]==C2LS)
9596               map=TLREG;
9597             } else if(itype[i]==STORE || itype[i]==STORELR ||
9598                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9599               map=INVCP;
9600             }
9601             if(itype[i]==LOADLR || itype[i]==STORELR ||
9602                itype[i]==C1LS || itype[i]==C2LS)
9603               temp=FTEMP;
9604             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9605                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9606                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9607                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9608                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9609                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9610             {
9611               if(i<slen-1&&!is_ds[i]) {
9612                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9613                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9614                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9615                 {
9616                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9617                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9618                 }
9619                 regmap_pre[i+1][hr]=-1;
9620                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9621               }
9622               regs[i].regmap[hr]=-1;
9623               regs[i].isconst&=~(1<<hr);
9624             }
9625           }
9626         }
9627       }
9628     }
9629   }
9630   
9631   /* Pass 5 - Pre-allocate registers */
9632   
9633   // If a register is allocated during a loop, try to allocate it for the
9634   // entire loop, if possible.  This avoids loading/storing registers
9635   // inside of the loop.
9636
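  // A rough illustration: for a guest loop such as
  //     loop: lw    $t0,0($a0)
  //           addu  $v0,$v0,$t0
  //           addiu $a0,$a0,4
  //           bne   $a0,$a1,loop
  //           nop
  // if $v0 is held in some host register at the backward branch, the code
  // below (via f_regmap) tries to give $v0 that same host register from the
  // branch target onwards, so the value is not spilled and reloaded on every
  // iteration.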
9637   signed char f_regmap[HOST_REGS];
9638   clear_all_regs(f_regmap);
9639   for(i=0;i<slen-1;i++)
9640   {
9641     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9642     {
9643       if(ba[i]>=start && ba[i]<(start+i*4)) 
9644       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9645       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9646       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9647       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9648       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9649       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9650       {
9651         int t=(ba[i]-start)>>2;
9652         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9653         if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9654         for(hr=0;hr<HOST_REGS;hr++)
9655         {
9656           if(regs[i].regmap[hr]>64) {
9657             if(!((regs[i].dirty>>hr)&1))
9658               f_regmap[hr]=regs[i].regmap[hr];
9659             else f_regmap[hr]=-1;
9660           }
9661           else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9662           if(branch_regs[i].regmap[hr]>64) {
9663             if(!((branch_regs[i].dirty>>hr)&1))
9664               f_regmap[hr]=branch_regs[i].regmap[hr];
9665             else f_regmap[hr]=-1;
9666           }
9667           else if(branch_regs[i].regmap[hr]>=0) f_regmap[hr]=branch_regs[i].regmap[hr];
9668           if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9669           ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9670           ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9671           ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9672           {
9673             // Test both in case the delay slot is out-of-order (ooo),
9674             // could be done better...
9675             if(count_free_regs(branch_regs[i].regmap)<2
9676              ||count_free_regs(regs[i].regmap)<2) 
9677               f_regmap[hr]=branch_regs[i].regmap[hr];
9678           }
9679           // Avoid dirty->clean transition
9680           // #ifdef DESTRUCTIVE_WRITEBACK here?
9681           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9682           if(f_regmap[hr]>0) {
9683             if(regs[t].regmap_entry[hr]<0) {
9684               int r=f_regmap[hr];
9685               for(j=t;j<=i;j++)
9686               {
9687                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9688                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9689                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9690                 if(r>63) {
9691                   // NB This can exclude the case where the upper-half
9692                   // register is lower numbered than the lower-half
9693                   // register.  Not sure if it's worth fixing...
9694                   if(get_reg(regs[j].regmap,r&63)<0) break;
9695                   if(regs[j].is32&(1LL<<(r&63))) break;
9696                 }
9697                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9698                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9699                   int k;
9700                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9701                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9702                     if(r>63) {
9703                       if(get_reg(regs[i].regmap,r&63)<0) break;
9704                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9705                     }
9706                     k=i;
9707                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9708                       if(itype[k-1]==STORE||itype[k-1]==STORELR
9709                       ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
9710                       ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9711                       ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9712                         if(count_free_regs(regs[k-1].regmap)<2) {
9713                           //printf("no free regs for store %x\n",start+(k-1)*4);
9714                           break;
9715                         }
9716                       }
9717                       else
9718                       if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9719                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9720                         //printf("no-match due to different register\n");
9721                         break;
9722                       }
9723                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9724                         //printf("no-match due to branch\n");
9725                         break;
9726                       }
9727                       // call/ret fast path assumes no registers allocated
9728                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9729                         break;
9730                       }
9731                       if(r>63) {
9732                         // NB This can exclude the case where the upper-half
9733                         // register is lower numbered than the lower-half
9734                         // register.  Not sure if it's worth fixing...
9735                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9736                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9737                       }
9738                       k--;
9739                     }
9740                     if(i<slen-1) {
9741                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9742                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9743                         //printf("bad match after branch\n");
9744                         break;
9745                       }
9746                     }
9747                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9748                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9749                       while(k<i) {
9750                         regs[k].regmap_entry[hr]=f_regmap[hr];
9751                         regs[k].regmap[hr]=f_regmap[hr];
9752                         regmap_pre[k+1][hr]=f_regmap[hr];
9753                         regs[k].wasdirty&=~(1<<hr);
9754                         regs[k].dirty&=~(1<<hr);
9755                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9756                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9757                         regs[k].wasconst&=~(1<<hr);
9758                         regs[k].isconst&=~(1<<hr);
9759                         k++;
9760                       }
9761                     }
9762                     else {
9763                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9764                       break;
9765                     }
9766                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9767                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9768                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9769                       regs[i].regmap_entry[hr]=f_regmap[hr];
9770                       regs[i].regmap[hr]=f_regmap[hr];
9771                       regs[i].wasdirty&=~(1<<hr);
9772                       regs[i].dirty&=~(1<<hr);
9773                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9774                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9775                       regs[i].wasconst&=~(1<<hr);
9776                       regs[i].isconst&=~(1<<hr);
9777                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9778                       branch_regs[i].wasdirty&=~(1<<hr);
9779                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9780                       branch_regs[i].regmap[hr]=f_regmap[hr];
9781                       branch_regs[i].dirty&=~(1<<hr);
9782                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9783                       branch_regs[i].wasconst&=~(1<<hr);
9784                       branch_regs[i].isconst&=~(1<<hr);
9785                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9786                         regmap_pre[i+2][hr]=f_regmap[hr];
9787                         regs[i+2].wasdirty&=~(1<<hr);
9788                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9789                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9790                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9791                       }
9792                     }
9793                   }
9794                   for(k=t;k<j;k++) {
9795                     regs[k].regmap_entry[hr]=f_regmap[hr];
9796                     regs[k].regmap[hr]=f_regmap[hr];
9797                     regmap_pre[k+1][hr]=f_regmap[hr];
9798                     regs[k+1].wasdirty&=~(1<<hr);
9799                     regs[k].dirty&=~(1<<hr);
9800                     regs[k].wasconst&=~(1<<hr);
9801                     regs[k].isconst&=~(1<<hr);
9802                   }
9803                   if(regs[j].regmap[hr]==f_regmap[hr])
9804                     regs[j].regmap_entry[hr]=f_regmap[hr];
9805                   break;
9806                 }
9807                 if(j==i) break;
9808                 if(regs[j].regmap[hr]>=0)
9809                   break;
9810                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9811                   //printf("no-match due to different register\n");
9812                   break;
9813                 }
9814                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9815                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9816                   break;
9817                 }
9818                 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9819                 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9820                 ||itype[j]==FCOMP||itype[j]==FCONV
9821                 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9822                   if(count_free_regs(regs[j].regmap)<2) {
9823                     //printf("No free regs for store %x\n",start+j*4);
9824                     break;
9825                   }
9826                 }
9827                 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9828                 if(f_regmap[hr]>=64) {
9829                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9830                     break;
9831                   }
9832                   else
9833                   {
9834                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9835                       break;
9836                     }
9837                   }
9838                 }
9839               }
9840             }
9841           }
9842         }
9843       }
9844     }else{
9845       int count=0;
9846       for(hr=0;hr<HOST_REGS;hr++)
9847       {
9848         if(hr!=EXCLUDE_REG) {
9849           if(regs[i].regmap[hr]>64) {
9850             if(!((regs[i].dirty>>hr)&1))
9851               f_regmap[hr]=regs[i].regmap[hr];
9852           }
9853           else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9854           else if(regs[i].regmap[hr]<0) count++;
9855         }
9856       }
9857       // Try to restore cycle count at branch targets
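      // (scan forward to the next point where HOST_CCREG is in use; if it
      // holds CCREG there and nothing in between needs too many registers,
      // keep CCREG resident across the gap.  The second half below does the
      // same thing working backwards from the target.)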
9858       if(bt[i]) {
9859         for(j=i;j<slen-1;j++) {
9860           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9861           if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9862           ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9863           ||itype[j]==FCOMP||itype[j]==FCONV
9864           ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9865             if(count_free_regs(regs[j].regmap)<2) {
9866               //printf("no free regs for store %x\n",start+j*4);
9867               break;
9868             }
9869           }
9870           else
9871           if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9872         }
9873         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9874           int k=i;
9875           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9876           while(k<j) {
9877             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9878             regs[k].regmap[HOST_CCREG]=CCREG;
9879             regmap_pre[k+1][HOST_CCREG]=CCREG;
9880             regs[k+1].wasdirty|=1<<HOST_CCREG;
9881             regs[k].dirty|=1<<HOST_CCREG;
9882             regs[k].wasconst&=~(1<<HOST_CCREG);
9883             regs[k].isconst&=~(1<<HOST_CCREG);
9884             k++;
9885           }
9886           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
9887         }
9888         // Work backwards from the branch target
9889         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9890         {
9891           //printf("Extend backwards\n");
9892           int k;
9893           k=i;
9894           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9895             if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
9896             ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
9897             ||itype[k-1]==FCONV||itype[k-1]==FCOMP
9898             ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9899               if(count_free_regs(regs[k-1].regmap)<2) {
9900                 //printf("no free regs for store %x\n",start+(k-1)*4);
9901                 break;
9902               }
9903             }
9904             else
9905             if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9906             k--;
9907           }
9908           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9909             //printf("Extend CC, %x ->\n",start+k*4);
9910             while(k<=i) {
9911               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9912               regs[k].regmap[HOST_CCREG]=CCREG;
9913               regmap_pre[k+1][HOST_CCREG]=CCREG;
9914               regs[k+1].wasdirty|=1<<HOST_CCREG;
9915               regs[k].dirty|=1<<HOST_CCREG;
9916               regs[k].wasconst&=~(1<<HOST_CCREG);
9917               regs[k].isconst&=~(1<<HOST_CCREG);
9918               k++;
9919             }
9920           }
9921           else {
9922             //printf("Fail Extend CC, %x ->\n",start+k*4);
9923           }
9924         }
9925       }
9926       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9927          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9928          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9929          itype[i]!=FCONV&&itype[i]!=FCOMP&&
9930          itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
9931       {
9932         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9933       }
9934     }
9935   }
9936   
9937   // This allocates registers (if possible) one instruction prior
9938   // to use, which can avoid a load-use penalty on certain CPUs.
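  // For example, if instruction i+1 reads a guest register that it will have
  // in host register hr, and hr is free at instruction i with no conflicting
  // entry requirement, the mapping is extended back to i so the value is
  // already loaded by the time i+1 executes.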
9939   for(i=0;i<slen-1;i++)
9940   {
9941     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9942     {
9943       if(!bt[i+1])
9944       {
9945         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9946            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9947         {
9948           if(rs1[i+1]) {
9949             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9950             {
9951               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9952               {
9953                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9954                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9955                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9956                 regs[i].isconst&=~(1<<hr);
9957                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9958                 constmap[i][hr]=constmap[i+1][hr];
9959                 regs[i+1].wasdirty&=~(1<<hr);
9960                 regs[i].dirty&=~(1<<hr);
9961               }
9962             }
9963           }
9964           if(rs2[i+1]) {
9965             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9966             {
9967               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9968               {
9969                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9970                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9971                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9972                 regs[i].isconst&=~(1<<hr);
9973                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9974                 constmap[i][hr]=constmap[i+1][hr];
9975                 regs[i+1].wasdirty&=~(1<<hr);
9976                 regs[i].dirty&=~(1<<hr);
9977               }
9978             }
9979           }
9980           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9981             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9982             {
9983               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9984               {
9985                 regs[i].regmap[hr]=rs1[i+1];
9986                 regmap_pre[i+1][hr]=rs1[i+1];
9987                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9988                 regs[i].isconst&=~(1<<hr);
9989                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9990                 constmap[i][hr]=constmap[i+1][hr];
9991                 regs[i+1].wasdirty&=~(1<<hr);
9992                 regs[i].dirty&=~(1<<hr);
9993               }
9994             }
9995           }
9996           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9997             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9998             {
9999               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10000               {
10001                 regs[i].regmap[hr]=rs1[i+1];
10002                 regmap_pre[i+1][hr]=rs1[i+1];
10003                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10004                 regs[i].isconst&=~(1<<hr);
10005                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10006                 constmap[i][hr]=constmap[i+1][hr];
10007                 regs[i+1].wasdirty&=~(1<<hr);
10008                 regs[i].dirty&=~(1<<hr);
10009               }
10010             }
10011           }
10012           #ifndef HOST_IMM_ADDR32
10013           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10014             hr=get_reg(regs[i+1].regmap,TLREG);
10015             if(hr>=0) {
10016               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10017               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10018                 int nr;
10019                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10020                 {
10021                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10022                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10023                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10024                   regs[i].isconst&=~(1<<hr);
10025                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10026                   constmap[i][hr]=constmap[i+1][hr];
10027                   regs[i+1].wasdirty&=~(1<<hr);
10028                   regs[i].dirty&=~(1<<hr);
10029                 }
10030                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10031                 {
10032                   // move it to another register
10033                   regs[i+1].regmap[hr]=-1;
10034                   regmap_pre[i+2][hr]=-1;
10035                   regs[i+1].regmap[nr]=TLREG;
10036                   regmap_pre[i+2][nr]=TLREG;
10037                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10038                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10039                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10040                   regs[i].isconst&=~(1<<nr);
10041                   regs[i+1].isconst&=~(1<<nr);
10042                   regs[i].dirty&=~(1<<nr);
10043                   regs[i+1].wasdirty&=~(1<<nr);
10044                   regs[i+1].dirty&=~(1<<nr);
10045                   regs[i+2].wasdirty&=~(1<<nr);
10046                 }
10047               }
10048             }
10049           }
10050           #endif
10051           if(itype[i+1]==STORE||itype[i+1]==STORELR
10052              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10053             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10054               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10055               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10056               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10057               assert(hr>=0);
10058               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10059               {
10060                 regs[i].regmap[hr]=rs1[i+1];
10061                 regmap_pre[i+1][hr]=rs1[i+1];
10062                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10063                 regs[i].isconst&=~(1<<hr);
10064                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10065                 constmap[i][hr]=constmap[i+1][hr];
10066                 regs[i+1].wasdirty&=~(1<<hr);
10067                 regs[i].dirty&=~(1<<hr);
10068               }
10069             }
10070           }
10071           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10072             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10073               int nr;
10074               hr=get_reg(regs[i+1].regmap,FTEMP);
10075               assert(hr>=0);
10076               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10077               {
10078                 regs[i].regmap[hr]=rs1[i+1];
10079                 regmap_pre[i+1][hr]=rs1[i+1];
10080                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10081                 regs[i].isconst&=~(1<<hr);
10082                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10083                 constmap[i][hr]=constmap[i+1][hr];
10084                 regs[i+1].wasdirty&=~(1<<hr);
10085                 regs[i].dirty&=~(1<<hr);
10086               }
10087               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10088               {
10089                 // move it to another register
10090                 regs[i+1].regmap[hr]=-1;
10091                 regmap_pre[i+2][hr]=-1;
10092                 regs[i+1].regmap[nr]=FTEMP;
10093                 regmap_pre[i+2][nr]=FTEMP;
10094                 regs[i].regmap[nr]=rs1[i+1];
10095                 regmap_pre[i+1][nr]=rs1[i+1];
10096                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10097                 regs[i].isconst&=~(1<<nr);
10098                 regs[i+1].isconst&=~(1<<nr);
10099                 regs[i].dirty&=~(1<<nr);
10100                 regs[i+1].wasdirty&=~(1<<nr);
10101                 regs[i+1].dirty&=~(1<<nr);
10102                 regs[i+2].wasdirty&=~(1<<nr);
10103               }
10104             }
10105           }
10106           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10107             if(itype[i+1]==LOAD) 
10108               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10109             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10110               hr=get_reg(regs[i+1].regmap,FTEMP);
10111             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10112               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10113               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10114             }
10115             if(hr>=0&&regs[i].regmap[hr]<0) {
10116               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10117               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10118                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10119                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10120                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10121                 regs[i].isconst&=~(1<<hr);
10122                 regs[i+1].wasdirty&=~(1<<hr);
10123                 regs[i].dirty&=~(1<<hr);
10124               }
10125             }
10126           }
10127         }
10128       }
10129     }
10130   }
10131   
10132   /* Pass 6 - Optimize clean/dirty state */
10133   clean_registers(0,slen-1,1);
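  // (clean_registers - implemented elsewhere in this file - walks the block
  // and, roughly, works out where dirty cached registers really have to be
  // written back, clearing the dirty state where no writeback is needed.)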
10134   
10135   /* Pass 7 - Identify 32-bit registers */
10136   
10137   provisional_r32();
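  // Pass 7 walks the block backwards accumulating, in r32, the set of guest
  // registers that must hold properly sign-extended 32-bit values at each
  // point (requires_32bit[]).  Forward branches merge in the requirements of
  // their target; provisional_r32() above fills in pr32[] so that backward
  // branches have an (approximate) target requirement to merge from.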
10138
10139   uint64_t r32=0; // needs 64 bits - HI/LO (and CCREG) live above bit 31
10140   
10141   for (i=slen-1;i>=0;i--)
10142   {
10143     int hr;
10144     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10145     {
10146       if(ba[i]<start || ba[i]>=(start+slen*4))
10147       {
10148         // Branch out of this block, don't need anything
10149         r32=0;
10150       }
10151       else
10152       {
10153         // Internal branch
10154         // Need whatever matches the target
10155         // (and doesn't get overwritten by the delay slot instruction)
10156         r32=0;
10157         int t=(ba[i]-start)>>2;
10158         if(ba[i]>start+i*4) {
10159           // Forward branch
10160           if(!(requires_32bit[t]&~regs[i].was32))
10161             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10162         }else{
10163           // Backward branch
10164           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10165           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10166           if(!(pr32[t]&~regs[i].was32))
10167             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10168         }
10169       }
10170       // Conditional branch may need registers for following instructions
10171       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10172       {
10173         if(i<slen-2) {
10174           r32|=requires_32bit[i+2];
10175           r32&=regs[i].was32;
10176           // Mark this address as a branch target since it may be called
10177           // upon return from interrupt
10178           bt[i+2]=1;
10179         }
10180       }
10181       // Merge in delay slot
10182       if(!likely[i]) {
10183         // These are overwritten unless the branch is "likely"
10184         // and the delay slot is nullified if not taken
10185         r32&=~(1LL<<rt1[i+1]);
10186         r32&=~(1LL<<rt2[i+1]);
10187       }
10188       // Assume these are needed (delay slot)
10189       if(us1[i+1]>0)
10190       {
10191         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10192       }
10193       if(us2[i+1]>0)
10194       {
10195         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10196       }
10197       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10198       {
10199         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10200       }
10201       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10202       {
10203         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10204       }
10205     }
10206     else if(itype[i]==SYSCALL||itype[i]==HLECALL)
10207     {
10208       // SYSCALL instruction (software interrupt)
10209       r32=0;
10210     }
10211     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10212     {
10213       // ERET instruction (return from interrupt)
10214       r32=0;
10215     }
10216     // Check 32 bits
10217     r32&=~(1LL<<rt1[i]);
10218     r32&=~(1LL<<rt2[i]);
10219     if(us1[i]>0)
10220     {
10221       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10222     }
10223     if(us2[i]>0)
10224     {
10225       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10226     }
10227     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10228     {
10229       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10230     }
10231     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10232     {
10233       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10234     }
10235     requires_32bit[i]=r32;
10236     
10237     // Dirty registers which are 32-bit require 32-bit input,
10238     // as they will be written back as 32-bit values
10239     for(hr=0;hr<HOST_REGS;hr++)
10240     {
10241       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10242         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10243           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10244           requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10245         }
10246       }
10247     }
10248     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10249   }
10250
10251   if(itype[slen-1]==SPAN) {
10252     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10253   }
10254   
10255   /* Debug/disassembly */
10256   if((void*)assem_debug==(void*)printf) 
10257   for(i=0;i<slen;i++)
10258   {
10259     printf("U:");
10260     int r;
10261     for(r=1;r<=CCREG;r++) {
10262       if((unneeded_reg[i]>>r)&1) {
10263         if(r==HIREG) printf(" HI");
10264         else if(r==LOREG) printf(" LO");
10265         else printf(" r%d",r);
10266       }
10267     }
10268 #ifndef FORCE32
10269     printf(" UU:");
10270     for(r=1;r<=CCREG;r++) {
10271       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10272         if(r==HIREG) printf(" HI");
10273         else if(r==LOREG) printf(" LO");
10274         else printf(" r%d",r);
10275       }
10276     }
10277     printf(" 32:");
10278     for(r=0;r<=CCREG;r++) {
10279       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10280       if((regs[i].was32>>r)&1) {
10281         if(r==CCREG) printf(" CC");
10282         else if(r==HIREG) printf(" HI");
10283         else if(r==LOREG) printf(" LO");
10284         else printf(" r%d",r);
10285       }
10286     }
10287 #endif
10288     printf("\n");
10289     #if defined(__i386__) || defined(__x86_64__)
10290     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10291     #endif
10292     #ifdef __arm__
10293     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10294     #endif
10295     printf("needs: ");
10296     if(needed_reg[i]&1) printf("eax ");
10297     if((needed_reg[i]>>1)&1) printf("ecx ");
10298     if((needed_reg[i]>>2)&1) printf("edx ");
10299     if((needed_reg[i]>>3)&1) printf("ebx ");
10300     if((needed_reg[i]>>5)&1) printf("ebp ");
10301     if((needed_reg[i]>>6)&1) printf("esi ");
10302     if((needed_reg[i]>>7)&1) printf("edi ");
10303     printf("r:");
10304     for(r=0;r<=CCREG;r++) {
10305       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10306       if((requires_32bit[i]>>r)&1) {
10307         if(r==CCREG) printf(" CC");
10308         else if(r==HIREG) printf(" HI");
10309         else if(r==LOREG) printf(" LO");
10310         else printf(" r%d",r);
10311       }
10312     }
10313     printf("\n");
10314     /*printf("pr:");
10315     for(r=0;r<=CCREG;r++) {
10316       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10317       if((pr32[i]>>r)&1) {
10318         if(r==CCREG) printf(" CC");
10319         else if(r==HIREG) printf(" HI");
10320         else if(r==LOREG) printf(" LO");
10321         else printf(" r%d",r);
10322       }
10323     }
10324     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10325     printf("\n");*/
10326     #if defined(__i386__) || defined(__x86_64__)
10327     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10328     printf("dirty: ");
10329     if(regs[i].wasdirty&1) printf("eax ");
10330     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10331     if((regs[i].wasdirty>>2)&1) printf("edx ");
10332     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10333     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10334     if((regs[i].wasdirty>>6)&1) printf("esi ");
10335     if((regs[i].wasdirty>>7)&1) printf("edi ");
10336     #endif
10337     #ifdef __arm__
10338     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10339     printf("dirty: ");
10340     if(regs[i].wasdirty&1) printf("r0 ");
10341     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10342     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10343     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10344     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10345     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10346     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10347     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10348     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10349     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10350     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10351     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10352     #endif
10353     printf("\n");
10354     disassemble_inst(i);
10355     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10356     #if defined(__i386__) || defined(__x86_64__)
10357     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10358     if(regs[i].dirty&1) printf("eax ");
10359     if((regs[i].dirty>>1)&1) printf("ecx ");
10360     if((regs[i].dirty>>2)&1) printf("edx ");
10361     if((regs[i].dirty>>3)&1) printf("ebx ");
10362     if((regs[i].dirty>>5)&1) printf("ebp ");
10363     if((regs[i].dirty>>6)&1) printf("esi ");
10364     if((regs[i].dirty>>7)&1) printf("edi ");
10365     #endif
10366     #ifdef __arm__
10367     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10368     if(regs[i].dirty&1) printf("r0 ");
10369     if((regs[i].dirty>>1)&1) printf("r1 ");
10370     if((regs[i].dirty>>2)&1) printf("r2 ");
10371     if((regs[i].dirty>>3)&1) printf("r3 ");
10372     if((regs[i].dirty>>4)&1) printf("r4 ");
10373     if((regs[i].dirty>>5)&1) printf("r5 ");
10374     if((regs[i].dirty>>6)&1) printf("r6 ");
10375     if((regs[i].dirty>>7)&1) printf("r7 ");
10376     if((regs[i].dirty>>8)&1) printf("r8 ");
10377     if((regs[i].dirty>>9)&1) printf("r9 ");
10378     if((regs[i].dirty>>10)&1) printf("r10 ");
10379     if((regs[i].dirty>>12)&1) printf("r12 ");
10380     #endif
10381     printf("\n");
10382     if(regs[i].isconst) {
10383       printf("constants: ");
10384       #if defined(__i386__) || defined(__x86_64__)
10385       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10386       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10387       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10388       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10389       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10390       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10391       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10392       #endif
10393       #ifdef __arm__
10394       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10395       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10396       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10397       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10398       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10399       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10400       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10401       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10402       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10403       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10404       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10405       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10406       #endif
10407       printf("\n");
10408     }
10409 #ifndef FORCE32
10410     printf(" 32:");
10411     for(r=0;r<=CCREG;r++) {
10412       if((regs[i].is32>>r)&1) {
10413         if(r==CCREG) printf(" CC");
10414         else if(r==HIREG) printf(" HI");
10415         else if(r==LOREG) printf(" LO");
10416         else printf(" r%d",r);
10417       }
10418     }
10419     printf("\n");
10420 #endif
10421     /*printf(" p32:");
10422     for(r=0;r<=CCREG;r++) {
10423       if((p32[i]>>r)&1) {
10424         if(r==CCREG) printf(" CC");
10425         else if(r==HIREG) printf(" HI");
10426         else if(r==LOREG) printf(" LO");
10427         else printf(" r%d",r);
10428       }
10429     }
10430     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10431     else printf("\n");*/
10432     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10433       #if defined(__i386__) || defined(__x86_64__)
10434       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10435       if(branch_regs[i].dirty&1) printf("eax ");
10436       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10437       if((branch_regs[i].dirty>>2)&1) printf("edx ");
10438       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10439       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10440       if((branch_regs[i].dirty>>6)&1) printf("esi ");
10441       if((branch_regs[i].dirty>>7)&1) printf("edi ");
10442       #endif
10443       #ifdef __arm__
10444       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10445       if(branch_regs[i].dirty&1) printf("r0 ");
10446       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10447       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10448       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10449       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10450       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10451       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10452       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10453       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10454       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10455       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10456       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10457       #endif
10458 #ifndef FORCE32
10459       printf(" 32:");
10460       for(r=0;r<=CCREG;r++) {
10461         if((branch_regs[i].is32>>r)&1) {
10462           if(r==CCREG) printf(" CC");
10463           else if(r==HIREG) printf(" HI");
10464           else if(r==LOREG) printf(" LO");
10465           else printf(" r%d",r);
10466         }
10467       }
10468       printf("\n");
10469 #endif
10470     }
10471   }
10472
10473   /* Pass 8 - Assembly */
10474   linkcount=0;stubcount=0;
10475   ds=0;is_delayslot=0;
10476   cop1_usable=0;
10477   uint64_t is32_pre=0;
10478   u_int dirty_pre=0;
10479   u_int beginning=(u_int)out;
10480   if((u_int)addr&1) {
10481     ds=1;
10482     pagespan_ds();
10483   }
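        // The low bit of 'addr' appears to tag entry points that begin in the
        // delay slot of a branch spanning a page boundary; pagespan_ds() emits
        // the special prologue for that case, and ds=1 makes the first loop
        // iteration below skip the slot.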
10484   for(i=0;i<slen;i++)
10485   {
10486     //if(ds) printf("ds: ");
10487     if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10488     if(ds) {
10489       ds=0; // Skip delay slot
10490       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10491       instr_addr[i]=0;
10492     } else {
10493       #ifndef DESTRUCTIVE_WRITEBACK
10494       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10495       {
10496         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10497               unneeded_reg[i],unneeded_reg_upper[i]);
10498         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10499               unneeded_reg[i],unneeded_reg_upper[i]);
10500       }
10501       is32_pre=regs[i].is32;
10502       dirty_pre=regs[i].dirty;
10503       #endif
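            // When DESTRUCTIVE_WRITEBACK is not defined, wb_sx/wb_valid above
            // appear to write back registers whose sign-extension status or
            // dirty value from the previous instruction (is32_pre/dirty_pre)
            // would otherwise be lost at this point.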
10504       // Write back dirty values whose register mapping is dropped at this point
10505       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10506       {
10507         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10508                       unneeded_reg[i],unneeded_reg_upper[i]);
10509         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10510       }
10511       // branch target entry point
10512       instr_addr[i]=(u_int)out;
10513       assem_debug("<->\n");
10514       // Load this instruction's source registers (flushing CCREG first if it is being dropped)
10515       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10516         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10517       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10518       address_generation(i,&regs[i],regs[i].regmap_entry);
10519       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10520       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10521       {
10522         // Load the delay slot registers if necessary
10523         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10524           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10525         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10526           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10527         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10528           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10529       }
10530       else if(i+1<slen)
10531       {
10532         // Preload registers for following instruction
10533         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10534           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10535             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10536         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10537           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10538             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10539       }
10540       // TODO: if(is_ooo(i)) address_generation(i+1);
10541       if(itype[i]==CJUMP||itype[i]==FJUMP)
10542         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10543       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10544         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10545       if(bt[i]) cop1_usable=0;
10546       // Emit native code for this instruction
10547       switch(itype[i]) {
10548         case ALU:
10549           alu_assemble(i,&regs[i]);break;
10550         case IMM16:
10551           imm16_assemble(i,&regs[i]);break;
10552         case SHIFT:
10553           shift_assemble(i,&regs[i]);break;
10554         case SHIFTIMM:
10555           shiftimm_assemble(i,&regs[i]);break;
10556         case LOAD:
10557           load_assemble(i,&regs[i]);break;
10558         case LOADLR:
10559           loadlr_assemble(i,&regs[i]);break;
10560         case STORE:
10561           store_assemble(i,&regs[i]);break;
10562         case STORELR:
10563           storelr_assemble(i,&regs[i]);break;
10564         case COP0:
10565           cop0_assemble(i,&regs[i]);break;
10566         case COP1:
10567           cop1_assemble(i,&regs[i]);break;
10568         case C1LS:
10569           c1ls_assemble(i,&regs[i]);break;
10570         case COP2:
10571           cop2_assemble(i,&regs[i]);break;
10572         case C2LS:
10573           c2ls_assemble(i,&regs[i]);break;
10574         case C2OP:
10575           c2op_assemble(i,&regs[i]);break;
10576         case FCONV:
10577           fconv_assemble(i,&regs[i]);break;
10578         case FLOAT:
10579           float_assemble(i,&regs[i]);break;
10580         case FCOMP:
10581           fcomp_assemble(i,&regs[i]);break;
10582         case MULTDIV:
10583           multdiv_assemble(i,&regs[i]);break;
10584         case MOV:
10585           mov_assemble(i,&regs[i]);break;
10586         case SYSCALL:
10587           syscall_assemble(i,&regs[i]);break;
10588         case HLECALL:
10589           hlecall_assemble(i,&regs[i]);break;
10590         case UJUMP:
10591           ujump_assemble(i,&regs[i]);ds=1;break;
10592         case RJUMP:
10593           rjump_assemble(i,&regs[i]);ds=1;break;
10594         case CJUMP:
10595           cjump_assemble(i,&regs[i]);ds=1;break;
10596         case SJUMP:
10597           sjump_assemble(i,&regs[i]);ds=1;break;
10598         case FJUMP:
10599           fjump_assemble(i,&regs[i]);ds=1;break;
10600         case SPAN:
10601           pagespan_assemble(i,&regs[i]);break;
10602       }
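            // The branch assemblers (ujump/rjump/cjump/sjump/fjump) also emit
            // the delay-slot instruction, so they set ds=1 and the next loop
            // iteration skips the slot (its instr_addr stays null).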
10603       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10604         literal_pool(1024);
10605       else
10606         literal_pool_jumpover(256);
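            // Literal pool policy: after an instruction that never falls
            // through, the pool can be dumped in place (threshold 1024);
            // otherwise literal_pool_jumpover presumably emits a branch over
            // the pool, so a smaller threshold (256) is used.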
10607     }
10608   }
10609   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10610   // If the block did not end with an unconditional branch,
10611   // add a jump to the next instruction.
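        // Three cases below: the block ended on an ordinary instruction
        // (store registers, charge the cycle count, jump); it ended on a
        // conditional branch that was not "likely" (branch_regs[i-2] holds the
        // not-taken state and presumably already accounts for the cycles); or
        // it ended on a likely branch, where the not-taken path skips the
        // delay slot, so regs[i-2] is used instead.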
10612   if(i>1) {
10613     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10614       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10615       assert(i==slen);
10616       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10617         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10618         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10619           emit_loadreg(CCREG,HOST_CCREG);
10620         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10621       }
10622       else if(!likely[i-2])
10623       {
10624         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10625         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10626       }
10627       else
10628       {
10629         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10630         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10631       }
10632       add_to_linker((int)out,start+i*4,0);
10633       emit_jmp(0);
10634     }
10635   }
10636   else
10637   {
10638     assert(i>0);
10639     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10640     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10641     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10642       emit_loadreg(CCREG,HOST_CCREG);
10643     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10644     add_to_linker((int)out,start+i*4,0);
10645     emit_jmp(0);
10646   }
10647
10648   // TODO: delay slot stubs?
10649   // Stubs
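        // Out-of-line slow paths recorded during assembly are emitted here,
        // after the main code: read/write stubs for memory accesses that miss
        // the fast path, CC stubs that appear to handle cycle-count/interrupt
        // checks, invalid-code stubs, cop1-unusable exception stubs, and
        // unaligned-store stubs.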
10650   for(i=0;i<stubcount;i++)
10651   {
10652     switch(stubs[i][0])
10653     {
10654       case LOADB_STUB:
10655       case LOADH_STUB:
10656       case LOADW_STUB:
10657       case LOADD_STUB:
10658       case LOADBU_STUB:
10659       case LOADHU_STUB:
10660         do_readstub(i);break;
10661       case STOREB_STUB:
10662       case STOREH_STUB:
10663       case STOREW_STUB:
10664       case STORED_STUB:
10665         do_writestub(i);break;
10666       case CC_STUB:
10667         do_ccstub(i);break;
10668       case INVCODE_STUB:
10669         do_invstub(i);break;
10670       case FP_STUB:
10671         do_cop1stub(i);break;
10672       case STORELR_STUB:
10673         do_unalignedwritestub(i);break;
10674     }
10675   }
10676
10677   /* Pass 9 - Linker */
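        // link_addr[i] holds {branch location, target vaddr, internal flag}.
        // For external targets an emit_extjump trampoline is generated; if
        // check_addr() already knows compiled code for the target, the branch
        // is patched straight to it and the trampoline is recorded with
        // add_link() (presumably for later re-linking), otherwise the branch
        // goes through the trampoline. Internal targets are patched directly
        // to instr_addr[].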
10678   for(i=0;i<linkcount;i++)
10679   {
10680     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10681     literal_pool(64);
10682     if(!link_addr[i][2])
10683     {
10684       void *stub=out;
10685       void *addr=check_addr(link_addr[i][1]);
10686       emit_extjump(link_addr[i][0],link_addr[i][1]);
10687       if(addr) {
10688         set_jump_target(link_addr[i][0],(int)addr);
10689         add_link(link_addr[i][1],stub);
10690       }
10691       else set_jump_target(link_addr[i][0],(int)stub);
10692     }
10693     else
10694     {
10695       // Internal branch
10696       int target=(link_addr[i][1]-start)>>2;
10697       assert(target>=0&&target<slen);
10698       assert(instr_addr[target]);
10699       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10700       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10701       //#else
10702       set_jump_target(link_addr[i][0],instr_addr[target]);
10703       //#endif
10704     }
10705   }
10706   // External Branch Targets (jump_in)
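        // Each branch-target instruction (and the block start) is published
        // in two lists: jump_dirty points at the code itself, while jump_in
        // points at the do_dirty_stub() entry, which presumably re-validates
        // the block (e.g. against the shadow copy below) before entering it.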
10707   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10708   for(i=0;i<slen;i++)
10709   {
10710     if(bt[i]||i==0)
10711     {
10712       if(instr_addr[i]) // TODO - delay slots (=null)
10713       {
10714         u_int vaddr=start+i*4;
10715         u_int page=get_page(vaddr);
10716         u_int vpage=get_vpage(vaddr);
10717         literal_pool(256);
10718         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10719         if(!requires_32bit[i])
10720         {
10721           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10722           assem_debug("jump_in: %x\n",start+i*4);
10723           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10724           int entry_point=do_dirty_stub(i);
10725           ll_add(jump_in+page,vaddr,(void *)entry_point);
10726           // If there was an existing entry in the hash table,
10727           // replace it with the new address.
10728           // Don't add new entries.  We'll insert the
10729           // ones that actually get used in check_addr().
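                // Hash bins are assumed to be four ints holding two
                // (vaddr, entry_point) pairs:
                //   ht_bin[0]/ht_bin[1] = most recent pair
                //   ht_bin[2]/ht_bin[3] = older pair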
10730           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10731           if(ht_bin[0]==vaddr) {
10732             ht_bin[1]=entry_point;
10733           }
10734           if(ht_bin[2]==vaddr) {
10735             ht_bin[3]=entry_point;
10736           }
10737         }
10738         else
10739         {
10740           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10741           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10742           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10743           //int entry_point=(int)out;
10744           ////assem_debug("entry_point: %x\n",entry_point);
10745           //load_regs_entry(i);
10746           //if(entry_point==(int)out)
10747           //  entry_point=instr_addr[i];
10748           //else
10749           //  emit_jmp(instr_addr[i]);
10750           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10751           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10752           int entry_point=do_dirty_stub(i);
10753           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10754         }
10755       }
10756     }
10757   }
10758   // Write out the literal pool if necessary
10759   literal_pool(0);
10760   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10761   // Align code to an 8-byte boundary
10762   if(((u_int)out)&7) emit_addnop(13);
10763   #endif
10764   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10765   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
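        // Keep a copy of the source MIPS code in the shadow buffer; the dirty
        // stubs registered above can presumably compare it against RAM to
        // detect self-modifying code before re-entering the block.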
10766   memcpy(copy,source,slen*4);
10767   copy+=slen*4;
10768   
10769   #ifdef __arm__
10770   __clear_cache((void *)beginning,out);
10771   #endif
10772   
10773   // If we're within MAX_OUTPUT_BLOCK_SIZE (256K) of the end of the
10774   // translation cache, wrap around so the next block is guaranteed to fit.
10775   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10776   
10777   // Trap writes to any of the pages we compiled
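        // invalid_code[] is cleared so the page counts as holding valid
        // compiled code; setting bit 30 (0x40000000) in memory_map[] appears
        // to force writes through the memory map onto the slow path so the
        // affected blocks get invalidated. For TLB-mapped addresses
        // (>=0xC0000000) the corresponding physical page is flagged as well.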
10778   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10779     invalid_code[i]=0;
10780 #ifndef DISABLE_TLB
10781     memory_map[i]|=0x40000000;
10782     if((signed int)start>=(signed int)0xC0000000) {
10783       assert(using_tlb);
10784       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
10785       invalid_code[j]=0;
10786       memory_map[j]|=0x40000000;
10787       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
10788     }
10789 #endif
10790   }
10791   
10792   /* Pass 10 - Free memory by expiring oldest blocks */
10793   
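        // The translation cache is treated as a ring: 'expirep' is a 16-bit
        // phase where bits 13-15 select one of 8 equal regions (shift =
        // TARGET_SIZE_2-3), bits 11-12 select which structure to purge
        // (jump_in/jump_dirty, jump_out pointers, hash table, jump_out lists),
        // and bits 0-10 walk the 2048 pages. 'end' is the current output
        // pointer's phase plus a quarter of the cycle, so expiry presumably
        // stays ahead of where new code is written.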
10794   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10795   while(expirep!=end)
10796   {
10797     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10798     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10799     inv_debug("EXP: Phase %d\n",expirep);
10800     switch((expirep>>11)&3)
10801     {
10802       case 0:
10803         // Clear jump_in and jump_dirty
10804         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10805         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10806         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10807         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10808         break;
10809       case 1:
10810         // Clear pointers
10811         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10812         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10813         break;
10814       case 2:
10815         // Clear hash table
10816         for(i=0;i<32;i++) {
10817           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10818           if((ht_bin[3]>>shift)==(base>>shift) ||
10819              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10820             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10821             ht_bin[2]=ht_bin[3]=-1;
10822           }
10823           if((ht_bin[1]>>shift)==(base>>shift) ||
10824              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10825             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10826             ht_bin[0]=ht_bin[2];
10827             ht_bin[1]=ht_bin[3];
10828             ht_bin[2]=ht_bin[3]=-1;
10829           }
10830         }
10831         break;
10832       case 3:
10833         // Clear jump_out
10834         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10835         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10836         break;
10837     }
10838     expirep=(expirep+1)&65535;
10839   }
10840   return 0;
10841 }
10842
10843 // vim:shiftwidth=2:expandtab