1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2010 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24
25 #include "emu_if.h" //emulator interface
26
27 #include <sys/mman.h>
28
29 #ifdef __i386__
30 #include "assem_x86.h"
31 #endif
32 #ifdef __x86_64__
33 #include "assem_x64.h"
34 #endif
35 #ifdef __arm__
36 #include "assem_arm.h"
37 #endif
38
39 #define MAXBLOCK 4096
40 #define MAX_OUTPUT_BLOCK_SIZE 262144
41 #define CLOCK_DIVIDER 2
42
43 struct regstat
44 {
45   signed char regmap_entry[HOST_REGS];
46   signed char regmap[HOST_REGS];
47   uint64_t was32;
48   uint64_t is32;
49   uint64_t wasdirty;
50   uint64_t dirty;
51   uint64_t u;
52   uint64_t uu;
53   u_int wasconst;
54   u_int isconst;
55   uint64_t constmap[HOST_REGS];
56 };
57
58 struct ll_entry
59 {
60   u_int vaddr;
61   u_int reg32;
62   void *addr;
63   struct ll_entry *next;
64 };
65
66   u_int start;
67   u_int *source;
68   u_int pagelimit;
69   char insn[MAXBLOCK][10];
70   u_char itype[MAXBLOCK];
71   u_char opcode[MAXBLOCK];
72   u_char opcode2[MAXBLOCK];
73   u_char bt[MAXBLOCK];
74   u_char rs1[MAXBLOCK];
75   u_char rs2[MAXBLOCK];
76   u_char rt1[MAXBLOCK];
77   u_char rt2[MAXBLOCK];
78   u_char us1[MAXBLOCK];
79   u_char us2[MAXBLOCK];
80   u_char dep1[MAXBLOCK];
81   u_char dep2[MAXBLOCK];
82   u_char lt1[MAXBLOCK];
83   int imm[MAXBLOCK];
84   u_int ba[MAXBLOCK];
85   char likely[MAXBLOCK];
86   char is_ds[MAXBLOCK];
87   uint64_t unneeded_reg[MAXBLOCK];
88   uint64_t unneeded_reg_upper[MAXBLOCK];
89   uint64_t branch_unneeded_reg[MAXBLOCK];
90   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91   uint64_t p32[MAXBLOCK];
92   uint64_t pr32[MAXBLOCK];
93   signed char regmap_pre[MAXBLOCK][HOST_REGS];
94   signed char regmap[MAXBLOCK][HOST_REGS];
95   signed char regmap_entry[MAXBLOCK][HOST_REGS];
96   uint64_t constmap[MAXBLOCK][HOST_REGS];
97   uint64_t known_value[HOST_REGS];
98   u_int known_reg;
99   struct regstat regs[MAXBLOCK];
100   struct regstat branch_regs[MAXBLOCK];
101   u_int needed_reg[MAXBLOCK];
102   uint64_t requires_32bit[MAXBLOCK];
103   u_int wont_dirty[MAXBLOCK];
104   u_int will_dirty[MAXBLOCK];
105   int ccadj[MAXBLOCK];
106   int slen;
107   u_int instr_addr[MAXBLOCK];
108   u_int link_addr[MAXBLOCK][3];
109   int linkcount;
110   u_int stubs[MAXBLOCK*3][8];
111   int stubcount;
112   u_int literals[1024][2];
113   int literalcount;
114   int is_delayslot;
115   int cop1_usable;
116   u_char *out;
117   struct ll_entry *jump_in[4096];
118   struct ll_entry *jump_out[4096];
119   struct ll_entry *jump_dirty[4096];
120   u_int hash_table[65536][4]  __attribute__((aligned(16)));
121   char shadow[1048576]  __attribute__((aligned(16)));
122   void *copy;
123   int expirep;
124   u_int using_tlb;
125   u_int stop_after_jal;
126   extern u_char restore_candidate[512];
127   extern int cycle_count;
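
/* Editorial note (not part of the original source): the 65536-entry hash_table
   above acts as a small two-way cache in front of the jump_in lists.  A minimal
   sketch of the bin layout, using a hypothetical helper name, is kept here for
   reference; the real lookups are open-coded in get_addr(), get_addr_ht() and
   check_addr() below. */
#if 0
static inline u_int *example_hash_bin(u_int vaddr)
{
  /* bin[0]/bin[1] hold the most recently used {virtual address, native code
     pointer} pair, bin[2]/bin[3] the runner-up; slots invalidated by
     remove_hash() are set to -1 */
  return hash_table[((vaddr>>16)^vaddr)&0xFFFF];
}
#endif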
128
129   /* registers that may be allocated */
130   /* 1-31 gpr */
131 #define HIREG 32 // hi
132 #define LOREG 33 // lo
133 #define FSREG 34 // FPU status (FCSR)
134 #define CSREG 35 // Coprocessor status
135 #define CCREG 36 // Cycle count
136 #define INVCP 37 // Pointer to invalid_code
137 #define TEMPREG 38
138 #define FTEMP 38 // FPU/LDL/LDR temporary register
139 #define PTEMP 39 // Prefetch temporary register
140 #define TLREG 40 // TLB mapping offset
141 #define RHASH 41 // Return address hash
142 #define RHTBL 42 // Return address hash table address
143 #define RTEMP 43 // JR/JALR address register
144 #define MAXREG 43
145 #define AGEN1 44 // Address generation temporary register
146 #define AGEN2 45 // Address generation temporary register
147 #define MGEN1 46 // Maptable address generation temporary register
148 #define MGEN2 47 // Maptable address generation temporary register
149 #define BTREG 48 // Branch target temporary register
150
151   /* instruction types */
152 #define NOP 0     // No operation
153 #define LOAD 1    // Load
154 #define STORE 2   // Store
155 #define LOADLR 3  // Unaligned load
156 #define STORELR 4 // Unaligned store
157 #define MOV 5     // Move 
158 #define ALU 6     // Arithmetic/logic
159 #define MULTDIV 7 // Multiply/divide
160 #define SHIFT 8   // Shift by register
161 #define SHIFTIMM 9 // Shift by immediate
162 #define IMM16 10  // 16-bit immediate
163 #define RJUMP 11  // Unconditional jump to register
164 #define UJUMP 12  // Unconditional jump
165 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166 #define SJUMP 14  // Conditional branch (regimm format)
167 #define COP0 15   // Coprocessor 0
168 #define COP1 16   // Coprocessor 1
169 #define C1LS 17   // Coprocessor 1 load/store
170 #define FJUMP 18  // Conditional branch (floating point)
171 #define FLOAT 19  // Floating point unit
172 #define FCONV 20  // Convert integer to float
173 #define FCOMP 21  // Floating point compare (sets FSREG)
174 #define SYSCALL 22 // SYSCALL
175 #define OTHER 23  // Other
176 #define SPAN 24   // Branch/delay slot spans 2 pages
177 #define NI 25     // Not implemented
178 #define HLECALL 26 // PCSX fake opcodes for HLE
179 #define COP2 27   // Coprocessor 2 move
180 #define C2LS 28   // Coprocessor 2 load/store
181 #define C2OP 29   // Coprocessor 2 operation
182 #define INTCALL 30 // Call interpreter to handle rare corner cases
183
184   /* stubs */
185 #define CC_STUB 1
186 #define FP_STUB 2
187 #define LOADB_STUB 3
188 #define LOADH_STUB 4
189 #define LOADW_STUB 5
190 #define LOADD_STUB 6
191 #define LOADBU_STUB 7
192 #define LOADHU_STUB 8
193 #define STOREB_STUB 9
194 #define STOREH_STUB 10
195 #define STOREW_STUB 11
196 #define STORED_STUB 12
197 #define STORELR_STUB 13
198 #define INVCODE_STUB 14
199
200   /* branch codes */
201 #define TAKEN 1
202 #define NOTTAKEN 2
203 #define NULLDS 3
204
205 // asm linkage
206 int new_recompile_block(int addr);
207 void *get_addr_ht(u_int vaddr);
208 void invalidate_block(u_int block);
209 void invalidate_addr(u_int addr);
210 void remove_hash(int vaddr);
211 void jump_vaddr();
212 void dyna_linker();
213 void dyna_linker_ds();
214 void verify_code();
215 void verify_code_vm();
216 void verify_code_ds();
217 void cc_interrupt();
218 void fp_exception();
219 void fp_exception_ds();
220 void jump_syscall();
221 void jump_syscall_hle();
222 void jump_eret();
223 void jump_hlecall();
224 void jump_intcall();
225 void new_dyna_leave();
226
227 // TLB
228 void TLBWI_new();
229 void TLBWR_new();
230 void read_nomem_new();
231 void read_nomemb_new();
232 void read_nomemh_new();
233 void read_nomemd_new();
234 void write_nomem_new();
235 void write_nomemb_new();
236 void write_nomemh_new();
237 void write_nomemd_new();
238 void write_rdram_new();
239 void write_rdramb_new();
240 void write_rdramh_new();
241 void write_rdramd_new();
242 extern u_int memory_map[1048576];
243
244 // Needed by assembler
245 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
246 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
247 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
248 void load_all_regs(signed char i_regmap[]);
249 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
250 void load_regs_entry(int t);
251 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
252
253 int tracedebug=0;
254
255 //#define DEBUG_CYCLE_COUNT 1
256
257 void nullf() {}
258 //#define assem_debug printf
259 //#define inv_debug printf
260 #define assem_debug nullf
261 #define inv_debug nullf
262
263 static void tlb_hacks()
264 {
265 #ifndef DISABLE_TLB
266   // Goldeneye hack
267   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
268   {
269     u_int addr;
270     int n;
271     switch (ROM_HEADER->Country_code&0xFF) 
272     {
273       case 0x45: // U
274         addr=0x34b30;
275         break;                   
276       case 0x4A: // J 
277         addr=0x34b70;    
278         break;    
279       case 0x50: // E 
280         addr=0x329f0;
281         break;                        
282       default: 
283         // Unknown country code
284         addr=0;
285         break;
286     }
287     u_int rom_addr=(u_int)rom;
288     #ifdef ROM_COPY
289     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
290     // in the lower 4G of memory to use this hack.  Copy it if necessary.
291     if((void *)rom>(void *)0xffffffff) {
292       munmap(ROM_COPY, 67108864);
293       if(mmap(ROM_COPY, 12582912,
294               PROT_READ | PROT_WRITE,
295               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
296               -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
297       memcpy(ROM_COPY,rom,12582912);
298       rom_addr=(u_int)ROM_COPY;
299     }
300     #endif
301     if(addr) {
302       for(n=0x7F000;n<0x80000;n++) {
303         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
304       }
305     }
306   }
307 #endif
308 }
309
310 static u_int get_page(u_int vaddr)
311 {
312   u_int page=(vaddr^0x80000000)>>12;
313 #ifndef DISABLE_TLB
314   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
315 #endif
316   if(page>2048) page=2048+(page&2047);
317   return page;
318 }
319
320 static u_int get_vpage(u_int vaddr)
321 {
322   u_int vpage=(vaddr^0x80000000)>>12;
323 #ifndef DISABLE_TLB
324   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
325 #endif
326   if(vpage>2048) vpage=2048+(vpage&2047);
327   return vpage;
328 }
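
/* Worked example (editorial addition, a sketch only): ignoring the TLB special
   case, get_page() flips the 0x80000000 bit (so KSEG0 RAM maps to the low
   pages) and clamps distant pages into the 2048..4095 alias range used to
   index jump_in/jump_out. */
#if 0
static void example_page_mapping(void)
{
  assert(get_page(0x80000000) == 0);    /* first 4K page of RAM */
  assert(get_page(0x80001000) == 1);    /* next 4K page */
  assert(get_page(0x9fc00000) == 3072); /* distant page aliases into 2048..4095 */
}
#endif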
329
330 // Get address from virtual address
331 // This is called from the recompiled JR/JALR instructions
332 void *get_addr(u_int vaddr)
333 {
334   u_int page=get_page(vaddr);
335   u_int vpage=get_vpage(vaddr);
336   struct ll_entry *head;
337   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
338   head=jump_in[page];
339   while(head!=NULL) {
340     if(head->vaddr==vaddr&&head->reg32==0) {
341   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
342       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
343       ht_bin[3]=ht_bin[1];
344       ht_bin[2]=ht_bin[0];
345       ht_bin[1]=(int)head->addr;
346       ht_bin[0]=vaddr;
347       return head->addr;
348     }
349     head=head->next;
350   }
351   head=jump_dirty[vpage];
352   while(head!=NULL) {
353     if(head->vaddr==vaddr&&head->reg32==0) {
354       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
355       // Don't restore blocks which are about to expire from the cache
356       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
357       if(verify_dirty(head->addr)) {
358         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
359         invalid_code[vaddr>>12]=0;
360         memory_map[vaddr>>12]|=0x40000000;
361         if(vpage<2048) {
362 #ifndef DISABLE_TLB
363           if(tlb_LUT_r[vaddr>>12]) {
364             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
365             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
366           }
367 #endif
368           restore_candidate[vpage>>3]|=1<<(vpage&7);
369         }
370         else restore_candidate[page>>3]|=1<<(page&7);
371         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
372         if(ht_bin[0]==vaddr) {
373           ht_bin[1]=(int)head->addr; // Replace existing entry
374         }
375         else
376         {
377           ht_bin[3]=ht_bin[1];
378           ht_bin[2]=ht_bin[0];
379           ht_bin[1]=(int)head->addr;
380           ht_bin[0]=vaddr;
381         }
382         return head->addr;
383       }
384     }
385     head=head->next;
386   }
387   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
388   int r=new_recompile_block(vaddr);
389   if(r==0) return get_addr(vaddr);
390   // Execute in unmapped page, generate pagefault exception
391   Status|=2;
392   Cause=(vaddr<<31)|0x8;
393   EPC=(vaddr&1)?vaddr-5:vaddr;
394   BadVAddr=(vaddr&~1);
395   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
396   EntryHi=BadVAddr&0xFFFFE000;
397   return get_addr_ht(0x80000000);
398 }
399 // Look up address in hash table first
400 void *get_addr_ht(u_int vaddr)
401 {
402   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
403   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
404   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
405   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
406   return get_addr(vaddr);
407 }
408
409 void *get_addr_32(u_int vaddr,u_int flags)
410 {
411 #ifdef FORCE32
412   return get_addr(vaddr);
413 #else
414   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
415   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
416   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
417   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
418   u_int page=get_page(vaddr);
419   u_int vpage=get_vpage(vaddr);
420   struct ll_entry *head;
421   head=jump_in[page];
422   while(head!=NULL) {
423     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
424       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
425       if(head->reg32==0) {
426         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
427         if(ht_bin[0]==-1) {
428           ht_bin[1]=(int)head->addr;
429           ht_bin[0]=vaddr;
430         }else if(ht_bin[2]==-1) {
431           ht_bin[3]=(int)head->addr;
432           ht_bin[2]=vaddr;
433         }
434         //ht_bin[3]=ht_bin[1];
435         //ht_bin[2]=ht_bin[0];
436         //ht_bin[1]=(int)head->addr;
437         //ht_bin[0]=vaddr;
438       }
439       return head->addr;
440     }
441     head=head->next;
442   }
443   head=jump_dirty[vpage];
444   while(head!=NULL) {
445     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
446       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
447       // Don't restore blocks which are about to expire from the cache
448       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
449       if(verify_dirty(head->addr)) {
450         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
451         invalid_code[vaddr>>12]=0;
452         memory_map[vaddr>>12]|=0x40000000;
453         if(vpage<2048) {
454 #ifndef DISABLE_TLB
455           if(tlb_LUT_r[vaddr>>12]) {
456             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
457             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
458           }
459 #endif
460           restore_candidate[vpage>>3]|=1<<(vpage&7);
461         }
462         else restore_candidate[page>>3]|=1<<(page&7);
463         if(head->reg32==0) {
464           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
465           if(ht_bin[0]==-1) {
466             ht_bin[1]=(int)head->addr;
467             ht_bin[0]=vaddr;
468           }else if(ht_bin[2]==-1) {
469             ht_bin[3]=(int)head->addr;
470             ht_bin[2]=vaddr;
471           }
472           //ht_bin[3]=ht_bin[1];
473           //ht_bin[2]=ht_bin[0];
474           //ht_bin[1]=(int)head->addr;
475           //ht_bin[0]=vaddr;
476         }
477         return head->addr;
478       }
479     }
480     head=head->next;
481   }
482   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
483   int r=new_recompile_block(vaddr);
484   if(r==0) return get_addr(vaddr);
485   // Execute in unmapped page, generate pagefault exception
486   Status|=2;
487   Cause=(vaddr<<31)|0x8;
488   EPC=(vaddr&1)?vaddr-5:vaddr;
489   BadVAddr=(vaddr&~1);
490   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
491   EntryHi=BadVAddr&0xFFFFE000;
492   return get_addr_ht(0x80000000);
493 #endif
494 }
495
496 void clear_all_regs(signed char regmap[])
497 {
498   int hr;
499   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
500 }
501
502 signed char get_reg(signed char regmap[],int r)
503 {
504   int hr;
505   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
506   return -1;
507 }
508
509 // Find a register that is available for two consecutive cycles
510 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
511 {
512   int hr;
513   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
514   return -1;
515 }
516
517 int count_free_regs(signed char regmap[])
518 {
519   int count=0;
520   int hr;
521   for(hr=0;hr<HOST_REGS;hr++)
522   {
523     if(hr!=EXCLUDE_REG) {
524       if(regmap[hr]<0) count++;
525     }
526   }
527   return count;
528 }
529
530 void dirty_reg(struct regstat *cur,signed char reg)
531 {
532   int hr;
533   if(!reg) return;
534   for (hr=0;hr<HOST_REGS;hr++) {
535     if((cur->regmap[hr]&63)==reg) {
536       cur->dirty|=1<<hr;
537     }
538   }
539 }
540
541 // If we dirty the lower half of a 64-bit register which is now being
542 // sign-extended, we need to dump the upper half.
543 // Note: Do this only after completion of the instruction, because
544 // some instructions may need to read the full 64-bit value even if
545 // overwriting it (e.g. SLTI, DSRA32).
546 static void flush_dirty_uppers(struct regstat *cur)
547 {
548   int hr,reg;
549   for (hr=0;hr<HOST_REGS;hr++) {
550     if((cur->dirty>>hr)&1) {
551       reg=cur->regmap[hr];
552       if(reg>=64) 
553         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
554     }
555   }
556 }
557
558 void set_const(struct regstat *cur,signed char reg,uint64_t value)
559 {
560   int hr;
561   if(!reg) return;
562   for (hr=0;hr<HOST_REGS;hr++) {
563     if(cur->regmap[hr]==reg) {
564       cur->isconst|=1<<hr;
565       cur->constmap[hr]=value;
566     }
567     else if((cur->regmap[hr]^64)==reg) {
568       cur->isconst|=1<<hr;
569       cur->constmap[hr]=value>>32;
570     }
571   }
572 }
573
574 void clear_const(struct regstat *cur,signed char reg)
575 {
576   int hr;
577   if(!reg) return;
578   for (hr=0;hr<HOST_REGS;hr++) {
579     if((cur->regmap[hr]&63)==reg) {
580       cur->isconst&=~(1<<hr);
581     }
582   }
583 }
584
585 int is_const(struct regstat *cur,signed char reg)
586 {
587   int hr;
588   if(!reg) return 1;
589   for (hr=0;hr<HOST_REGS;hr++) {
590     if((cur->regmap[hr]&63)==reg) {
591       return (cur->isconst>>hr)&1;
592     }
593   }
594   return 0;
595 }
596 uint64_t get_const(struct regstat *cur,signed char reg)
597 {
598   int hr;
599   if(!reg) return 0;
600   for (hr=0;hr<HOST_REGS;hr++) {
601     if(cur->regmap[hr]==reg) {
602       return cur->constmap[hr];
603     }
604   }
605   printf("Unknown constant in r%d\n",reg);
606   exit(1);
607 }
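
/* Usage sketch (editorial addition): the constant-propagation helpers above are
   combined by the allocators later in this file; for instance the ORI case in
   imm16_alloc() folds a known source into a new known constant roughly like
   this (same pattern, shown here out of context with a hypothetical wrapper): */
#if 0
static void example_const_fold_ori(struct regstat *current,int i)
{
  if(is_const(current,rs1[i])) {
    int v=get_const(current,rs1[i]);
    set_const(current,rt1[i],v|imm[i]); /* destination becomes a known constant */
  }
  else clear_const(current,rt1[i]);     /* unknown source -> unknown result */
}
#endif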
608
609 // Least soon needed registers
610 // Look at the next ten instructions and see which registers
611 // will be used.  Try not to reallocate these.
612 void lsn(u_char hsn[], int i, int *preferred_reg)
613 {
614   int j;
615   int b=-1;
616   for(j=0;j<9;j++)
617   {
618     if(i+j>=slen) {
619       j=slen-i-1;
620       break;
621     }
622     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
623     {
624       // Don't go past an unconditional jump
625       j++;
626       break;
627     }
628   }
629   for(;j>=0;j--)
630   {
631     if(rs1[i+j]) hsn[rs1[i+j]]=j;
632     if(rs2[i+j]) hsn[rs2[i+j]]=j;
633     if(rt1[i+j]) hsn[rt1[i+j]]=j;
634     if(rt2[i+j]) hsn[rt2[i+j]]=j;
635     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
636       // Stores can allocate zero
637       hsn[rs1[i+j]]=j;
638       hsn[rs2[i+j]]=j;
639     }
640     // On some architectures stores need invc_ptr
641     #if defined(HOST_IMM8)
642     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
643       hsn[INVCP]=j;
644     }
645     #endif
646     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
647     {
648       hsn[CCREG]=j;
649       b=j;
650     }
651   }
652   if(b>=0)
653   {
654     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
655     {
656       // Follow first branch
657       int t=(ba[i+b]-start)>>2;
658       j=7-b;if(t+j>=slen) j=slen-t-1;
659       for(;j>=0;j--)
660       {
661         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
662         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
663         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
664         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
665       }
666     }
667     // TODO: preferred register based on backward branch
668   }
669   // Delay slot should preferably not overwrite branch conditions or cycle count
670   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
671     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
672     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
673     hsn[CCREG]=1;
674     // ...or hash tables
675     hsn[RHASH]=1;
676     hsn[RHTBL]=1;
677   }
678   // Coprocessor load/store needs FTEMP, even if not declared
679   if(itype[i]==C1LS||itype[i]==C2LS) {
680     hsn[FTEMP]=0;
681   }
682   // Load L/R also uses FTEMP as a temporary register
683   if(itype[i]==LOADLR) {
684     hsn[FTEMP]=0;
685   }
686   // Also SWL/SWR/SDL/SDR
687   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
688     hsn[FTEMP]=0;
689   }
690   // Don't remove the TLB registers either
691   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
692     hsn[TLREG]=0;
693   }
694   // Don't remove the miniht registers
695   if(itype[i]==UJUMP||itype[i]==RJUMP)
696   {
697     hsn[RHASH]=0;
698     hsn[RHTBL]=0;
699   }
700 }
701
702 // We only want to allocate registers if we're going to use them again soon
703 int needed_again(int r, int i)
704 {
705   int j;
706   int b=-1;
707   int rn=10;
708   int hr;
709   u_char hsn[MAXREG+1];
710   int preferred_reg;
711   
712   memset(hsn,10,sizeof(hsn));
713   lsn(hsn,i,&preferred_reg);
714   
715   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
716   {
717     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
718       return 0; // Don't need any registers if exiting the block
719   }
720   for(j=0;j<9;j++)
721   {
722     if(i+j>=slen) {
723       j=slen-i-1;
724       break;
725     }
726     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
727     {
728     // Don't go past an unconditional jump
729       j++;
730       break;
731     }
732     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
733     {
734       break;
735     }
736   }
737   for(;j>=1;j--)
738   {
739     if(rs1[i+j]==r) rn=j;
740     if(rs2[i+j]==r) rn=j;
741     if((unneeded_reg[i+j]>>r)&1) rn=10;
742     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
743     {
744       b=j;
745     }
746   }
747   /*
748   if(b>=0)
749   {
750     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
751     {
752       // Follow first branch
753       int o=rn;
754       int t=(ba[i+b]-start)>>2;
755       j=7-b;if(t+j>=slen) j=slen-t-1;
756       for(;j>=0;j--)
757       {
758         if(!((unneeded_reg[t+j]>>r)&1)) {
759           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
760           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
761         }
762         else rn=o;
763       }
764     }
765   }*/
766   for(hr=0;hr<HOST_REGS;hr++) {
767     if(hr!=EXCLUDE_REG) {
768       if(rn<hsn[hr]) return 1;
769     }
770   }
771   return 0;
772 }
773
774 // Try to match register allocations at the end of a loop with those
775 // at the beginning
776 int loop_reg(int i, int r, int hr)
777 {
778   int j,k;
779   for(j=0;j<9;j++)
780   {
781     if(i+j>=slen) {
782       j=slen-i-1;
783       break;
784     }
785     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
786     {
787     // Don't go past an unconditional jump
788       j++;
789       break;
790     }
791   }
792   k=0;
793   if(i>0){
794     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
795       k--;
796   }
797   for(;k<j;k++)
798   {
799     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
800     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
801     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
802     {
803       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
804       {
805         int t=(ba[i+k]-start)>>2;
806         int reg=get_reg(regs[t].regmap_entry,r);
807         if(reg>=0) return reg;
808         //reg=get_reg(regs[t+1].regmap_entry,r);
809         //if(reg>=0) return reg;
810       }
811     }
812   }
813   return hr;
814 }
815
816
817 // Allocate every register, preserving source/target regs
818 void alloc_all(struct regstat *cur,int i)
819 {
820   int hr;
821   
822   for(hr=0;hr<HOST_REGS;hr++) {
823     if(hr!=EXCLUDE_REG) {
824       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
825          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
826       {
827         cur->regmap[hr]=-1;
828         cur->dirty&=~(1<<hr);
829       }
830       // Don't need zeros
831       if((cur->regmap[hr]&63)==0)
832       {
833         cur->regmap[hr]=-1;
834         cur->dirty&=~(1<<hr);
835       }
836     }
837   }
838 }
839
840
841 void div64(int64_t dividend,int64_t divisor)
842 {
843   lo=dividend/divisor;
844   hi=dividend%divisor;
845   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
846   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
847 }
848 void divu64(uint64_t dividend,uint64_t divisor)
849 {
850   lo=dividend/divisor;
851   hi=dividend%divisor;
852   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
853   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
854 }
855
856 void mult64(uint64_t m1,uint64_t m2)
857 {
858    unsigned long long int op1, op2, op3, op4;
859    unsigned long long int result1, result2, result3, result4;
860    unsigned long long int temp1, temp2, temp3, temp4;
861    int sign = 0;
862    
863    if ((int64_t)m1 < 0) /* parameter is unsigned; test the sign bit via a signed cast */
864      {
865     op2 = -m1;
866     sign = 1 - sign;
867      }
868    else op2 = m1;
869    if ((int64_t)m2 < 0)
870      {
871     op4 = -m2;
872     sign = 1 - sign;
873      }
874    else op4 = m2;
875    
876    op1 = op2 & 0xFFFFFFFF;
877    op2 = (op2 >> 32) & 0xFFFFFFFF;
878    op3 = op4 & 0xFFFFFFFF;
879    op4 = (op4 >> 32) & 0xFFFFFFFF;
880    
881    temp1 = op1 * op3;
882    temp2 = (temp1 >> 32) + op1 * op4;
883    temp3 = op2 * op3;
884    temp4 = (temp3 >> 32) + op2 * op4;
885    
886    result1 = temp1 & 0xFFFFFFFF;
887    result2 = temp2 + (temp3 & 0xFFFFFFFF);
888    result3 = (result2 >> 32) + temp4;
889    result4 = (result3 >> 32);
890    
891    lo = result1 | (result2 << 32);
892    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
893    if (sign)
894      {
895     hi = ~hi;
896     if (!lo) hi++;
897     else lo = ~lo + 1;
898      }
899 }
900
901 void multu64(uint64_t m1,uint64_t m2)
902 {
903    unsigned long long int op1, op2, op3, op4;
904    unsigned long long int result1, result2, result3, result4;
905    unsigned long long int temp1, temp2, temp3, temp4;
906    
907    op1 = m1 & 0xFFFFFFFF;
908    op2 = (m1 >> 32) & 0xFFFFFFFF;
909    op3 = m2 & 0xFFFFFFFF;
910    op4 = (m2 >> 32) & 0xFFFFFFFF;
911    
912    temp1 = op1 * op3;
913    temp2 = (temp1 >> 32) + op1 * op4;
914    temp3 = op2 * op3;
915    temp4 = (temp3 >> 32) + op2 * op4;
916    
917    result1 = temp1 & 0xFFFFFFFF;
918    result2 = temp2 + (temp3 & 0xFFFFFFFF);
919    result3 = (result2 >> 32) + temp4;
920    result4 = (result3 >> 32);
921    
922    lo = result1 | (result2 << 32);
923    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
924    
925   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
926   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
927 }
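
/* Cross-check sketch (editorial addition): the four 32x32 partial products in
   mult64()/multu64() reproduce a full 64x64->128 multiply.  On compilers that
   provide the unsigned __int128 extension (an assumption; this file does not
   rely on it) the same hi/lo result can be computed directly: */
#if 0
static void example_multu64_check(uint64_t m1,uint64_t m2)
{
  unsigned __int128 p = (unsigned __int128)m1 * m2;
  lo = (uint64_t)p;         /* low 64 bits of the product */
  hi = (uint64_t)(p >> 64); /* high 64 bits of the product */
}
#endif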
928
929 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
930 {
931   if(bits) {
932     original<<=64-bits;
933     original>>=64-bits;
934     loaded<<=bits;
935     original|=loaded;
936   }
937   else original=loaded;
938   return original;
939 }
940 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
941 {
942   if(bits^56) {
943     original>>=64-(bits^56);
944     original<<=64-(bits^56);
945     loaded>>=bits^56;
946     original|=loaded;
947   }
948   else original=loaded;
949   return original;
950 }
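
/* Worked example (editorial addition): ldl_merge() keeps the low 'bits' bits of
   the old register value and splices the freshly loaded data above them, e.g.: */
#if 0
static void example_ldl_merge(void)
{
  uint64_t r = ldl_merge(0x1111111111111111ULL, 0x00000000AABBCCDDULL, 16);
  /* r == 0x0000AABBCCDD1111: low 16 bits of the old value kept, loaded data
     shifted up past them */
  (void)r;
}
#endif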
951
952 #ifdef __i386__
953 #include "assem_x86.c"
954 #endif
955 #ifdef __x86_64__
956 #include "assem_x64.c"
957 #endif
958 #ifdef __arm__
959 #include "assem_arm.c"
960 #endif
961
962 // Add virtual address mapping to linked list
963 void ll_add(struct ll_entry **head,int vaddr,void *addr)
964 {
965   struct ll_entry *new_entry;
966   new_entry=malloc(sizeof(struct ll_entry));
967   assert(new_entry!=NULL);
968   new_entry->vaddr=vaddr;
969   new_entry->reg32=0;
970   new_entry->addr=addr;
971   new_entry->next=*head;
972   *head=new_entry;
973 }
974
975 // Add virtual address mapping for 32-bit compiled block
976 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
977 {
978   ll_add(head,vaddr,addr);
979 #ifndef FORCE32
980   (*head)->reg32=reg32;
981 #endif
982 }
983
984 // Check if an address is already compiled
985 // but don't return addresses which are about to expire from the cache
986 void *check_addr(u_int vaddr)
987 {
988   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
989   if(ht_bin[0]==vaddr) {
990     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
991       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
992   }
993   if(ht_bin[2]==vaddr) {
994     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
995       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
996   }
997   u_int page=get_page(vaddr);
998   struct ll_entry *head;
999   head=jump_in[page];
1000   while(head!=NULL) {
1001     if(head->vaddr==vaddr&&head->reg32==0) {
1002       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1003         // Update existing entry with current address
1004         if(ht_bin[0]==vaddr) {
1005           ht_bin[1]=(int)head->addr;
1006           return head->addr;
1007         }
1008         if(ht_bin[2]==vaddr) {
1009           ht_bin[3]=(int)head->addr;
1010           return head->addr;
1011         }
1012         // Insert into hash table with low priority.
1013         // Don't evict existing entries, as they are probably
1014         // addresses that are being accessed frequently.
1015         if(ht_bin[0]==-1) {
1016           ht_bin[1]=(int)head->addr;
1017           ht_bin[0]=vaddr;
1018         }else if(ht_bin[2]==-1) {
1019           ht_bin[3]=(int)head->addr;
1020           ht_bin[2]=vaddr;
1021         }
1022         return head->addr;
1023       }
1024     }
1025     head=head->next;
1026   }
1027   return 0;
1028 }
1029
1030 void remove_hash(int vaddr)
1031 {
1032   //printf("remove hash: %x\n",vaddr);
1033   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1034   if(ht_bin[2]==vaddr) {
1035     ht_bin[2]=ht_bin[3]=-1;
1036   }
1037   if(ht_bin[0]==vaddr) {
1038     ht_bin[0]=ht_bin[2];
1039     ht_bin[1]=ht_bin[3];
1040     ht_bin[2]=ht_bin[3]=-1;
1041   }
1042 }
1043
1044 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1045 {
1046   struct ll_entry *next;
1047   while(*head) {
1048     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1049        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1050     {
1051       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1052       remove_hash((*head)->vaddr);
1053       next=(*head)->next;
1054       free(*head);
1055       *head=next;
1056     }
1057     else
1058     {
1059       head=&((*head)->next);
1060     }
1061   }
1062 }
1063
1064 // Remove all entries from linked list
1065 void ll_clear(struct ll_entry **head)
1066 {
1067   struct ll_entry *cur;
1068   struct ll_entry *next;
1069   if((cur=*head)) {
1070     *head=0;
1071     while(cur) {
1072       next=cur->next;
1073       free(cur);
1074       cur=next;
1075     }
1076   }
1077 }
1078
1079 // Dereference the pointers and remove them if they match
1080 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1081 {
1082   u_int old_host_addr=0;
1083   while(head) {
1084     int ptr=get_pointer(head->addr);
1085     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1086     if(((ptr>>shift)==(addr>>shift)) ||
1087        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1088     {
1089       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1090       u_int host_addr=(u_int)kill_pointer(head->addr);
1091
1092       if((host_addr>>12)!=(old_host_addr>>12)) {
1093         #ifdef __arm__
1094         __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1095         #endif
1096         old_host_addr=host_addr;
1097       }
1098     }
1099     head=head->next;
1100   }
1101   #ifdef __arm__
1102   if (old_host_addr)
1103     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1104   #endif
1105 }
1106
1107 // This is called when we write to a compiled block (see do_invstub)
1108 void invalidate_page(u_int page)
1109 {
1110   struct ll_entry *head;
1111   struct ll_entry *next;
1112   u_int old_host_addr=0;
1113   head=jump_in[page];
1114   jump_in[page]=0;
1115   while(head!=NULL) {
1116     inv_debug("INVALIDATE: %x\n",head->vaddr);
1117     remove_hash(head->vaddr);
1118     next=head->next;
1119     free(head);
1120     head=next;
1121   }
1122   head=jump_out[page];
1123   jump_out[page]=0;
1124   while(head!=NULL) {
1125     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1126     u_int host_addr=(u_int)kill_pointer(head->addr);
1127
1128     if((host_addr>>12)!=(old_host_addr>>12)) {
1129       #ifdef __arm__
1130       __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1131       #endif
1132       old_host_addr=host_addr;
1133     }
1134     next=head->next;
1135     free(head);
1136     head=next;
1137   }
1138   #ifdef __arm__
1139   if (old_host_addr)
1140     __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1141   #endif
1142 }
1143 void invalidate_block(u_int block)
1144 {
1145   u_int page=get_page(block<<12);
1146   u_int vpage=get_vpage(block<<12);
1147   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1148   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1149   u_int first,last;
1150   first=last=page;
1151   struct ll_entry *head;
1152   head=jump_dirty[vpage];
1153   //printf("page=%d vpage=%d\n",page,vpage);
1154   while(head!=NULL) {
1155     u_int start,end;
1156     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1157       get_bounds((int)head->addr,&start,&end);
1158       //printf("start: %x end: %x\n",start,end);
1159       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1160         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1161           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1162           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1163         }
1164       }
1165 #ifndef DISABLE_TLB
1166       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1167         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1168           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1169           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1170         }
1171       }
1172 #endif
1173     }
1174     head=head->next;
1175   }
1176   //printf("first=%d last=%d\n",first,last);
1177   invalidate_page(page);
1178   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1179   assert(last<page+5);
1180   // Invalidate the adjacent pages if a block crosses a 4K boundary
1181   while(first<page) {
1182     invalidate_page(first);
1183     first++;
1184   }
1185   for(first=page+1;first<last;first++) {
1186     invalidate_page(first);
1187   }
1188   
1189   // Don't trap writes
1190   invalid_code[block]=1;
1191 #ifndef DISABLE_TLB
1192   // If there is a valid TLB entry for this page, remove write protect
1193   if(tlb_LUT_w[block]) {
1194     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1195     // CHECK: Is this right?
1196     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1197     u_int real_block=tlb_LUT_w[block]>>12;
1198     invalid_code[real_block]=1;
1199     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1200   }
1201   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1202 #endif
1203
1204   #ifdef USE_MINI_HT
1205   memset(mini_ht,-1,sizeof(mini_ht));
1206   #endif
1207 }
1208 void invalidate_addr(u_int addr)
1209 {
1210   invalidate_block(addr>>12);
1211 }
1212 void invalidate_all_pages()
1213 {
1214   u_int page,n;
1215   for(page=0;page<4096;page++)
1216     invalidate_page(page);
1217   for(page=0;page<1048576;page++)
1218     if(!invalid_code[page]) {
1219       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1220       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1221     }
1222   #ifdef __arm__
1223   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1224   #endif
1225   #ifdef USE_MINI_HT
1226   memset(mini_ht,-1,sizeof(mini_ht));
1227   #endif
1228   #ifndef DISABLE_TLB
1229   // TLB
1230   for(page=0;page<0x100000;page++) {
1231     if(tlb_LUT_r[page]) {
1232       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1233       if(!tlb_LUT_w[page]||!invalid_code[page])
1234         memory_map[page]|=0x40000000; // Write protect
1235     }
1236     else memory_map[page]=-1;
1237     if(page==0x80000) page=0xC0000;
1238   }
1239   tlb_hacks();
1240   #endif
1241 }
1242
1243 // Add an entry to jump_out after making a link
1244 void add_link(u_int vaddr,void *src)
1245 {
1246   u_int page=get_page(vaddr);
1247   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1248   ll_add(jump_out+page,vaddr,src);
1249   //int ptr=get_pointer(src);
1250   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1251 }
1252
1253 // If a code block was found to be unmodified (bit was set in
1254 // restore_candidate) and it remains unmodified (bit is clear
1255 // in invalid_code) then move the entries for that 4K page from
1256 // the dirty list to the clean list.
1257 void clean_blocks(u_int page)
1258 {
1259   struct ll_entry *head;
1260   inv_debug("INV: clean_blocks page=%d\n",page);
1261   head=jump_dirty[page];
1262   while(head!=NULL) {
1263     if(!invalid_code[head->vaddr>>12]) {
1264       // Don't restore blocks which are about to expire from the cache
1265       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1266         u_int start,end;
1267         if(verify_dirty((int)head->addr)) {
1268           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1269           u_int i;
1270           u_int inv=0;
1271           get_bounds((int)head->addr,&start,&end);
1272           if(start-(u_int)rdram<RAM_SIZE) {
1273             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1274               inv|=invalid_code[i];
1275             }
1276           }
1277           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1278             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1279             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1280             if(addr<start||addr>=end) inv=1;
1281           }
1282           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1283             inv=1;
1284           }
1285           if(!inv) {
1286             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1287             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1288               u_int ppage=page;
1289 #ifndef DISABLE_TLB
1290               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1291 #endif
1292               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1293               //printf("page=%x, addr=%x\n",page,head->vaddr);
1294               //assert(head->vaddr>>12==(page|0x80000));
1295               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1296               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1297               if(!head->reg32) {
1298                 if(ht_bin[0]==head->vaddr) {
1299                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1300                 }
1301                 if(ht_bin[2]==head->vaddr) {
1302                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1303                 }
1304               }
1305             }
1306           }
1307         }
1308       }
1309     }
1310     head=head->next;
1311   }
1312 }
1313
1314
1315 void mov_alloc(struct regstat *current,int i)
1316 {
1317   // Note: Don't need to actually alloc the source registers
1318   if((~current->is32>>rs1[i])&1) {
1319     //alloc_reg64(current,i,rs1[i]);
1320     alloc_reg64(current,i,rt1[i]);
1321     current->is32&=~(1LL<<rt1[i]);
1322   } else {
1323     //alloc_reg(current,i,rs1[i]);
1324     alloc_reg(current,i,rt1[i]);
1325     current->is32|=(1LL<<rt1[i]);
1326   }
1327   clear_const(current,rs1[i]);
1328   clear_const(current,rt1[i]);
1329   dirty_reg(current,rt1[i]);
1330 }
1331
1332 void shiftimm_alloc(struct regstat *current,int i)
1333 {
1334   clear_const(current,rs1[i]);
1335   clear_const(current,rt1[i]);
1336   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1337   {
1338     if(rt1[i]) {
1339       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1340       else lt1[i]=rs1[i];
1341       alloc_reg(current,i,rt1[i]);
1342       current->is32|=1LL<<rt1[i];
1343       dirty_reg(current,rt1[i]);
1344     }
1345   }
1346   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1347   {
1348     if(rt1[i]) {
1349       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1350       alloc_reg64(current,i,rt1[i]);
1351       current->is32&=~(1LL<<rt1[i]);
1352       dirty_reg(current,rt1[i]);
1353     }
1354   }
1355   if(opcode2[i]==0x3c) // DSLL32
1356   {
1357     if(rt1[i]) {
1358       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1359       alloc_reg64(current,i,rt1[i]);
1360       current->is32&=~(1LL<<rt1[i]);
1361       dirty_reg(current,rt1[i]);
1362     }
1363   }
1364   if(opcode2[i]==0x3e) // DSRL32
1365   {
1366     if(rt1[i]) {
1367       alloc_reg64(current,i,rs1[i]);
1368       if(imm[i]==32) {
1369         alloc_reg64(current,i,rt1[i]);
1370         current->is32&=~(1LL<<rt1[i]);
1371       } else {
1372         alloc_reg(current,i,rt1[i]);
1373         current->is32|=1LL<<rt1[i];
1374       }
1375       dirty_reg(current,rt1[i]);
1376     }
1377   }
1378   if(opcode2[i]==0x3f) // DSRA32
1379   {
1380     if(rt1[i]) {
1381       alloc_reg64(current,i,rs1[i]);
1382       alloc_reg(current,i,rt1[i]);
1383       current->is32|=1LL<<rt1[i];
1384       dirty_reg(current,rt1[i]);
1385     }
1386   }
1387 }
1388
1389 void shift_alloc(struct regstat *current,int i)
1390 {
1391   if(rt1[i]) {
1392     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1393     {
1394       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1395       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1396       alloc_reg(current,i,rt1[i]);
1397       if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1398       current->is32|=1LL<<rt1[i];
1399     } else { // DSLLV/DSRLV/DSRAV
1400       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1401       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1402       alloc_reg64(current,i,rt1[i]);
1403       current->is32&=~(1LL<<rt1[i]);
1404       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1405         alloc_reg_temp(current,i,-1);
1406     }
1407     clear_const(current,rs1[i]);
1408     clear_const(current,rs2[i]);
1409     clear_const(current,rt1[i]);
1410     dirty_reg(current,rt1[i]);
1411   }
1412 }
1413
1414 void alu_alloc(struct regstat *current,int i)
1415 {
1416   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1417     if(rt1[i]) {
1418       if(rs1[i]&&rs2[i]) {
1419         alloc_reg(current,i,rs1[i]);
1420         alloc_reg(current,i,rs2[i]);
1421       }
1422       else {
1423         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1424         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1425       }
1426       alloc_reg(current,i,rt1[i]);
1427     }
1428     current->is32|=1LL<<rt1[i];
1429   }
1430   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1431     if(rt1[i]) {
1432       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1433       {
1434         alloc_reg64(current,i,rs1[i]);
1435         alloc_reg64(current,i,rs2[i]);
1436         alloc_reg(current,i,rt1[i]);
1437       } else {
1438         alloc_reg(current,i,rs1[i]);
1439         alloc_reg(current,i,rs2[i]);
1440         alloc_reg(current,i,rt1[i]);
1441       }
1442     }
1443     current->is32|=1LL<<rt1[i];
1444   }
1445   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1446     if(rt1[i]) {
1447       if(rs1[i]&&rs2[i]) {
1448         alloc_reg(current,i,rs1[i]);
1449         alloc_reg(current,i,rs2[i]);
1450       }
1451       else
1452       {
1453         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1454         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1455       }
1456       alloc_reg(current,i,rt1[i]);
1457       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1458       {
1459         if(!((current->uu>>rt1[i])&1)) {
1460           alloc_reg64(current,i,rt1[i]);
1461         }
1462         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1463           if(rs1[i]&&rs2[i]) {
1464             alloc_reg64(current,i,rs1[i]);
1465             alloc_reg64(current,i,rs2[i]);
1466           }
1467           else
1468           {
1469             // Is it really worth it to keep 64-bit values in registers?
1470             #ifdef NATIVE_64BIT
1471             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1472             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1473             #endif
1474           }
1475         }
1476         current->is32&=~(1LL<<rt1[i]);
1477       } else {
1478         current->is32|=1LL<<rt1[i];
1479       }
1480     }
1481   }
1482   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1483     if(rt1[i]) {
1484       if(rs1[i]&&rs2[i]) {
1485         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1486           alloc_reg64(current,i,rs1[i]);
1487           alloc_reg64(current,i,rs2[i]);
1488           alloc_reg64(current,i,rt1[i]);
1489         } else {
1490           alloc_reg(current,i,rs1[i]);
1491           alloc_reg(current,i,rs2[i]);
1492           alloc_reg(current,i,rt1[i]);
1493         }
1494       }
1495       else {
1496         alloc_reg(current,i,rt1[i]);
1497         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1498           // DADD used as move, or zeroing
1499           // If we have a 64-bit source, then make the target 64 bits too
1500           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1501             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1502             alloc_reg64(current,i,rt1[i]);
1503           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1504             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1505             alloc_reg64(current,i,rt1[i]);
1506           }
1507           if(opcode2[i]>=0x2e&&rs2[i]) {
1508             // DSUB used as negation - 64-bit result
1509             // If we have a 32-bit register, extend it to 64 bits
1510             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1511             alloc_reg64(current,i,rt1[i]);
1512           }
1513         }
1514       }
1515       if(rs1[i]&&rs2[i]) {
1516         current->is32&=~(1LL<<rt1[i]);
1517       } else if(rs1[i]) {
1518         current->is32&=~(1LL<<rt1[i]);
1519         if((current->is32>>rs1[i])&1)
1520           current->is32|=1LL<<rt1[i];
1521       } else if(rs2[i]) {
1522         current->is32&=~(1LL<<rt1[i]);
1523         if((current->is32>>rs2[i])&1)
1524           current->is32|=1LL<<rt1[i];
1525       } else {
1526         current->is32|=1LL<<rt1[i];
1527       }
1528     }
1529   }
1530   clear_const(current,rs1[i]);
1531   clear_const(current,rs2[i]);
1532   clear_const(current,rt1[i]);
1533   dirty_reg(current,rt1[i]);
1534 }
1535
1536 void imm16_alloc(struct regstat *current,int i)
1537 {
1538   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1539   else lt1[i]=rs1[i];
1540   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1541   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1542     current->is32&=~(1LL<<rt1[i]);
1543     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1544       // TODO: Could preserve the 32-bit flag if the immediate is zero
1545       alloc_reg64(current,i,rt1[i]);
1546       alloc_reg64(current,i,rs1[i]);
1547     }
1548     clear_const(current,rs1[i]);
1549     clear_const(current,rt1[i]);
1550   }
1551   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1552     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1553     current->is32|=1LL<<rt1[i];
1554     clear_const(current,rs1[i]);
1555     clear_const(current,rt1[i]);
1556   }
1557   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1558     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1559       if(rs1[i]!=rt1[i]) {
1560         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1561         alloc_reg64(current,i,rt1[i]);
1562         current->is32&=~(1LL<<rt1[i]);
1563       }
1564     }
1565     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1566     if(is_const(current,rs1[i])) {
1567       int v=get_const(current,rs1[i]);
1568       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1569       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1570       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1571     }
1572     else clear_const(current,rt1[i]);
1573   }
1574   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1575     if(is_const(current,rs1[i])) {
1576       int v=get_const(current,rs1[i]);
1577       set_const(current,rt1[i],v+imm[i]);
1578     }
1579     else clear_const(current,rt1[i]);
1580     current->is32|=1LL<<rt1[i];
1581   }
1582   else {
1583     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1584     current->is32|=1LL<<rt1[i];
1585   }
1586   dirty_reg(current,rt1[i]);
1587 }
1588
1589 void load_alloc(struct regstat *current,int i)
1590 {
1591   clear_const(current,rt1[i]);
1592   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1593   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1594   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1595   if(rt1[i]) {
1596     alloc_reg(current,i,rt1[i]);
1597     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1598     {
1599       current->is32&=~(1LL<<rt1[i]);
1600       alloc_reg64(current,i,rt1[i]);
1601     }
1602     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1603     {
1604       current->is32&=~(1LL<<rt1[i]);
1605       alloc_reg64(current,i,rt1[i]);
1606       alloc_all(current,i);
1607       alloc_reg64(current,i,FTEMP);
1608     }
1609     else current->is32|=1LL<<rt1[i];
1610     dirty_reg(current,rt1[i]);
1611     // If using TLB, need a register for pointer to the mapping table
1612     if(using_tlb) alloc_reg(current,i,TLREG);
1613     // LWL/LWR need a temporary register for the old value
1614     if(opcode[i]==0x22||opcode[i]==0x26)
1615     {
1616       alloc_reg(current,i,FTEMP);
1617       alloc_reg_temp(current,i,-1);
1618     }
1619   }
1620   else
1621   {
1622     // Load to r0 (dummy load)
1623     // but we still need a register to calculate the address
1624     alloc_reg_temp(current,i,-1);
1625   }
1626 }
1627
1628 void store_alloc(struct regstat *current,int i)
1629 {
1630   clear_const(current,rs2[i]);
1631   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1632   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1633   alloc_reg(current,i,rs2[i]);
1634   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1635     alloc_reg64(current,i,rs2[i]);
1636     if(rs2[i]) alloc_reg(current,i,FTEMP);
1637   }
1638   // If using TLB, need a register for pointer to the mapping table
1639   if(using_tlb) alloc_reg(current,i,TLREG);
1640   #if defined(HOST_IMM8)
1641   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1642   else alloc_reg(current,i,INVCP);
1643   #endif
1644   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1645     alloc_reg(current,i,FTEMP);
1646   }
1647   // We need a temporary register for address generation
1648   alloc_reg_temp(current,i,-1);
1649 }
1650
1651 void c1ls_alloc(struct regstat *current,int i)
1652 {
1653   //clear_const(current,rs1[i]); // FIXME
1654   clear_const(current,rt1[i]);
1655   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1656   alloc_reg(current,i,CSREG); // Status
1657   alloc_reg(current,i,FTEMP);
1658   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1659     alloc_reg64(current,i,FTEMP);
1660   }
1661   // If using TLB, need a register for pointer to the mapping table
1662   if(using_tlb) alloc_reg(current,i,TLREG);
1663   #if defined(HOST_IMM8)
1664   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1665   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1666     alloc_reg(current,i,INVCP);
1667   #endif
1668   // We need a temporary register for address generation
1669   alloc_reg_temp(current,i,-1);
1670 }
1671
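// Register allocation for COP2 (GTE) loads/stores (LWC2/SWC2); like
// c1ls_alloc but without the coprocessor-usable check, so CSREG is not
// required here.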
1672 void c2ls_alloc(struct regstat *current,int i)
1673 {
1674   clear_const(current,rt1[i]);
1675   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1676   alloc_reg(current,i,FTEMP);
1677   // If using TLB, need a register for pointer to the mapping table
1678   if(using_tlb) alloc_reg(current,i,TLREG);
1679   #if defined(HOST_IMM8)
1680   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1681   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1682     alloc_reg(current,i,INVCP);
1683   #endif
1684   // We need a temporary register for address generation
1685   alloc_reg_temp(current,i,-1);
1686 }
1687
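// Generic register allocation for MULT/MULTU/DIV/DIVU and their 64-bit
// forms; an architecture-specific backend can supply its own version by
// defining multdiv_alloc.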
1688 #ifndef multdiv_alloc
1689 void multdiv_alloc(struct regstat *current,int i)
1690 {
1691   //  case 0x18: MULT
1692   //  case 0x19: MULTU
1693   //  case 0x1A: DIV
1694   //  case 0x1B: DIVU
1695   //  case 0x1C: DMULT
1696   //  case 0x1D: DMULTU
1697   //  case 0x1E: DDIV
1698   //  case 0x1F: DDIVU
1699   clear_const(current,rs1[i]);
1700   clear_const(current,rs2[i]);
1701   if(rs1[i]&&rs2[i])
1702   {
1703     if((opcode2[i]&4)==0) // 32-bit
1704     {
1705       current->u&=~(1LL<<HIREG);
1706       current->u&=~(1LL<<LOREG);
1707       alloc_reg(current,i,HIREG);
1708       alloc_reg(current,i,LOREG);
1709       alloc_reg(current,i,rs1[i]);
1710       alloc_reg(current,i,rs2[i]);
1711       current->is32|=1LL<<HIREG;
1712       current->is32|=1LL<<LOREG;
1713       dirty_reg(current,HIREG);
1714       dirty_reg(current,LOREG);
1715     }
1716     else // 64-bit
1717     {
1718       current->u&=~(1LL<<HIREG);
1719       current->u&=~(1LL<<LOREG);
1720       current->uu&=~(1LL<<HIREG);
1721       current->uu&=~(1LL<<LOREG);
1722       alloc_reg64(current,i,HIREG);
1723       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1724       alloc_reg64(current,i,rs1[i]);
1725       alloc_reg64(current,i,rs2[i]);
1726       alloc_all(current,i);
1727       current->is32&=~(1LL<<HIREG);
1728       current->is32&=~(1LL<<LOREG);
1729       dirty_reg(current,HIREG);
1730       dirty_reg(current,LOREG);
1731     }
1732   }
1733   else
1734   {
1735     // Multiply by zero is zero.
1736     // MIPS does not have a divide by zero exception.
1737     // The result is undefined, so we return zero.
1738     alloc_reg(current,i,HIREG);
1739     alloc_reg(current,i,LOREG);
1740     current->is32|=1LL<<HIREG;
1741     current->is32|=1LL<<LOREG;
1742     dirty_reg(current,HIREG);
1743     dirty_reg(current,LOREG);
1744   }
1745 }
1746 #endif
1747
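// COP0 register allocation: MFC0 reads a COP0 register into rt1 and MTC0
// writes rs1 to one; these, like TLBR/TLBWI/TLBWR/TLBP/ERET, are handled
// out of line, which is why all host registers are flushed with
// alloc_all.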
1748 void cop0_alloc(struct regstat *current,int i)
1749 {
1750   if(opcode2[i]==0) // MFC0
1751   {
1752     if(rt1[i]) {
1753       clear_const(current,rt1[i]);
1754       alloc_all(current,i);
1755       alloc_reg(current,i,rt1[i]);
1756       current->is32|=1LL<<rt1[i];
1757       dirty_reg(current,rt1[i]);
1758     }
1759   }
1760   else if(opcode2[i]==4) // MTC0
1761   {
1762     if(rs1[i]){
1763       clear_const(current,rs1[i]);
1764       alloc_reg(current,i,rs1[i]);
1765       alloc_all(current,i);
1766     }
1767     else {
1768       alloc_all(current,i); // FIXME: Keep r0
1769       current->u&=~1LL;
1770       alloc_reg(current,i,0);
1771     }
1772   }
1773   else
1774   {
1775     // TLBR/TLBWI/TLBWR/TLBP/ERET
1776     assert(opcode2[i]==0x10);
1777     alloc_all(current,i);
1778   }
1779 }
1780
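// COP1 register-move allocation: MFC1/DMFC1/CFC1 write a GPR (64-bit for
// DMFC1), MTC1/DMTC1/CTC1 read one. CSREG is loaded so the generated
// code can test whether the FPU is enabled.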
1781 void cop1_alloc(struct regstat *current,int i)
1782 {
1783   alloc_reg(current,i,CSREG); // Load status
1784   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1785   {
1786     assert(rt1[i]);
1787     clear_const(current,rt1[i]);
1788     if(opcode2[i]==1) {
1789       alloc_reg64(current,i,rt1[i]); // DMFC1
1790       current->is32&=~(1LL<<rt1[i]);
1791     }else{
1792       alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1793       current->is32|=1LL<<rt1[i];
1794     }
1795     dirty_reg(current,rt1[i]);
1796     alloc_reg_temp(current,i,-1);
1797   }
1798   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1799   {
1800     if(rs1[i]){
1801       clear_const(current,rs1[i]);
1802       if(opcode2[i]==5)
1803         alloc_reg64(current,i,rs1[i]); // DMTC1
1804       else
1805         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1806       alloc_reg_temp(current,i,-1);
1807     }
1808     else {
1809       current->u&=~1LL;
1810       alloc_reg(current,i,0);
1811       alloc_reg_temp(current,i,-1);
1812     }
1813   }
1814 }
1815 void fconv_alloc(struct regstat *current,int i)
1816 {
1817   alloc_reg(current,i,CSREG); // Load status
1818   alloc_reg_temp(current,i,-1);
1819 }
1820 void float_alloc(struct regstat *current,int i)
1821 {
1822   alloc_reg(current,i,CSREG); // Load status
1823   alloc_reg_temp(current,i,-1);
1824 }
1825 void c2op_alloc(struct regstat *current,int i)
1826 {
1827   alloc_reg_temp(current,i,-1);
1828 }
1829 void fcomp_alloc(struct regstat *current,int i)
1830 {
1831   alloc_reg(current,i,CSREG); // Load status
1832   alloc_reg(current,i,FSREG); // Load flags
1833   dirty_reg(current,FSREG); // Flag will be modified
1834   alloc_reg_temp(current,i,-1);
1835 }
1836
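// SYSCALL and similar trap instructions leave the translated block, so
// the cycle counter is allocated and marked dirty, every host register
// is flushed, and tracked constants are discarded.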
1837 void syscall_alloc(struct regstat *current,int i)
1838 {
1839   alloc_cc(current,i);
1840   dirty_reg(current,CCREG);
1841   alloc_all(current,i);
1842   current->isconst=0;
1843 }
1844
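// Choose the allocation routine for the instruction occupying a branch
// delay slot. A jump or branch in the delay slot is not supported; if
// one is found, speculative precompilation is disabled rather than
// attempting to translate it.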
1845 void delayslot_alloc(struct regstat *current,int i)
1846 {
1847   switch(itype[i]) {
1848     case UJUMP:
1849     case CJUMP:
1850     case SJUMP:
1851     case RJUMP:
1852     case FJUMP:
1853     case SYSCALL:
1854     case HLECALL:
1855     case SPAN:
1856       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1857       printf("Disabled speculative precompilation\n");
1858       stop_after_jal=1;
1859       break;
1860     case IMM16:
1861       imm16_alloc(current,i);
1862       break;
1863     case LOAD:
1864     case LOADLR:
1865       load_alloc(current,i);
1866       break;
1867     case STORE:
1868     case STORELR:
1869       store_alloc(current,i);
1870       break;
1871     case ALU:
1872       alu_alloc(current,i);
1873       break;
1874     case SHIFT:
1875       shift_alloc(current,i);
1876       break;
1877     case MULTDIV:
1878       multdiv_alloc(current,i);
1879       break;
1880     case SHIFTIMM:
1881       shiftimm_alloc(current,i);
1882       break;
1883     case MOV:
1884       mov_alloc(current,i);
1885       break;
1886     case COP0:
1887       cop0_alloc(current,i);
1888       break;
1889     case COP1:
1890     case COP2:
1891       cop1_alloc(current,i);
1892       break;
1893     case C1LS:
1894       c1ls_alloc(current,i);
1895       break;
1896     case C2LS:
1897       c2ls_alloc(current,i);
1898       break;
1899     case FCONV:
1900       fconv_alloc(current,i);
1901       break;
1902     case FLOAT:
1903       float_alloc(current,i);
1904       break;
1905     case FCOMP:
1906       fcomp_alloc(current,i);
1907       break;
1908     case C2OP:
1909       c2op_alloc(current,i);
1910       break;
1911   }
1912 }
1913
1914 // Special case where a branch and delay slot span two pages in virtual memory
1915 static void pagespan_alloc(struct regstat *current,int i)
1916 {
1917   current->isconst=0;
1918   current->wasconst=0;
1919   regs[i].wasconst=0;
1920   alloc_all(current,i);
1921   alloc_cc(current,i);
1922   dirty_reg(current,CCREG);
1923   if(opcode[i]==3) // JAL
1924   {
1925     alloc_reg(current,i,31);
1926     dirty_reg(current,31);
1927   }
1928   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1929   {
1930     alloc_reg(current,i,rs1[i]);
1931     if (rt1[i]!=0) {
1932       alloc_reg(current,i,rt1[i]);
1933       dirty_reg(current,rt1[i]);
1934     }
1935   }
1936   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1937   {
1938     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1939     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1940     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1941     {
1942       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1943       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1944     }
1945   }
1946   else
1947   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1948   {
1949     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1950     if(!((current->is32>>rs1[i])&1))
1951     {
1952       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1953     }
1954   }
1955   else
1956   if(opcode[i]==0x11) // BC1
1957   {
1958     alloc_reg(current,i,FSREG);
1959     alloc_reg(current,i,CSREG);
1960   }
1961   //else ...
1962 }
1963
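// Record an entry in the stub table: out-of-line fixup code emitted
// later for the slow path of a branch generated in the main block. Each
// entry holds the stub type, the address of the branch to patch, the
// return address, and up to five type-specific arguments, e.g. (from
// load_assemble below):
//   add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);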
1964 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1965 {
1966   stubs[stubcount][0]=type;
1967   stubs[stubcount][1]=addr;
1968   stubs[stubcount][2]=retaddr;
1969   stubs[stubcount][3]=a;
1970   stubs[stubcount][4]=b;
1971   stubs[stubcount][5]=c;
1972   stubs[stubcount][6]=d;
1973   stubs[stubcount][7]=e;
1974   stubcount++;
1975 }
1976
1977 // Write out a single register
1978 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1979 {
1980   int hr;
1981   for(hr=0;hr<HOST_REGS;hr++) {
1982     if(hr!=EXCLUDE_REG) {
1983       if((regmap[hr]&63)==r) {
1984         if((dirty>>hr)&1) {
1985           if(regmap[hr]<64) {
1986             emit_storereg(r,hr);
1987 #ifndef FORCE32
1988             if((is32>>regmap[hr])&1) {
1989               emit_sarimm(hr,31,hr);
1990               emit_storereg(r|64,hr);
1991             }
1992 #endif
1993           }else{
1994             emit_storereg(r|64,hr);
1995           }
1996         }
1997       }
1998     }
1999   }
2000 }
2001
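// Debugging helpers: mchecksum() hashes main memory, rchecksum() hashes
// the GPR file and rlist() dumps the register state; they are used by
// the tracing code in memdebug() below.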
2002 int mchecksum()
2003 {
2004   //if(!tracedebug) return 0;
2005   int i;
2006   int sum=0;
2007   for(i=0;i<2097152;i++) {
2008     unsigned int temp=sum;
2009     sum<<=1;
2010     sum|=(~temp)>>31;
2011     sum^=((u_int *)rdram)[i];
2012   }
2013   return sum;
2014 }
2015 int rchecksum()
2016 {
2017   int i;
2018   int sum=0;
2019   for(i=0;i<64;i++)
2020     sum^=((u_int *)reg)[i];
2021   return sum;
2022 }
2023 void rlist()
2024 {
2025   int i;
2026   printf("TRACE: ");
2027   for(i=0;i<32;i++)
2028     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2029   printf("\n");
2030 #ifndef DISABLE_COP1
2031   printf("TRACE: ");
2032   for(i=0;i<32;i++)
2033     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2034   printf("\n");
2035 #endif
2036 }
2037
2038 void enabletrace()
2039 {
2040   tracedebug=1;
2041 }
2042
2043 void memdebug(int i)
2044 {
2045   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2046   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2047   //rlist();
2048   //if(tracedebug) {
2049   //if(Count>=-2084597794) {
2050   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2051   //if(0) {
2052     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2053     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2054     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2055     rlist();
2056     #ifdef __i386__
2057     printf("TRACE: %x\n",(&i)[-1]);
2058     #endif
2059     #ifdef __arm__
2060     int j;
2061     printf("TRACE: %x \n",(&j)[10]);
2062     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2063     #endif
2064     //fflush(stdout);
2065   }
2066   //printf("TRACE: %x\n",(&i)[-1]);
2067 }
2068
2069 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2070 {
2071   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2072 }
2073
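// Emit native code for register-register ALU operations
// (ADD/ADDU/SUB/SUBU, the 64-bit DADD*/DSUB* forms, SLT/SLTU and
// AND/OR/XOR/NOR) using the registers assigned earlier by alu_alloc.
// Operands that are r0 are folded into zeroing, moves or negations.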
2074 void alu_assemble(int i,struct regstat *i_regs)
2075 {
2076   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2077     if(rt1[i]) {
2078       signed char s1,s2,t;
2079       t=get_reg(i_regs->regmap,rt1[i]);
2080       if(t>=0) {
2081         s1=get_reg(i_regs->regmap,rs1[i]);
2082         s2=get_reg(i_regs->regmap,rs2[i]);
2083         if(rs1[i]&&rs2[i]) {
2084           assert(s1>=0);
2085           assert(s2>=0);
2086           if(opcode2[i]&2) emit_sub(s1,s2,t);
2087           else emit_add(s1,s2,t);
2088         }
2089         else if(rs1[i]) {
2090           if(s1>=0) emit_mov(s1,t);
2091           else emit_loadreg(rs1[i],t);
2092         }
2093         else if(rs2[i]) {
2094           if(s2>=0) {
2095             if(opcode2[i]&2) emit_neg(s2,t);
2096             else emit_mov(s2,t);
2097           }
2098           else {
2099             emit_loadreg(rs2[i],t);
2100             if(opcode2[i]&2) emit_neg(t,t);
2101           }
2102         }
2103         else emit_zeroreg(t);
2104       }
2105     }
2106   }
2107   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2108     if(rt1[i]) {
2109       signed char s1l,s2l,s1h,s2h,tl,th;
2110       tl=get_reg(i_regs->regmap,rt1[i]);
2111       th=get_reg(i_regs->regmap,rt1[i]|64);
2112       if(tl>=0) {
2113         s1l=get_reg(i_regs->regmap,rs1[i]);
2114         s2l=get_reg(i_regs->regmap,rs2[i]);
2115         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2116         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2117         if(rs1[i]&&rs2[i]) {
2118           assert(s1l>=0);
2119           assert(s2l>=0);
2120           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2121           else emit_adds(s1l,s2l,tl);
2122           if(th>=0) {
2123             #ifdef INVERTED_CARRY
2124             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2125             #else
2126             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2127             #endif
2128             else emit_add(s1h,s2h,th);
2129           }
2130         }
2131         else if(rs1[i]) {
2132           if(s1l>=0) emit_mov(s1l,tl);
2133           else emit_loadreg(rs1[i],tl);
2134           if(th>=0) {
2135             if(s1h>=0) emit_mov(s1h,th);
2136             else emit_loadreg(rs1[i]|64,th);
2137           }
2138         }
2139         else if(rs2[i]) {
2140           if(s2l>=0) {
2141             if(opcode2[i]&2) emit_negs(s2l,tl);
2142             else emit_mov(s2l,tl);
2143           }
2144           else {
2145             emit_loadreg(rs2[i],tl);
2146             if(opcode2[i]&2) emit_negs(tl,tl);
2147           }
2148           if(th>=0) {
2149             #ifdef INVERTED_CARRY
2150             if(s2h>=0) emit_mov(s2h,th);
2151             else emit_loadreg(rs2[i]|64,th);
2152             if(opcode2[i]&2) {
2153               emit_adcimm(-1,th); // x86 has inverted carry flag
2154               emit_not(th,th);
2155             }
2156             #else
2157             if(opcode2[i]&2) {
2158               if(s2h>=0) emit_rscimm(s2h,0,th);
2159               else {
2160                 emit_loadreg(rs2[i]|64,th);
2161                 emit_rscimm(th,0,th);
2162               }
2163             }else{
2164               if(s2h>=0) emit_mov(s2h,th);
2165               else emit_loadreg(rs2[i]|64,th);
2166             }
2167             #endif
2168           }
2169         }
2170         else {
2171           emit_zeroreg(tl);
2172           if(th>=0) emit_zeroreg(th);
2173         }
2174       }
2175     }
2176   }
2177   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2178     if(rt1[i]) {
2179       signed char s1l,s1h,s2l,s2h,t;
2180       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2181       {
2182         t=get_reg(i_regs->regmap,rt1[i]);
2183         //assert(t>=0);
2184         if(t>=0) {
2185           s1l=get_reg(i_regs->regmap,rs1[i]);
2186           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2187           s2l=get_reg(i_regs->regmap,rs2[i]);
2188           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2189           if(rs2[i]==0) // rx<r0
2190           {
2191             assert(s1h>=0);
2192             if(opcode2[i]==0x2a) // SLT
2193               emit_shrimm(s1h,31,t);
2194             else // SLTU (unsigned can not be less than zero)
2195               emit_zeroreg(t);
2196           }
2197           else if(rs1[i]==0) // r0<rx
2198           {
2199             assert(s2h>=0);
2200             if(opcode2[i]==0x2a) // SLT
2201               emit_set_gz64_32(s2h,s2l,t);
2202             else // SLTU (set if not zero)
2203               emit_set_nz64_32(s2h,s2l,t);
2204           }
2205           else {
2206             assert(s1l>=0);assert(s1h>=0);
2207             assert(s2l>=0);assert(s2h>=0);
2208             if(opcode2[i]==0x2a) // SLT
2209               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2210             else // SLTU
2211               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2212           }
2213         }
2214       } else {
2215         t=get_reg(i_regs->regmap,rt1[i]);
2216         //assert(t>=0);
2217         if(t>=0) {
2218           s1l=get_reg(i_regs->regmap,rs1[i]);
2219           s2l=get_reg(i_regs->regmap,rs2[i]);
2220           if(rs2[i]==0) // rx<r0
2221           {
2222             assert(s1l>=0);
2223             if(opcode2[i]==0x2a) // SLT
2224               emit_shrimm(s1l,31,t);
2225             else // SLTU (unsigned can not be less than zero)
2226               emit_zeroreg(t);
2227           }
2228           else if(rs1[i]==0) // r0<rx
2229           {
2230             assert(s2l>=0);
2231             if(opcode2[i]==0x2a) // SLT
2232               emit_set_gz32(s2l,t);
2233             else // SLTU (set if not zero)
2234               emit_set_nz32(s2l,t);
2235           }
2236           else{
2237             assert(s1l>=0);assert(s2l>=0);
2238             if(opcode2[i]==0x2a) // SLT
2239               emit_set_if_less32(s1l,s2l,t);
2240             else // SLTU
2241               emit_set_if_carry32(s1l,s2l,t);
2242           }
2243         }
2244       }
2245     }
2246   }
2247   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2248     if(rt1[i]) {
2249       signed char s1l,s1h,s2l,s2h,th,tl;
2250       tl=get_reg(i_regs->regmap,rt1[i]);
2251       th=get_reg(i_regs->regmap,rt1[i]|64);
2252       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2253       {
2254         assert(tl>=0);
2255         if(tl>=0) {
2256           s1l=get_reg(i_regs->regmap,rs1[i]);
2257           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2258           s2l=get_reg(i_regs->regmap,rs2[i]);
2259           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2260           if(rs1[i]&&rs2[i]) {
2261             assert(s1l>=0);assert(s1h>=0);
2262             assert(s2l>=0);assert(s2h>=0);
2263             if(opcode2[i]==0x24) { // AND
2264               emit_and(s1l,s2l,tl);
2265               emit_and(s1h,s2h,th);
2266             } else
2267             if(opcode2[i]==0x25) { // OR
2268               emit_or(s1l,s2l,tl);
2269               emit_or(s1h,s2h,th);
2270             } else
2271             if(opcode2[i]==0x26) { // XOR
2272               emit_xor(s1l,s2l,tl);
2273               emit_xor(s1h,s2h,th);
2274             } else
2275             if(opcode2[i]==0x27) { // NOR
2276               emit_or(s1l,s2l,tl);
2277               emit_or(s1h,s2h,th);
2278               emit_not(tl,tl);
2279               emit_not(th,th);
2280             }
2281           }
2282           else
2283           {
2284             if(opcode2[i]==0x24) { // AND
2285               emit_zeroreg(tl);
2286               emit_zeroreg(th);
2287             } else
2288             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2289               if(rs1[i]){
2290                 if(s1l>=0) emit_mov(s1l,tl);
2291                 else emit_loadreg(rs1[i],tl);
2292                 if(s1h>=0) emit_mov(s1h,th);
2293                 else emit_loadreg(rs1[i]|64,th);
2294               }
2295               else
2296               if(rs2[i]){
2297                 if(s2l>=0) emit_mov(s2l,tl);
2298                 else emit_loadreg(rs2[i],tl);
2299                 if(s2h>=0) emit_mov(s2h,th);
2300                 else emit_loadreg(rs2[i]|64,th);
2301               }
2302               else{
2303                 emit_zeroreg(tl);
2304                 emit_zeroreg(th);
2305               }
2306             } else
2307             if(opcode2[i]==0x27) { // NOR
2308               if(rs1[i]){
2309                 if(s1l>=0) emit_not(s1l,tl);
2310                 else{
2311                   emit_loadreg(rs1[i],tl);
2312                   emit_not(tl,tl);
2313                 }
2314                 if(s1h>=0) emit_not(s1h,th);
2315                 else{
2316                   emit_loadreg(rs1[i]|64,th);
2317                   emit_not(th,th);
2318                 }
2319               }
2320               else
2321               if(rs2[i]){
2322                 if(s2l>=0) emit_not(s2l,tl);
2323                 else{
2324                   emit_loadreg(rs2[i],tl);
2325                   emit_not(tl,tl);
2326                 }
2327                 if(s2h>=0) emit_not(s2h,th);
2328                 else{
2329                   emit_loadreg(rs2[i]|64,th);
2330                   emit_not(th,th);
2331                 }
2332               }
2333               else {
2334                 emit_movimm(-1,tl);
2335                 emit_movimm(-1,th);
2336               }
2337             }
2338           }
2339         }
2340       }
2341       else
2342       {
2343         // 32 bit
2344         if(tl>=0) {
2345           s1l=get_reg(i_regs->regmap,rs1[i]);
2346           s2l=get_reg(i_regs->regmap,rs2[i]);
2347           if(rs1[i]&&rs2[i]) {
2348             assert(s1l>=0);
2349             assert(s2l>=0);
2350             if(opcode2[i]==0x24) { // AND
2351               emit_and(s1l,s2l,tl);
2352             } else
2353             if(opcode2[i]==0x25) { // OR
2354               emit_or(s1l,s2l,tl);
2355             } else
2356             if(opcode2[i]==0x26) { // XOR
2357               emit_xor(s1l,s2l,tl);
2358             } else
2359             if(opcode2[i]==0x27) { // NOR
2360               emit_or(s1l,s2l,tl);
2361               emit_not(tl,tl);
2362             }
2363           }
2364           else
2365           {
2366             if(opcode2[i]==0x24) { // AND
2367               emit_zeroreg(tl);
2368             } else
2369             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2370               if(rs1[i]){
2371                 if(s1l>=0) emit_mov(s1l,tl);
2372                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2373               }
2374               else
2375               if(rs2[i]){
2376                 if(s2l>=0) emit_mov(s2l,tl);
2377                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2378               }
2379               else emit_zeroreg(tl);
2380             } else
2381             if(opcode2[i]==0x27) { // NOR
2382               if(rs1[i]){
2383                 if(s1l>=0) emit_not(s1l,tl);
2384                 else {
2385                   emit_loadreg(rs1[i],tl);
2386                   emit_not(tl,tl);
2387                 }
2388               }
2389               else
2390               if(rs2[i]){
2391                 if(s2l>=0) emit_not(s2l,tl);
2392                 else {
2393                   emit_loadreg(rs2[i],tl);
2394                   emit_not(tl,tl);
2395                 }
2396               }
2397               else emit_movimm(-1,tl);
2398             }
2399           }
2400         }
2401       }
2402     }
2403   }
2404 }
2405
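// Emit code for immediate-operand instructions: LUI, ADDI/ADDIU,
// DADDI/DADDIU, SLTI/SLTIU and ANDI/ORI/XORI. When the source register
// holds a known constant, the result is materialized directly with
// emit_movimm instead of being recomputed at run time.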
2406 void imm16_assemble(int i,struct regstat *i_regs)
2407 {
2408   if (opcode[i]==0x0f) { // LUI
2409     if(rt1[i]) {
2410       signed char t;
2411       t=get_reg(i_regs->regmap,rt1[i]);
2412       //assert(t>=0);
2413       if(t>=0) {
2414         if(!((i_regs->isconst>>t)&1))
2415           emit_movimm(imm[i]<<16,t);
2416       }
2417     }
2418   }
2419   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2420     if(rt1[i]) {
2421       signed char s,t;
2422       t=get_reg(i_regs->regmap,rt1[i]);
2423       s=get_reg(i_regs->regmap,rs1[i]);
2424       if(rs1[i]) {
2425         //assert(t>=0);
2426         //assert(s>=0);
2427         if(t>=0) {
2428           if(!((i_regs->isconst>>t)&1)) {
2429             if(s<0) {
2430               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2431               emit_addimm(t,imm[i],t);
2432             }else{
2433               if(!((i_regs->wasconst>>s)&1))
2434                 emit_addimm(s,imm[i],t);
2435               else
2436                 emit_movimm(constmap[i][s]+imm[i],t);
2437             }
2438           }
2439         }
2440       } else {
2441         if(t>=0) {
2442           if(!((i_regs->isconst>>t)&1))
2443             emit_movimm(imm[i],t);
2444         }
2445       }
2446     }
2447   }
2448   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2449     if(rt1[i]) {
2450       signed char sh,sl,th,tl;
2451       th=get_reg(i_regs->regmap,rt1[i]|64);
2452       tl=get_reg(i_regs->regmap,rt1[i]);
2453       sh=get_reg(i_regs->regmap,rs1[i]|64);
2454       sl=get_reg(i_regs->regmap,rs1[i]);
2455       if(tl>=0) {
2456         if(rs1[i]) {
2457           assert(sh>=0);
2458           assert(sl>=0);
2459           if(th>=0) {
2460             emit_addimm64_32(sh,sl,imm[i],th,tl);
2461           }
2462           else {
2463             emit_addimm(sl,imm[i],tl);
2464           }
2465         } else {
2466           emit_movimm(imm[i],tl);
2467           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2468         }
2469       }
2470     }
2471   }
2472   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2473     if(rt1[i]) {
2474       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2475       signed char sh,sl,t;
2476       t=get_reg(i_regs->regmap,rt1[i]);
2477       sh=get_reg(i_regs->regmap,rs1[i]|64);
2478       sl=get_reg(i_regs->regmap,rs1[i]);
2479       //assert(t>=0);
2480       if(t>=0) {
2481         if(rs1[i]>0) {
2482           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2483           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2484             if(opcode[i]==0x0a) { // SLTI
2485               if(sl<0) {
2486                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2487                 emit_slti32(t,imm[i],t);
2488               }else{
2489                 emit_slti32(sl,imm[i],t);
2490               }
2491             }
2492             else { // SLTIU
2493               if(sl<0) {
2494                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2495                 emit_sltiu32(t,imm[i],t);
2496               }else{
2497                 emit_sltiu32(sl,imm[i],t);
2498               }
2499             }
2500           }else{ // 64-bit
2501             assert(sl>=0);
2502             if(opcode[i]==0x0a) // SLTI
2503               emit_slti64_32(sh,sl,imm[i],t);
2504             else // SLTIU
2505               emit_sltiu64_32(sh,sl,imm[i],t);
2506           }
2507         }else{
2508           // SLTI(U) with r0 is just stupid,
2509           // nonetheless examples can be found
2510           if(opcode[i]==0x0a) { // SLTI
2511             if(0<imm[i]) emit_movimm(1,t);
2512             else emit_zeroreg(t);
2513           }
2514           else { // SLTIU
2515             if(imm[i]) emit_movimm(1,t);
2516             else emit_zeroreg(t);
2517           }
2518         }
2519       }
2520     }
2521   }
2522   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2523     if(rt1[i]) {
2524       signed char sh,sl,th,tl;
2525       th=get_reg(i_regs->regmap,rt1[i]|64);
2526       tl=get_reg(i_regs->regmap,rt1[i]);
2527       sh=get_reg(i_regs->regmap,rs1[i]|64);
2528       sl=get_reg(i_regs->regmap,rs1[i]);
2529       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2530         if(opcode[i]==0x0c) //ANDI
2531         {
2532           if(rs1[i]) {
2533             if(sl<0) {
2534               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2535               emit_andimm(tl,imm[i],tl);
2536             }else{
2537               if(!((i_regs->wasconst>>sl)&1))
2538                 emit_andimm(sl,imm[i],tl);
2539               else
2540                 emit_movimm(constmap[i][sl]&imm[i],tl);
2541             }
2542           }
2543           else
2544             emit_zeroreg(tl);
2545           if(th>=0) emit_zeroreg(th);
2546         }
2547         else
2548         {
2549           if(rs1[i]) {
2550             if(sl<0) {
2551               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2552             }
2553             if(th>=0) {
2554               if(sh<0) {
2555                 emit_loadreg(rs1[i]|64,th);
2556               }else{
2557                 emit_mov(sh,th);
2558               }
2559             }
2560             if(opcode[i]==0x0d) //ORI
2561             if(sl<0) {
2562               emit_orimm(tl,imm[i],tl);
2563             }else{
2564               if(!((i_regs->wasconst>>sl)&1))
2565                 emit_orimm(sl,imm[i],tl);
2566               else
2567                 emit_movimm(constmap[i][sl]|imm[i],tl);
2568             }
2569             if(opcode[i]==0x0e) //XORI
2570             if(sl<0) {
2571               emit_xorimm(tl,imm[i],tl);
2572             }else{
2573               if(!((i_regs->wasconst>>sl)&1))
2574                 emit_xorimm(sl,imm[i],tl);
2575               else
2576                 emit_movimm(constmap[i][sl]^imm[i],tl);
2577             }
2578           }
2579           else {
2580             emit_movimm(imm[i],tl);
2581             if(th>=0) emit_zeroreg(th);
2582           }
2583         }
2584       }
2585     }
2586   }
2587 }
2588
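// Emit code for shift-by-immediate instructions: SLL/SRL/SRA, the 64-bit
// DSLL/DSRL/DSRA, and DSLL32/DSRL32/DSRA32, whose shift of 32 or more
// bits reduces to moving a word between the halves of the register pair
// plus a residual shift.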
2589 void shiftimm_assemble(int i,struct regstat *i_regs)
2590 {
2591   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2592   {
2593     if(rt1[i]) {
2594       signed char s,t;
2595       t=get_reg(i_regs->regmap,rt1[i]);
2596       s=get_reg(i_regs->regmap,rs1[i]);
2597       //assert(t>=0);
2598       if(t>=0){
2599         if(rs1[i]==0)
2600         {
2601           emit_zeroreg(t);
2602         }
2603         else
2604         {
2605           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2606           if(imm[i]) {
2607             if(opcode2[i]==0) // SLL
2608             {
2609               emit_shlimm(s<0?t:s,imm[i],t);
2610             }
2611             if(opcode2[i]==2) // SRL
2612             {
2613               emit_shrimm(s<0?t:s,imm[i],t);
2614             }
2615             if(opcode2[i]==3) // SRA
2616             {
2617               emit_sarimm(s<0?t:s,imm[i],t);
2618             }
2619           }else{
2620             // Shift by zero
2621             if(s>=0 && s!=t) emit_mov(s,t);
2622           }
2623         }
2624       }
2625       //emit_storereg(rt1[i],t); //DEBUG
2626     }
2627   }
2628   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2629   {
2630     if(rt1[i]) {
2631       signed char sh,sl,th,tl;
2632       th=get_reg(i_regs->regmap,rt1[i]|64);
2633       tl=get_reg(i_regs->regmap,rt1[i]);
2634       sh=get_reg(i_regs->regmap,rs1[i]|64);
2635       sl=get_reg(i_regs->regmap,rs1[i]);
2636       if(tl>=0) {
2637         if(rs1[i]==0)
2638         {
2639           emit_zeroreg(tl);
2640           if(th>=0) emit_zeroreg(th);
2641         }
2642         else
2643         {
2644           assert(sl>=0);
2645           assert(sh>=0);
2646           if(imm[i]) {
2647             if(opcode2[i]==0x38) // DSLL
2648             {
2649               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2650               emit_shlimm(sl,imm[i],tl);
2651             }
2652             if(opcode2[i]==0x3a) // DSRL
2653             {
2654               emit_shrdimm(sl,sh,imm[i],tl);
2655               if(th>=0) emit_shrimm(sh,imm[i],th);
2656             }
2657             if(opcode2[i]==0x3b) // DSRA
2658             {
2659               emit_shrdimm(sl,sh,imm[i],tl);
2660               if(th>=0) emit_sarimm(sh,imm[i],th);
2661             }
2662           }else{
2663             // Shift by zero
2664             if(sl!=tl) emit_mov(sl,tl);
2665             if(th>=0&&sh!=th) emit_mov(sh,th);
2666           }
2667         }
2668       }
2669     }
2670   }
2671   if(opcode2[i]==0x3c) // DSLL32
2672   {
2673     if(rt1[i]) {
2674       signed char sl,tl,th;
2675       tl=get_reg(i_regs->regmap,rt1[i]);
2676       th=get_reg(i_regs->regmap,rt1[i]|64);
2677       sl=get_reg(i_regs->regmap,rs1[i]);
2678       if(th>=0||tl>=0){
2679         assert(tl>=0);
2680         assert(th>=0);
2681         assert(sl>=0);
2682         emit_mov(sl,th);
2683         emit_zeroreg(tl);
2684         if(imm[i]>32)
2685         {
2686           emit_shlimm(th,imm[i]&31,th);
2687         }
2688       }
2689     }
2690   }
2691   if(opcode2[i]==0x3e) // DSRL32
2692   {
2693     if(rt1[i]) {
2694       signed char sh,tl,th;
2695       tl=get_reg(i_regs->regmap,rt1[i]);
2696       th=get_reg(i_regs->regmap,rt1[i]|64);
2697       sh=get_reg(i_regs->regmap,rs1[i]|64);
2698       if(tl>=0){
2699         assert(sh>=0);
2700         emit_mov(sh,tl);
2701         if(th>=0) emit_zeroreg(th);
2702         if(imm[i]>32)
2703         {
2704           emit_shrimm(tl,imm[i]&31,tl);
2705         }
2706       }
2707     }
2708   }
2709   if(opcode2[i]==0x3f) // DSRA32
2710   {
2711     if(rt1[i]) {
2712       signed char sh,tl;
2713       tl=get_reg(i_regs->regmap,rt1[i]);
2714       sh=get_reg(i_regs->regmap,rs1[i]|64);
2715       if(tl>=0){
2716         assert(sh>=0);
2717         emit_mov(sh,tl);
2718         if(imm[i]>32)
2719         {
2720           emit_sarimm(tl,imm[i]&31,tl);
2721         }
2722       }
2723     }
2724   }
2725 }
2726
2727 #ifndef shift_assemble
2728 void shift_assemble(int i,struct regstat *i_regs)
2729 {
2730   printf("Need shift_assemble for this architecture.\n");
2731   exit(1);
2732 }
2733 #endif
2734
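// Emit code for aligned loads. The fast path range-checks the address
// and reads rdram directly; out-of-range addresses branch to a stub
// registered with add_stub. If the address is a known constant, the
// check is dropped when it targets RAM, or the access is replaced by an
// inline read stub when it does not.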
2735 void load_assemble(int i,struct regstat *i_regs)
2736 {
2737   int s,th,tl,addr,map=-1;
2738   int offset;
2739   int jaddr=0;
2740   int memtarget=0,c=0;
2741   u_int hr,reglist=0;
2742   th=get_reg(i_regs->regmap,rt1[i]|64);
2743   tl=get_reg(i_regs->regmap,rt1[i]);
2744   s=get_reg(i_regs->regmap,rs1[i]);
2745   offset=imm[i];
2746   for(hr=0;hr<HOST_REGS;hr++) {
2747     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2748   }
2749   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2750   if(s>=0) {
2751     c=(i_regs->wasconst>>s)&1;
2752     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2753     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2754   }
2755   //printf("load_assemble: c=%d\n",c);
2756   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2757   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2758 #ifdef PCSX
2759   if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2760     ||rt1[i]==0) {
2761       // could be FIFO, must perform the read
2762       // ||dummy read
2763       assem_debug("(forced read)\n");
2764       tl=get_reg(i_regs->regmap,-1);
2765       assert(tl>=0);
2766   }
2767 #endif
2768   if(offset||s<0||c) addr=tl;
2769   else addr=s;
2770   if(tl>=0) {
2771     //assert(tl>=0);
2772     //assert(rt1[i]);
2773     reglist&=~(1<<tl);
2774     if(th>=0) reglist&=~(1<<th);
2775     if(!using_tlb) {
2776       if(!c) {
2777 //#define R29_HACK 1
2778         #ifdef R29_HACK
2779         // Strmnnrmn's speed hack
2780         if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2781         #endif
2782         {
2783           emit_cmpimm(addr,RAM_SIZE);
2784           jaddr=(int)out;
2785           #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2786           // Hint to branch predictor that the branch is unlikely to be taken
2787           if(rs1[i]>=28)
2788             emit_jno_unlikely(0);
2789           else
2790           #endif
2791           emit_jno(0);
2792         }
2793       }
2794     }else{ // using tlb
2795       int x=0;
2796       if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2797       if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2798       map=get_reg(i_regs->regmap,TLREG);
2799       assert(map>=0);
2800       map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2801       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2802     }
2803     if (opcode[i]==0x20) { // LB
2804       if(!c||memtarget) {
2805         #ifdef HOST_IMM_ADDR32
2806         if(c)
2807           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2808         else
2809         #endif
2810         {
2811           //emit_xorimm(addr,3,tl);
2812           //gen_tlb_addr_r(tl,map);
2813           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2814           int x=0;
2815 #ifdef BIG_ENDIAN_MIPS
2816           if(!c) emit_xorimm(addr,3,tl);
2817           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2818 #else
2819           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2820           else if (tl!=addr) emit_mov(addr,tl);
2821 #endif
2822           emit_movsbl_indexed_tlb(x,tl,map,tl);
2823         }
2824         if(jaddr)
2825           add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2826       }
2827       else
2828         inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2829     }
2830     if (opcode[i]==0x21) { // LH
2831       if(!c||memtarget) {
2832         #ifdef HOST_IMM_ADDR32
2833         if(c)
2834           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2835         else
2836         #endif
2837         {
2838           int x=0;
2839 #ifdef BIG_ENDIAN_MIPS
2840           if(!c) emit_xorimm(addr,2,tl);
2841           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2842 #else
2843           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2844           else if (tl!=addr) emit_mov(addr,tl);
2845 #endif
2846           //#ifdef
2847           //emit_movswl_indexed_tlb(x,tl,map,tl);
2848           //else
2849           if(map>=0) {
2850             gen_tlb_addr_r(tl,map);
2851             emit_movswl_indexed(x,tl,tl);
2852           }else
2853             emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
2854         }
2855         if(jaddr)
2856           add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2857       }
2858       else
2859         inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2860     }
2861     if (opcode[i]==0x23) { // LW
2862       if(!c||memtarget) {
2863         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2864         #ifdef HOST_IMM_ADDR32
2865         if(c)
2866           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2867         else
2868         #endif
2869         emit_readword_indexed_tlb(0,addr,map,tl);
2870         if(jaddr)
2871           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2872       }
2873       else
2874         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2875     }
2876     if (opcode[i]==0x24) { // LBU
2877       if(!c||memtarget) {
2878         #ifdef HOST_IMM_ADDR32
2879         if(c)
2880           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2881         else
2882         #endif
2883         {
2884           //emit_xorimm(addr,3,tl);
2885           //gen_tlb_addr_r(tl,map);
2886           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2887           int x=0;
2888 #ifdef BIG_ENDIAN_MIPS
2889           if(!c) emit_xorimm(addr,3,tl);
2890           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2891 #else
2892           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2893           else if (tl!=addr) emit_mov(addr,tl);
2894 #endif
2895           emit_movzbl_indexed_tlb(x,tl,map,tl);
2896         }
2897         if(jaddr)
2898           add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2899       }
2900       else
2901         inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2902     }
2903     if (opcode[i]==0x25) { // LHU
2904       if(!c||memtarget) {
2905         #ifdef HOST_IMM_ADDR32
2906         if(c)
2907           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2908         else
2909         #endif
2910         {
2911           int x=0;
2912 #ifdef BIG_ENDIAN_MIPS
2913           if(!c) emit_xorimm(addr,2,tl);
2914           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2915 #else
2916           if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2917           else if (tl!=addr) emit_mov(addr,tl);
2918 #endif
2919           //#ifdef
2920           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2921           //#else
2922           if(map>=0) {
2923             gen_tlb_addr_r(tl,map);
2924             emit_movzwl_indexed(x,tl,tl);
2925           }else
2926             emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
2927           if(jaddr)
2928             add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2929         }
2930       }
2931       else
2932         inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2933     }
2934     if (opcode[i]==0x27) { // LWU
2935       assert(th>=0);
2936       if(!c||memtarget) {
2937         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2938         #ifdef HOST_IMM_ADDR32
2939         if(c)
2940           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2941         else
2942         #endif
2943         emit_readword_indexed_tlb(0,addr,map,tl);
2944         if(jaddr)
2945           add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2946       }
2947       else {
2948         inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2949       }
2950       emit_zeroreg(th);
2951     }
2952     if (opcode[i]==0x37) { // LD
2953       if(!c||memtarget) {
2954         //gen_tlb_addr_r(tl,map);
2955         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2956         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2957         #ifdef HOST_IMM_ADDR32
2958         if(c)
2959           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2960         else
2961         #endif
2962         emit_readdword_indexed_tlb(0,addr,map,th,tl);
2963         if(jaddr)
2964           add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2965       }
2966       else
2967         inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2968     }
2969     //emit_storereg(rt1[i],tl); // DEBUG
2970   }
2971   //if(opcode[i]==0x23)
2972   //if(opcode[i]==0x24)
2973   //if(opcode[i]==0x23||opcode[i]==0x24)
2974   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2975   {
2976     //emit_pusha();
2977     save_regs(0x100f);
2978         emit_readword((int)&last_count,ECX);
2979         #ifdef __i386__
2980         if(get_reg(i_regs->regmap,CCREG)<0)
2981           emit_loadreg(CCREG,HOST_CCREG);
2982         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2983         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2984         emit_writeword(HOST_CCREG,(int)&Count);
2985         #endif
2986         #ifdef __arm__
2987         if(get_reg(i_regs->regmap,CCREG)<0)
2988           emit_loadreg(CCREG,0);
2989         else
2990           emit_mov(HOST_CCREG,0);
2991         emit_add(0,ECX,0);
2992         emit_addimm(0,2*ccadj[i],0);
2993         emit_writeword(0,(int)&Count);
2994         #endif
2995     emit_call((int)memdebug);
2996     //emit_popa();
2997     restore_regs(0x100f);
2998   }/**/
2999 }
3000
3001 #ifndef loadlr_assemble
3002 void loadlr_assemble(int i,struct regstat *i_regs)
3003 {
3004   printf("Need loadlr_assemble for this architecture.\n");
3005   exit(1);
3006 }
3007 #endif
3008
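// Emit code for aligned stores, mirroring load_assemble: range-check the
// address, write through to rdram on the fast path, fall back to a write
// stub otherwise. After a RAM write, invalid_code is consulted so that
// any previously compiled block covering the written address can be
// invalidated (INVCODE_STUB).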
3009 void store_assemble(int i,struct regstat *i_regs)
3010 {
3011   int s,th,tl,map=-1;
3012   int addr,temp;
3013   int offset;
3014   int jaddr=0,jaddr2,type;
3015   int memtarget=0,c=0;
3016   int agr=AGEN1+(i&1);
3017   u_int hr,reglist=0;
3018   th=get_reg(i_regs->regmap,rs2[i]|64);
3019   tl=get_reg(i_regs->regmap,rs2[i]);
3020   s=get_reg(i_regs->regmap,rs1[i]);
3021   temp=get_reg(i_regs->regmap,agr);
3022   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3023   offset=imm[i];
3024   if(s>=0) {
3025     c=(i_regs->wasconst>>s)&1;
3026     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3027     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3028   }
3029   assert(tl>=0);
3030   assert(temp>=0);
3031   for(hr=0;hr<HOST_REGS;hr++) {
3032     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3033   }
3034   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3035   if(offset||s<0||c) addr=temp;
3036   else addr=s;
3037   if(!using_tlb) {
3038     if(!c) {
3039       #ifdef R29_HACK
3040       // Strmnnrmn's speed hack
3041       memtarget=1;
3042       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3043       #endif
3044       emit_cmpimm(addr,RAM_SIZE);
3045       #ifdef DESTRUCTIVE_SHIFT
3046       if(s==addr) emit_mov(s,temp);
3047       #endif
3048       #ifdef R29_HACK
3049       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3050       #endif
3051       {
3052         jaddr=(int)out;
3053         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3054         // Hint to branch predictor that the branch is unlikely to be taken
3055         if(rs1[i]>=28)
3056           emit_jno_unlikely(0);
3057         else
3058         #endif
3059         emit_jno(0);
3060       }
3061     }
3062   }else{ // using tlb
3063     int x=0;
3064     if (opcode[i]==0x28) x=3; // SB
3065     if (opcode[i]==0x29) x=2; // SH
3066     map=get_reg(i_regs->regmap,TLREG);
3067     assert(map>=0);
3068     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3069     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3070   }
3071
3072   if (opcode[i]==0x28) { // SB
3073     if(!c||memtarget) {
3074       int x=0;
3075 #ifdef BIG_ENDIAN_MIPS
3076       if(!c) emit_xorimm(addr,3,temp);
3077       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3078 #else
3079       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3080       else if (addr!=temp) emit_mov(addr,temp);
3081 #endif
3082       //gen_tlb_addr_w(temp,map);
3083       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3084       emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3085     }
3086     type=STOREB_STUB;
3087   }
3088   if (opcode[i]==0x29) { // SH
3089     if(!c||memtarget) {
3090       int x=0;
3091 #ifdef BIG_ENDIAN_MIPS
3092       if(!c) emit_xorimm(addr,2,temp);
3093       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3094 #else
3095       if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3096       else if (addr!=temp) emit_mov(addr,temp);
3097 #endif
3098       //#ifdef
3099       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3100       //#else
3101       if(map>=0) {
3102         gen_tlb_addr_w(temp,map);
3103         emit_writehword_indexed(tl,x,temp);
3104       }else
3105         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3106     }
3107     type=STOREH_STUB;
3108   }
3109   if (opcode[i]==0x2B) { // SW
3110     if(!c||memtarget)
3111       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3112       emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3113     type=STOREW_STUB;
3114   }
3115   if (opcode[i]==0x3F) { // SD
3116     if(!c||memtarget) {
3117       if(rs2[i]) {
3118         assert(th>=0);
3119         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3120         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3121         emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3122       }else{
3123         // Store zero
3124         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3125         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3126         emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3127       }
3128     }
3129     type=STORED_STUB;
3130   }
3131   if(!using_tlb&&(!c||memtarget))
3132     // addr could be a temp, make sure it survives STORE*_STUB
3133     reglist|=1<<addr;
3134   if(jaddr) {
3135     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3136   } else if(!memtarget) {
3137     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3138   }
3139   if(!using_tlb) {
3140     if(!c||memtarget) {
3141       #ifdef DESTRUCTIVE_SHIFT
3142       // The x86 shift operation is 'destructive'; it overwrites the
3143       // source register, so we need to make a copy first and use that.
3144       addr=temp;
3145       #endif
3146       #if defined(HOST_IMM8)
3147       int ir=get_reg(i_regs->regmap,INVCP);
3148       assert(ir>=0);
3149       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3150       #else
3151       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3152       #endif
3153       jaddr2=(int)out;
3154       emit_jne(0);
3155       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3156     }
3157   }
3158   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3159   //if(opcode[i]==0x2B || opcode[i]==0x28)
3160   //if(opcode[i]==0x2B || opcode[i]==0x29)
3161   //if(opcode[i]==0x2B)
3162   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3163   {
3164     //emit_pusha();
3165     save_regs(0x100f);
3166         emit_readword((int)&last_count,ECX);
3167         #ifdef __i386__
3168         if(get_reg(i_regs->regmap,CCREG)<0)
3169           emit_loadreg(CCREG,HOST_CCREG);
3170         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3171         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3172         emit_writeword(HOST_CCREG,(int)&Count);
3173         #endif
3174         #ifdef __arm__
3175         if(get_reg(i_regs->regmap,CCREG)<0)
3176           emit_loadreg(CCREG,0);
3177         else
3178           emit_mov(HOST_CCREG,0);
3179         emit_add(0,ECX,0);
3180         emit_addimm(0,2*ccadj[i],0);
3181         emit_writeword(0,(int)&Count);
3182         #endif
3183     emit_call((int)memdebug);
3184     //emit_popa();
3185     restore_regs(0x100f);
3186   }/**/
3187 }
3188
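// Emit code for the unaligned stores SWL/SWR/SDL/SDR. The low two
// address bits select one of four cases, each writing the appropriate
// byte/halfword/word combination; SDL/SDR additionally write the
// remaining word of the doubleword through a temporary (FTEMP).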
3189 void storelr_assemble(int i,struct regstat *i_regs)
3190 {
3191   int s,th,tl;
3192   int temp;
3193   int temp2;
3194   int offset;
3195   int jaddr=0,jaddr2;
3196   int case1,case2,case3;
3197   int done0,done1,done2;
3198   int memtarget=0,c=0;
3199   int agr=AGEN1+(i&1);
3200   u_int hr,reglist=0;
3201   th=get_reg(i_regs->regmap,rs2[i]|64);
3202   tl=get_reg(i_regs->regmap,rs2[i]);
3203   s=get_reg(i_regs->regmap,rs1[i]);
3204   temp=get_reg(i_regs->regmap,agr);
3205   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3206   offset=imm[i];
3207   if(s>=0) {
3208     c=(i_regs->isconst>>s)&1;
3209     memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3210     if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3211   }
3212   assert(tl>=0);
3213   for(hr=0;hr<HOST_REGS;hr++) {
3214     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3215   }
3216   if(tl>=0) {
3217     assert(temp>=0);
3218     if(!using_tlb) {
3219       if(!c) {
3220         emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3221         if(!offset&&s!=temp) emit_mov(s,temp);
3222         jaddr=(int)out;
3223         emit_jno(0);
3224       }
3225       else
3226       {
3227         if(!memtarget||!rs1[i]) {
3228           jaddr=(int)out;
3229           emit_jmp(0);
3230         }
3231       }
3232       if((u_int)rdram!=0x80000000) 
3233         emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3234     }else{ // using tlb
3235       int map=get_reg(i_regs->regmap,TLREG);
3236       assert(map>=0);
3237       map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3238       if(!c&&!offset&&s>=0) emit_mov(s,temp);
3239       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3240       if(!jaddr&&!memtarget) {
3241         jaddr=(int)out;
3242         emit_jmp(0);
3243       }
3244       gen_tlb_addr_w(temp,map);
3245     }
3246
3247     if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3248       temp2=get_reg(i_regs->regmap,FTEMP);
3249       if(!rs2[i]) temp2=th=tl;
3250     }
3251
3252 #ifndef BIG_ENDIAN_MIPS
3253     emit_xorimm(temp,3,temp);
3254 #endif
3255     emit_testimm(temp,2);
3256     case2=(int)out;
3257     emit_jne(0);
3258     emit_testimm(temp,1);
3259     case1=(int)out;
3260     emit_jne(0);
3261     // 0
3262     if (opcode[i]==0x2A) { // SWL
3263       emit_writeword_indexed(tl,0,temp);
3264     }
3265     if (opcode[i]==0x2E) { // SWR
3266       emit_writebyte_indexed(tl,3,temp);
3267     }
3268     if (opcode[i]==0x2C) { // SDL
3269       emit_writeword_indexed(th,0,temp);
3270       if(rs2[i]) emit_mov(tl,temp2);
3271     }
3272     if (opcode[i]==0x2D) { // SDR
3273       emit_writebyte_indexed(tl,3,temp);
3274       if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3275     }
3276     done0=(int)out;
3277     emit_jmp(0);
3278     // 1
3279     set_jump_target(case1,(int)out);
3280     if (opcode[i]==0x2A) { // SWL
3281       // Write 3 msb into three least significant bytes
3282       if(rs2[i]) emit_rorimm(tl,8,tl);
3283       emit_writehword_indexed(tl,-1,temp);
3284       if(rs2[i]) emit_rorimm(tl,16,tl);
3285       emit_writebyte_indexed(tl,1,temp);
3286       if(rs2[i]) emit_rorimm(tl,8,tl);
3287     }
3288     if (opcode[i]==0x2E) { // SWR
3289       // Write two lsb into two most significant bytes
3290       emit_writehword_indexed(tl,1,temp);
3291     }
3292     if (opcode[i]==0x2C) { // SDL
3293       if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3294       // Write 3 msb into three least significant bytes
3295       if(rs2[i]) emit_rorimm(th,8,th);
3296       emit_writehword_indexed(th,-1,temp);
3297       if(rs2[i]) emit_rorimm(th,16,th);
3298       emit_writebyte_indexed(th,1,temp);
3299       if(rs2[i]) emit_rorimm(th,8,th);
3300     }
3301     if (opcode[i]==0x2D) { // SDR
3302       if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3303       // Write two lsb into two most significant bytes
3304       emit_writehword_indexed(tl,1,temp);
3305     }
3306     done1=(int)out;
3307     emit_jmp(0);
3308     // 2
3309     set_jump_target(case2,(int)out);
3310     emit_testimm(temp,1);
3311     case3=(int)out;
3312     emit_jne(0);
3313     if (opcode[i]==0x2A) { // SWL
3314       // Write two msb into two least significant bytes
3315       if(rs2[i]) emit_rorimm(tl,16,tl);
3316       emit_writehword_indexed(tl,-2,temp);
3317       if(rs2[i]) emit_rorimm(tl,16,tl);
3318     }
3319     if (opcode[i]==0x2E) { // SWR
3320       // Write 3 lsb into three most significant bytes
3321       emit_writebyte_indexed(tl,-1,temp);
3322       if(rs2[i]) emit_rorimm(tl,8,tl);
3323       emit_writehword_indexed(tl,0,temp);
3324       if(rs2[i]) emit_rorimm(tl,24,tl);
3325     }
3326     if (opcode[i]==0x2C) { // SDL
3327       if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3328       // Write two msb into two least significant bytes
3329       if(rs2[i]) emit_rorimm(th,16,th);
3330       emit_writehword_indexed(th,-2,temp);
3331       if(rs2[i]) emit_rorimm(th,16,th);
3332     }
3333     if (opcode[i]==0x2D) { // SDR
3334       if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3335       // Write 3 lsb into three most significant bytes
3336       emit_writebyte_indexed(tl,-1,temp);
3337       if(rs2[i]) emit_rorimm(tl,8,tl);
3338       emit_writehword_indexed(tl,0,temp);
3339       if(rs2[i]) emit_rorimm(tl,24,tl);
3340     }
3341     done2=(int)out;
3342     emit_jmp(0);
3343     // 3
3344     set_jump_target(case3,(int)out);
3345     if (opcode[i]==0x2A) { // SWL
3346       // Write msb into least significant byte
3347       if(rs2[i]) emit_rorimm(tl,24,tl);
3348       emit_writebyte_indexed(tl,-3,temp);
3349       if(rs2[i]) emit_rorimm(tl,8,tl);
3350     }
3351     if (opcode[i]==0x2E) { // SWR
3352       // Write entire word
3353       emit_writeword_indexed(tl,-3,temp);
3354     }
3355     if (opcode[i]==0x2C) { // SDL
3356       if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3357       // Write msb into least significant byte
3358       if(rs2[i]) emit_rorimm(th,24,th);
3359       emit_writebyte_indexed(th,-3,temp);
3360       if(rs2[i]) emit_rorimm(th,8,th);
3361     }
3362     if (opcode[i]==0x2D) { // SDR
3363       if(rs2[i]) emit_mov(th,temp2);
3364       // Write entire word
3365       emit_writeword_indexed(tl,-3,temp);
3366     }
3367     set_jump_target(done0,(int)out);
3368     set_jump_target(done1,(int)out);
3369     set_jump_target(done2,(int)out);
3370     if (opcode[i]==0x2C) { // SDL
3371       emit_testimm(temp,4);
3372       done0=(int)out;
3373       emit_jne(0);
3374       emit_andimm(temp,~3,temp);
3375       emit_writeword_indexed(temp2,4,temp);
3376       set_jump_target(done0,(int)out);
3377     }
3378     if (opcode[i]==0x2D) { // SDR
3379       emit_testimm(temp,4);
3380       done0=(int)out;
3381       emit_jeq(0);
3382       emit_andimm(temp,~3,temp);
3383       emit_writeword_indexed(temp2,-4,temp);
3384       set_jump_target(done0,(int)out);
3385     }
3386     if(!c||!memtarget)
3387       add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3388   }
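  // Self-modifying code check: convert the host pointer back to a guest
  // address and test invalid_code[] for the target page; if the page may
  // still contain compiled code, branch to an INVCODE_STUB to invalidate it.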
3389   if(!using_tlb) {
3390     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3391     #if defined(HOST_IMM8)
3392     int ir=get_reg(i_regs->regmap,INVCP);
3393     assert(ir>=0);
3394     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3395     #else
3396     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3397     #endif
3398     jaddr2=(int)out;
3399     emit_jne(0);
3400     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3401   }
3402   /*
3403     emit_pusha();
3404     //save_regs(0x100f);
3405         emit_readword((int)&last_count,ECX);
3406         if(get_reg(i_regs->regmap,CCREG)<0)
3407           emit_loadreg(CCREG,HOST_CCREG);
3408         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3409         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3410         emit_writeword(HOST_CCREG,(int)&Count);
3411     emit_call((int)memdebug);
3412     emit_popa();
3413     //restore_regs(0x100f);
3414   /**/
3415 }
3416
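// FPU (COP1) loads/stores: LWC1/LDC1/SWC1/SDC1.  The FPR's address is
// fetched from reg_cop1_simple[]/reg_cop1_double[], the memory access goes
// through FTEMP, and out-of-RAM addresses fall back to LOAD*/STORE* stubs.
// When DISABLE_COP1 is defined (the PS1 CPU has no FPU) this reduces to
// cop1_unusable().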
3417 void c1ls_assemble(int i,struct regstat *i_regs)
3418 {
3419 #ifndef DISABLE_COP1
3420   int s,th,tl;
3421   int temp,ar;
3422   int map=-1;
3423   int offset;
3424   int c=0;
3425   int jaddr,jaddr2=0,jaddr3,type;
3426   int agr=AGEN1+(i&1);
3427   u_int hr,reglist=0;
3428   th=get_reg(i_regs->regmap,FTEMP|64);
3429   tl=get_reg(i_regs->regmap,FTEMP);
3430   s=get_reg(i_regs->regmap,rs1[i]);
3431   temp=get_reg(i_regs->regmap,agr);
3432   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3433   offset=imm[i];
3434   assert(tl>=0);
3435   assert(rs1[i]>0);
3436   assert(temp>=0);
3437   for(hr=0;hr<HOST_REGS;hr++) {
3438     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3439   }
3440   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3441   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3442   {
3443     // Loads use a temporary register which we need to save
3444     reglist|=1<<temp;
3445   }
3446   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3447     ar=temp;
3448   else // LWC1/LDC1
3449     ar=tl;
3450   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3451   //else c=(i_regs->wasconst>>s)&1;
3452   if(s>=0) c=(i_regs->wasconst>>s)&1;
3453   // Check cop1 unusable
3454   if(!cop1_usable) {
3455     signed char rs=get_reg(i_regs->regmap,CSREG);
3456     assert(rs>=0);
3457     emit_testimm(rs,0x20000000);
3458     jaddr=(int)out;
3459     emit_jeq(0);
3460     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3461     cop1_usable=1;
3462   }
3463   if (opcode[i]==0x39) { // SWC1 (get float address)
3464     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3465   }
3466   if (opcode[i]==0x3D) { // SDC1 (get double address)
3467     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3468   }
3469   // Generate address + offset
3470   if(!using_tlb) {
3471     if(!c)
3472       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3473   }
3474   else
3475   {
3476     map=get_reg(i_regs->regmap,TLREG);
3477     assert(map>=0);
3478     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3479       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3480     }
3481     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3482       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3483     }
3484   }
3485   if (opcode[i]==0x39) { // SWC1 (read float)
3486     emit_readword_indexed(0,tl,tl);
3487   }
3488   if (opcode[i]==0x3D) { // SDC1 (read double)
3489     emit_readword_indexed(4,tl,th);
3490     emit_readword_indexed(0,tl,tl);
3491   }
3492   if (opcode[i]==0x31) { // LWC1 (get target address)
3493     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3494   }
3495   if (opcode[i]==0x35) { // LDC1 (get target address)
3496     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3497   }
3498   if(!using_tlb) {
3499     if(!c) {
3500       jaddr2=(int)out;
3501       emit_jno(0);
3502     }
3503     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3504       jaddr2=(int)out;
3505       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3506     }
3507     #ifdef DESTRUCTIVE_SHIFT
3508     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3509       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3510     }
3511     #endif
3512   }else{
3513     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3514       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3515     }
3516     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3517       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3518     }
3519   }
3520   if (opcode[i]==0x31) { // LWC1
3521     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3522     //gen_tlb_addr_r(ar,map);
3523     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3524     #ifdef HOST_IMM_ADDR32
3525     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3526     else
3527     #endif
3528     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3529     type=LOADW_STUB;
3530   }
3531   if (opcode[i]==0x35) { // LDC1
3532     assert(th>=0);
3533     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3534     //gen_tlb_addr_r(ar,map);
3535     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3536     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3537     #ifdef HOST_IMM_ADDR32
3538     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3539     else
3540     #endif
3541     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3542     type=LOADD_STUB;
3543   }
3544   if (opcode[i]==0x39) { // SWC1
3545     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3546     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3547     type=STOREW_STUB;
3548   }
3549   if (opcode[i]==0x3D) { // SDC1
3550     assert(th>=0);
3551     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3552     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3553     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3554     type=STORED_STUB;
3555   }
3556   if(!using_tlb) {
3557     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3558       #ifndef DESTRUCTIVE_SHIFT
3559       temp=offset||c||s<0?ar:s;
3560       #endif
3561       #if defined(HOST_IMM8)
3562       int ir=get_reg(i_regs->regmap,INVCP);
3563       assert(ir>=0);
3564       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3565       #else
3566       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3567       #endif
3568       jaddr3=(int)out;
3569       emit_jne(0);
3570       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3571     }
3572   }
3573   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3574   if (opcode[i]==0x31) { // LWC1 (write float)
3575     emit_writeword_indexed(tl,0,temp);
3576   }
3577   if (opcode[i]==0x35) { // LDC1 (write double)
3578     emit_writeword_indexed(th,4,temp);
3579     emit_writeword_indexed(tl,0,temp);
3580   }
3581   //if(opcode[i]==0x39)
3582   /*if(opcode[i]==0x39||opcode[i]==0x31)
3583   {
3584     emit_pusha();
3585         emit_readword((int)&last_count,ECX);
3586         if(get_reg(i_regs->regmap,CCREG)<0)
3587           emit_loadreg(CCREG,HOST_CCREG);
3588         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3589         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3590         emit_writeword(HOST_CCREG,(int)&Count);
3591     emit_call((int)memdebug);
3592     emit_popa();
3593   }/**/
3594 #else
3595   cop1_unusable(i, i_regs);
3596 #endif
3597 }
3598
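// GTE (COP2) loads/stores: LWC2 (0x32) and SWC2 (0x3a).  The GTE data
// register is moved through FTEMP via cop2_get_dreg/cop2_put_dreg; only
// direct RAM accesses are emitted inline, everything else goes to a stub.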
3599 void c2ls_assemble(int i,struct regstat *i_regs)
3600 {
3601   int s,tl;
3602   int ar;
3603   int offset;
3604   int memtarget=0,c=0;
3605   int jaddr,jaddr2=0,jaddr3,type;
3606   int agr=AGEN1+(i&1);
3607   u_int hr,reglist=0;
3608   u_int copr=(source[i]>>16)&0x1f;
3609   s=get_reg(i_regs->regmap,rs1[i]);
3610   tl=get_reg(i_regs->regmap,FTEMP);
3611   offset=imm[i];
3612   assert(rs1[i]>0);
3613   assert(tl>=0);
3614   assert(!using_tlb);
3615
3616   for(hr=0;hr<HOST_REGS;hr++) {
3617     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3618   }
3619   if(i_regs->regmap[HOST_CCREG]==CCREG)
3620     reglist&=~(1<<HOST_CCREG);
3621
3622   // get the address
3623   if (opcode[i]==0x3a) { // SWC2
3624     ar=get_reg(i_regs->regmap,agr);
3625     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3626     reglist|=1<<ar;
3627   } else { // LWC2
3628     ar=tl;
3629   }
3630   if(s>=0) c=(i_regs->wasconst>>s)&1;
3631   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3632   if (!offset&&!c&&s>=0) ar=s;
3633   assert(ar>=0);
3634
3635   if (opcode[i]==0x3a) { // SWC2
3636     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3637     type=STOREW_STUB;
3638   }
3639   else
3640     type=LOADW_STUB;
3641
3642   if(c&&!memtarget) {
3643     jaddr2=(int)out;
3644     emit_jmp(0); // inline_readstub/inline_writestub?
3645   }
3646   else {
3647     if(!c) {
3648       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3649       jaddr2=(int)out;
3650       emit_jno(0);
3651     }
3652     if (opcode[i]==0x32) { // LWC2
3653       #ifdef HOST_IMM_ADDR32
3654       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3655       else
3656       #endif
3657       emit_readword_indexed(0,ar,tl);
3658     }
3659     if (opcode[i]==0x3a) { // SWC2
3660       #ifdef DESTRUCTIVE_SHIFT
3661       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3662       #endif
3663       emit_writeword_indexed(tl,0,ar);
3664     }
3665   }
3666   if(jaddr2)
3667     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3668   if (opcode[i]==0x3a) { // SWC2
3669 #if defined(HOST_IMM8)
3670     int ir=get_reg(i_regs->regmap,INVCP);
3671     assert(ir>=0);
3672     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3673 #else
3674     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3675 #endif
3676     jaddr3=(int)out;
3677     emit_jne(0);
3678     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3679   }
3680   if (opcode[i]==0x32) { // LWC2
3681     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3682   }
3683 }
3684
3685 #ifndef multdiv_assemble
3686 void multdiv_assemble(int i,struct regstat *i_regs)
3687 {
3688   printf("Need multdiv_assemble for this architecture.\n");
3689   exit(1);
3690 }
3691 #endif
3692
3693 void mov_assemble(int i,struct regstat *i_regs)
3694 {
3695   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3696   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3697   //assert(rt1[i]>0);
3698   if(rt1[i]) {
3699     signed char sh,sl,th,tl;
3700     th=get_reg(i_regs->regmap,rt1[i]|64);
3701     tl=get_reg(i_regs->regmap,rt1[i]);
3702     //assert(tl>=0);
3703     if(tl>=0) {
3704       sh=get_reg(i_regs->regmap,rs1[i]|64);
3705       sl=get_reg(i_regs->regmap,rs1[i]);
3706       if(sl>=0) emit_mov(sl,tl);
3707       else emit_loadreg(rs1[i],tl);
3708       if(th>=0) {
3709         if(sh>=0) emit_mov(sh,th);
3710         else emit_loadreg(rs1[i]|64,th);
3711       }
3712     }
3713   }
3714 }
3715
3716 #ifndef fconv_assemble
3717 void fconv_assemble(int i,struct regstat *i_regs)
3718 {
3719   printf("Need fconv_assemble for this architecture.\n");
3720   exit(1);
3721 }
3722 #endif
3723
3724 #if 0
3725 void float_assemble(int i,struct regstat *i_regs)
3726 {
3727   printf("Need float_assemble for this architecture.\n");
3728   exit(1);
3729 }
3730 #endif
3731
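// SYSCALL/HLECALL/INTCALL all leave compiled code: they load the PC of the
// instruction, add the accumulated cycle count and tail-jump into the
// corresponding handler in the emulator core.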
3732 void syscall_assemble(int i,struct regstat *i_regs)
3733 {
3734   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3735   assert(ccreg==HOST_CCREG);
3736   assert(!is_delayslot);
3737   emit_movimm(start+i*4,EAX); // Get PC
3738   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3739   emit_jmp((int)jump_syscall_hle); // XXX
3740 }
3741
3742 void hlecall_assemble(int i,struct regstat *i_regs)
3743 {
3744   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3745   assert(ccreg==HOST_CCREG);
3746   assert(!is_delayslot);
3747   emit_movimm(start+i*4+4,0); // Get PC
3748   emit_movimm((int)psxHLEt[source[i]&7],1);
3749   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3750   emit_jmp((int)jump_hlecall);
3751 }
3752
3753 void intcall_assemble(int i,struct regstat *i_regs)
3754 {
3755   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3756   assert(ccreg==HOST_CCREG);
3757   assert(!is_delayslot);
3758   emit_movimm(start+i*4,0); // Get PC
3759   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3760   emit_jmp((int)jump_intcall);
3761 }
3762
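// Assemble the instruction sitting in a branch delay slot.  is_delayslot is
// set around the call so that any stubs generated for the slot instruction
// take the delay-slot exception/return path; an actual branch is not legal
// here and only produces a warning.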
3763 void ds_assemble(int i,struct regstat *i_regs)
3764 {
3765   is_delayslot=1;
3766   switch(itype[i]) {
3767     case ALU:
3768       alu_assemble(i,i_regs);break;
3769     case IMM16:
3770       imm16_assemble(i,i_regs);break;
3771     case SHIFT:
3772       shift_assemble(i,i_regs);break;
3773     case SHIFTIMM:
3774       shiftimm_assemble(i,i_regs);break;
3775     case LOAD:
3776       load_assemble(i,i_regs);break;
3777     case LOADLR:
3778       loadlr_assemble(i,i_regs);break;
3779     case STORE:
3780       store_assemble(i,i_regs);break;
3781     case STORELR:
3782       storelr_assemble(i,i_regs);break;
3783     case COP0:
3784       cop0_assemble(i,i_regs);break;
3785     case COP1:
3786       cop1_assemble(i,i_regs);break;
3787     case C1LS:
3788       c1ls_assemble(i,i_regs);break;
3789     case COP2:
3790       cop2_assemble(i,i_regs);break;
3791     case C2LS:
3792       c2ls_assemble(i,i_regs);break;
3793     case C2OP:
3794       c2op_assemble(i,i_regs);break;
3795     case FCONV:
3796       fconv_assemble(i,i_regs);break;
3797     case FLOAT:
3798       float_assemble(i,i_regs);break;
3799     case FCOMP:
3800       fcomp_assemble(i,i_regs);break;
3801     case MULTDIV:
3802       multdiv_assemble(i,i_regs);break;
3803     case MOV:
3804       mov_assemble(i,i_regs);break;
3805     case SYSCALL:
3806     case HLECALL:
3807     case INTCALL:
3808     case SPAN:
3809     case UJUMP:
3810     case RJUMP:
3811     case CJUMP:
3812     case SJUMP:
3813     case FJUMP:
3814       printf("Jump in the delay slot.  This is probably a bug.\n");
3815   }
3816   is_delayslot=0;
3817 }
3818
3819 // Is the branch target a valid internal jump?
3820 int internal_branch(uint64_t i_is32,int addr)
3821 {
3822   if(addr&1) return 0; // Indirect (register) jump
3823   if(addr>=start && addr<start+slen*4-4)
3824   {
3825     int t=(addr-start)>>2;
3826     // Delay slots are not valid branch targets
3827     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3828     // 64 -> 32 bit transition requires a recompile
3829     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3830     {
3831       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3832       else printf("optimizable: yes\n");
3833     }*/
3834     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3835     if(requires_32bit[t]&~i_is32) return 0;
3836     else return 1;
3837   }
3838   return 0;
3839 }
3840
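// Write back registers that are about to lose their mapping: any dirty
// value in pre[] that has no slot in entry[] is stored to the register file
// (sign-extending 32-bit values whose upper half is still live), then
// values that merely moved to a different host register are copied over.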
3841 #ifndef wb_invalidate
3842 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3843   uint64_t u,uint64_t uu)
3844 {
3845   int hr;
3846   for(hr=0;hr<HOST_REGS;hr++) {
3847     if(hr!=EXCLUDE_REG) {
3848       if(pre[hr]!=entry[hr]) {
3849         if(pre[hr]>=0) {
3850           if((dirty>>hr)&1) {
3851             if(get_reg(entry,pre[hr])<0) {
3852               if(pre[hr]<64) {
3853                 if(!((u>>pre[hr])&1)) {
3854                   emit_storereg(pre[hr],hr);
3855                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3856                     emit_sarimm(hr,31,hr);
3857                     emit_storereg(pre[hr]|64,hr);
3858                   }
3859                 }
3860               }else{
3861                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3862                   emit_storereg(pre[hr],hr);
3863                 }
3864               }
3865             }
3866           }
3867         }
3868       }
3869     }
3870   }
3871   // Move from one register to another (no writeback)
3872   for(hr=0;hr<HOST_REGS;hr++) {
3873     if(hr!=EXCLUDE_REG) {
3874       if(pre[hr]!=entry[hr]) {
3875         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3876           int nr;
3877           if((nr=get_reg(entry,pre[hr]))>=0) {
3878             emit_mov(hr,nr);
3879           }
3880         }
3881       }
3882     }
3883   }
3884 }
3885 #endif
3886
3887 // Load the specified registers
3888 // This only loads the registers given as arguments because
3889 // we don't want to load things that will be overwritten
3890 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3891 {
3892   int hr;
3893   // Load 32-bit regs
3894   for(hr=0;hr<HOST_REGS;hr++) {
3895     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3896       if(entry[hr]!=regmap[hr]) {
3897         if(regmap[hr]==rs1||regmap[hr]==rs2)
3898         {
3899           if(regmap[hr]==0) {
3900             emit_zeroreg(hr);
3901           }
3902           else
3903           {
3904             emit_loadreg(regmap[hr],hr);
3905           }
3906         }
3907       }
3908     }
3909   }
3910   // Load 64-bit regs
3911   for(hr=0;hr<HOST_REGS;hr++) {
3912     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3913       if(entry[hr]!=regmap[hr]) {
3914         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3915         {
3916           assert(regmap[hr]!=64);
3917           if((is32>>(regmap[hr]&63))&1) {
3918             int lr=get_reg(regmap,regmap[hr]-64);
3919             if(lr>=0)
3920               emit_sarimm(lr,31,hr);
3921             else
3922               emit_loadreg(regmap[hr],hr);
3923           }
3924           else
3925           {
3926             emit_loadreg(regmap[hr],hr);
3927           }
3928         }
3929       }
3930     }
3931   }
3932 }
3933
3934 // Load registers prior to the start of a loop
3935 // so that they are not loaded within the loop
3936 static void loop_preload(signed char pre[],signed char entry[])
3937 {
3938   int hr;
3939   for(hr=0;hr<HOST_REGS;hr++) {
3940     if(hr!=EXCLUDE_REG) {
3941       if(pre[hr]!=entry[hr]) {
3942         if(entry[hr]>=0) {
3943           if(get_reg(pre,entry[hr])<0) {
3944             assem_debug("loop preload:\n");
3945             //printf("loop preload: %d\n",hr);
3946             if(entry[hr]==0) {
3947               emit_zeroreg(hr);
3948             }
3949             else if(entry[hr]<TEMPREG)
3950             {
3951               emit_loadreg(entry[hr],hr);
3952             }
3953             else if(entry[hr]-64<TEMPREG)
3954             {
3955               emit_loadreg(entry[hr],hr);
3956             }
3957           }
3958         }
3959       }
3960     }
3961   }
3962 }
3963
3964 // Generate address for load/store instruction
3965 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
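// Constant addresses are materialized with emit_movimm (masked to the
// word/doubleword boundary for LWL/LWR and LDL/LDR); otherwise base+offset
// is added into the address register.  The second half preloads constant
// addresses (and, under TLB, mapper constants) for the following
// instruction when a host register has already been reserved for them.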
3966 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3967 {
3968   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3969     int ra;
3970     int agr=AGEN1+(i&1);
3971     int mgr=MGEN1+(i&1);
3972     if(itype[i]==LOAD) {
3973       ra=get_reg(i_regs->regmap,rt1[i]);
3974       //if(rt1[i]) assert(ra>=0);
3975     }
3976     if(itype[i]==LOADLR) {
3977       ra=get_reg(i_regs->regmap,FTEMP);
3978     }
3979     if(itype[i]==STORE||itype[i]==STORELR) {
3980       ra=get_reg(i_regs->regmap,agr);
3981       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3982     }
3983     if(itype[i]==C1LS||itype[i]==C2LS) {
3984       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3985         ra=get_reg(i_regs->regmap,FTEMP);
3986       else { // SWC1/SDC1/SWC2/SDC2
3987         ra=get_reg(i_regs->regmap,agr);
3988         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3989       }
3990     }
3991     int rs=get_reg(i_regs->regmap,rs1[i]);
3992     int rm=get_reg(i_regs->regmap,TLREG);
3993     if(ra>=0) {
3994       int offset=imm[i];
3995       int c=(i_regs->wasconst>>rs)&1;
3996       if(rs1[i]==0) {
3997         // Using r0 as a base address
3998         /*if(rm>=0) {
3999           if(!entry||entry[rm]!=mgr) {
4000             generate_map_const(offset,rm);
4001           } // else did it in the previous cycle
4002         }*/
4003         if(!entry||entry[ra]!=agr) {
4004           if (opcode[i]==0x22||opcode[i]==0x26) {
4005             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4006           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4007             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4008           }else{
4009             emit_movimm(offset,ra);
4010           }
4011         } // else did it in the previous cycle
4012       }
4013       else if(rs<0) {
4014         if(!entry||entry[ra]!=rs1[i])
4015           emit_loadreg(rs1[i],ra);
4016         //if(!entry||entry[ra]!=rs1[i])
4017         //  printf("poor load scheduling!\n");
4018       }
4019       else if(c) {
4020         if(rm>=0) {
4021           if(!entry||entry[rm]!=mgr) {
4022             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4023               // Stores to memory go thru the mapper to detect self-modifying
4024               // code, loads don't.
4025               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4026                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4027                 generate_map_const(constmap[i][rs]+offset,rm);
4028             }else{
4029               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4030                 generate_map_const(constmap[i][rs]+offset,rm);
4031             }
4032           }
4033         }
4034         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4035           if(!entry||entry[ra]!=agr) {
4036             if (opcode[i]==0x22||opcode[i]==0x26) {
4037               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4038             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4039               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4040             }else{
4041               #ifdef HOST_IMM_ADDR32
4042               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4043                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4044               #endif
4045               emit_movimm(constmap[i][rs]+offset,ra);
4046             }
4047           } // else did it in the previous cycle
4048         } // else load_consts already did it
4049       }
4050       if(offset&&!c&&rs1[i]) {
4051         if(rs>=0) {
4052           emit_addimm(rs,offset,ra);
4053         }else{
4054           emit_addimm(ra,offset,ra);
4055         }
4056       }
4057     }
4058   }
4059   // Preload constants for next instruction
4060   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4061     int agr,ra;
4062     #ifndef HOST_IMM_ADDR32
4063     // Mapper entry
4064     agr=MGEN1+((i+1)&1);
4065     ra=get_reg(i_regs->regmap,agr);
4066     if(ra>=0) {
4067       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4068       int offset=imm[i+1];
4069       int c=(regs[i+1].wasconst>>rs)&1;
4070       if(c) {
4071         if(itype[i+1]==STORE||itype[i+1]==STORELR
4072            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4073           // Stores to memory go thru the mapper to detect self-modifying
4074           // code, loads don't.
4075           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4076              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4077             generate_map_const(constmap[i+1][rs]+offset,ra);
4078         }else{
4079           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4080             generate_map_const(constmap[i+1][rs]+offset,ra);
4081         }
4082       }
4083       /*else if(rs1[i]==0) {
4084         generate_map_const(offset,ra);
4085       }*/
4086     }
4087     #endif
4088     // Actual address
4089     agr=AGEN1+((i+1)&1);
4090     ra=get_reg(i_regs->regmap,agr);
4091     if(ra>=0) {
4092       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4093       int offset=imm[i+1];
4094       int c=(regs[i+1].wasconst>>rs)&1;
4095       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4096         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4097           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4098         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4099           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4100         }else{
4101           #ifdef HOST_IMM_ADDR32
4102           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4103              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4104           #endif
4105           emit_movimm(constmap[i+1][rs]+offset,ra);
4106         }
4107       }
4108       else if(rs1[i+1]==0) {
4109         // Using r0 as a base address
4110         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4111           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4112         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4113           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4114         }else{
4115           emit_movimm(offset,ra);
4116         }
4117       }
4118     }
4119   }
4120 }
4121
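// Follow a constant held in host register hr forward from instruction i and
// return (in *value) the last value it needs to hold, so only the final
// constant is loaded.  Returns 0 when no load is needed, e.g. the register
// is unneeded afterwards or the constant only serves as a load address that
// can be folded into the access.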
4122 int get_final_value(int hr, int i, int *value)
4123 {
4124   int reg=regs[i].regmap[hr];
4125   while(i<slen-1) {
4126     if(regs[i+1].regmap[hr]!=reg) break;
4127     if(!((regs[i+1].isconst>>hr)&1)) break;
4128     if(bt[i+1]) break;
4129     i++;
4130   }
4131   if(i<slen-1) {
4132     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4133       *value=constmap[i][hr];
4134       return 1;
4135     }
4136     if(!bt[i+1]) {
4137       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4138         // Load in delay slot, out-of-order execution
4139         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4140         {
4141           #ifdef HOST_IMM_ADDR32
4142           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4143           #endif
4144           // Precompute load address
4145           *value=constmap[i][hr]+imm[i+2];
4146           return 1;
4147         }
4148       }
4149       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4150       {
4151         #ifdef HOST_IMM_ADDR32
4152         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4153         #endif
4154         // Precompute load address
4155         *value=constmap[i][hr]+imm[i+1];
4156         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4157         return 1;
4158       }
4159     }
4160   }
4161   *value=constmap[i][hr];
4162   //printf("c=%x\n",(int)constmap[i][hr]);
4163   if(i==slen-1) return 1;
4164   if(reg<64) {
4165     return !((unneeded_reg[i+1]>>reg)&1);
4166   }else{
4167     return !((unneeded_reg_upper[i+1]>>reg)&1);
4168   }
4169 }
4170
4171 // Load registers with known constants
4172 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4173 {
4174   int hr;
4175   // Load 32-bit regs
4176   for(hr=0;hr<HOST_REGS;hr++) {
4177     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4178       //if(entry[hr]!=regmap[hr]) {
4179       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4180         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4181           int value;
4182           if(get_final_value(hr,i,&value)) {
4183             if(value==0) {
4184               emit_zeroreg(hr);
4185             }
4186             else {
4187               emit_movimm(value,hr);
4188             }
4189           }
4190         }
4191       }
4192     }
4193   }
4194   // Load 64-bit regs
4195   for(hr=0;hr<HOST_REGS;hr++) {
4196     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4197       //if(entry[hr]!=regmap[hr]) {
4198       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4199         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4200           if((is32>>(regmap[hr]&63))&1) {
4201             int lr=get_reg(regmap,regmap[hr]-64);
4202             assert(lr>=0);
4203             emit_sarimm(lr,31,hr);
4204           }
4205           else
4206           {
4207             int value;
4208             if(get_final_value(hr,i,&value)) {
4209               if(value==0) {
4210                 emit_zeroreg(hr);
4211               }
4212               else {
4213                 emit_movimm(value,hr);
4214               }
4215             }
4216           }
4217         }
4218       }
4219     }
4220   }
4221 }
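// Like load_consts, but materializes the current constant for every dirty
// constant-holding register without looking ahead for a final value.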
4222 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4223 {
4224   int hr;
4225   // Load 32-bit regs
4226   for(hr=0;hr<HOST_REGS;hr++) {
4227     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4228       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4229         int value=constmap[i][hr];
4230         if(value==0) {
4231           emit_zeroreg(hr);
4232         }
4233         else {
4234           emit_movimm(value,hr);
4235         }
4236       }
4237     }
4238   }
4239   // Load 64-bit regs
4240   for(hr=0;hr<HOST_REGS;hr++) {
4241     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4242       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4243         if((is32>>(regmap[hr]&63))&1) {
4244           int lr=get_reg(regmap,regmap[hr]-64);
4245           assert(lr>=0);
4246           emit_sarimm(lr,31,hr);
4247         }
4248         else
4249         {
4250           int value=constmap[i][hr];
4251           if(value==0) {
4252             emit_zeroreg(hr);
4253           }
4254           else {
4255             emit_movimm(value,hr);
4256           }
4257         }
4258       }
4259     }
4260   }
4261 }
4262
4263 // Write out all dirty registers (except cycle count)
4264 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4265 {
4266   int hr;
4267   for(hr=0;hr<HOST_REGS;hr++) {
4268     if(hr!=EXCLUDE_REG) {
4269       if(i_regmap[hr]>0) {
4270         if(i_regmap[hr]!=CCREG) {
4271           if((i_dirty>>hr)&1) {
4272             if(i_regmap[hr]<64) {
4273               emit_storereg(i_regmap[hr],hr);
4274 #ifndef FORCE32
4275               if( ((i_is32>>i_regmap[hr])&1) ) {
4276                 #ifdef DESTRUCTIVE_WRITEBACK
4277                 emit_sarimm(hr,31,hr);
4278                 emit_storereg(i_regmap[hr]|64,hr);
4279                 #else
4280                 emit_sarimm(hr,31,HOST_TEMPREG);
4281                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4282                 #endif
4283               }
4284 #endif
4285             }else{
4286               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4287                 emit_storereg(i_regmap[hr],hr);
4288               }
4289             }
4290           }
4291         }
4292       }
4293     }
4294   }
4295 }
4296 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4297 // This writes the registers not written by store_regs_bt
4298 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4299 {
4300   int hr;
4301   int t=(addr-start)>>2;
4302   for(hr=0;hr<HOST_REGS;hr++) {
4303     if(hr!=EXCLUDE_REG) {
4304       if(i_regmap[hr]>0) {
4305         if(i_regmap[hr]!=CCREG) {
4306           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4307             if((i_dirty>>hr)&1) {
4308               if(i_regmap[hr]<64) {
4309                 emit_storereg(i_regmap[hr],hr);
4310 #ifndef FORCE32
4311                 if( ((i_is32>>i_regmap[hr])&1) ) {
4312                   #ifdef DESTRUCTIVE_WRITEBACK
4313                   emit_sarimm(hr,31,hr);
4314                   emit_storereg(i_regmap[hr]|64,hr);
4315                   #else
4316                   emit_sarimm(hr,31,HOST_TEMPREG);
4317                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4318                   #endif
4319                 }
4320 #endif
4321               }else{
4322                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4323                   emit_storereg(i_regmap[hr],hr);
4324                 }
4325               }
4326             }
4327           }
4328         }
4329       }
4330     }
4331   }
4332 }
4333
4334 // Load all registers (except cycle count)
4335 void load_all_regs(signed char i_regmap[])
4336 {
4337   int hr;
4338   for(hr=0;hr<HOST_REGS;hr++) {
4339     if(hr!=EXCLUDE_REG) {
4340       if(i_regmap[hr]==0) {
4341         emit_zeroreg(hr);
4342       }
4343       else
4344       if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4345       {
4346         emit_loadreg(i_regmap[hr],hr);
4347       }
4348     }
4349   }
4350 }
4351
4352 // Load all current registers also needed by next instruction
4353 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4354 {
4355   int hr;
4356   for(hr=0;hr<HOST_REGS;hr++) {
4357     if(hr!=EXCLUDE_REG) {
4358       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4359         if(i_regmap[hr]==0) {
4360           emit_zeroreg(hr);
4361         }
4362         else
4363         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4364         {
4365           emit_loadreg(i_regmap[hr],hr);
4366         }
4367       }
4368     }
4369   }
4370 }
4371
4372 // Load all regs, storing cycle count if necessary
4373 void load_regs_entry(int t)
4374 {
4375   int hr;
4376   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4377   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4378   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4379     emit_storereg(CCREG,HOST_CCREG);
4380   }
4381   // Load 32-bit regs
4382   for(hr=0;hr<HOST_REGS;hr++) {
4383     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4384       if(regs[t].regmap_entry[hr]==0) {
4385         emit_zeroreg(hr);
4386       }
4387       else if(regs[t].regmap_entry[hr]!=CCREG)
4388       {
4389         emit_loadreg(regs[t].regmap_entry[hr],hr);
4390       }
4391     }
4392   }
4393   // Load 64-bit regs
4394   for(hr=0;hr<HOST_REGS;hr++) {
4395     if(regs[t].regmap_entry[hr]>=64) {
4396       assert(regs[t].regmap_entry[hr]!=64);
4397       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4398         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4399         if(lr<0) {
4400           emit_loadreg(regs[t].regmap_entry[hr],hr);
4401         }
4402         else
4403         {
4404           emit_sarimm(lr,31,hr);
4405         }
4406       }
4407       else
4408       {
4409         emit_loadreg(regs[t].regmap_entry[hr],hr);
4410       }
4411     }
4412   }
4413 }
4414
4415 // Store dirty registers prior to branch
4416 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4417 {
4418   if(internal_branch(i_is32,addr))
4419   {
4420     int t=(addr-start)>>2;
4421     int hr;
4422     for(hr=0;hr<HOST_REGS;hr++) {
4423       if(hr!=EXCLUDE_REG) {
4424         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4425           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4426             if((i_dirty>>hr)&1) {
4427               if(i_regmap[hr]<64) {
4428                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4429                   emit_storereg(i_regmap[hr],hr);
4430                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4431                     #ifdef DESTRUCTIVE_WRITEBACK
4432                     emit_sarimm(hr,31,hr);
4433                     emit_storereg(i_regmap[hr]|64,hr);
4434                     #else
4435                     emit_sarimm(hr,31,HOST_TEMPREG);
4436                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4437                     #endif
4438                   }
4439                 }
4440               }else{
4441                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4442                   emit_storereg(i_regmap[hr],hr);
4443                 }
4444               }
4445             }
4446           }
4447         }
4448       }
4449     }
4450   }
4451   else
4452   {
4453     // Branch out of this block, write out all dirty regs
4454     wb_dirtys(i_regmap,i_is32,i_dirty);
4455   }
4456 }
4457
4458 // Load all needed registers for branch target
4459 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4460 {
4461   //if(addr>=start && addr<(start+slen*4))
4462   if(internal_branch(i_is32,addr))
4463   {
4464     int t=(addr-start)>>2;
4465     int hr;
4466     // Store the cycle count before loading something else
4467     if(i_regmap[HOST_CCREG]!=CCREG) {
4468       assert(i_regmap[HOST_CCREG]==-1);
4469     }
4470     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4471       emit_storereg(CCREG,HOST_CCREG);
4472     }
4473     // Load 32-bit regs
4474     for(hr=0;hr<HOST_REGS;hr++) {
4475       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4476         #ifdef DESTRUCTIVE_WRITEBACK
4477         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4478         #else
4479         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4480         #endif
4481           if(regs[t].regmap_entry[hr]==0) {
4482             emit_zeroreg(hr);
4483           }
4484           else if(regs[t].regmap_entry[hr]!=CCREG)
4485           {
4486             emit_loadreg(regs[t].regmap_entry[hr],hr);
4487           }
4488         }
4489       }
4490     }
4491     // Load 64-bit regs
4492     for(hr=0;hr<HOST_REGS;hr++) {
4493       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4494         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4495           assert(regs[t].regmap_entry[hr]!=64);
4496           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4497             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4498             if(lr<0) {
4499               emit_loadreg(regs[t].regmap_entry[hr],hr);
4500             }
4501             else
4502             {
4503               emit_sarimm(lr,31,hr);
4504             }
4505           }
4506           else
4507           {
4508             emit_loadreg(regs[t].regmap_entry[hr],hr);
4509           }
4510         }
4511         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4512           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4513           assert(lr>=0);
4514           emit_sarimm(lr,31,hr);
4515         }
4516       }
4517     }
4518   }
4519 }
4520
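// Check whether the register state at this point (mapping, dirty bits,
// 32-bit flags) matches the entry state recorded for the branch target, so
// the branch can jump there directly with no writeback or reload fix-up.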
4521 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4522 {
4523   if(addr>=start && addr<start+slen*4-4)
4524   {
4525     int t=(addr-start)>>2;
4526     int hr;
4527     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4528     for(hr=0;hr<HOST_REGS;hr++)
4529     {
4530       if(hr!=EXCLUDE_REG)
4531       {
4532         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4533         {
4534           if(regs[t].regmap_entry[hr]!=-1)
4535           {
4536             return 0;
4537           }
4538           else 
4539           if((i_dirty>>hr)&1)
4540           {
4541             if(i_regmap[hr]<64)
4542             {
4543               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4544                 return 0;
4545             }
4546             else
4547             {
4548               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4549                 return 0;
4550             }
4551           }
4552         }
4553         else // Same register but is it 32-bit or dirty?
4554         if(i_regmap[hr]>=0)
4555         {
4556           if(!((regs[t].dirty>>hr)&1))
4557           {
4558             if((i_dirty>>hr)&1)
4559             {
4560               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4561               {
4562                 //printf("%x: dirty no match\n",addr);
4563                 return 0;
4564               }
4565             }
4566           }
4567           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4568           {
4569             //printf("%x: is32 no match\n",addr);
4570             return 0;
4571           }
4572         }
4573       }
4574     }
4575     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4576     if(requires_32bit[t]&~i_is32) return 0;
4577     // Delay slots are not valid branch targets
4578     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4579     // Delay slots require additional processing, so do not match
4580     if(is_ds[t]) return 0;
4581   }
4582   else
4583   {
4584     int hr;
4585     for(hr=0;hr<HOST_REGS;hr++)
4586     {
4587       if(hr!=EXCLUDE_REG)
4588       {
4589         if(i_regmap[hr]>=0)
4590         {
4591           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4592           {
4593             if((i_dirty>>hr)&1)
4594             {
4595               return 0;
4596             }
4597           }
4598         }
4599       }
4600     }
4601   }
4602   return 1;
4603 }
4604
4605 // Used when a branch jumps into the delay slot of another branch
4606 void ds_assemble_entry(int i)
4607 {
4608   int t=(ba[i]-start)>>2;
4609   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4610   assem_debug("Assemble delay slot at %x\n",ba[i]);
4611   assem_debug("<->\n");
4612   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4613     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4614   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4615   address_generation(t,&regs[t],regs[t].regmap_entry);
4616   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4617     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4618   cop1_usable=0;
4619   is_delayslot=0;
4620   switch(itype[t]) {
4621     case ALU:
4622       alu_assemble(t,&regs[t]);break;
4623     case IMM16:
4624       imm16_assemble(t,&regs[t]);break;
4625     case SHIFT:
4626       shift_assemble(t,&regs[t]);break;
4627     case SHIFTIMM:
4628       shiftimm_assemble(t,&regs[t]);break;
4629     case LOAD:
4630       load_assemble(t,&regs[t]);break;
4631     case LOADLR:
4632       loadlr_assemble(t,&regs[t]);break;
4633     case STORE:
4634       store_assemble(t,&regs[t]);break;
4635     case STORELR:
4636       storelr_assemble(t,&regs[t]);break;
4637     case COP0:
4638       cop0_assemble(t,&regs[t]);break;
4639     case COP1:
4640       cop1_assemble(t,&regs[t]);break;
4641     case C1LS:
4642       c1ls_assemble(t,&regs[t]);break;
4643     case COP2:
4644       cop2_assemble(t,&regs[t]);break;
4645     case C2LS:
4646       c2ls_assemble(t,&regs[t]);break;
4647     case C2OP:
4648       c2op_assemble(t,&regs[t]);break;
4649     case FCONV:
4650       fconv_assemble(t,&regs[t]);break;
4651     case FLOAT:
4652       float_assemble(t,&regs[t]);break;
4653     case FCOMP:
4654       fcomp_assemble(t,&regs[t]);break;
4655     case MULTDIV:
4656       multdiv_assemble(t,&regs[t]);break;
4657     case MOV:
4658       mov_assemble(t,&regs[t]);break;
4659     case SYSCALL:
4660     case HLECALL:
4661     case INTCALL:
4662     case SPAN:
4663     case UJUMP:
4664     case RJUMP:
4665     case CJUMP:
4666     case SJUMP:
4667     case FJUMP:
4668       printf("Jump in the delay slot.  This is probably a bug.\n");
4669   }
4670   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4671   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4672   if(internal_branch(regs[t].is32,ba[i]+4))
4673     assem_debug("branch: internal\n");
4674   else
4675     assem_debug("branch: external\n");
4676   assert(internal_branch(regs[t].is32,ba[i]+4));
4677   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4678   emit_jmp(0);
4679 }
4680
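// Emit the cycle-counter update and expiry test before a branch.  *adj
// receives the cycle adjustment already accounted for at an internal
// target; a CC_STUB is registered to call cc_interrupt when the counter
// runs out.  A branch to itself with a nop in the delay slot is detected
// as an idle loop and handled specially.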
4681 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4682 {
4683   int count;
4684   int jaddr;
4685   int idle=0;
4686   if(itype[i]==RJUMP)
4687   {
4688     *adj=0;
4689   }
4690   //if(ba[i]>=start && ba[i]<(start+slen*4))
4691   if(internal_branch(branch_regs[i].is32,ba[i]))
4692   {
4693     int t=(ba[i]-start)>>2;
4694     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4695     else *adj=ccadj[t];
4696   }
4697   else
4698   {
4699     *adj=0;
4700   }
4701   count=ccadj[i];
4702   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4703     // Idle loop
4704     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4705     idle=(int)out;
4706     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4707     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4708     jaddr=(int)out;
4709     emit_jmp(0);
4710   }
4711   else if(*adj==0||invert) {
4712     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4713     jaddr=(int)out;
4714     emit_jns(0);
4715   }
4716   else
4717   {
4718     emit_cmpimm(HOST_CCREG,-2*(count+2));
4719     jaddr=(int)out;
4720     emit_jns(0);
4721   }
4722   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4723 }
4724
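// Out-of-line cycle-counter stub: write back dirty registers, store the
// return PC (recomputing the branch outcome here if it was not known when
// the stub was created), call cc_interrupt, then reload whatever the
// continuation needs and jump back into the block.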
4725 void do_ccstub(int n)
4726 {
4727   literal_pool(256);
4728   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4729   set_jump_target(stubs[n][1],(int)out);
4730   int i=stubs[n][4];
4731   if(stubs[n][6]==NULLDS) {
4732     // Delay slot instruction is nullified ("likely" branch)
4733     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4734   }
4735   else if(stubs[n][6]!=TAKEN) {
4736     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4737   }
4738   else {
4739     if(internal_branch(branch_regs[i].is32,ba[i]))
4740       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4741   }
4742   if(stubs[n][5]!=-1)
4743   {
4744     // Save PC as return address
4745     emit_movimm(stubs[n][5],EAX);
4746     emit_writeword(EAX,(int)&pcaddr);
4747   }
4748   else
4749   {
4750     // Return address depends on which way the branch goes
4751     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4752     {
4753       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4754       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4755       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4756       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4757       if(rs1[i]==0)
4758       {
4759         s1l=s2l;s1h=s2h;
4760         s2l=s2h=-1;
4761       }
4762       else if(rs2[i]==0)
4763       {
4764         s2l=s2h=-1;
4765       }
4766       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4767         s1h=s2h=-1;
4768       }
4769       assert(s1l>=0);
4770       #ifdef DESTRUCTIVE_WRITEBACK
4771       if(rs1[i]) {
4772         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4773           emit_loadreg(rs1[i],s1l);
4774       } 
4775       else {
4776         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4777           emit_loadreg(rs2[i],s1l);
4778       }
4779       if(s2l>=0)
4780         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4781           emit_loadreg(rs2[i],s2l);
4782       #endif
4783       int hr=0;
4784       int addr,alt,ntaddr;
4785       while(hr<HOST_REGS)
4786       {
4787         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4788            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4789            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4790         {
4791           addr=hr++;break;
4792         }
4793         hr++;
4794       }
4795       while(hr<HOST_REGS)
4796       {
4797         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4798            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4799            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4800         {
4801           alt=hr++;break;
4802         }
4803         hr++;
4804       }
4805       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4806       {
4807         while(hr<HOST_REGS)
4808         {
4809           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4810              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4811              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4812           {
4813             ntaddr=hr;break;
4814           }
4815           hr++;
4816         }
4817         assert(hr<HOST_REGS);
4818       }
4819       if((opcode[i]&0x2f)==4) // BEQ
4820       {
4821         #ifdef HAVE_CMOV_IMM
4822         if(s1h<0) {
4823           if(s2l>=0) emit_cmp(s1l,s2l);
4824           else emit_test(s1l,s1l);
4825           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4826         }
4827         else
4828         #endif
4829         {
4830           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4831           if(s1h>=0) {
4832             if(s2h>=0) emit_cmp(s1h,s2h);
4833             else emit_test(s1h,s1h);
4834             emit_cmovne_reg(alt,addr);
4835           }
4836           if(s2l>=0) emit_cmp(s1l,s2l);
4837           else emit_test(s1l,s1l);
4838           emit_cmovne_reg(alt,addr);
4839         }
4840       }
4841       if((opcode[i]&0x2f)==5) // BNE
4842       {
4843         #ifdef HAVE_CMOV_IMM
4844         if(s1h<0) {
4845           if(s2l>=0) emit_cmp(s1l,s2l);
4846           else emit_test(s1l,s1l);
4847           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4848         }
4849         else
4850         #endif
4851         {
4852           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4853           if(s1h>=0) {
4854             if(s2h>=0) emit_cmp(s1h,s2h);
4855             else emit_test(s1h,s1h);
4856             emit_cmovne_reg(alt,addr);
4857           }
4858           if(s2l>=0) emit_cmp(s1l,s2l);
4859           else emit_test(s1l,s1l);
4860           emit_cmovne_reg(alt,addr);
4861         }
4862       }
4863       if((opcode[i]&0x2f)==6) // BLEZ
4864       {
4865         //emit_movimm(ba[i],alt);
4866         //emit_movimm(start+i*4+8,addr);
4867         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4868         emit_cmpimm(s1l,1);
4869         if(s1h>=0) emit_mov(addr,ntaddr);
4870         emit_cmovl_reg(alt,addr);
4871         if(s1h>=0) {
4872           emit_test(s1h,s1h);
4873           emit_cmovne_reg(ntaddr,addr);
4874           emit_cmovs_reg(alt,addr);
4875         }
4876       }
4877       if((opcode[i]&0x2f)==7) // BGTZ
4878       {
4879         //emit_movimm(ba[i],addr);
4880         //emit_movimm(start+i*4+8,ntaddr);
4881         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4882         emit_cmpimm(s1l,1);
4883         if(s1h>=0) emit_mov(addr,alt);
4884         emit_cmovl_reg(ntaddr,addr);
4885         if(s1h>=0) {
4886           emit_test(s1h,s1h);
4887           emit_cmovne_reg(alt,addr);
4888           emit_cmovs_reg(ntaddr,addr);
4889         }
4890       }
4891       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4892       {
4893         //emit_movimm(ba[i],alt);
4894         //emit_movimm(start+i*4+8,addr);
4895         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4896         if(s1h>=0) emit_test(s1h,s1h);
4897         else emit_test(s1l,s1l);
4898         emit_cmovs_reg(alt,addr);
4899       }
4900       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4901       {
4902         //emit_movimm(ba[i],addr);
4903         //emit_movimm(start+i*4+8,alt);
4904         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4905         if(s1h>=0) emit_test(s1h,s1h);
4906         else emit_test(s1l,s1l);
4907         emit_cmovs_reg(alt,addr);
4908       }
4909       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4910         if(source[i]&0x10000) // BC1T
4911         {
4912           //emit_movimm(ba[i],alt);
4913           //emit_movimm(start+i*4+8,addr);
4914           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4915           emit_testimm(s1l,0x800000);
4916           emit_cmovne_reg(alt,addr);
4917         }
4918         else // BC1F
4919         {
4920           //emit_movimm(ba[i],addr);
4921           //emit_movimm(start+i*4+8,alt);
4922           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4923           emit_testimm(s1l,0x800000);
4924           emit_cmovne_reg(alt,addr);
4925         }
4926       }
4927       emit_writeword(addr,(int)&pcaddr);
4928     }
4929     else
4930     if(itype[i]==RJUMP)
4931     {
4932       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4933       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4934         r=get_reg(branch_regs[i].regmap,RTEMP);
4935       }
4936       emit_writeword(r,(int)&pcaddr);
4937     }
4938     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4939   }
4940   // Update cycle count
4941   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4942   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4943   emit_call((int)cc_interrupt);
4944   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4945   if(stubs[n][6]==TAKEN) {
4946     if(internal_branch(branch_regs[i].is32,ba[i]))
4947       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4948     else if(itype[i]==RJUMP) {
4949       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4950         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4951       else
4952         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4953     }
4954   }else if(stubs[n][6]==NOTTAKEN) {
4955     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4956     else load_all_regs(branch_regs[i].regmap);
4957   }else if(stubs[n][6]==NULLDS) {
4958     // Delay slot instruction is nullified ("likely" branch)
4959     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4960     else load_all_regs(regs[i].regmap);
4961   }else{
4962     load_all_regs(branch_regs[i].regmap);
4963   }
4964   emit_jmp(stubs[n][2]); // return address
4965   
4966   /* This works but uses a lot of memory...
4967   emit_readword((int)&last_count,ECX);
4968   emit_add(HOST_CCREG,ECX,EAX);
4969   emit_writeword(EAX,(int)&Count);
4970   emit_call((int)gen_interupt);
4971   emit_readword((int)&Count,HOST_CCREG);
4972   emit_readword((int)&next_interupt,EAX);
4973   emit_readword((int)&pending_exception,EBX);
4974   emit_writeword(EAX,(int)&last_count);
4975   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4976   emit_test(EBX,EBX);
4977   int jne_instr=(int)out;
4978   emit_jne(0);
4979   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4980   load_all_regs(branch_regs[i].regmap);
4981   emit_jmp(stubs[n][2]); // return address
4982   set_jump_target(jne_instr,(int)out);
4983   emit_readword((int)&pcaddr,EAX);
4984   // Call get_addr_ht instead of doing the hash table here.
4985   // This code is executed infrequently and takes up a lot of space
4986   // so smaller is better.
4987   emit_storereg(CCREG,HOST_CCREG);
4988   emit_pushreg(EAX);
4989   emit_call((int)get_addr_ht);
4990   emit_loadreg(CCREG,HOST_CCREG);
4991   emit_addimm(ESP,4,ESP);
4992   emit_jmpreg(EAX);*/
4993 }
4994
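// Record a direct jump emitted at 'addr' so it can be patched to 'target'
// when the block is linked; the call sites pass the internal_branch()
// result as the third argument.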
4995 void add_to_linker(int addr,int target,int ext)
4996 {
4997   link_addr[linkcount][0]=addr;
4998   link_addr[linkcount][1]=target;
4999   link_addr[linkcount][2]=ext;  
5000   linkcount++;
5001 }
5002
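// Unconditional jumps (J/JAL): assemble the delay slot, set the link
// register for JAL (rt1==31), write back/reload registers for the target,
// update the cycle count and either fall into the target's delay-slot
// entry or emit a jump to be patched by the linker.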
5003 void ujump_assemble(int i,struct regstat *i_regs)
5004 {
5005   signed char *i_regmap=i_regs->regmap;
5006   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5007   address_generation(i+1,i_regs,regs[i].regmap_entry);
5008   #ifdef REG_PREFETCH
5009   int temp=get_reg(branch_regs[i].regmap,PTEMP);
5010   if(rt1[i]==31&&temp>=0) 
5011   {
5012     int return_address=start+i*4+8;
5013     if(get_reg(branch_regs[i].regmap,31)>0) 
5014     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5015   }
5016   #endif
5017   ds_assemble(i+1,i_regs);
5018   uint64_t bc_unneeded=branch_regs[i].u;
5019   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5020   bc_unneeded|=1|(1LL<<rt1[i]);
5021   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5022   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5023                 bc_unneeded,bc_unneeded_upper);
5024   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5025   if(rt1[i]==31) {
5026     int rt;
5027     unsigned int return_address;
5028     assert(rt1[i+1]!=31);
5029     assert(rt2[i+1]!=31);
5030     rt=get_reg(branch_regs[i].regmap,31);
5031     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5032     //assert(rt>=0);
5033     return_address=start+i*4+8;
5034     if(rt>=0) {
5035       #ifdef USE_MINI_HT
5036       if(internal_branch(branch_regs[i].is32,return_address)) {
5037         int temp=rt+1;
5038         if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5039            branch_regs[i].regmap[temp]>=0)
5040         {
5041           temp=get_reg(branch_regs[i].regmap,-1);
5042         }
5043         #ifdef HOST_TEMPREG
5044         if(temp<0) temp=HOST_TEMPREG;
5045         #endif
5046         if(temp>=0) do_miniht_insert(return_address,rt,temp);
5047         else emit_movimm(return_address,rt);
5048       }
5049       else
5050       #endif
5051       {
5052         #ifdef REG_PREFETCH
5053         if(temp>=0) 
5054         {
5055           if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5056         }
5057         #endif
5058         emit_movimm(return_address,rt); // PC into link register
5059         #ifdef IMM_PREFETCH
5060         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5061         #endif
5062       }
5063     }
5064   }
5065   int cc,adj;
5066   cc=get_reg(branch_regs[i].regmap,CCREG);
5067   assert(cc==HOST_CCREG);
5068   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5069   #ifdef REG_PREFETCH
5070   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5071   #endif
5072   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5073   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5074   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5075   if(internal_branch(branch_regs[i].is32,ba[i]))
5076     assem_debug("branch: internal\n");
5077   else
5078     assem_debug("branch: external\n");
5079   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5080     ds_assemble_entry(i);
5081   }
5082   else {
5083     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5084     emit_jmp(0);
5085   }
5086 }
5087
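// Assemble a register-indirect jump (JR/JALR).  The target comes from rs1,
// so after the delay slot the generated code checks the cycle count and
// dispatches through jump_vaddr_reg (or the mini hash table when
// USE_MINI_HT is enabled and the source register is $31).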
5088 void rjump_assemble(int i,struct regstat *i_regs)
5089 {
5090   signed char *i_regmap=i_regs->regmap;
5091   int temp;
5092   int rs,cc,adj;
5093   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5094   assert(rs>=0);
5095   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5096     // Delay slot abuse, make a copy of the branch address register
5097     temp=get_reg(branch_regs[i].regmap,RTEMP);
5098     assert(temp>=0);
5099     assert(regs[i].regmap[temp]==RTEMP);
5100     emit_mov(rs,temp);
5101     rs=temp;
5102   }
5103   address_generation(i+1,i_regs,regs[i].regmap_entry);
5104   #ifdef REG_PREFETCH
5105   if(rt1[i]==31) 
5106   {
5107     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5108       int return_address=start+i*4+8;
5109       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5110     }
5111   }
5112   #endif
5113   #ifdef USE_MINI_HT
5114   if(rs1[i]==31) {
5115     int rh=get_reg(regs[i].regmap,RHASH);
5116     if(rh>=0) do_preload_rhash(rh);
5117   }
5118   #endif
5119   ds_assemble(i+1,i_regs);
5120   uint64_t bc_unneeded=branch_regs[i].u;
5121   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5122   bc_unneeded|=1|(1LL<<rt1[i]);
5123   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5124   bc_unneeded&=~(1LL<<rs1[i]);
5125   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5126                 bc_unneeded,bc_unneeded_upper);
5127   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5128   if(rt1[i]!=0) {
5129     int rt,return_address;
5130     assert(rt1[i+1]!=rt1[i]);
5131     assert(rt2[i+1]!=rt1[i]);
5132     rt=get_reg(branch_regs[i].regmap,rt1[i]);
5133     assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5134     assert(rt>=0);
5135     return_address=start+i*4+8;
5136     #ifdef REG_PREFETCH
5137     if(temp>=0) 
5138     {
5139       if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5140     }
5141     #endif
5142     emit_movimm(return_address,rt); // PC into link register
5143     #ifdef IMM_PREFETCH
5144     emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5145     #endif
5146   }
5147   cc=get_reg(branch_regs[i].regmap,CCREG);
5148   assert(cc==HOST_CCREG);
5149   #ifdef USE_MINI_HT
5150   int rh=get_reg(branch_regs[i].regmap,RHASH);
5151   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5152   if(rs1[i]==31) {
5153     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5154     do_preload_rhtbl(ht);
5155     do_rhash(rs,rh);
5156   }
5157   #endif
5158   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5159   #ifdef DESTRUCTIVE_WRITEBACK
5160   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5161     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5162       emit_loadreg(rs1[i],rs);
5163     }
5164   }
5165   #endif
5166   #ifdef REG_PREFETCH
5167   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5168   #endif
5169   #ifdef USE_MINI_HT
5170   if(rs1[i]==31) {
5171     do_miniht_load(ht,rh);
5172   }
5173   #endif
5174   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5175   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5176   //assert(adj==0);
5177   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5178   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5179   emit_jns(0);
5180   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5181   #ifdef USE_MINI_HT
5182   if(rs1[i]==31) {
5183     do_miniht_jump(rs,rh,ht);
5184   }
5185   else
5186   #endif
5187   {
5188     //if(rs!=EAX) emit_mov(rs,EAX);
5189     //emit_jmp((int)jump_vaddr_eax);
5190     emit_jmp(jump_vaddr_reg[rs]);
5191   }
5192   /* Check hash table
5193   temp=!rs;
5194   emit_mov(rs,temp);
5195   emit_shrimm(rs,16,rs);
5196   emit_xor(temp,rs,rs);
5197   emit_movzwl_reg(rs,rs);
5198   emit_shlimm(rs,4,rs);
5199   emit_cmpmem_indexed((int)hash_table,rs,temp);
5200   emit_jne((int)out+14);
5201   emit_readword_indexed((int)hash_table+4,rs,rs);
5202   emit_jmpreg(rs);
5203   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5204   emit_addimm_no_flags(8,rs);
5205   emit_jeq((int)out-17);
5206   // No hit on hash table, call compiler
5207   emit_pushreg(temp);
5208 //DEBUG >
5209 #ifdef DEBUG_CYCLE_COUNT
5210   emit_readword((int)&last_count,ECX);
5211   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5212   emit_readword((int)&next_interupt,ECX);
5213   emit_writeword(HOST_CCREG,(int)&Count);
5214   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5215   emit_writeword(ECX,(int)&last_count);
5216 #endif
5217 //DEBUG <
5218   emit_storereg(CCREG,HOST_CCREG);
5219   emit_call((int)get_addr);
5220   emit_loadreg(CCREG,HOST_CCREG);
5221   emit_addimm(ESP,4,ESP);
5222   emit_jmpreg(EAX);*/
5223   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5224   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5225   #endif
5226 }
5227
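// Assemble a conditional branch (BEQ/BNE/BLEZ/BGTZ and the "likely" forms).
// Normally the delay slot is executed before the compare (out of order);
// if the slot writes one of the compared registers, or the branch is
// "likely", the compare is done first (in order).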
5228 void cjump_assemble(int i,struct regstat *i_regs)
5229 {
5230   signed char *i_regmap=i_regs->regmap;
5231   int cc;
5232   int match;
5233   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5234   assem_debug("match=%d\n",match);
5235   int s1h,s1l,s2h,s2l;
5236   int prev_cop1_usable=cop1_usable;
5237   int unconditional=0,nop=0;
5238   int only32=0;
5239   int ooo=1;
5240   int invert=0;
5241   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5242   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5243   if(likely[i]) ooo=0;
5244   if(!match) invert=1;
5245   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5246   if(i>(ba[i]-start)>>2) invert=1;
5247   #endif
5248     
5249   if(ooo)
5250     if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5251        (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5252   {
5253     // Write-after-read dependency prevents out of order execution
5254     // First test branch condition, then execute delay slot, then branch
5255     ooo=0;
5256   }
5257
5258   if(ooo) {
5259     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5260     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5261     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5262     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5263   }
5264   else {
5265     s1l=get_reg(i_regmap,rs1[i]);
5266     s1h=get_reg(i_regmap,rs1[i]|64);
5267     s2l=get_reg(i_regmap,rs2[i]);
5268     s2h=get_reg(i_regmap,rs2[i]|64);
5269   }
5270   if(rs1[i]==0&&rs2[i]==0)
5271   {
5272     if(opcode[i]&1) nop=1;
5273     else unconditional=1;
5274     //assert(opcode[i]!=5);
5275     //assert(opcode[i]!=7);
5276     //assert(opcode[i]!=0x15);
5277     //assert(opcode[i]!=0x17);
5278   }
5279   else if(rs1[i]==0)
5280   {
5281     s1l=s2l;s1h=s2h;
5282     s2l=s2h=-1;
5283     only32=(regs[i].was32>>rs2[i])&1;
5284   }
5285   else if(rs2[i]==0)
5286   {
5287     s2l=s2h=-1;
5288     only32=(regs[i].was32>>rs1[i])&1;
5289   }
5290   else {
5291     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5292   }
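  // only32: both source registers are known to hold 32-bit values, so the
  // upper-word compare (s1h/s2h) can be skipped in the code below.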
5293
5294   if(ooo) {
5295     // Out of order execution (delay slot first)
5296     //printf("OOOE\n");
5297     address_generation(i+1,i_regs,regs[i].regmap_entry);
5298     ds_assemble(i+1,i_regs);
5299     int adj;
5300     uint64_t bc_unneeded=branch_regs[i].u;
5301     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5302     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5303     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5304     bc_unneeded|=1;
5305     bc_unneeded_upper|=1;
5306     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5307                   bc_unneeded,bc_unneeded_upper);
5308     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5309     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5310     cc=get_reg(branch_regs[i].regmap,CCREG);
5311     assert(cc==HOST_CCREG);
5312     if(unconditional) 
5313       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5314     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5315     //assem_debug("cycle count (adj)\n");
5316     if(unconditional) {
5317       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5318       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5319         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5320         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5321         if(internal)
5322           assem_debug("branch: internal\n");
5323         else
5324           assem_debug("branch: external\n");
5325         if(internal&&is_ds[(ba[i]-start)>>2]) {
5326           ds_assemble_entry(i);
5327         }
5328         else {
5329           add_to_linker((int)out,ba[i],internal);
5330           emit_jmp(0);
5331         }
5332         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5333         if(((u_int)out)&7) emit_addnop(0);
5334         #endif
5335       }
5336     }
5337     else if(nop) {
5338       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5339       int jaddr=(int)out;
5340       emit_jns(0);
5341       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5342     }
5343     else {
5344       int taken=0,nottaken=0,nottaken1=0;
5345       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5346       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5347       if(!only32)
5348       {
5349         assert(s1h>=0);
5350         if(opcode[i]==4) // BEQ
5351         {
5352           if(s2h>=0) emit_cmp(s1h,s2h);
5353           else emit_test(s1h,s1h);
5354           nottaken1=(int)out;
5355           emit_jne(1);
5356         }
5357         if(opcode[i]==5) // BNE
5358         {
5359           if(s2h>=0) emit_cmp(s1h,s2h);
5360           else emit_test(s1h,s1h);
5361           if(invert) taken=(int)out;
5362           else add_to_linker((int)out,ba[i],internal);
5363           emit_jne(0);
5364         }
5365         if(opcode[i]==6) // BLEZ
5366         {
5367           emit_test(s1h,s1h);
5368           if(invert) taken=(int)out;
5369           else add_to_linker((int)out,ba[i],internal);
5370           emit_js(0);
5371           nottaken1=(int)out;
5372           emit_jne(1);
5373         }
5374         if(opcode[i]==7) // BGTZ
5375         {
5376           emit_test(s1h,s1h);
5377           nottaken1=(int)out;
5378           emit_js(1);
5379           if(invert) taken=(int)out;
5380           else add_to_linker((int)out,ba[i],internal);
5381           emit_jne(0);
5382         }
5383       } // if(!only32)
5384           
5385       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5386       assert(s1l>=0);
5387       if(opcode[i]==4) // BEQ
5388       {
5389         if(s2l>=0) emit_cmp(s1l,s2l);
5390         else emit_test(s1l,s1l);
5391         if(invert){
5392           nottaken=(int)out;
5393           emit_jne(1);
5394         }else{
5395           add_to_linker((int)out,ba[i],internal);
5396           emit_jeq(0);
5397         }
5398       }
5399       if(opcode[i]==5) // BNE
5400       {
5401         if(s2l>=0) emit_cmp(s1l,s2l);
5402         else emit_test(s1l,s1l);
5403         if(invert){
5404           nottaken=(int)out;
5405           emit_jeq(1);
5406         }else{
5407           add_to_linker((int)out,ba[i],internal);
5408           emit_jne(0);
5409         }
5410       }
5411       if(opcode[i]==6) // BLEZ
5412       {
5413         emit_cmpimm(s1l,1);
5414         if(invert){
5415           nottaken=(int)out;
5416           emit_jge(1);
5417         }else{
5418           add_to_linker((int)out,ba[i],internal);
5419           emit_jl(0);
5420         }
5421       }
5422       if(opcode[i]==7) // BGTZ
5423       {
5424         emit_cmpimm(s1l,1);
5425         if(invert){
5426           nottaken=(int)out;
5427           emit_jl(1);
5428         }else{
5429           add_to_linker((int)out,ba[i],internal);
5430           emit_jge(0);
5431         }
5432       }
5433       if(invert) {
5434         if(taken) set_jump_target(taken,(int)out);
5435         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5436         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5437           if(adj) {
5438             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5439             add_to_linker((int)out,ba[i],internal);
5440           }else{
5441             emit_addnop(13);
5442             add_to_linker((int)out,ba[i],internal*2);
5443           }
5444           emit_jmp(0);
5445         }else
5446         #endif
5447         {
5448           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5449           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5450           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5451           if(internal)
5452             assem_debug("branch: internal\n");
5453           else
5454             assem_debug("branch: external\n");
5455           if(internal&&is_ds[(ba[i]-start)>>2]) {
5456             ds_assemble_entry(i);
5457           }
5458           else {
5459             add_to_linker((int)out,ba[i],internal);
5460             emit_jmp(0);
5461           }
5462         }
5463         set_jump_target(nottaken,(int)out);
5464       }
5465
5466       if(nottaken1) set_jump_target(nottaken1,(int)out);
5467       if(adj) {
5468         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5469       }
5470     } // (!unconditional)
5471   } // if(ooo)
5472   else
5473   {
5474     // In-order execution (branch first)
5475     //if(likely[i]) printf("IOL\n");
5476     //else
5477     //printf("IOE\n");
5478     int taken=0,nottaken=0,nottaken1=0;
5479     if(!unconditional&&!nop) {
5480       if(!only32)
5481       {
5482         assert(s1h>=0);
5483         if((opcode[i]&0x2f)==4) // BEQ
5484         {
5485           if(s2h>=0) emit_cmp(s1h,s2h);
5486           else emit_test(s1h,s1h);
5487           nottaken1=(int)out;
5488           emit_jne(2);
5489         }
5490         if((opcode[i]&0x2f)==5) // BNE
5491         {
5492           if(s2h>=0) emit_cmp(s1h,s2h);
5493           else emit_test(s1h,s1h);
5494           taken=(int)out;
5495           emit_jne(1);
5496         }
5497         if((opcode[i]&0x2f)==6) // BLEZ
5498         {
5499           emit_test(s1h,s1h);
5500           taken=(int)out;
5501           emit_js(1);
5502           nottaken1=(int)out;
5503           emit_jne(2);
5504         }
5505         if((opcode[i]&0x2f)==7) // BGTZ
5506         {
5507           emit_test(s1h,s1h);
5508           nottaken1=(int)out;
5509           emit_js(2);
5510           taken=(int)out;
5511           emit_jne(1);
5512         }
5513       } // if(!only32)
5514           
5515       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5516       assert(s1l>=0);
5517       if((opcode[i]&0x2f)==4) // BEQ
5518       {
5519         if(s2l>=0) emit_cmp(s1l,s2l);
5520         else emit_test(s1l,s1l);
5521         nottaken=(int)out;
5522         emit_jne(2);
5523       }
5524       if((opcode[i]&0x2f)==5) // BNE
5525       {
5526         if(s2l>=0) emit_cmp(s1l,s2l);
5527         else emit_test(s1l,s1l);
5528         nottaken=(int)out;
5529         emit_jeq(2);
5530       }
5531       if((opcode[i]&0x2f)==6) // BLEZ
5532       {
5533         emit_cmpimm(s1l,1);
5534         nottaken=(int)out;
5535         emit_jge(2);
5536       }
5537       if((opcode[i]&0x2f)==7) // BGTZ
5538       {
5539         emit_cmpimm(s1l,1);
5540         nottaken=(int)out;
5541         emit_jl(2);
5542       }
5543     } // if(!unconditional)
5544     int adj;
5545     uint64_t ds_unneeded=branch_regs[i].u;
5546     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5547     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5548     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5549     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5550     ds_unneeded|=1;
5551     ds_unneeded_upper|=1;
5552     // branch taken
5553     if(!nop) {
5554       if(taken) set_jump_target(taken,(int)out);
5555       assem_debug("1:\n");
5556       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5557                     ds_unneeded,ds_unneeded_upper);
5558       // load regs
5559       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5560       address_generation(i+1,&branch_regs[i],0);
5561       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5562       ds_assemble(i+1,&branch_regs[i]);
5563       cc=get_reg(branch_regs[i].regmap,CCREG);
5564       if(cc==-1) {
5565         emit_loadreg(CCREG,cc=HOST_CCREG);
5566         // CHECK: Is the following instruction (fall thru) allocated ok?
5567       }
5568       assert(cc==HOST_CCREG);
5569       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5570       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5571       assem_debug("cycle count (adj)\n");
5572       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5573       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5574       if(internal)
5575         assem_debug("branch: internal\n");
5576       else
5577         assem_debug("branch: external\n");
5578       if(internal&&is_ds[(ba[i]-start)>>2]) {
5579         ds_assemble_entry(i);
5580       }
5581       else {
5582         add_to_linker((int)out,ba[i],internal);
5583         emit_jmp(0);
5584       }
5585     }
5586     // branch not taken
5587     cop1_usable=prev_cop1_usable;
5588     if(!unconditional) {
5589       if(nottaken1) set_jump_target(nottaken1,(int)out);
5590       set_jump_target(nottaken,(int)out);
5591       assem_debug("2:\n");
5592       if(!likely[i]) {
5593         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5594                       ds_unneeded,ds_unneeded_upper);
5595         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5596         address_generation(i+1,&branch_regs[i],0);
5597         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5598         ds_assemble(i+1,&branch_regs[i]);
5599       }
5600       cc=get_reg(branch_regs[i].regmap,CCREG);
5601       if(cc==-1&&!likely[i]) {
5602         // Cycle count isn't in a register, temporarily load it then write it out
5603         emit_loadreg(CCREG,HOST_CCREG);
5604         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5605         int jaddr=(int)out;
5606         emit_jns(0);
5607         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5608         emit_storereg(CCREG,HOST_CCREG);
5609       }
5610       else{
5611         cc=get_reg(i_regmap,CCREG);
5612         assert(cc==HOST_CCREG);
5613         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5614         int jaddr=(int)out;
5615         emit_jns(0);
5616         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5617       }
5618     }
5619   }
5620 }
5621
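// Assemble a REGIMM branch (BLTZ/BGEZ and their AL/likely variants), which
// compares a single register against zero and, for the "and link" forms,
// also writes the return address to $31.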
5622 void sjump_assemble(int i,struct regstat *i_regs)
5623 {
5624   signed char *i_regmap=i_regs->regmap;
5625   int cc;
5626   int match;
5627   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5628   assem_debug("smatch=%d\n",match);
5629   int s1h,s1l;
5630   int prev_cop1_usable=cop1_usable;
5631   int unconditional=0,nevertaken=0;
5632   int only32=0;
5633   int ooo=1;
5634   int invert=0;
5635   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5636   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5637   if(likely[i]) ooo=0;
5638   if(!match) invert=1;
5639   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5640   if(i>(ba[i]-start)>>2) invert=1;
5641   #endif
5642
5643   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5644   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5645
5646   if(ooo)
5647     if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5648   {
5649     // Write-after-read dependency prevents out of order execution
5650     // First test branch condition, then execute delay slot, then branch
5651     ooo=0;
5652   }
5653   assert(opcode2[i]<0x10||ooo); // FIXME (BxxZALL)
5654
5655   if(ooo) {
5656     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5657     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5658   }
5659   else {
5660     s1l=get_reg(i_regmap,rs1[i]);
5661     s1h=get_reg(i_regmap,rs1[i]|64);
5662   }
5663   if(rs1[i]==0)
5664   {
5665     if(opcode2[i]&1) unconditional=1;
5666     else nevertaken=1;
5667     // These are never taken (r0 is never less than zero)
5668     //assert(opcode2[i]!=0);
5669     //assert(opcode2[i]!=2);
5670     //assert(opcode2[i]!=0x10);
5671     //assert(opcode2[i]!=0x12);
5672   }
5673   else {
5674     only32=(regs[i].was32>>rs1[i])&1;
5675   }
5676
5677   if(ooo) {
5678     // Out of order execution (delay slot first)
5679     //printf("OOOE\n");
5680     address_generation(i+1,i_regs,regs[i].regmap_entry);
5681     ds_assemble(i+1,i_regs);
5682     int adj;
5683     uint64_t bc_unneeded=branch_regs[i].u;
5684     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5685     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5686     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5687     bc_unneeded|=1;
5688     bc_unneeded_upper|=1;
5689     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5690                   bc_unneeded,bc_unneeded_upper);
5691     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5692     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5693     if(rt1[i]==31) {
5694       int rt,return_address;
5695       assert(rt1[i+1]!=31);
5696       assert(rt2[i+1]!=31);
5697       rt=get_reg(branch_regs[i].regmap,31);
5698       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5699       if(rt>=0) {
5700         // Save the PC even if the branch is not taken
5701         return_address=start+i*4+8;
5702         emit_movimm(return_address,rt); // PC into link register
5703         #ifdef IMM_PREFETCH
5704         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5705         #endif
5706       }
5707     }
5708     cc=get_reg(branch_regs[i].regmap,CCREG);
5709     assert(cc==HOST_CCREG);
5710     if(unconditional) 
5711       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5712     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5713     assem_debug("cycle count (adj)\n");
5714     if(unconditional) {
5715       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5716       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5717         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5718         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5719         if(internal)
5720           assem_debug("branch: internal\n");
5721         else
5722           assem_debug("branch: external\n");
5723         if(internal&&is_ds[(ba[i]-start)>>2]) {
5724           ds_assemble_entry(i);
5725         }
5726         else {
5727           add_to_linker((int)out,ba[i],internal);
5728           emit_jmp(0);
5729         }
5730         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5731         if(((u_int)out)&7) emit_addnop(0);
5732         #endif
5733       }
5734     }
5735     else if(nevertaken) {
5736       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5737       int jaddr=(int)out;
5738       emit_jns(0);
5739       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5740     }
5741     else {
5742       int nottaken=0;
5743       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5744       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5745       if(!only32)
5746       {
5747         assert(s1h>=0);
5748         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5749         {
5750           emit_test(s1h,s1h);
5751           if(invert){
5752             nottaken=(int)out;
5753             emit_jns(1);
5754           }else{
5755             add_to_linker((int)out,ba[i],internal);
5756             emit_js(0);
5757           }
5758         }
5759         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5760         {
5761           emit_test(s1h,s1h);
5762           if(invert){
5763             nottaken=(int)out;
5764             emit_js(1);
5765           }else{
5766             add_to_linker((int)out,ba[i],internal);
5767             emit_jns(0);
5768           }
5769         }
5770       } // if(!only32)
5771       else
5772       {
5773         assert(s1l>=0);
5774         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5775         {
5776           emit_test(s1l,s1l);
5777           if(invert){
5778             nottaken=(int)out;
5779             emit_jns(1);
5780           }else{
5781             add_to_linker((int)out,ba[i],internal);
5782             emit_js(0);
5783           }
5784         }
5785         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5786         {
5787           emit_test(s1l,s1l);
5788           if(invert){
5789             nottaken=(int)out;
5790             emit_js(1);
5791           }else{
5792             add_to_linker((int)out,ba[i],internal);
5793             emit_jns(0);
5794           }
5795         }
5796       } // if(!only32)
5797           
5798       if(invert) {
5799         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5800         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5801           if(adj) {
5802             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5803             add_to_linker((int)out,ba[i],internal);
5804           }else{
5805             emit_addnop(13);
5806             add_to_linker((int)out,ba[i],internal*2);
5807           }
5808           emit_jmp(0);
5809         }else
5810         #endif
5811         {
5812           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5813           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5814           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5815           if(internal)
5816             assem_debug("branch: internal\n");
5817           else
5818             assem_debug("branch: external\n");
5819           if(internal&&is_ds[(ba[i]-start)>>2]) {
5820             ds_assemble_entry(i);
5821           }
5822           else {
5823             add_to_linker((int)out,ba[i],internal);
5824             emit_jmp(0);
5825           }
5826         }
5827         set_jump_target(nottaken,(int)out);
5828       }
5829
5830       if(adj) {
5831         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5832       }
5833     } // (!unconditional)
5834   } // if(ooo)
5835   else
5836   {
5837     // In-order execution (branch first)
5838     //printf("IOE\n");
5839     int nottaken=0;
5840     if(!unconditional) {
5841       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5842       if(!only32)
5843       {
5844         assert(s1h>=0);
5845         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5846         {
5847           emit_test(s1h,s1h);
5848           nottaken=(int)out;
5849           emit_jns(1);
5850         }
5851         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5852         {
5853           emit_test(s1h,s1h);
5854           nottaken=(int)out;
5855           emit_js(1);
5856         }
5857       } // if(!only32)
5858       else
5859       {
5860         assert(s1l>=0);
5861         if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5862         {
5863           emit_test(s1l,s1l);
5864           nottaken=(int)out;
5865           emit_jns(1);
5866         }
5867         if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5868         {
5869           emit_test(s1l,s1l);
5870           nottaken=(int)out;
5871           emit_js(1);
5872         }
5873       }
5874     } // if(!unconditional)
5875     int adj;
5876     uint64_t ds_unneeded=branch_regs[i].u;
5877     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5878     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5879     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5880     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5881     ds_unneeded|=1;
5882     ds_unneeded_upper|=1;
5883     // branch taken
5884     if(!nevertaken) {
5885       //assem_debug("1:\n");
5886       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5887                     ds_unneeded,ds_unneeded_upper);
5888       // load regs
5889       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5890       address_generation(i+1,&branch_regs[i],0);
5891       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5892       ds_assemble(i+1,&branch_regs[i]);
5893       cc=get_reg(branch_regs[i].regmap,CCREG);
5894       if(cc==-1) {
5895         emit_loadreg(CCREG,cc=HOST_CCREG);
5896         // CHECK: Is the following instruction (fall thru) allocated ok?
5897       }
5898       assert(cc==HOST_CCREG);
5899       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5900       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5901       assem_debug("cycle count (adj)\n");
5902       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5903       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5904       if(internal)
5905         assem_debug("branch: internal\n");
5906       else
5907         assem_debug("branch: external\n");
5908       if(internal&&is_ds[(ba[i]-start)>>2]) {
5909         ds_assemble_entry(i);
5910       }
5911       else {
5912         add_to_linker((int)out,ba[i],internal);
5913         emit_jmp(0);
5914       }
5915     }
5916     // branch not taken
5917     cop1_usable=prev_cop1_usable;
5918     if(!unconditional) {
5919       set_jump_target(nottaken,(int)out);
5920       assem_debug("1:\n");
5921       if(!likely[i]) {
5922         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5923                       ds_unneeded,ds_unneeded_upper);
5924         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5925         address_generation(i+1,&branch_regs[i],0);
5926         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5927         ds_assemble(i+1,&branch_regs[i]);
5928       }
5929       cc=get_reg(branch_regs[i].regmap,CCREG);
5930       if(cc==-1&&!likely[i]) {
5931         // Cycle count isn't in a register, temporarily load it then write it out
5932         emit_loadreg(CCREG,HOST_CCREG);
5933         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5934         int jaddr=(int)out;
5935         emit_jns(0);
5936         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5937         emit_storereg(CCREG,HOST_CCREG);
5938       }
5939       else{
5940         cc=get_reg(i_regmap,CCREG);
5941         assert(cc==HOST_CCREG);
5942         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5943         int jaddr=(int)out;
5944         emit_jns(0);
5945         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5946       }
5947     }
5948   }
5949 }
5950
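// Assemble a COP1 condition branch (BC1T/BC1F and the likely variants):
// after making sure the coprocessor is usable, test the FP condition bit
// (0x800000) in the register holding FSREG and branch on the result.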
5951 void fjump_assemble(int i,struct regstat *i_regs)
5952 {
5953   signed char *i_regmap=i_regs->regmap;
5954   int cc;
5955   int match;
5956   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5957   assem_debug("fmatch=%d\n",match);
5958   int fs,cs;
5959   int eaddr;
5960   int ooo=1;
5961   int invert=0;
5962   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5963   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5964   if(likely[i]) ooo=0;
5965   if(!match) invert=1;
5966   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5967   if(i>(ba[i]-start)>>2) invert=1;
5968   #endif
5969
5970   if(ooo)
5971     if(itype[i+1]==FCOMP)
5972   {
5973     // Write-after-read dependency prevents out of order execution
5974     // First test branch condition, then execute delay slot, then branch
5975     ooo=0;
5976   }
5977
5978   if(ooo) {
5979     fs=get_reg(branch_regs[i].regmap,FSREG);
5980     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5981   }
5982   else {
5983     fs=get_reg(i_regmap,FSREG);
5984   }
5985
5986   // Check cop1 unusable
5987   if(!cop1_usable) {
5988     cs=get_reg(i_regmap,CSREG);
5989     assert(cs>=0);
5990     emit_testimm(cs,0x20000000);
5991     eaddr=(int)out;
5992     emit_jeq(0);
5993     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5994     cop1_usable=1;
5995   }
5996
5997   if(ooo) {
5998     // Out of order execution (delay slot first)
5999     //printf("OOOE\n");
6000     ds_assemble(i+1,i_regs);
6001     int adj;
6002     uint64_t bc_unneeded=branch_regs[i].u;
6003     uint64_t bc_unneeded_upper=branch_regs[i].uu;
6004     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6005     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6006     bc_unneeded|=1;
6007     bc_unneeded_upper|=1;
6008     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6009                   bc_unneeded,bc_unneeded_upper);
6010     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6011     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6012     cc=get_reg(branch_regs[i].regmap,CCREG);
6013     assert(cc==HOST_CCREG);
6014     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6015     assem_debug("cycle count (adj)\n");
6016     if(1) {
6017       int nottaken=0;
6018       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6019       if(1) {
6020         assert(fs>=0);
6021         emit_testimm(fs,0x800000);
6022         if(source[i]&0x10000) // BC1T
6023         {
6024           if(invert){
6025             nottaken=(int)out;
6026             emit_jeq(1);
6027           }else{
6028             add_to_linker((int)out,ba[i],internal);
6029             emit_jne(0);
6030           }
6031         }
6032         else // BC1F
6033           if(invert){
6034             nottaken=(int)out;
6035             emit_jne(1);
6036           }else{
6037             add_to_linker((int)out,ba[i],internal);
6038             emit_jeq(0);
6039           }
6042       }
6043           
6044       if(invert) {
6045         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6046         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6047         else if(match) emit_addnop(13);
6048         #endif
6049         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6050         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6051         if(internal)
6052           assem_debug("branch: internal\n");
6053         else
6054           assem_debug("branch: external\n");
6055         if(internal&&is_ds[(ba[i]-start)>>2]) {
6056           ds_assemble_entry(i);
6057         }
6058         else {
6059           add_to_linker((int)out,ba[i],internal);
6060           emit_jmp(0);
6061         }
6062         set_jump_target(nottaken,(int)out);
6063       }
6064
6065       if(adj) {
6066         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6067       }
6068     } // (!unconditional)
6069   } // if(ooo)
6070   else
6071   {
6072     // In-order execution (branch first)
6073     //printf("IOE\n");
6074     int nottaken=0;
6075     if(1) {
6076       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6077       if(1) {
6078         assert(fs>=0);
6079         emit_testimm(fs,0x800000);
6080         if(source[i]&0x10000) // BC1T
6081         {
6082           nottaken=(int)out;
6083           emit_jeq(1);
6084         }
6085         else // BC1F
6086         {
6087           nottaken=(int)out;
6088           emit_jne(1);
6089         }
6090       }
6091     } // if(!unconditional)
6092     int adj;
6093     uint64_t ds_unneeded=branch_regs[i].u;
6094     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6095     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6096     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6097     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6098     ds_unneeded|=1;
6099     ds_unneeded_upper|=1;
6100     // branch taken
6101     //assem_debug("1:\n");
6102     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6103                   ds_unneeded,ds_unneeded_upper);
6104     // load regs
6105     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6106     address_generation(i+1,&branch_regs[i],0);
6107     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6108     ds_assemble(i+1,&branch_regs[i]);
6109     cc=get_reg(branch_regs[i].regmap,CCREG);
6110     if(cc==-1) {
6111       emit_loadreg(CCREG,cc=HOST_CCREG);
6112       // CHECK: Is the following instruction (fall thru) allocated ok?
6113     }
6114     assert(cc==HOST_CCREG);
6115     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6116     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6117     assem_debug("cycle count (adj)\n");
6118     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6119     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6120     if(internal)
6121       assem_debug("branch: internal\n");
6122     else
6123       assem_debug("branch: external\n");
6124     if(internal&&is_ds[(ba[i]-start)>>2]) {
6125       ds_assemble_entry(i);
6126     }
6127     else {
6128       add_to_linker((int)out,ba[i],internal);
6129       emit_jmp(0);
6130     }
6131
6132     // branch not taken
6133     if(1) { // <- FIXME (don't need this)
6134       set_jump_target(nottaken,(int)out);
6135       assem_debug("1:\n");
6136       if(!likely[i]) {
6137         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6138                       ds_unneeded,ds_unneeded_upper);
6139         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6140         address_generation(i+1,&branch_regs[i],0);
6141         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6142         ds_assemble(i+1,&branch_regs[i]);
6143       }
6144       cc=get_reg(branch_regs[i].regmap,CCREG);
6145       if(cc==-1&&!likely[i]) {
6146         // Cycle count isn't in a register, temporarily load it then write it out
6147         emit_loadreg(CCREG,HOST_CCREG);
6148         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6149         int jaddr=(int)out;
6150         emit_jns(0);
6151         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6152         emit_storereg(CCREG,HOST_CCREG);
6153       }
6154       else{
6155         cc=get_reg(i_regmap,CCREG);
6156         assert(cc==HOST_CCREG);
6157         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6158         int jaddr=(int)out;
6159         emit_jns(0);
6160         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6161       }
6162     }
6163   }
6164 }
6165
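// Assemble a branch at the end of a page (itype SPAN), where the delay slot
// presumably lies on the next page.  The branch target is selected into a
// host register, saved in HOST_BTREG, and control leaves through an external
// jump; the delay slot itself is compiled as a separate entry point (see
// pagespan_ds below).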
6166 static void pagespan_assemble(int i,struct regstat *i_regs)
6167 {
6168   int s1l=get_reg(i_regs->regmap,rs1[i]);
6169   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6170   int s2l=get_reg(i_regs->regmap,rs2[i]);
6171   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6172   void *nt_branch=NULL;
6173   int taken=0;
6174   int nottaken=0;
6175   int unconditional=0;
6176   if(rs1[i]==0)
6177   {
6178     s1l=s2l;s1h=s2h;
6179     s2l=s2h=-1;
6180   }
6181   else if(rs2[i]==0)
6182   {
6183     s2l=s2h=-1;
6184   }
6185   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6186     s1h=s2h=-1;
6187   }
6188   int hr=0;
6189   int addr,alt,ntaddr;
6190   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6191   else {
6192     while(hr<HOST_REGS)
6193     {
6194       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6195          (i_regs->regmap[hr]&63)!=rs1[i] &&
6196          (i_regs->regmap[hr]&63)!=rs2[i] )
6197       {
6198         addr=hr++;break;
6199       }
6200       hr++;
6201     }
6202   }
6203   while(hr<HOST_REGS)
6204   {
6205     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6206        (i_regs->regmap[hr]&63)!=rs1[i] &&
6207        (i_regs->regmap[hr]&63)!=rs2[i] )
6208     {
6209       alt=hr++;break;
6210     }
6211     hr++;
6212   }
6213   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6214   {
6215     while(hr<HOST_REGS)
6216     {
6217       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6218          (i_regs->regmap[hr]&63)!=rs1[i] &&
6219          (i_regs->regmap[hr]&63)!=rs2[i] )
6220       {
6221         ntaddr=hr;break;
6222       }
6223       hr++;
6224     }
6225   }
6226   assert(hr<HOST_REGS);
6227   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6228     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6229   }
6230   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6231   if(opcode[i]==2) // J
6232   {
6233     unconditional=1;
6234   }
6235   if(opcode[i]==3) // JAL
6236   {
6237     // TODO: mini_ht
6238     int rt=get_reg(i_regs->regmap,31);
6239     emit_movimm(start+i*4+8,rt);
6240     unconditional=1;
6241   }
6242   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6243   {
6244     emit_mov(s1l,addr);
6245     if(opcode2[i]==9) // JALR
6246     {
6247       int rt=get_reg(i_regs->regmap,rt1[i]);
6248       emit_movimm(start+i*4+8,rt);
6249     }
6250   }
6251   if((opcode[i]&0x3f)==4) // BEQ
6252   {
6253     if(rs1[i]==rs2[i])
6254     {
6255       unconditional=1;
6256     }
6257     else
6258     #ifdef HAVE_CMOV_IMM
6259     if(s1h<0) {
6260       if(s2l>=0) emit_cmp(s1l,s2l);
6261       else emit_test(s1l,s1l);
6262       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6263     }
6264     else
6265     #endif
6266     {
6267       assert(s1l>=0);
6268       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6269       if(s1h>=0) {
6270         if(s2h>=0) emit_cmp(s1h,s2h);
6271         else emit_test(s1h,s1h);
6272         emit_cmovne_reg(alt,addr);
6273       }
6274       if(s2l>=0) emit_cmp(s1l,s2l);
6275       else emit_test(s1l,s1l);
6276       emit_cmovne_reg(alt,addr);
6277     }
6278   }
6279   if((opcode[i]&0x3f)==5) // BNE
6280   {
6281     #ifdef HAVE_CMOV_IMM
6282     if(s1h<0) {
6283       if(s2l>=0) emit_cmp(s1l,s2l);
6284       else emit_test(s1l,s1l);
6285       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6286     }
6287     else
6288     #endif
6289     {
6290       assert(s1l>=0);
6291       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6292       if(s1h>=0) {
6293         if(s2h>=0) emit_cmp(s1h,s2h);
6294         else emit_test(s1h,s1h);
6295         emit_cmovne_reg(alt,addr);
6296       }
6297       if(s2l>=0) emit_cmp(s1l,s2l);
6298       else emit_test(s1l,s1l);
6299       emit_cmovne_reg(alt,addr);
6300     }
6301   }
6302   if((opcode[i]&0x3f)==0x14) // BEQL
6303   {
6304     if(s1h>=0) {
6305       if(s2h>=0) emit_cmp(s1h,s2h);
6306       else emit_test(s1h,s1h);
6307       nottaken=(int)out;
6308       emit_jne(0);
6309     }
6310     if(s2l>=0) emit_cmp(s1l,s2l);
6311     else emit_test(s1l,s1l);
6312     if(nottaken) set_jump_target(nottaken,(int)out);
6313     nottaken=(int)out;
6314     emit_jne(0);
6315   }
6316   if((opcode[i]&0x3f)==0x15) // BNEL
6317   {
6318     if(s1h>=0) {
6319       if(s2h>=0) emit_cmp(s1h,s2h);
6320       else emit_test(s1h,s1h);
6321       taken=(int)out;
6322       emit_jne(0);
6323     }
6324     if(s2l>=0) emit_cmp(s1l,s2l);
6325     else emit_test(s1l,s1l);
6326     nottaken=(int)out;
6327     emit_jeq(0);
6328     if(taken) set_jump_target(taken,(int)out);
6329   }
6330   if((opcode[i]&0x3f)==6) // BLEZ
6331   {
6332     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6333     emit_cmpimm(s1l,1);
6334     if(s1h>=0) emit_mov(addr,ntaddr);
6335     emit_cmovl_reg(alt,addr);
6336     if(s1h>=0) {
6337       emit_test(s1h,s1h);
6338       emit_cmovne_reg(ntaddr,addr);
6339       emit_cmovs_reg(alt,addr);
6340     }
6341   }
6342   if((opcode[i]&0x3f)==7) // BGTZ
6343   {
6344     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6345     emit_cmpimm(s1l,1);
6346     if(s1h>=0) emit_mov(addr,alt);
6347     emit_cmovl_reg(ntaddr,addr);
6348     if(s1h>=0) {
6349       emit_test(s1h,s1h);
6350       emit_cmovne_reg(alt,addr);
6351       emit_cmovs_reg(ntaddr,addr);
6352     }
6353   }
6354   if((opcode[i]&0x3f)==0x16) // BLEZL
6355   {
6356     assert((opcode[i]&0x3f)!=0x16);
6357   }
6358   if((opcode[i]&0x3f)==0x17) // BGTZL
6359   {
6360     assert((opcode[i]&0x3f)!=0x17);
6361   }
6362   assert(opcode[i]!=1); // BLTZ/BGEZ
6363
6364   //FIXME: Check CSREG
6365   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6366     if((source[i]&0x30000)==0) // BC1F
6367     {
6368       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6369       emit_testimm(s1l,0x800000);
6370       emit_cmovne_reg(alt,addr);
6371     }
6372     if((source[i]&0x30000)==0x10000) // BC1T
6373     {
6374       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6375       emit_testimm(s1l,0x800000);
6376       emit_cmovne_reg(alt,addr);
6377     }
6378     if((source[i]&0x30000)==0x20000) // BC1FL
6379     {
6380       emit_testimm(s1l,0x800000);
6381       nottaken=(int)out;
6382       emit_jne(0);
6383     }
6384     if((source[i]&0x30000)==0x30000) // BC1TL
6385     {
6386       emit_testimm(s1l,0x800000);
6387       nottaken=(int)out;
6388       emit_jeq(0);
6389     }
6390   }
6391
6392   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6393   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6394   if(likely[i]||unconditional)
6395   {
6396     emit_movimm(ba[i],HOST_BTREG);
6397   }
6398   else if(addr!=HOST_BTREG)
6399   {
6400     emit_mov(addr,HOST_BTREG);
6401   }
6402   void *branch_addr=out;
6403   emit_jmp(0);
6404   int target_addr=start+i*4+5;
6405   void *stub=out;
6406   void *compiled_target_addr=check_addr(target_addr);
6407   emit_extjump_ds((int)branch_addr,target_addr);
6408   if(compiled_target_addr) {
6409     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6410     add_link(target_addr,stub);
6411   }
6412   else set_jump_target((int)branch_addr,(int)stub);
6413   if(likely[i]) {
6414     // Not-taken path
6415     set_jump_target((int)nottaken,(int)out);
6416     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6417     void *branch_addr=out;
6418     emit_jmp(0);
6419     int target_addr=start+i*4+8;
6420     void *stub=out;
6421     void *compiled_target_addr=check_addr(target_addr);
6422     emit_extjump_ds((int)branch_addr,target_addr);
6423     if(compiled_target_addr) {
6424       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6425       add_link(target_addr,stub);
6426     }
6427     else set_jump_target((int)branch_addr,(int)stub);
6428   }
6429 }
6430
6431 // Assemble the delay slot for the above
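// It is emitted as its own block entry: execute the single delay-slot
// instruction at 'start', then compare the saved branch target (BTREG)
// against start+4 and either continue there or dispatch through
// jump_vaddr_reg.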
6432 static void pagespan_ds()
6433 {
6434   assem_debug("initial delay slot:\n");
6435   u_int vaddr=start+1;
6436   u_int page=get_page(vaddr);
6437   u_int vpage=get_vpage(vaddr);
6438   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6439   do_dirty_stub_ds();
6440   ll_add(jump_in+page,vaddr,(void *)out);
6441   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6442   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6443     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6444   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6445     emit_writeword(HOST_BTREG,(int)&branch_target);
6446   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6447   address_generation(0,&regs[0],regs[0].regmap_entry);
6448   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6449     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6450   cop1_usable=0;
6451   is_delayslot=0;
6452   switch(itype[0]) {
6453     case ALU:
6454       alu_assemble(0,&regs[0]);break;
6455     case IMM16:
6456       imm16_assemble(0,&regs[0]);break;
6457     case SHIFT:
6458       shift_assemble(0,&regs[0]);break;
6459     case SHIFTIMM:
6460       shiftimm_assemble(0,&regs[0]);break;
6461     case LOAD:
6462       load_assemble(0,&regs[0]);break;
6463     case LOADLR:
6464       loadlr_assemble(0,&regs[0]);break;
6465     case STORE:
6466       store_assemble(0,&regs[0]);break;
6467     case STORELR:
6468       storelr_assemble(0,&regs[0]);break;
6469     case COP0:
6470       cop0_assemble(0,&regs[0]);break;
6471     case COP1:
6472       cop1_assemble(0,&regs[0]);break;
6473     case C1LS:
6474       c1ls_assemble(0,&regs[0]);break;
6475     case COP2:
6476       cop2_assemble(0,&regs[0]);break;
6477     case C2LS:
6478       c2ls_assemble(0,&regs[0]);break;
6479     case C2OP:
6480       c2op_assemble(0,&regs[0]);break;
6481     case FCONV:
6482       fconv_assemble(0,&regs[0]);break;
6483     case FLOAT:
6484       float_assemble(0,&regs[0]);break;
6485     case FCOMP:
6486       fcomp_assemble(0,&regs[0]);break;
6487     case MULTDIV:
6488       multdiv_assemble(0,&regs[0]);break;
6489     case MOV:
6490       mov_assemble(0,&regs[0]);break;
6491     case SYSCALL:
6492     case HLECALL:
6493     case INTCALL:
6494     case SPAN:
6495     case UJUMP:
6496     case RJUMP:
6497     case CJUMP:
6498     case SJUMP:
6499     case FJUMP:
6500       printf("Jump in the delay slot.  This is probably a bug.\n");
6501   }
6502   int btaddr=get_reg(regs[0].regmap,BTREG);
6503   if(btaddr<0) {
6504     btaddr=get_reg(regs[0].regmap,-1);
6505     emit_readword((int)&branch_target,btaddr);
6506   }
6507   assert(btaddr!=HOST_CCREG);
6508   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6509 #ifdef HOST_IMM8
6510   emit_movimm(start+4,HOST_TEMPREG);
6511   emit_cmp(btaddr,HOST_TEMPREG);
6512 #else
6513   emit_cmpimm(btaddr,start+4);
6514 #endif
6515   int branch=(int)out;
6516   emit_jeq(0);
6517   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6518   emit_jmp(jump_vaddr_reg[btaddr]);
6519   set_jump_target(branch,(int)out);
6520   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6521   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6522 }
6523
6524 // Basic liveness analysis for MIPS registers
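// unneeded_reg[i]/unneeded_reg_upper[i] are bitmasks over the MIPS registers
// (lower and upper 32 bits respectively); a set bit means the value is not
// needed after instruction i.  The scan walks backwards from iend to istart
// and follows backward branches up to three levels deep (see the recursion
// limit below).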
6525 void unneeded_registers(int istart,int iend,int r)
6526 {
6527   int i;
6528   uint64_t u,uu,b,bu;
6529   uint64_t temp_u,temp_uu;
6530   uint64_t tdep;
6531   if(iend==slen-1) {
6532     u=1;uu=1;
6533   }else{
6534     u=unneeded_reg[iend+1];
6535     uu=unneeded_reg_upper[iend+1];
6536     u=1;uu=1;
6537   }
6538   for (i=iend;i>=istart;i--)
6539   {
6540     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6541     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6542     {
6543       // If subroutine call, flag return address as a possible branch target
6544       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6545       
6546       if(ba[i]<start || ba[i]>=(start+slen*4))
6547       {
6548         // Branch out of this block, flush all regs
6549         u=1;
6550         uu=1;
6551         /* Hexagon hack 
6552         if(itype[i]==UJUMP&&rt1[i]==31)
6553         {
6554           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6555         }
6556         if(itype[i]==RJUMP&&rs1[i]==31)
6557         {
6558           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6559         }
6560         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6561           if(itype[i]==UJUMP&&rt1[i]==31)
6562           {
6563             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6564             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6565           }
6566           if(itype[i]==RJUMP&&rs1[i]==31)
6567           {
6568             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6569             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6570           }
6571         }*/
6572         branch_unneeded_reg[i]=u;
6573         branch_unneeded_reg_upper[i]=uu;
6574         // Merge in delay slot
6575         tdep=(~uu>>rt1[i+1])&1;
6576         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6577         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6578         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6579         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6580         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6581         u|=1;uu|=1;
6582         // If branch is "likely" (and conditional)
6583         // then we skip the delay slot on the fall-thru path
6584         if(likely[i]) {
6585           if(i<slen-1) {
6586             u&=unneeded_reg[i+2];
6587             uu&=unneeded_reg_upper[i+2];
6588           }
6589           else
6590           {
6591             u=1;
6592             uu=1;
6593           }
6594         }
6595       }
6596       else
6597       {
6598         // Internal branch, flag target
6599         bt[(ba[i]-start)>>2]=1;
6600         if(ba[i]<=start+i*4) {
6601           // Backward branch
6602           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6603           {
6604             // Unconditional branch
6605             temp_u=1;temp_uu=1;
6606           } else {
6607             // Conditional branch (not taken case)
6608             temp_u=unneeded_reg[i+2];
6609             temp_uu=unneeded_reg_upper[i+2];
6610           }
6611           // Merge in delay slot
6612           tdep=(~temp_uu>>rt1[i+1])&1;
6613           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6614           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6615           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6616           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6617           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6618           temp_u|=1;temp_uu|=1;
6619           // If branch is "likely" (and conditional)
6620           // then we skip the delay slot on the fall-thru path
6621           if(likely[i]) {
6622             if(i<slen-1) {
6623               temp_u&=unneeded_reg[i+2];
6624               temp_uu&=unneeded_reg_upper[i+2];
6625             }
6626             else
6627             {
6628               temp_u=1;
6629               temp_uu=1;
6630             }
6631           }
6632           tdep=(~temp_uu>>rt1[i])&1;
6633           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6634           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6635           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6636           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6637           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6638           temp_u|=1;temp_uu|=1;
6639           unneeded_reg[i]=temp_u;
6640           unneeded_reg_upper[i]=temp_uu;
6641           // Only go three levels deep.  This recursion can take an
6642           // excessive amount of time if there are a lot of nested loops.
6643           if(r<2) {
6644             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6645           }else{
6646             unneeded_reg[(ba[i]-start)>>2]=1;
6647             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6648           }
6649         } /*else*/ if(1) {
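          // Note the else is disabled: for backward branches the block below
          // still merges in the (already computed) liveness at the branch target.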
6650           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6651           {
6652             // Unconditional branch
6653             u=unneeded_reg[(ba[i]-start)>>2];
6654             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6655             branch_unneeded_reg[i]=u;
6656             branch_unneeded_reg_upper[i]=uu;
6657         //u=1;
6658         //uu=1;
6659         //branch_unneeded_reg[i]=u;
6660         //branch_unneeded_reg_upper[i]=uu;
6661             // Merge in delay slot
6662             tdep=(~uu>>rt1[i+1])&1;
6663             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6664             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6665             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6666             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6667             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6668             u|=1;uu|=1;
6669           } else {
6670             // Conditional branch
6671             b=unneeded_reg[(ba[i]-start)>>2];
6672             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6673             branch_unneeded_reg[i]=b;
6674             branch_unneeded_reg_upper[i]=bu;
6675         //b=1;
6676         //bu=1;
6677         //branch_unneeded_reg[i]=b;
6678         //branch_unneeded_reg_upper[i]=bu;
6679             // Branch delay slot
6680             tdep=(~uu>>rt1[i+1])&1;
6681             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6682             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6683             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6684             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6685             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6686             b|=1;bu|=1;
6687             // If branch is "likely" then we skip the
6688             // delay slot on the fall-thru path
6689             if(likely[i]) {
6690               u=b;
6691               uu=bu;
6692               if(i<slen-1) {
6693                 u&=unneeded_reg[i+2];
6694                 uu&=unneeded_reg_upper[i+2];
6695         //u=1;
6696         //uu=1;
6697               }
6698             } else {
6699               u&=b;
6700               uu&=bu;
6701         //u=1;
6702         //uu=1;
6703             }
6704             if(i<slen-1) {
6705               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6706               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6707         //branch_unneeded_reg[i]=1;
6708         //branch_unneeded_reg_upper[i]=1;
6709             } else {
6710               branch_unneeded_reg[i]=1;
6711               branch_unneeded_reg_upper[i]=1;
6712             }
6713           }
6714         }
6715       }
6716     }
6717     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6718     {
6719       // SYSCALL instruction (software interrupt)
6720       u=1;
6721       uu=1;
6722     }
6723     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6724     {
6725       // ERET instruction (return from interrupt)
6726       u=1;
6727       uu=1;
6728     }
6729     //u=uu=1; // DEBUG
6730     tdep=(~uu>>rt1[i])&1;
6731     // Written registers are unneeded
6732     u|=1LL<<rt1[i];
6733     u|=1LL<<rt2[i];
6734     uu|=1LL<<rt1[i];
6735     uu|=1LL<<rt2[i];
6736     // Accessed registers are needed
6737     u&=~(1LL<<rs1[i]);
6738     u&=~(1LL<<rs2[i]);
6739     uu&=~(1LL<<us1[i]);
6740     uu&=~(1LL<<us2[i]);
6741     // Source-target dependencies
6742     uu&=~(tdep<<dep1[i]);
6743     uu&=~(tdep<<dep2[i]);
6744     // R0 is always unneeded
6745     u|=1;uu|=1;
6746     // Save it
6747     unneeded_reg[i]=u;
6748     unneeded_reg_upper[i]=uu;
6749     /*
6750     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6751     printf("U:");
6752     int r;
6753     for(r=1;r<=CCREG;r++) {
6754       if((unneeded_reg[i]>>r)&1) {
6755         if(r==HIREG) printf(" HI");
6756         else if(r==LOREG) printf(" LO");
6757         else printf(" r%d",r);
6758       }
6759     }
6760     printf(" UU:");
6761     for(r=1;r<=CCREG;r++) {
6762       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6763         if(r==HIREG) printf(" HI");
6764         else if(r==LOREG) printf(" LO");
6765         else printf(" r%d",r);
6766       }
6767     }
6768     printf("\n");*/
6769   }
6770 #ifdef FORCE32
6771   for (i=iend;i>=istart;i--)
6772   {
6773     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6774   }
6775 #endif
6776 }
6777
6778 // Identify registers which are likely to contain 32-bit values
6779 // This is used to predict whether any branches will jump to a
6780 // location with 64-bit values in registers.
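// Forward pass: is32 holds a bitmask of registers provisionally believed to
// contain 32-bit (sign-extended) values after each instruction; the result is
// saved in p32[i] so that branch targets can be checked against it.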
6781 static void provisional_32bit()
6782 {
6783   int i,j;
6784   uint64_t is32=1;
6785   uint64_t lastbranch=1;
6786   
6787   for(i=0;i<slen;i++)
6788   {
6789     if(i>0) {
6790       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6791         if(i>1) is32=lastbranch;
6792         else is32=1;
6793       }
6794     }
6795     if(i>1)
6796     {
6797       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6798         if(likely[i-2]) {
6799           if(i>2) is32=lastbranch;
6800           else is32=1;
6801         }
6802       }
6803       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6804       {
6805         if(rs1[i-2]==0||rs2[i-2]==0)
6806         {
6807           if(rs1[i-2]) {
6808             is32|=1LL<<rs1[i-2];
6809           }
6810           if(rs2[i-2]) {
6811             is32|=1LL<<rs2[i-2];
6812           }
6813         }
6814       }
6815     }
6816     // If something jumps here with 64-bit values
6817     // then promote those registers to 64 bits
6818     if(bt[i])
6819     {
6820       uint64_t temp_is32=is32;
6821       for(j=i-1;j>=0;j--)
6822       {
6823         if(ba[j]==start+i*4) 
6824           //temp_is32&=branch_regs[j].is32;
6825           temp_is32&=p32[j];
6826       }
6827       for(j=i;j<slen;j++)
6828       {
6829         if(ba[j]==start+i*4) 
6830           temp_is32=1;
6831       }
6832       is32=temp_is32;
6833     }
6834     int type=itype[i];
6835     int op=opcode[i];
6836     int op2=opcode2[i];
6837     int rt=rt1[i];
6838     int s1=rs1[i];
6839     int s2=rs2[i];
6840     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6841       // Branches don't write registers, consider the delay slot instead.
6842       type=itype[i+1];
6843       op=opcode[i+1];
6844       op2=opcode2[i+1];
6845       rt=rt1[i+1];
6846       s1=rs1[i+1];
6847       s2=rs2[i+1];
6848       lastbranch=is32;
6849     }
6850     switch(type) {
6851       case LOAD:
6852         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6853            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6854           is32&=~(1LL<<rt);
6855         else
6856           is32|=1LL<<rt;
6857         break;
6858       case STORE:
6859       case STORELR:
6860         break;
6861       case LOADLR:
6862         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6863         if(op==0x22) is32|=1LL<<rt; // LWL
6864         break;
6865       case IMM16:
6866         if (op==0x08||op==0x09|| // ADDI/ADDIU
6867             op==0x0a||op==0x0b|| // SLTI/SLTIU
6868             op==0x0c|| // ANDI
6869             op==0x0f)  // LUI
6870         {
6871           is32|=1LL<<rt;
6872         }
6873         if(op==0x18||op==0x19) { // DADDI/DADDIU
6874           is32&=~(1LL<<rt);
6875           //if(imm[i]==0)
6876           //  is32|=((is32>>s1)&1LL)<<rt;
6877         }
6878         if(op==0x0d||op==0x0e) { // ORI/XORI
6879           uint64_t sr=((is32>>s1)&1LL);
6880           is32&=~(1LL<<rt);
6881           is32|=sr<<rt;
6882         }
6883         break;
6884       case UJUMP:
6885         break;
6886       case RJUMP:
6887         break;
6888       case CJUMP:
6889         break;
6890       case SJUMP:
6891         break;
6892       case FJUMP:
6893         break;
6894       case ALU:
6895         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6896           is32|=1LL<<rt;
6897         }
6898         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6899           is32|=1LL<<rt;
6900         }
6901         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6902           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6903           is32&=~(1LL<<rt);
6904           is32|=sr<<rt;
6905         }
6906         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6907           if(s1==0&&s2==0) {
6908             is32|=1LL<<rt;
6909           }
6910           else if(s2==0) {
6911             uint64_t sr=((is32>>s1)&1LL);
6912             is32&=~(1LL<<rt);
6913             is32|=sr<<rt;
6914           }
6915           else if(s1==0) {
6916             uint64_t sr=((is32>>s2)&1LL);
6917             is32&=~(1LL<<rt);
6918             is32|=sr<<rt;
6919           }
6920           else {
6921             is32&=~(1LL<<rt);
6922           }
6923         }
6924         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6925           if(s1==0&&s2==0) {
6926             is32|=1LL<<rt;
6927           }
6928           else if(s2==0) {
6929             uint64_t sr=((is32>>s1)&1LL);
6930             is32&=~(1LL<<rt);
6931             is32|=sr<<rt;
6932           }
6933           else {
6934             is32&=~(1LL<<rt);
6935           }
6936         }
6937         break;
6938       case MULTDIV:
6939         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6940           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
6941         }
6942         else {
6943           is32|=(1LL<<HIREG)|(1LL<<LOREG);
6944         }
6945         break;
6946       case MOV:
6947         {
6948           uint64_t sr=((is32>>s1)&1LL);
6949           is32&=~(1LL<<rt);
6950           is32|=sr<<rt;
6951         }
6952         break;
6953       case SHIFT:
6954         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
6955         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
6956         break;
6957       case SHIFTIMM:
6958         is32|=1LL<<rt;
6959         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
6960         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
6961         break;
6962       case COP0:
6963         if(op2==0) is32|=1LL<<rt; // MFC0
6964         break;
6965       case COP1:
6966       case COP2:
6967         if(op2==0) is32|=1LL<<rt; // MFC1
6968         if(op2==1) is32&=~(1LL<<rt); // DMFC1
6969         if(op2==2) is32|=1LL<<rt; // CFC1
6970         break;
6971       case C1LS:
6972       case C2LS:
6973         break;
6974       case FLOAT:
6975       case FCONV:
6976         break;
6977       case FCOMP:
6978         break;
6979       case C2OP:
6980       case SYSCALL:
6981       case HLECALL:
6982         break;
6983       default:
6984         break;
6985     }
6986     is32|=1;
6987     p32[i]=is32;
6988
6989     if(i>0)
6990     {
6991       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
6992       {
6993         if(rt1[i-1]==31) // JAL/JALR
6994         {
6995           // Subroutine call will return here, don't alloc any registers
6996           is32=1;
6997         }
6998         else if(i+1<slen)
6999         {
7000           // Internal branch will jump here, match registers to caller
7001           is32=0x3FFFFFFFFLL;
7002         }
7003       }
7004     }
7005   }
7006 }
7007
7008 // Identify registers which may be assumed to contain 32-bit values
7009 // and where optimizations will rely on this.
7010 // This is used to determine whether backward branches can safely
7011 // jump to a location with 64-bit values in registers.
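// Backward pass: r32 accumulates the registers that the following code relies
// on being 32-bit (cf. the commented-out requires_32bit); the provisional
// per-instruction result is stored in pr32[i].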
7012 static void provisional_r32()
7013 {
7014   u_int r32=0;
7015   int i;
7016   
7017   for (i=slen-1;i>=0;i--)
7018   {
7019     int hr;
7020     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7021     {
7022       if(ba[i]<start || ba[i]>=(start+slen*4))
7023       {
7024         // Branch out of this block, don't need anything
7025         r32=0;
7026       }
7027       else
7028       {
7029         // Internal branch
7030         // Need whatever matches the target
7031         // (and doesn't get overwritten by the delay slot instruction)
7032         r32=0;
7033         int t=(ba[i]-start)>>2;
7034         if(ba[i]>start+i*4) {
7035           // Forward branch
7036           //if(!(requires_32bit[t]&~regs[i].was32))
7037           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7038           if(!(pr32[t]&~regs[i].was32))
7039             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7040         }else{
7041           // Backward branch
7042           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7043             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7044         }
7045       }
7046       // Conditional branch may need registers for following instructions
7047       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7048       {
7049         if(i<slen-2) {
7050           //r32|=requires_32bit[i+2];
7051           r32|=pr32[i+2];
7052           r32&=regs[i].was32;
7053           // Mark this address as a branch target since it may be called
7054           // upon return from interrupt
7055           //bt[i+2]=1;
7056         }
7057       }
7058       // Merge in delay slot
7059       if(!likely[i]) {
7060         // These are overwritten unless the branch is "likely"
7061         // and the delay slot is nullified if not taken
7062         r32&=~(1LL<<rt1[i+1]);
7063         r32&=~(1LL<<rt2[i+1]);
7064       }
7065       // Assume these are needed (delay slot)
7066       if(us1[i+1]>0)
7067       {
7068         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7069       }
7070       if(us2[i+1]>0)
7071       {
7072         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7073       }
7074       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7075       {
7076         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7077       }
7078       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7079       {
7080         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7081       }
7082     }
7083     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7084     {
7085       // SYSCALL instruction (software interrupt)
7086       r32=0;
7087     }
7088     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7089     {
7090       // ERET instruction (return from interrupt)
7091       r32=0;
7092     }
7093     // Check 32 bits
7094     r32&=~(1LL<<rt1[i]);
7095     r32&=~(1LL<<rt2[i]);
7096     if(us1[i]>0)
7097     {
7098       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7099     }
7100     if(us2[i]>0)
7101     {
7102       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7103     }
7104     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7105     {
7106       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7107     }
7108     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7109     {
7110       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7111     }
7112     //requires_32bit[i]=r32;
7113     pr32[i]=r32;
7114     
7115     // Dirty registers which are 32-bit require 32-bit input,
7116     // as they will be written back as 32-bit values
7117     for(hr=0;hr<HOST_REGS;hr++)
7118     {
7119       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7120         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7121           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7122           pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7123           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7124         }
7125       }
7126     }
7127   }
7128 }
7129
7130 // Write back dirty registers as soon as we will no longer modify them,
7131 // so that we don't end up with lots of writes at the branches.
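// Computes, scanning backwards, which host registers are still going to be
// written before the block ends.  regs[i].dirty is then set for registers that
// will certainly be dirtied again (no point writing them back yet) and cleared
// for registers that will not be touched again, so the write-back happens at
// the earliest point instead of piling up at the branches.  regmap values are
// MIPS register numbers (32=HI, 33=LO, >33 = non-GPR temporaries); wr selects
// whether the computed masks are actually applied to regs[]/branch_regs[].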
7132 void clean_registers(int istart,int iend,int wr)
7133 {
7134   int i;
7135   int r;
7136   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7137   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7138   if(iend==slen-1) {
7139     will_dirty_i=will_dirty_next=0;
7140     wont_dirty_i=wont_dirty_next=0;
7141   }else{
7142     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7143     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7144   }
7145   for (i=iend;i>=istart;i--)
7146   {
7147     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7148     {
7149       if(ba[i]<start || ba[i]>=(start+slen*4))
7150       {
7151         // Branch out of this block, flush all regs
7152         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7153         {
7154           // Unconditional branch
7155           will_dirty_i=0;
7156           wont_dirty_i=0;
7157           // Merge in delay slot (will dirty)
7158           for(r=0;r<HOST_REGS;r++) {
7159             if(r!=EXCLUDE_REG) {
7160               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7161               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7162               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7163               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7164               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7165               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7166               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7167               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7168               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7169               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7170               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7171               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7172               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7173               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7174             }
7175           }
7176         }
7177         else
7178         {
7179           // Conditional branch
7180           will_dirty_i=0;
7181           wont_dirty_i=wont_dirty_next;
7182           // Merge in delay slot (will dirty)
7183           for(r=0;r<HOST_REGS;r++) {
7184             if(r!=EXCLUDE_REG) {
7185               if(!likely[i]) {
7186                 // Might not dirty if likely branch is not taken
7187                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7188                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7189                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7190                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7191                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7192                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7193                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7194                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7195                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7196                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7197                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7198                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7199                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7200                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7201               }
7202             }
7203           }
7204         }
7205             // Merge in delay slot (won't dirty)
7206         for(r=0;r<HOST_REGS;r++) {
7207           if(r!=EXCLUDE_REG) {
7208             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7209             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7210             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7211             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7212             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7213             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7214             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7215             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7216             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7217             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7218           }
7219         }
7220         if(wr) {
7221           #ifndef DESTRUCTIVE_WRITEBACK
7222           branch_regs[i].dirty&=wont_dirty_i;
7223           #endif
7224           branch_regs[i].dirty|=will_dirty_i;
7225         }
7226       }
7227       else
7228       {
7229         // Internal branch
7230         if(ba[i]<=start+i*4) {
7231           // Backward branch
7232           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7233           {
7234             // Unconditional branch
7235             temp_will_dirty=0;
7236             temp_wont_dirty=0;
7237             // Merge in delay slot (will dirty)
7238             for(r=0;r<HOST_REGS;r++) {
7239               if(r!=EXCLUDE_REG) {
7240                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7241                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7242                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7243                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7244                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7245                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7246                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7247                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7248                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7249                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7250                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7251                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7252                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7253                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7254               }
7255             }
7256           } else {
7257             // Conditional branch (not taken case)
7258             temp_will_dirty=will_dirty_next;
7259             temp_wont_dirty=wont_dirty_next;
7260             // Merge in delay slot (will dirty)
7261             for(r=0;r<HOST_REGS;r++) {
7262               if(r!=EXCLUDE_REG) {
7263                 if(!likely[i]) {
7264                   // Will not dirty if likely branch is not taken
7265                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7266                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7267                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7268                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7269                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7270                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7271                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7272                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7273                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7274                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7275                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7276                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7277                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7278                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7279                 }
7280               }
7281             }
7282           }
7283           // Merge in delay slot (won't dirty)
7284           for(r=0;r<HOST_REGS;r++) {
7285             if(r!=EXCLUDE_REG) {
7286               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7287               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7288               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7289               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7290               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7291               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7292               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7293               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7294               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7295               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7296             }
7297           }
7298           // Deal with changed mappings
7299           if(i<iend) {
7300             for(r=0;r<HOST_REGS;r++) {
7301               if(r!=EXCLUDE_REG) {
7302                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7303                   temp_will_dirty&=~(1<<r);
7304                   temp_wont_dirty&=~(1<<r);
7305                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7306                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7307                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7308                   } else {
7309                     temp_will_dirty|=1<<r;
7310                     temp_wont_dirty|=1<<r;
7311                   }
7312                 }
7313               }
7314             }
7315           }
7316           if(wr) {
7317             will_dirty[i]=temp_will_dirty;
7318             wont_dirty[i]=temp_wont_dirty;
7319             clean_registers((ba[i]-start)>>2,i-1,0);
7320           }else{
7321             // Limit recursion.  It can take an excessive amount
7322             // of time if there are a lot of nested loops.
7323             will_dirty[(ba[i]-start)>>2]=0;
7324             wont_dirty[(ba[i]-start)>>2]=-1;
7325           }
7326         }
7327         /*else*/ if(1)
7328         {
7329           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7330           {
7331             // Unconditional branch
7332             will_dirty_i=0;
7333             wont_dirty_i=0;
7334           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7335             for(r=0;r<HOST_REGS;r++) {
7336               if(r!=EXCLUDE_REG) {
7337                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7338                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7339                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7340                 }
7341               }
7342             }
7343           //}
7344             // Merge in delay slot
7345             for(r=0;r<HOST_REGS;r++) {
7346               if(r!=EXCLUDE_REG) {
7347                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7348                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7349                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7350                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7351                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7352                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7353                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7354                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7355                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7356                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7357                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7358                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7359                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7360                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7361               }
7362             }
7363           } else {
7364             // Conditional branch
7365             will_dirty_i=will_dirty_next;
7366             wont_dirty_i=wont_dirty_next;
7367           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7368             for(r=0;r<HOST_REGS;r++) {
7369               if(r!=EXCLUDE_REG) {
7370                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7371                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7372                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7373                 }
7374                 else
7375                 {
7376                   will_dirty_i&=~(1<<r);
7377                 }
7378                 // Treat delay slot as part of branch too
7379                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7380                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7381                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7382                 }
7383                 else
7384                 {
7385                   will_dirty[i+1]&=~(1<<r);
7386                 }*/
7387               }
7388             }
7389           //}
7390             // Merge in delay slot
7391             for(r=0;r<HOST_REGS;r++) {
7392               if(r!=EXCLUDE_REG) {
7393                 if(!likely[i]) {
7394                   // Might not dirty if likely branch is not taken
7395                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7396                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7397                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7398                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7399                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7400                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7401                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7402                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7403                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7404                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7405                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7406                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7407                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7408                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7409                 }
7410               }
7411             }
7412           }
7413           // Merge in delay slot
7414           for(r=0;r<HOST_REGS;r++) {
7415             if(r!=EXCLUDE_REG) {
7416               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7417               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7418               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7419               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7420               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7421               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7422               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7423               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7424               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7425               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7426             }
7427           }
7428           if(wr) {
7429             #ifndef DESTRUCTIVE_WRITEBACK
7430             branch_regs[i].dirty&=wont_dirty_i;
7431             #endif
7432             branch_regs[i].dirty|=will_dirty_i;
7433           }
7434         }
7435       }
7436     }
7437     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7438     {
7439       // SYSCALL instruction (software interrupt)
7440       will_dirty_i=0;
7441       wont_dirty_i=0;
7442     }
7443     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7444     {
7445       // ERET instruction (return from interrupt)
7446       will_dirty_i=0;
7447       wont_dirty_i=0;
7448     }
7449     will_dirty_next=will_dirty_i;
7450     wont_dirty_next=wont_dirty_i;
7451     for(r=0;r<HOST_REGS;r++) {
7452       if(r!=EXCLUDE_REG) {
7453         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7454         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7455         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7456         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7457         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7458         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7459         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7460         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7461         if(i>istart) {
7462           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7463           {
7464             // Don't store a register immediately after writing it,
7465             // as doing so may prevent dual-issue.
7466             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7467             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7468           }
7469         }
7470       }
7471     }
7472     // Save it
7473     will_dirty[i]=will_dirty_i;
7474     wont_dirty[i]=wont_dirty_i;
7475     // Mark registers that won't be dirtied as not dirty
7476     if(wr) {
7477       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7478       for(r=0;r<HOST_REGS;r++) {
7479         if((will_dirty_i>>r)&1) {
7480           printf(" r%d",r);
7481         }
7482       }
7483       printf("\n");*/
7484
7485       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7486         regs[i].dirty|=will_dirty_i;
7487         #ifndef DESTRUCTIVE_WRITEBACK
7488         regs[i].dirty&=wont_dirty_i;
7489         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7490         {
7491           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7492             for(r=0;r<HOST_REGS;r++) {
7493               if(r!=EXCLUDE_REG) {
7494                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7495                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7496                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7497               }
7498             }
7499           }
7500         }
7501         else
7502         {
7503           if(i<iend) {
7504             for(r=0;r<HOST_REGS;r++) {
7505               if(r!=EXCLUDE_REG) {
7506                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7507                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7508                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7509               }
7510             }
7511           }
7512         }
7513         #endif
7514       //}
7515     }
7516     // Deal with changed mappings
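    // If the host register holds a different MIPS register than it did at the
    // start of this instruction, the dirty bits must follow the MIPS register
    // to its new home (or fall back on the unneeded_reg information).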
7517     temp_will_dirty=will_dirty_i;
7518     temp_wont_dirty=wont_dirty_i;
7519     for(r=0;r<HOST_REGS;r++) {
7520       if(r!=EXCLUDE_REG) {
7521         int nr;
7522         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7523           if(wr) {
7524             #ifndef DESTRUCTIVE_WRITEBACK
7525             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7526             #endif
7527             regs[i].wasdirty|=will_dirty_i&(1<<r);
7528           }
7529         }
7530         else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7531           // Register moved to a different register
7532           will_dirty_i&=~(1<<r);
7533           wont_dirty_i&=~(1<<r);
7534           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7535           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7536           if(wr) {
7537             #ifndef DESTRUCTIVE_WRITEBACK
7538             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7539             #endif
7540             regs[i].wasdirty|=will_dirty_i&(1<<r);
7541           }
7542         }
7543         else {
7544           will_dirty_i&=~(1<<r);
7545           wont_dirty_i&=~(1<<r);
7546           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7547             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7548             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7549           } else {
7550             wont_dirty_i|=1<<r;
7551             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7552           }
7553         }
7554       }
7555     }
7556   }
7557 }
7558
7559   /* disassembly */
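// Prints a debug listing of decoded instruction i; a leading '*' marks
// instructions that are known branch targets (bt[i]).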
7560 void disassemble_inst(int i)
7561 {
7562     if (bt[i]) printf("*"); else printf(" ");
7563     switch(itype[i]) {
7564       case UJUMP:
7565         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7566       case CJUMP:
7567         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7568       case SJUMP:
7569         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7570       case FJUMP:
7571         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7572       case RJUMP:
7573         if (opcode[i]==0x9&&rt1[i]!=31)
7574           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7575         else
7576           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7577         break;
7578       case SPAN:
7579         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7580       case IMM16:
7581         if(opcode[i]==0xf) //LUI
7582           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7583         else
7584           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7585         break;
7586       case LOAD:
7587       case LOADLR:
7588         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7589         break;
7590       case STORE:
7591       case STORELR:
7592         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7593         break;
7594       case ALU:
7595       case SHIFT:
7596         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7597         break;
7598       case MULTDIV:
7599         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7600         break;
7601       case SHIFTIMM:
7602         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7603         break;
7604       case MOV:
7605         if((opcode2[i]&0x1d)==0x10)
7606           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7607         else if((opcode2[i]&0x1d)==0x11)
7608           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7609         else
7610           printf (" %x: %s\n",start+i*4,insn[i]);
7611         break;
7612       case COP0:
7613         if(opcode2[i]==0)
7614           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7615         else if(opcode2[i]==4)
7616           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7617         else printf (" %x: %s\n",start+i*4,insn[i]);
7618         break;
7619       case COP1:
7620         if(opcode2[i]<3)
7621           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7622         else if(opcode2[i]>3)
7623           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7624         else printf (" %x: %s\n",start+i*4,insn[i]);
7625         break;
7626       case COP2:
7627         if(opcode2[i]<3)
7628           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7629         else if(opcode2[i]>3)
7630           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7631         else printf (" %x: %s\n",start+i*4,insn[i]);
7632         break;
7633       case C1LS:
7634         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7635         break;
7636       case C2LS:
7637         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7638         break;
7639       case INTCALL:
7640         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7641         break;
7642       default:
7643         //printf (" %s %8x\n",insn[i],source[i]);
7644         printf (" %x: %s\n",start+i*4,insn[i]);
7645     }
7646 }
7647
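// One-time initialization: maps the translation cache at BASE_ADDR with
// read/write/execute permissions, clears the hash tables and the mini hash
// table, and sets up the flat memory_map used for address translation.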
7648 void new_dynarec_init()
7649 {
7650   printf("Init new dynarec\n");
7651   out=(u_char *)BASE_ADDR;
7652   if (mmap (out, 1<<TARGET_SIZE_2,
7653             PROT_READ | PROT_WRITE | PROT_EXEC,
7654             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7655             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7656 #ifdef MUPEN64
7657   rdword=&readmem_dword;
7658   fake_pc.f.r.rs=&readmem_dword;
7659   fake_pc.f.r.rt=&readmem_dword;
7660   fake_pc.f.r.rd=&readmem_dword;
7661 #endif
7662   int n;
7663   for(n=0x80000;n<0x80800;n++)
7664     invalid_code[n]=1;
7665   for(n=0;n<65536;n++)
7666     hash_table[n][0]=hash_table[n][2]=-1;
7667   memset(mini_ht,-1,sizeof(mini_ht));
7668   memset(restore_candidate,0,sizeof(restore_candidate));
7669   copy=shadow;
7670   expirep=16384; // Expiry pointer, +2 blocks
7671   pending_exception=0;
7672   literalcount=0;
7673 #ifdef HOST_IMM8
7674   // Copy this into local area so we don't have to put it in every literal pool
7675   invc_ptr=invalid_code;
7676 #endif
7677   stop_after_jal=0;
7678   // TLB
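  // memory_map has one entry per 4KB virtual page: -1 means unmapped,
  // otherwise the value is (host_base - virtual_base)>>2, so the source
  // pointer can be formed as start+(memory_map[start>>12]<<2).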
7679   using_tlb=0;
7680   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7681     memory_map[n]=-1;
7682   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7683     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7684   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7685     memory_map[n]=-1;
7686 #ifdef MUPEN64
7687   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7688     writemem[n] = write_nomem_new;
7689     writememb[n] = write_nomemb_new;
7690     writememh[n] = write_nomemh_new;
7691 #ifndef FORCE32
7692     writememd[n] = write_nomemd_new;
7693 #endif
7694     readmem[n] = read_nomem_new;
7695     readmemb[n] = read_nomemb_new;
7696     readmemh[n] = read_nomemh_new;
7697 #ifndef FORCE32
7698     readmemd[n] = read_nomemd_new;
7699 #endif
7700   }
7701   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7702     writemem[n] = write_rdram_new;
7703     writememb[n] = write_rdramb_new;
7704     writememh[n] = write_rdramh_new;
7705 #ifndef FORCE32
7706     writememd[n] = write_rdramd_new;
7707 #endif
7708   }
7709   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7710     writemem[n] = write_nomem_new;
7711     writememb[n] = write_nomemb_new;
7712     writememh[n] = write_nomemh_new;
7713 #ifndef FORCE32
7714     writememd[n] = write_nomemd_new;
7715 #endif
7716     readmem[n] = read_nomem_new;
7717     readmemb[n] = read_nomemb_new;
7718     readmemh[n] = read_nomemh_new;
7719 #ifndef FORCE32
7720     readmemd[n] = read_nomemd_new;
7721 #endif
7722   }
7723 #endif
7724   tlb_hacks();
7725   arch_init();
7726 }
7727
7728 void new_dynarec_cleanup()
7729 {
7730   int n;
7731   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7732   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7733   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7734   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7735   #ifdef ROM_COPY
7736   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7737   #endif
7738 }
7739
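// Main entry point of the recompiler: translates the MIPS code block starting
// at addr into host code at 'out', after working out the source pointer and
// pagelimit for the address range.  Returns -1 if the address is unmapped so
// the caller can raise an exception.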
7740 int new_recompile_block(int addr)
7741 {
7742 /*
7743   if(addr==0x800cd050) {
7744     int block;
7745     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7746     int n;
7747     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7748   }
7749 */
7750   //if(Count==365117028) tracedebug=1;
7751   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7752   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7753   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7754   //if(debug) 
7755   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7756   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7757   /*if(Count>=312978186) {
7758     rlist();
7759   }*/
7760   //rlist();
7761   start = (u_int)addr&~3;
7762   //assert(((u_int)addr&1)==0);
7763 #ifdef PCSX
7764   if (Config.HLE && start == 0x80001000) // hlecall
7765   {
7766     // XXX: is this enough? Maybe check hleSoftCall?
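    // Emit a tiny stub that stores the target PC in pcaddr and exits through
    // new_dyna_leave, so the HLE BIOS call is handled outside the translation
    // cache.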
7767     u_int beginning=(u_int)out;
7768     u_int page=get_page(start);
7769     invalid_code[start>>12]=0;
7770     emit_movimm(start,0);
7771     emit_writeword(0,(int)&pcaddr);
7772     emit_jmp((int)new_dyna_leave);
7773 #ifdef __arm__
7774     __clear_cache((void *)beginning,out);
7775 #endif
7776     ll_add(jump_in+page,start,(void *)beginning);
7777     return 0;
7778   }
7779   else if ((u_int)addr < 0x00200000 ||
7780     (0xa0000000 <= addr && addr < 0xa0200000)) {
7781     // used for BIOS calls mostly?
7782     source = (u_int *)((u_int)rdram+(start&0x1fffff));
7783     pagelimit = (addr&0xa0000000)|0x00200000;
7784   }
7785   else if (!Config.HLE && (
7786 /*    (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7787     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7788     // BIOS
7789     source = (u_int *)((u_int)psxR+(start&0x7ffff));
7790     pagelimit = (addr&0xfff00000)|0x80000;
7791   }
7792   else
7793 #endif
7794 #ifdef MUPEN64
7795   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7796     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7797     pagelimit = 0xa4001000;
7798   }
7799   else
7800 #endif
7801   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
7802     source = (u_int *)((u_int)rdram+start-0x80000000);
7803     pagelimit = 0x80000000+RAM_SIZE;
7804   }
7805 #ifndef DISABLE_TLB
7806   else if ((signed int)addr >= (signed int)0xC0000000) {
7807     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7808     //if(tlb_LUT_r[start>>12])
7809       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7810     if((signed int)memory_map[start>>12]>=0) {
7811       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7812       pagelimit=(start+4096)&0xFFFFF000;
7813       int map=memory_map[start>>12];
7814       int i;
7815       for(i=0;i<5;i++) {
7816         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7817         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7818       }
7819       assem_debug("pagelimit=%x\n",pagelimit);
7820       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7821     }
7822     else {
7823       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7824       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
7825       return -1; // Caller will invoke exception handler
7826     }
7827     //printf("source= %x\n",(int)source);
7828   }
7829 #endif
7830   else {
7831     printf("Compile at bogus memory address: %x \n", (int)addr);
7832     exit(1);
7833   }
7834
7835   /* Pass 1: disassemble */
7836   /* Pass 2: register dependencies, branch targets */
7837   /* Pass 3: register allocation */
7838   /* Pass 4: branch dependencies */
7839   /* Pass 5: pre-alloc */
7840   /* Pass 6: optimize clean/dirty state */
7841   /* Pass 7: flag 32-bit registers */
7842   /* Pass 8: assembly */
7843   /* Pass 9: linker */
7844   /* Pass 10: garbage collection / free memory */
7845
7846   int i,j;
7847   int done=0;
7848   unsigned int type,op,op2;
7849
7850   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7851   
7852   /* Pass 1 disassembly */
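  // Decode one 32-bit MIPS word per iteration: the primary opcode comes from
  // bits 31..26, op2 holds the relevant sub-opcode field, and 'type' selects
  // the instruction category recorded in itype[] for the later passes.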
7853
7854   for(i=0;!done;i++) {
7855     bt[i]=0;likely[i]=0;op2=0;
7856     opcode[i]=op=source[i]>>26;
7857     switch(op)
7858     {
7859       case 0x00: strcpy(insn[i],"special"); type=NI;
7860         op2=source[i]&0x3f;
7861         switch(op2)
7862         {
7863           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7864           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7865           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7866           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7867           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7868           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7869           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7870           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7871           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7872           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7873           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7874           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7875           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7876           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7877           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7878           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7879           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7880           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7881           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7882           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7883           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7884           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7885           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7886           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7887           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7888           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7889           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7890           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7891           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7892           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7893           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7894           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7895           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7896           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7897           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7898           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7899           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7900           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7901           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7902           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7903           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7904           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7905           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7906           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7907           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7908           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7909           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7910           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7911           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7912           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7913           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7914           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7915         }
7916         break;
7917       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7918         op2=(source[i]>>16)&0x1f;
7919         switch(op2)
7920         {
7921           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7922           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7923           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7924           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7925           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7926           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7927           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7928           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7929           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7930           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7931           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7932           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7933           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7934           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7935         }
7936         break;
7937       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7938       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7939       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7940       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7941       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7942       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7943       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7944       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7945       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7946       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7947       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7948       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7949       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7950       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7951       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7952         op2=(source[i]>>21)&0x1f;
7953         switch(op2)
7954         {
7955           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7956           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7957           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7958           switch(source[i]&0x3f)
7959           {
7960             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7961             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7962             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7963             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7964 #ifdef PCSX
7965             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7966 #else
7967             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7968 #endif
7969           }
7970         }
7971         break;
7972       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7973         op2=(source[i]>>21)&0x1f;
7974         switch(op2)
7975         {
7976           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7977           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7978           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7979           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7980           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7981           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7982           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7983           switch((source[i]>>16)&0x3)
7984           {
7985             case 0x00: strcpy(insn[i],"BC1F"); break;
7986             case 0x01: strcpy(insn[i],"BC1T"); break;
7987             case 0x02: strcpy(insn[i],"BC1FL"); break;
7988             case 0x03: strcpy(insn[i],"BC1TL"); break;
7989           }
7990           break;
7991           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7992           switch(source[i]&0x3f)
7993           {
7994             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7995             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7996             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7997             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7998             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7999             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8000             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8001             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8002             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8003             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8004             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8005             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8006             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8007             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8008             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8009             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8010             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8011             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8012             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8013             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8014             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8015             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8016             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8017             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8018             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8019             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8020             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8021             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8022             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8023             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8024             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8025             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8026             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8027             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8028             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8029           }
8030           break;
8031           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8032           switch(source[i]&0x3f)
8033           {
8034             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8035             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8036             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8037             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8038             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8039             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8040             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8041             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8042             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8043             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8044             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8045             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8046             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8047             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8048             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8049             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8050             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8051             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8052             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8053             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8054             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8055             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8056             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8057             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8058             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8059             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8060             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8061             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8062             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8063             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8064             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8065             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8066             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8067             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8068             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8069           }
8070           break;
8071           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8072           switch(source[i]&0x3f)
8073           {
8074             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8075             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8076           }
8077           break;
8078           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8079           switch(source[i]&0x3f)
8080           {
8081             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8082             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8083           }
8084           break;
8085         }
8086         break;
8087       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8088       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8089       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8090       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8091 #ifndef FORCE32
8092       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8093       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8094       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8095       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8096 #endif
8097       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8098       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8099       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8100       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8101       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8102       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8103       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8104       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8105       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8106       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8107       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8108       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8109 #ifndef FORCE32
8110       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8111       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8112 #endif
8113       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8114       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8115       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8116       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8117 #ifndef FORCE32
8118       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8119       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8120       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8121 #endif
8122       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8123       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8124 #ifndef FORCE32
8125       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8126       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8127       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8128 #endif
8129 #ifdef PCSX
8130       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8131         // note: COP MIPS-1 encoding differs from MIPS32
8132         op2=(source[i]>>21)&0x1f;
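        // A non-zero funct field selects a GTE command (dispatched through
        // gte_handlers as C2OP); funct==0 means a plain register move
        // (MFC2/CFC2/MTC2/CTC2) selected by the rs field decoded below.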
8133         if (source[i]&0x3f) {
8134           if (gte_handlers[source[i]&0x3f]!=NULL) {
8135             snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8136             type=C2OP;
8137           }
8138         }
8139         else switch(op2)
8140         {
8141           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8142           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8143           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8144           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8145         }
8146         break;
8147       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8148       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8149       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8150 #endif
8151       default: strcpy(insn[i],"???"); type=NI;
8152         printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8153         break;
8154     }
8155 #ifdef PCSX
8156     /* detect branch in delay slot early */
8157     if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8158       opcode[i+1]=source[i+1]>>26;
8159       opcode2[i+1]=source[i+1]&0x3f;
8160       if((0<opcode[i+1]&&opcode[i+1]<8)||(opcode[i+1]==0&&(opcode2[i+1]==8||opcode2[i+1]==9))) {
8161         printf("branch in delay slot @%08x (%08x)\n", addr + i*4+4, addr);
8162         // don't handle the first branch; call the interpreter if it's hit
8163         type=INTCALL;
8164       }
8165     }
8166 #endif
8167     itype[i]=type;
8168     opcode2[i]=op2;
8169     /* Get registers/immediates */
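    // rs1/rs2 are the guest registers an instruction reads and rt1/rt2 the
    // ones it writes.  us1/us2 mark sources whose upper 32 bits are read
    // (64-bit operations), while dep1/dep2 mark sources whose upper half the
    // result's upper half depends on; both feed the unneeded_reg_upper analysis.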
8170     lt1[i]=0;
8171     us1[i]=0;
8172     us2[i]=0;
8173     dep1[i]=0;
8174     dep2[i]=0;
8175     switch(type) {
8176       case LOAD:
8177         rs1[i]=(source[i]>>21)&0x1f;
8178         rs2[i]=0;
8179         rt1[i]=(source[i]>>16)&0x1f;
8180         rt2[i]=0;
8181         imm[i]=(short)source[i];
8182         break;
8183       case STORE:
8184       case STORELR:
8185         rs1[i]=(source[i]>>21)&0x1f;
8186         rs2[i]=(source[i]>>16)&0x1f;
8187         rt1[i]=0;
8188         rt2[i]=0;
8189         imm[i]=(short)source[i];
8190         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8191         break;
8192       case LOADLR:
8193         // LWL/LWR only load part of the register,
8194         // therefore the target register must be treated as a source too
8195         rs1[i]=(source[i]>>21)&0x1f;
8196         rs2[i]=(source[i]>>16)&0x1f;
8197         rt1[i]=(source[i]>>16)&0x1f;
8198         rt2[i]=0;
8199         imm[i]=(short)source[i];
8200         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8201         if(op==0x26) dep1[i]=rt1[i]; // LWR
8202         break;
8203       case IMM16:
8204         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8205         else rs1[i]=(source[i]>>21)&0x1f;
8206         rs2[i]=0;
8207         rt1[i]=(source[i]>>16)&0x1f;
8208         rt2[i]=0;
8209         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8210           imm[i]=(unsigned short)source[i];
8211         }else{
8212           imm[i]=(short)source[i];
8213         }
8214         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8215         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8216         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8217         break;
8218       case UJUMP:
8219         rs1[i]=0;
8220         rs2[i]=0;
8221         rt1[i]=0;
8222         rt2[i]=0;
8223         // The JAL instruction writes to r31.
8224         if (op&1) {
8225           rt1[i]=31;
8226         }
8227         rs2[i]=CCREG;
8228         break;
8229       case RJUMP:
8230         rs1[i]=(source[i]>>21)&0x1f;
8231         rs2[i]=0;
8232         rt1[i]=0;
8233         rt2[i]=0;
8234         // The JALR instruction writes to rd.
8235         if (op2&1) {
8236           rt1[i]=(source[i]>>11)&0x1f;
8237         }
8238         rs2[i]=CCREG;
8239         break;
8240       case CJUMP:
8241         rs1[i]=(source[i]>>21)&0x1f;
8242         rs2[i]=(source[i]>>16)&0x1f;
8243         rt1[i]=0;
8244         rt2[i]=0;
8245         if(op&2) { // BGTZ/BLEZ
8246           rs2[i]=0;
8247         }
8248         us1[i]=rs1[i];
8249         us2[i]=rs2[i];
8250         likely[i]=op>>4;
8251         break;
8252       case SJUMP:
8253         rs1[i]=(source[i]>>21)&0x1f;
8254         rs2[i]=CCREG;
8255         rt1[i]=0;
8256         rt2[i]=0;
8257         us1[i]=rs1[i];
8258         if(op2&0x10) { // BxxAL
8259           rt1[i]=31;
8260           // NOTE: If the branch is not taken, r31 is still overwritten
8261         }
8262         likely[i]=(op2&2)>>1;
8263         break;
8264       case FJUMP:
8265         rs1[i]=FSREG;
8266         rs2[i]=CSREG;
8267         rt1[i]=0;
8268         rt2[i]=0;
8269         likely[i]=((source[i])>>17)&1;
8270         break;
8271       case ALU:
8272         rs1[i]=(source[i]>>21)&0x1f; // source
8273         rs2[i]=(source[i]>>16)&0x1f; // second source (rt); subtrahend for SUB
8274         rt1[i]=(source[i]>>11)&0x1f; // destination
8275         rt2[i]=0;
8276         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8277           us1[i]=rs1[i];us2[i]=rs2[i];
8278         }
8279         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8280           dep1[i]=rs1[i];dep2[i]=rs2[i];
8281         }
8282         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8283           dep1[i]=rs1[i];dep2[i]=rs2[i];
8284         }
8285         break;
8286       case MULTDIV:
8287         rs1[i]=(source[i]>>21)&0x1f; // source
8288         rs2[i]=(source[i]>>16)&0x1f; // second source (rt): divisor or multiplier
8289         rt1[i]=HIREG;
8290         rt2[i]=LOREG;
8291         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8292           us1[i]=rs1[i];us2[i]=rs2[i];
8293         }
8294         break;
8295       case MOV:
8296         rs1[i]=0;
8297         rs2[i]=0;
8298         rt1[i]=0;
8299         rt2[i]=0;
8300         if(op2==0x10) rs1[i]=HIREG; // MFHI
8301         if(op2==0x11) rt1[i]=HIREG; // MTHI
8302         if(op2==0x12) rs1[i]=LOREG; // MFLO
8303         if(op2==0x13) rt1[i]=LOREG; // MTLO
8304         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8305         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8306         dep1[i]=rs1[i];
8307         break;
8308       case SHIFT:
8309         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8310         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8311         rt1[i]=(source[i]>>11)&0x1f; // destination
8312         rt2[i]=0;
8313         // DSLLV/DSRLV/DSRAV are 64-bit
8314         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8315         break;
8316       case SHIFTIMM:
8317         rs1[i]=(source[i]>>16)&0x1f;
8318         rs2[i]=0;
8319         rt1[i]=(source[i]>>11)&0x1f;
8320         rt2[i]=0;
8321         imm[i]=(source[i]>>6)&0x1f;
8322         // DSxx32 instructions shift by (sa+32); fold the extra 32 into the immediate
8323         if(op2>=0x3c) imm[i]|=0x20;
8324         // DSLL/DSRL/DSRA/DSRA32/DSRL32 (but not DSLL32) require a 64-bit source
8325         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8326         break;
8327       case COP0:
8328         rs1[i]=0;
8329         rs2[i]=0;
8330         rt1[i]=0;
8331         rt2[i]=0;
8332         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8333         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8334         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8335         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8336         break;
8337       case COP1:
8338       case COP2:
8339         rs1[i]=0;
8340         rs2[i]=0;
8341         rt1[i]=0;
8342         rt2[i]=0;
8343         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8344         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8345         if(op2==5) us1[i]=rs1[i]; // DMTC1
8346         rs2[i]=CSREG;
8347         break;
8348       case C1LS:
8349         rs1[i]=(source[i]>>21)&0x1F;
8350         rs2[i]=CSREG;
8351         rt1[i]=0;
8352         rt2[i]=0;
8353         imm[i]=(short)source[i];
8354         break;
8355       case C2LS:
8356         rs1[i]=(source[i]>>21)&0x1F;
8357         rs2[i]=0;
8358         rt1[i]=0;
8359         rt2[i]=0;
8360         imm[i]=(short)source[i];
8361         break;
8362       case FLOAT:
8363       case FCONV:
8364         rs1[i]=0;
8365         rs2[i]=CSREG;
8366         rt1[i]=0;
8367         rt2[i]=0;
8368         break;
8369       case FCOMP:
8370         rs1[i]=FSREG;
8371         rs2[i]=CSREG;
8372         rt1[i]=FSREG;
8373         rt2[i]=0;
8374         break;
8375       case SYSCALL:
8376       case HLECALL:
8377       case INTCALL:
8378         rs1[i]=CCREG;
8379         rs2[i]=0;
8380         rt1[i]=0;
8381         rt2[i]=0;
8382         break;
8383       default:
8384         rs1[i]=0;
8385         rs2[i]=0;
8386         rt1[i]=0;
8387         rt2[i]=0;
8388     }
8389     /* Calculate branch target addresses */
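    // J/JAL: target = (PC of the delay slot & 0xF0000000) | (26-bit index << 2);
    // the <<6 followed by >>4 below isolates the index and shifts it left by 2.
    // Conditional branches: target = PC+4 + (sign-extended 16-bit offset << 2);
    // the <<16 / >>14 pair sign-extends and scales in one step.
    // ba[i]==-1 marks instructions that are not branches.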
8390     if(type==UJUMP)
8391       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8392     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8393       ba[i]=start+i*4+8; // Ignore never taken branch
8394     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8395       ba[i]=start+i*4+8; // Ignore never taken branch
8396     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8397       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8398     else ba[i]=-1;
8399     /* Is this the end of the block? */
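    // done=1 ends the block unconditionally; done=2 is a tentative end after an
    // unconditional jump, cancelled below if an earlier branch targets the
    // instructions just past this point.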
8400     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8401       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8402         done=2;
8403       }
8404       else {
8405         if(stop_after_jal) done=1;
8406         // Stop on BREAK
8407         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8408       }
8409       // Don't recompile stuff that's already compiled
8410       if(check_addr(start+i*4+4)) done=1;
8411       // Don't get too close to the limit
8412       if(i>MAXBLOCK/2) done=1;
8413     }
8414     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8415     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8416     if(done==2) {
8417       // Does the block continue due to a branch?
8418       for(j=i-1;j>=0;j--)
8419       {
8420         if(ba[j]==start+i*4+4) done=j=0;
8421         if(ba[j]==start+i*4+8) done=j=0;
8422       }
8423     }
8424     //assert(i<MAXBLOCK-1);
8425     if(start+i*4==pagelimit-4) done=1;
8426     assert(start+i*4<pagelimit);
8427     if (i==MAXBLOCK-1) done=1;
8428     // Stop if we're compiling junk
8429     if(itype[i]==NI&&opcode[i]==0x11) {
8430       done=stop_after_jal=1;
8431       printf("Disabled speculative precompilation\n");
8432     }
8433   }
8434   slen=i;
8435   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8436     if(start+i*4==pagelimit) {
8437       itype[i-1]=SPAN;
8438     }
8439   }
8440   assert(slen>0);
8441
8442   /* Pass 2 - Register dependencies and branch targets */
8443
8444   unneeded_registers(0,slen-1,0);
8445   
8446   /* Pass 3 - Register allocation */
8447
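  // Walk the block in program order, tracking the host register mapping and
  // 32/64-bit state in 'current'; the pre/post state of each instruction is
  // recorded in regs[i] (and in branch_regs[i] for the taken path of branches).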
8448   struct regstat current; // Current register allocations/status
8449   current.is32=1;
8450   current.dirty=0;
8451   current.u=unneeded_reg[0];
8452   current.uu=unneeded_reg_upper[0];
8453   clear_all_regs(current.regmap);
8454   alloc_reg(&current,0,CCREG);
8455   dirty_reg(&current,CCREG);
8456   current.isconst=0;
8457   current.wasconst=0;
8458   int ds=0;
8459   int cc=0;
8460   int hr;
8461   
8462   provisional_32bit();
8463   
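  // The low bit of the start address is used as a flag: the block begins in a
  // branch delay slot, and HOST_BTREG is assumed to already hold the branch
  // target (BTREG) supplied at runtime.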
8464   if((u_int)addr&1) {
8465     // First instruction is delay slot
8466     cc=-1;
8467     bt[1]=1;
8468     ds=1;
8469     unneeded_reg[0]=1;
8470     unneeded_reg_upper[0]=1;
8471     current.regmap[HOST_BTREG]=BTREG;
8472   }
8473   
8474   for(i=0;i<slen;i++)
8475   {
8476     if(bt[i])
8477     {
8478       int hr;
8479       for(hr=0;hr<HOST_REGS;hr++)
8480       {
8481         // Is this really necessary?
8482         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8483       }
8484       current.isconst=0;
8485     }
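    // If the instruction two slots back was BNE/BNEL with one operand being $zero,
    // reaching this point on the fall-through path means the other operand must
    // hold zero, so it can be treated as 32-bit and its upper-half mapping dropped.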
8486     if(i>1)
8487     {
8488       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8489       {
8490         if(rs1[i-2]==0||rs2[i-2]==0)
8491         {
8492           if(rs1[i-2]) {
8493             current.is32|=1LL<<rs1[i-2];
8494             int hr=get_reg(current.regmap,rs1[i-2]|64);
8495             if(hr>=0) current.regmap[hr]=-1;
8496           }
8497           if(rs2[i-2]) {
8498             current.is32|=1LL<<rs2[i-2];
8499             int hr=get_reg(current.regmap,rs2[i-2]|64);
8500             if(hr>=0) current.regmap[hr]=-1;
8501           }
8502         }
8503       }
8504     }
8505     // If something jumps here with 64-bit values
8506     // then promote those registers to 64 bits
8507     if(bt[i])
8508     {
8509       uint64_t temp_is32=current.is32;
8510       for(j=i-1;j>=0;j--)
8511       {
8512         if(ba[j]==start+i*4) 
8513           temp_is32&=branch_regs[j].is32;
8514       }
8515       for(j=i;j<slen;j++)
8516       {
8517         if(ba[j]==start+i*4) 
8518           //temp_is32=1;
8519           temp_is32&=p32[j];
8520       }
8521       if(temp_is32!=current.is32) {
8522         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8523         #ifdef DESTRUCTIVE_WRITEBACK
8524         for(hr=0;hr<HOST_REGS;hr++)
8525         {
8526           int r=current.regmap[hr];
8527           if(r>0&&r<64)
8528           {
8529             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8530               temp_is32|=1LL<<r;
8531               //printf("restore %d\n",r);
8532             }
8533           }
8534         }
8535         #endif
8536         current.is32=temp_is32;
8537       }
8538     }
8539 #ifdef FORCE32
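    // 32-bit-only target (e.g. PSX): every register is permanently 32-bit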
8540     memset(p32, 0xff, sizeof(p32));
8541     current.is32=-1LL;
8542 #endif
8543
8544     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8545     regs[i].wasconst=current.isconst;
8546     regs[i].was32=current.is32;
8547     regs[i].wasdirty=current.dirty;
8548     #ifdef DESTRUCTIVE_WRITEBACK
8549     // To change a dirty register from 32 to 64 bits, we must write
8550     // it out during the previous cycle (for branches, 2 cycles)
8551     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8552     {
8553       uint64_t temp_is32=current.is32;
8554       for(j=i-1;j>=0;j--)
8555       {
8556         if(ba[j]==start+i*4+4) 
8557           temp_is32&=branch_regs[j].is32;
8558       }
8559       for(j=i;j<slen;j++)
8560       {
8561         if(ba[j]==start+i*4+4) 
8562           //temp_is32=1;
8563           temp_is32&=p32[j];
8564       }
8565       if(temp_is32!=current.is32) {
8566         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8567         for(hr=0;hr<HOST_REGS;hr++)
8568         {
8569           int r=current.regmap[hr];
8570           if(r>0)
8571           {
8572             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8573               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8574               {
8575                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8576                 {
8577                   //printf("dump %d/r%d\n",hr,r);
8578                   current.regmap[hr]=-1;
8579                   if(get_reg(current.regmap,r|64)>=0) 
8580                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8581                 }
8582               }
8583             }
8584           }
8585         }
8586       }
8587     }
8588     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8589     {
8590       uint64_t temp_is32=current.is32;
8591       for(j=i-1;j>=0;j--)
8592       {
8593         if(ba[j]==start+i*4+8) 
8594           temp_is32&=branch_regs[j].is32;
8595       }
8596       for(j=i;j<slen;j++)
8597       {
8598         if(ba[j]==start+i*4+8) 
8599           //temp_is32=1;
8600           temp_is32&=p32[j];
8601       }
8602       if(temp_is32!=current.is32) {
8603         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8604         for(hr=0;hr<HOST_REGS;hr++)
8605         {
8606           int r=current.regmap[hr];
8607           if(r>0)
8608           {
8609             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8610               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8611               {
8612                 //printf("dump %d/r%d\n",hr,r);
8613                 current.regmap[hr]=-1;
8614                 if(get_reg(current.regmap,r|64)>=0) 
8615                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8616               }
8617             }
8618           }
8619         }
8620       }
8621     }
8622     #endif
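    // Propagate unneeded-register info backwards: a register is unneeded after
    // this instruction if it is unneeded at i+1 and not read here (branches also
    // account for the reads of their delay slot).  'u' covers the low 32 bits,
    // 'uu' the upper halves; bit 0 ($zero) is always unneeded.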
8623     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8624       if(i+1<slen) {
8625         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8626         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8627         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8628         current.u|=1;
8629         current.uu|=1;
8630       } else {
8631         current.u=1;
8632         current.uu=1;
8633       }
8634     } else {
8635       if(i+1<slen) {
8636         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8637         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8638         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8639         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8640         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8641         current.u|=1;
8642         current.uu|=1;
8643       } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8644     }
8645     is_ds[i]=ds;
8646     if(ds) {
8647       ds=0; // Skip delay slot, already allocated as part of branch
8648       // ...but we need to alloc it in case something jumps here
8649       if(i+1<slen) {
8650         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8651         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8652       }else{
8653         current.u=branch_unneeded_reg[i-1];
8654         current.uu=branch_unneeded_reg_upper[i-1];
8655       }
8656       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8657       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8658       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8659       current.u|=1;
8660       current.uu|=1;
8661       struct regstat temp;
8662       memcpy(&temp,&current,sizeof(current));
8663       temp.wasdirty=temp.dirty;
8664       temp.was32=temp.is32;
8665       // TODO: Take into account unconditional branches, as below
8666       delayslot_alloc(&temp,i);
8667       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8668       regs[i].wasdirty=temp.wasdirty;
8669       regs[i].was32=temp.was32;
8670       regs[i].dirty=temp.dirty;
8671       regs[i].is32=temp.is32;
8672       regs[i].isconst=0;
8673       regs[i].wasconst=0;
8674       current.isconst=0;
8675       // Create entry (branch target) regmap
8676       for(hr=0;hr<HOST_REGS;hr++)
8677       {
8678         int r=temp.regmap[hr];
8679         if(r>=0) {
8680           if(r!=regmap_pre[i][hr]) {
8681             regs[i].regmap_entry[hr]=-1;
8682           }
8683           else
8684           {
8685             if(r<64){
8686               if((current.u>>r)&1) {
8687                 regs[i].regmap_entry[hr]=-1;
8688                 regs[i].regmap[hr]=-1;
8689                 //Don't clear regs in the delay slot as the branch might need them
8690                 //current.regmap[hr]=-1;
8691               }else
8692                 regs[i].regmap_entry[hr]=r;
8693             }
8694             else {
8695               if((current.uu>>(r&63))&1) {
8696                 regs[i].regmap_entry[hr]=-1;
8697                 regs[i].regmap[hr]=-1;
8698                 //Don't clear regs in the delay slot as the branch might need them
8699                 //current.regmap[hr]=-1;
8700               }else
8701                 regs[i].regmap_entry[hr]=r;
8702             }
8703           }
8704         } else {
8705           // First instruction expects CCREG to be allocated
8706           if(i==0&&hr==HOST_CCREG) 
8707             regs[i].regmap_entry[hr]=CCREG;
8708           else
8709             regs[i].regmap_entry[hr]=-1;
8710         }
8711       }
8712     }
8713     else { // Not delay slot
8714       switch(itype[i]) {
8715         case UJUMP:
8716           //current.isconst=0; // DEBUG
8717           //current.wasconst=0; // DEBUG
8718           //regs[i].wasconst=0; // DEBUG
8719           clear_const(&current,rt1[i]);
8720           alloc_cc(&current,i);
8721           dirty_reg(&current,CCREG);
8722           if (rt1[i]==31) {
8723             alloc_reg(&current,i,31);
8724             dirty_reg(&current,31);
8725             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8726             assert(rt1[i+1]!=rt1[i]);
8727             #ifdef REG_PREFETCH
8728             alloc_reg(&current,i,PTEMP);
8729             #endif
8730             //current.is32|=1LL<<rt1[i];
8731           }
8732           delayslot_alloc(&current,i+1);
8733           //current.isconst=0; // DEBUG
8734           ds=1;
8735           //printf("i=%d, isconst=%x\n",i,current.isconst);
8736           break;
8737         case RJUMP:
8738           //current.isconst=0;
8739           //current.wasconst=0;
8740           //regs[i].wasconst=0;
8741           clear_const(&current,rs1[i]);
8742           clear_const(&current,rt1[i]);
8743           alloc_cc(&current,i);
8744           dirty_reg(&current,CCREG);
8745           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8746             alloc_reg(&current,i,rs1[i]);
8747             if (rt1[i]!=0) {
8748               alloc_reg(&current,i,rt1[i]);
8749               dirty_reg(&current,rt1[i]);
8750               //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8751               assert(rt1[i+1]!=rt1[i]);
8752               #ifdef REG_PREFETCH
8753               alloc_reg(&current,i,PTEMP);
8754               #endif
8755             }
8756             #ifdef USE_MINI_HT
8757             if(rs1[i]==31) { // JALR
8758               alloc_reg(&current,i,RHASH);
8759               #ifndef HOST_IMM_ADDR32
8760               alloc_reg(&current,i,RHTBL);
8761               #endif
8762             }
8763             #endif
8764             delayslot_alloc(&current,i+1);
8765           } else {
8766             // The delay slot overwrites our source register,
8767             // allocate a temporary register to hold the old value.
8768             current.isconst=0;
8769             current.wasconst=0;
8770             regs[i].wasconst=0;
8771             delayslot_alloc(&current,i+1);
8772             current.isconst=0;
8773             alloc_reg(&current,i,RTEMP);
8774           }
8775           //current.isconst=0; // DEBUG
8776           ds=1;
8777           break;
8778         case CJUMP:
8779           //current.isconst=0;
8780           //current.wasconst=0;
8781           //regs[i].wasconst=0;
8782           clear_const(&current,rs1[i]);
8783           clear_const(&current,rs2[i]);
8784           if((opcode[i]&0x3E)==4) // BEQ/BNE
8785           {
8786             alloc_cc(&current,i);
8787             dirty_reg(&current,CCREG);
8788             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8789             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8790             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8791             {
8792               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8793               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8794             }
8795             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8796                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8797               // The delay slot overwrites one of our conditions.
8798               // Allocate the branch condition registers instead.
8799               // Note that such a sequence of instructions could
8800               // be considered a bug since the branch cannot be
8801               // re-executed if an exception occurs.
8802               current.isconst=0;
8803               current.wasconst=0;
8804               regs[i].wasconst=0;
8805               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8806               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8807               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8808               {
8809                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8810                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8811               }
8812             }
8813             else delayslot_alloc(&current,i+1);
8814           }
8815           else
8816           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8817           {
8818             alloc_cc(&current,i);
8819             dirty_reg(&current,CCREG);
8820             alloc_reg(&current,i,rs1[i]);
8821             if(!(current.is32>>rs1[i]&1))
8822             {
8823               alloc_reg64(&current,i,rs1[i]);
8824             }
8825             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8826               // The delay slot overwrites one of our conditions.
8827               // Allocate the branch condition registers instead.
8828               // Note that such a sequence of instructions could
8829               // be considered a bug since the branch cannot be
8830               // re-executed if an exception occurs.
8831               current.isconst=0;
8832               current.wasconst=0;
8833               regs[i].wasconst=0;
8834               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8835               if(!((current.is32>>rs1[i])&1))
8836               {
8837                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8838               }
8839             }
8840             else delayslot_alloc(&current,i+1);
8841           }
8842           else
8843           // Don't alloc the delay slot yet because we might not execute it
8844           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8845           {
8846             current.isconst=0;
8847             current.wasconst=0;
8848             regs[i].wasconst=0;
8849             alloc_cc(&current,i);
8850             dirty_reg(&current,CCREG);
8851             alloc_reg(&current,i,rs1[i]);
8852             alloc_reg(&current,i,rs2[i]);
8853             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8854             {
8855               alloc_reg64(&current,i,rs1[i]);
8856               alloc_reg64(&current,i,rs2[i]);
8857             }
8858           }
8859           else
8860           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8861           {
8862             current.isconst=0;
8863             current.wasconst=0;
8864             regs[i].wasconst=0;
8865             alloc_cc(&current,i);
8866             dirty_reg(&current,CCREG);
8867             alloc_reg(&current,i,rs1[i]);
8868             if(!(current.is32>>rs1[i]&1))
8869             {
8870               alloc_reg64(&current,i,rs1[i]);
8871             }
8872           }
8873           ds=1;
8874           //current.isconst=0;
8875           break;
8876         case SJUMP:
8877           //current.isconst=0;
8878           //current.wasconst=0;
8879           //regs[i].wasconst=0;
8880           clear_const(&current,rs1[i]);
8881           clear_const(&current,rt1[i]);
8882           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8883           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8884           {
8885             alloc_cc(&current,i);
8886             dirty_reg(&current,CCREG);
8887             alloc_reg(&current,i,rs1[i]);
8888             if(!(current.is32>>rs1[i]&1))
8889             {
8890               alloc_reg64(&current,i,rs1[i]);
8891             }
8892             if (rt1[i]==31) { // BLTZAL/BGEZAL
8893               alloc_reg(&current,i,31);
8894               dirty_reg(&current,31);
8895               assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8896               //#ifdef REG_PREFETCH
8897               //alloc_reg(&current,i,PTEMP);
8898               //#endif
8899               //current.is32|=1LL<<rt1[i];
8900             }
8901             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8902               // The delay slot overwrites the branch condition.
8903               // Allocate the branch condition registers instead.
8904               // Note that such a sequence of instructions could
8905               // be considered a bug since the branch cannot be
8906               // re-executed if an exception occurs.
8907               current.isconst=0;
8908               current.wasconst=0;
8909               regs[i].wasconst=0;
8910               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8911               if(!((current.is32>>rs1[i])&1))
8912               {
8913                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8914               }
8915             }
8916             else delayslot_alloc(&current,i+1);
8917           }
8918           else
8919           // Don't alloc the delay slot yet because we might not execute it
8920           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8921           {
8922             current.isconst=0;
8923             current.wasconst=0;
8924             regs[i].wasconst=0;
8925             alloc_cc(&current,i);
8926             dirty_reg(&current,CCREG);
8927             alloc_reg(&current,i,rs1[i]);
8928             if(!(current.is32>>rs1[i]&1))
8929             {
8930               alloc_reg64(&current,i,rs1[i]);
8931             }
8932           }
8933           ds=1;
8934           //current.isconst=0;
8935           break;
8936         case FJUMP:
8937           current.isconst=0;
8938           current.wasconst=0;
8939           regs[i].wasconst=0;
8940           if(likely[i]==0) // BC1F/BC1T
8941           {
8942             // TODO: Theoretically we can run out of registers here on x86.
8943             // The delay slot can allocate up to six, and we need to check
8944             // CSREG before executing the delay slot.  Possibly we can drop
8945             // the cycle count and then reload it after checking that the
8946             // FPU is in a usable state, or don't do out-of-order execution.
8947             alloc_cc(&current,i);
8948             dirty_reg(&current,CCREG);
8949             alloc_reg(&current,i,FSREG);
8950             alloc_reg(&current,i,CSREG);
8951             if(itype[i+1]==FCOMP) {
8952               // The delay slot overwrites the branch condition.
8953               // Allocate the branch condition registers instead.
8954               // Note that such a sequence of instructions could
8955               // be considered a bug since the branch cannot be
8956               // re-executed if an exception occurs.
8957               alloc_cc(&current,i);
8958               dirty_reg(&current,CCREG);
8959               alloc_reg(&current,i,CSREG);
8960               alloc_reg(&current,i,FSREG);
8961             }
8962             else {
8963               delayslot_alloc(&current,i+1);
8964               alloc_reg(&current,i+1,CSREG);
8965             }
8966           }
8967           else
8968           // Don't alloc the delay slot yet because we might not execute it
8969           if(likely[i]) // BC1FL/BC1TL
8970           {
8971             alloc_cc(&current,i);
8972             dirty_reg(&current,CCREG);
8973             alloc_reg(&current,i,CSREG);
8974             alloc_reg(&current,i,FSREG);
8975           }
8976           ds=1;
8977           current.isconst=0;
8978           break;
8979         case IMM16:
8980           imm16_alloc(&current,i);
8981           break;
8982         case LOAD:
8983         case LOADLR:
8984           load_alloc(&current,i);
8985           break;
8986         case STORE:
8987         case STORELR:
8988           store_alloc(&current,i);
8989           break;
8990         case ALU:
8991           alu_alloc(&current,i);
8992           break;
8993         case SHIFT:
8994           shift_alloc(&current,i);
8995           break;
8996         case MULTDIV:
8997           multdiv_alloc(&current,i);
8998           break;
8999         case SHIFTIMM:
9000           shiftimm_alloc(&current,i);
9001           break;
9002         case MOV:
9003           mov_alloc(&current,i);
9004           break;
9005         case COP0:
9006           cop0_alloc(&current,i);
9007           break;
9008         case COP1:
9009         case COP2:
9010           cop1_alloc(&current,i);
9011           break;
9012         case C1LS:
9013           c1ls_alloc(&current,i);
9014           break;
9015         case C2LS:
9016           c2ls_alloc(&current,i);
9017           break;
9018         case C2OP:
9019           c2op_alloc(&current,i);
9020           break;
9021         case FCONV:
9022           fconv_alloc(&current,i);
9023           break;
9024         case FLOAT:
9025           float_alloc(&current,i);
9026           break;
9027         case FCOMP:
9028           fcomp_alloc(&current,i);
9029           break;
9030         case SYSCALL:
9031         case HLECALL:
9032         case INTCALL:
9033           syscall_alloc(&current,i);
9034           break;
9035         case SPAN:
9036           pagespan_alloc(&current,i);
9037           break;
9038       }
9039       
9040       // Drop the upper half of registers that have become 32-bit
9041       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9042       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9043         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9044         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9045         current.uu|=1;
9046       } else {
9047         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9048         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9049         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9050         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9051         current.uu|=1;
9052       }
9053
9054       // Create entry (branch target) regmap
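      // regmap_entry describes the mapping expected on entry to this instruction
      // (what a jump here must provide); unneeded guest registers and temporaries
      // are left unmapped (-1).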
9055       for(hr=0;hr<HOST_REGS;hr++)
9056       {
9057         int r,or,er;
9058         r=current.regmap[hr];
9059         if(r>=0) {
9060           if(r!=regmap_pre[i][hr]) {
9061             // TODO: delay slot (?)
9062             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9063             if(or<0||(r&63)>=TEMPREG){
9064               regs[i].regmap_entry[hr]=-1;
9065             }
9066             else
9067             {
9068               // Just move it to a different register
9069               regs[i].regmap_entry[hr]=r;
9070               // If it was dirty before, it's still dirty
9071               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9072             }
9073           }
9074           else
9075           {
9076             // Unneeded
9077             if(r==0){
9078               regs[i].regmap_entry[hr]=0;
9079             }
9080             else
9081             if(r<64){
9082               if((current.u>>r)&1) {
9083                 regs[i].regmap_entry[hr]=-1;
9084                 //regs[i].regmap[hr]=-1;
9085                 current.regmap[hr]=-1;
9086               }else
9087                 regs[i].regmap_entry[hr]=r;
9088             }
9089             else {
9090               if((current.uu>>(r&63))&1) {
9091                 regs[i].regmap_entry[hr]=-1;
9092                 //regs[i].regmap[hr]=-1;
9093                 current.regmap[hr]=-1;
9094               }else
9095                 regs[i].regmap_entry[hr]=r;
9096             }
9097           }
9098         } else {
9099           // Branches expect CCREG to be allocated at the target
9100           if(regmap_pre[i][hr]==CCREG) 
9101             regs[i].regmap_entry[hr]=CCREG;
9102           else
9103             regs[i].regmap_entry[hr]=-1;
9104         }
9105       }
9106       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9107     }
9108     /* Branch post-alloc */
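    // The previous instruction was a branch: branch_regs[i-1] captures the
    // register state used for the branch itself (its taken path), now that the
    // delay slot, i.e. the current instruction, has been allocated.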
9109     if(i>0)
9110     {
9111       current.was32=current.is32;
9112       current.wasdirty=current.dirty;
9113       switch(itype[i-1]) {
9114         case UJUMP:
9115           memcpy(&branch_regs[i-1],&current,sizeof(current));
9116           branch_regs[i-1].isconst=0;
9117           branch_regs[i-1].wasconst=0;
9118           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9119           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9120           alloc_cc(&branch_regs[i-1],i-1);
9121           dirty_reg(&branch_regs[i-1],CCREG);
9122           if(rt1[i-1]==31) { // JAL
9123             alloc_reg(&branch_regs[i-1],i-1,31);
9124             dirty_reg(&branch_regs[i-1],31);
9125             branch_regs[i-1].is32|=1LL<<31;
9126           }
9127           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9128           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9129           break;
9130         case RJUMP:
9131           memcpy(&branch_regs[i-1],&current,sizeof(current));
9132           branch_regs[i-1].isconst=0;
9133           branch_regs[i-1].wasconst=0;
9134           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9135           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9136           alloc_cc(&branch_regs[i-1],i-1);
9137           dirty_reg(&branch_regs[i-1],CCREG);
9138           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9139           if(rt1[i-1]!=0) { // JALR
9140             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9141             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9142             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9143           }
9144           #ifdef USE_MINI_HT
9145           if(rs1[i-1]==31) { // JALR
9146             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9147             #ifndef HOST_IMM_ADDR32
9148             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9149             #endif
9150           }
9151           #endif
9152           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9153           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9154           break;
9155         case CJUMP:
9156           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9157           {
9158             alloc_cc(&current,i-1);
9159             dirty_reg(&current,CCREG);
9160             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9161                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9162               // The delay slot overwrote one of our conditions
9163               // Delay slot goes after the test (in order)
9164               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9165               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9166               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9167               current.u|=1;
9168               current.uu|=1;
9169               delayslot_alloc(&current,i);
9170               current.isconst=0;
9171             }
9172             else
9173             {
9174               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9175               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9176               // Alloc the branch condition registers
9177               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9178               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9179               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9180               {
9181                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9182                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9183               }
9184             }
9185             memcpy(&branch_regs[i-1],&current,sizeof(current));
9186             branch_regs[i-1].isconst=0;
9187             branch_regs[i-1].wasconst=0;
9188             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9189             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9190           }
9191           else
9192           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9193           {
9194             alloc_cc(&current,i-1);
9195             dirty_reg(&current,CCREG);
9196             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9197               // The delay slot overwrote the branch condition
9198               // Delay slot goes after the test (in order)
9199               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9200               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9201               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9202               current.u|=1;
9203               current.uu|=1;
9204               delayslot_alloc(&current,i);
9205               current.isconst=0;
9206             }
9207             else
9208             {
9209               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9210               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9211               // Alloc the branch condition register
9212               alloc_reg(&current,i-1,rs1[i-1]);
9213               if(!(current.is32>>rs1[i-1]&1))
9214               {
9215                 alloc_reg64(&current,i-1,rs1[i-1]);
9216               }
9217             }
9218             memcpy(&branch_regs[i-1],&current,sizeof(current));
9219             branch_regs[i-1].isconst=0;
9220             branch_regs[i-1].wasconst=0;
9221             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9222             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9223           }
9224           else
9225           // Alloc the delay slot in case the branch is taken
9226           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9227           {
9228             memcpy(&branch_regs[i-1],&current,sizeof(current));
9229             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9230             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9231             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9232             alloc_cc(&branch_regs[i-1],i);
9233             dirty_reg(&branch_regs[i-1],CCREG);
9234             delayslot_alloc(&branch_regs[i-1],i);
9235             branch_regs[i-1].isconst=0;
9236             alloc_reg(&current,i,CCREG); // Not taken path
9237             dirty_reg(&current,CCREG);
9238             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9239           }
9240           else
9241           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9242           {
9243             memcpy(&branch_regs[i-1],&current,sizeof(current));
9244             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9245             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9246             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9247             alloc_cc(&branch_regs[i-1],i);
9248             dirty_reg(&branch_regs[i-1],CCREG);
9249             delayslot_alloc(&branch_regs[i-1],i);
9250             branch_regs[i-1].isconst=0;
9251             alloc_reg(&current,i,CCREG); // Not taken path
9252             dirty_reg(&current,CCREG);
9253             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9254           }
9255           break;
9256         case SJUMP:
9257           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9258           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9259           {
9260             alloc_cc(&current,i-1);
9261             dirty_reg(&current,CCREG);
9262             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9263               // The delay slot overwrote the branch condition
9264               // Delay slot goes after the test (in order)
9265               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9266               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9267               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9268               current.u|=1;
9269               current.uu|=1;
9270               delayslot_alloc(&current,i);
9271               current.isconst=0;
9272             }
9273             else
9274             {
9275               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9276               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9277               // Alloc the branch condition register
9278               alloc_reg(&current,i-1,rs1[i-1]);
9279               if(!(current.is32>>rs1[i-1]&1))
9280               {
9281                 alloc_reg64(&current,i-1,rs1[i-1]);
9282               }
9283             }
9284             memcpy(&branch_regs[i-1],&current,sizeof(current));
9285             branch_regs[i-1].isconst=0;
9286             branch_regs[i-1].wasconst=0;
9287             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9288             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9289           }
9290           else
9291           // Alloc the delay slot in case the branch is taken
9292           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9293           {
9294             memcpy(&branch_regs[i-1],&current,sizeof(current));
9295             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9296             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9297             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9298             alloc_cc(&branch_regs[i-1],i);
9299             dirty_reg(&branch_regs[i-1],CCREG);
9300             delayslot_alloc(&branch_regs[i-1],i);
9301             branch_regs[i-1].isconst=0;
9302             alloc_reg(&current,i,CCREG); // Not taken path
9303             dirty_reg(&current,CCREG);
9304             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9305           }
9306           // FIXME: BLTZAL/BGEZAL
9307           if(opcode2[i-1]&0x10) { // BxxZAL
9308             alloc_reg(&branch_regs[i-1],i-1,31);
9309             dirty_reg(&branch_regs[i-1],31);
9310             branch_regs[i-1].is32|=1LL<<31;
9311           }
9312           break;
9313         case FJUMP:
9314           if(likely[i-1]==0) // BC1F/BC1T
9315           {
9316             alloc_cc(&current,i-1);
9317             dirty_reg(&current,CCREG);
9318             if(itype[i]==FCOMP) {
9319               // The delay slot overwrote the branch condition
9320               // Delay slot goes after the test (in order)
9321               delayslot_alloc(&current,i);
9322               current.isconst=0;
9323             }
9324             else
9325             {
9326               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9327               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9328               // Alloc the branch condition register
9329               alloc_reg(&current,i-1,FSREG);
9330             }
9331             memcpy(&branch_regs[i-1],&current,sizeof(current));
9332             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9333           }
9334           else // BC1FL/BC1TL
9335           {
9336             // Alloc the delay slot in case the branch is taken
9337             memcpy(&branch_regs[i-1],&current,sizeof(current));
9338             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9339             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9340             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9341             alloc_cc(&branch_regs[i-1],i);
9342             dirty_reg(&branch_regs[i-1],CCREG);
9343             delayslot_alloc(&branch_regs[i-1],i);
9344             branch_regs[i-1].isconst=0;
9345             alloc_reg(&current,i,CCREG); // Not taken path
9346             dirty_reg(&current,CCREG);
9347             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9348           }
9349           break;
9350       }
9351
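      // After an unconditional jump or branch, the fall-through point is only
      // reachable as a return address or internal branch target, so start from
      // a known register state: JAL/JALR carry nothing across (the callee may
      // clobber anything), while internal targets adopt the register map of
      // whichever branch in this block jumps here.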
9352       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9353       {
9354         if(rt1[i-1]==31) // JAL/JALR
9355         {
9356           // Subroutine call will return here, don't alloc any registers
9357           current.is32=1;
9358           current.dirty=0;
9359           clear_all_regs(current.regmap);
9360           alloc_reg(&current,i,CCREG);
9361           dirty_reg(&current,CCREG);
9362         }
9363         else if(i+1<slen)
9364         {
9365           // Internal branch will jump here, match registers to caller
9366           current.is32=0x3FFFFFFFFLL;
9367           current.dirty=0;
9368           clear_all_regs(current.regmap);
9369           alloc_reg(&current,i,CCREG);
9370           dirty_reg(&current,CCREG);
9371           for(j=i-1;j>=0;j--)
9372           {
9373             if(ba[j]==start+i*4+4) {
9374               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9375               current.is32=branch_regs[j].is32;
9376               current.dirty=branch_regs[j].dirty;
9377               break;
9378             }
9379           }
9380           while(j>=0) {
9381             if(ba[j]==start+i*4+4) {
9382               for(hr=0;hr<HOST_REGS;hr++) {
9383                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9384                   current.regmap[hr]=-1;
9385                 }
9386                 current.is32&=branch_regs[j].is32;
9387                 current.dirty&=branch_regs[j].dirty;
9388               }
9389             }
9390             j--;
9391           }
9392         }
9393       }
9394     }
9395
9396     // Count cycles in between branches
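    // ccadj[i] = cycles accumulated since the last branch or syscall; the
    // cycle count register is only brought up to date at those points.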
9397     ccadj[i]=cc;
9398     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9399     {
9400       cc=0;
9401     }
9402     else
9403     {
9404       cc++;
9405     }
9406
9407     flush_dirty_uppers(&current);
9408     if(!is_ds[i]) {
9409       regs[i].is32=current.is32;
9410       regs[i].dirty=current.dirty;
9411       regs[i].isconst=current.isconst;
9412       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9413     }
9414     for(hr=0;hr<HOST_REGS;hr++) {
9415       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9416         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9417           regs[i].wasconst&=~(1<<hr);
9418         }
9419       }
9420     }
9421     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9422   }
9423   
9424   /* Pass 4 - Cull unused host registers */
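  // 'nr' is a bitmask of host registers whose contents are still needed.
  // The scan runs backwards so needs propagate from branch targets and later
  // instructions to earlier ones; the per-instruction result is saved in
  // needed_reg[] and anything not needed is deallocated below.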
9425   
9426   uint64_t nr=0;
9427   
9428   for (i=slen-1;i>=0;i--)
9429   {
9430     int hr;
9431     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9432     {
9433       if(ba[i]<start || ba[i]>=(start+slen*4))
9434       {
9435         // Branch out of this block, don't need anything
9436         nr=0;
9437       }
9438       else
9439       {
9440         // Internal branch
9441         // Need whatever matches the target
9442         nr=0;
9443         int t=(ba[i]-start)>>2;
9444         for(hr=0;hr<HOST_REGS;hr++)
9445         {
9446           if(regs[i].regmap_entry[hr]>=0) {
9447             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9448           }
9449         }
9450       }
9451       // Conditional branch may need registers for following instructions
9452       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9453       {
9454         if(i<slen-2) {
9455           nr|=needed_reg[i+2];
9456           for(hr=0;hr<HOST_REGS;hr++)
9457           {
9458             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9459             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9460           }
9461         }
9462       }
9463       // Merge in delay slot
9464       for(hr=0;hr<HOST_REGS;hr++)
9465       {
9466         // Don't need stuff which is overwritten
9467         if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9468         if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9469         if(!likely[i]) {
9470           // These are overwritten unless the branch is "likely"
9471           // and the delay slot is nullified if not taken
9472           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9473           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9474         }
9475         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9476         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9477         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9478         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9479         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9480         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9481         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9482         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9483         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9484           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9485           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9486         }
9487         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9488           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9489           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9490         }
9491         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9492           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9493           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9494         }
9495       }
9496     }
9497     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
9498     {
9499       // SYSCALL instruction (software interrupt)
9500       nr=0;
9501     }
9502     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9503     {
9504       // ERET instruction (return from interrupt)
9505       nr=0;
9506     }
9507     else // Non-branch
9508     {
9509       if(i<slen-1) {
9510         for(hr=0;hr<HOST_REGS;hr++) {
9511           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9512           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9513           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9514           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9515         }
9516       }
9517     }
9518     for(hr=0;hr<HOST_REGS;hr++)
9519     {
9520       // Overwritten registers are not needed
9521       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9522       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9523       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9524       // Source registers are needed
9525       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9526       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9527       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9528       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9529       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9530       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9531       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9532       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9533       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9534         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9535         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9536       }
9537       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9538         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9539         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9540       }
9541       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9542         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9543         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9544       }
9545       // Don't store a register immediately after writing it,
9546       // as that may prevent dual-issue.
9547       // But do so if this is a branch target, otherwise we
9548       // might have to load the register before the branch.
9549       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9550         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9551            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9552           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9553           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9554         }
9555         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9556            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9557           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9558           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9559         }
9560       }
9561     }
9562     // Cycle count is needed at branches.  Assume it is needed at the target too.
9563     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9564       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9565       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9566     }
9567     // Save it
9568     needed_reg[i]=nr;
9569     
9570     // Deallocate unneeded registers
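    // (Only mappings that are not a source or target of this instruction or
    // its delay slot, and not one of the special helper registers such as
    // PTEMP/CCREG, are actually dropped.)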
9571     for(hr=0;hr<HOST_REGS;hr++)
9572     {
9573       if(!((nr>>hr)&1)) {
9574         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9575         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9576            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9577            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9578         {
9579           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9580           {
9581             if(likely[i]) {
9582               regs[i].regmap[hr]=-1;
9583               regs[i].isconst&=~(1<<hr);
9584               if(i<slen-2) regmap_pre[i+2][hr]=-1;
9585             }
9586           }
9587         }
9588         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9589         {
9590           int d1=0,d2=0,map=0,temp=0;
9591           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9592           {
9593             d1=dep1[i+1];
9594             d2=dep2[i+1];
9595           }
9596           if(using_tlb) {
9597             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9598                itype[i+1]==STORE || itype[i+1]==STORELR ||
9599                itype[i+1]==C1LS || itype[i+1]==C2LS)
9600             map=TLREG;
9601           } else
9602           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9603              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9604             map=INVCP;
9605           }
9606           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9607              itype[i+1]==C1LS || itype[i+1]==C2LS)
9608             temp=FTEMP;
9609           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9610              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9611              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9612              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9613              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9614              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9615              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9616              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9617              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9618              regs[i].regmap[hr]!=map )
9619           {
9620             regs[i].regmap[hr]=-1;
9621             regs[i].isconst&=~(1<<hr);
9622             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9623                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9624                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9625                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9626                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9627                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9628                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9629                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9630                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9631                branch_regs[i].regmap[hr]!=map)
9632             {
9633               branch_regs[i].regmap[hr]=-1;
9634               branch_regs[i].regmap_entry[hr]=-1;
9635               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9636               {
9637                 if(!likely[i]&&i<slen-2) {
9638                   regmap_pre[i+2][hr]=-1;
9639                 }
9640               }
9641             }
9642           }
9643         }
9644         else
9645         {
9646           // Non-branch
9647           if(i>0)
9648           {
9649             int d1=0,d2=0,map=-1,temp=-1;
9650             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9651             {
9652               d1=dep1[i];
9653               d2=dep2[i];
9654             }
9655             if(using_tlb) {
9656               if(itype[i]==LOAD || itype[i]==LOADLR ||
9657                  itype[i]==STORE || itype[i]==STORELR ||
9658                  itype[i]==C1LS || itype[i]==C2LS)
9659               map=TLREG;
9660             } else if(itype[i]==STORE || itype[i]==STORELR ||
9661                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9662               map=INVCP;
9663             }
9664             if(itype[i]==LOADLR || itype[i]==STORELR ||
9665                itype[i]==C1LS || itype[i]==C2LS)
9666               temp=FTEMP;
9667             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9668                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9669                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9670                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9671                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9672                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9673             {
9674               if(i<slen-1&&!is_ds[i]) {
9675                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9676                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9677                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9678                 {
9679                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9680                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9681                 }
9682                 regmap_pre[i+1][hr]=-1;
9683                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9684               }
9685               regs[i].regmap[hr]=-1;
9686               regs[i].isconst&=~(1<<hr);
9687             }
9688           }
9689         }
9690       }
9691     }
9692   }
9693   
9694   /* Pass 5 - Pre-allocate registers */
9695   
9696   // If a register is allocated during a loop, try to allocate it for the
9697   // entire loop, if possible.  This avoids loading/storing registers
9698   // inside of the loop.
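  // f_regmap[] holds, for each host register, the guest register we would
  // like to keep mapped there across an entire loop body (from the branch
  // target back up to the branch).  It is rebuilt as the scan advances and
  // cleared whenever a conflicting allocation is found.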
9699
9700   signed char f_regmap[HOST_REGS];
9701   clear_all_regs(f_regmap);
9702   for(i=0;i<slen-1;i++)
9703   {
9704     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9705     {
9706       if(ba[i]>=start && ba[i]<(start+i*4)) 
9707       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9708       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9709       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9710       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9711       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9712       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9713       {
9714         int t=(ba[i]-start)>>2;
9715         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9716         if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9717         for(hr=0;hr<HOST_REGS;hr++)
9718         {
9719           if(regs[i].regmap[hr]>64) {
9720             if(!((regs[i].dirty>>hr)&1))
9721               f_regmap[hr]=regs[i].regmap[hr];
9722             else f_regmap[hr]=-1;
9723           }
9724           else if(regs[i].regmap[hr]>=0) {
9725             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9726               // dealloc old register
9727               int n;
9728               for(n=0;n<HOST_REGS;n++)
9729               {
9730                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9731               }
9732               // and alloc new one
9733               f_regmap[hr]=regs[i].regmap[hr];
9734             }
9735           }
9736           if(branch_regs[i].regmap[hr]>64) {
9737             if(!((branch_regs[i].dirty>>hr)&1))
9738               f_regmap[hr]=branch_regs[i].regmap[hr];
9739             else f_regmap[hr]=-1;
9740           }
9741           else if(branch_regs[i].regmap[hr]>=0) {
9742             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9743               // dealloc old register
9744               int n;
9745               for(n=0;n<HOST_REGS;n++)
9746               {
9747                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9748               }
9749               // and alloc new one
9750               f_regmap[hr]=branch_regs[i].regmap[hr];
9751             }
9752           }
9753           if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9754           ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9755           ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9756           ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9757           {
9758             // Test both in case the delay slot is executed out of order,
9759             // could be done better...
9760             if(count_free_regs(branch_regs[i].regmap)<2
9761              ||count_free_regs(regs[i].regmap)<2) 
9762               f_regmap[hr]=branch_regs[i].regmap[hr];
9763           }
9764           // Avoid dirty->clean transition
9765           // #ifdef DESTRUCTIVE_WRITEBACK here?
9766           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9767           if(f_regmap[hr]>0) {
9768             if(regs[t].regmap_entry[hr]<0) {
9769               int r=f_regmap[hr];
9770               for(j=t;j<=i;j++)
9771               {
9772                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9773                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9774                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9775                 if(r>63) {
9776                   // NB This can exclude the case where the upper-half
9777                   // register is lower numbered than the lower-half
9778                   // register.  Not sure if it's worth fixing...
9779                   if(get_reg(regs[j].regmap,r&63)<0) break;
9780                   if(regs[j].is32&(1LL<<(r&63))) break;
9781                 }
9782                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9783                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9784                   int k;
9785                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9786                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9787                     if(r>63) {
9788                       if(get_reg(regs[i].regmap,r&63)<0) break;
9789                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9790                     }
9791                     k=i;
9792                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9793                       if(itype[k-1]==STORE||itype[k-1]==STORELR
9794                       ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
9795                       ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9796                       ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9797                         if(count_free_regs(regs[k-1].regmap)<2) {
9798                           //printf("no free regs for store %x\n",start+(k-1)*4);
9799                           break;
9800                         }
9801                       }
9802                       else
9803                       if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9804                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9805                         //printf("no-match due to different register\n");
9806                         break;
9807                       }
9808                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9809                         //printf("no-match due to branch\n");
9810                         break;
9811                       }
9812                       // call/ret fast path assumes no registers allocated
9813                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9814                         break;
9815                       }
9816                       if(r>63) {
9817                         // NB This can exclude the case where the upper-half
9818                         // register is lower numbered than the lower-half
9819                         // register.  Not sure if it's worth fixing...
9820                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9821                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9822                       }
9823                       k--;
9824                     }
9825                     if(i<slen-1) {
9826                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9827                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9828                         //printf("bad match after branch\n");
9829                         break;
9830                       }
9831                     }
9832                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9833                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9834                       while(k<i) {
9835                         regs[k].regmap_entry[hr]=f_regmap[hr];
9836                         regs[k].regmap[hr]=f_regmap[hr];
9837                         regmap_pre[k+1][hr]=f_regmap[hr];
9838                         regs[k].wasdirty&=~(1<<hr);
9839                         regs[k].dirty&=~(1<<hr);
9840                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9841                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9842                         regs[k].wasconst&=~(1<<hr);
9843                         regs[k].isconst&=~(1<<hr);
9844                         k++;
9845                       }
9846                     }
9847                     else {
9848                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9849                       break;
9850                     }
9851                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9852                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9853                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9854                       regs[i].regmap_entry[hr]=f_regmap[hr];
9855                       regs[i].regmap[hr]=f_regmap[hr];
9856                       regs[i].wasdirty&=~(1<<hr);
9857                       regs[i].dirty&=~(1<<hr);
9858                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9859                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9860                       regs[i].wasconst&=~(1<<hr);
9861                       regs[i].isconst&=~(1<<hr);
9862                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9863                       branch_regs[i].wasdirty&=~(1<<hr);
9864                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9865                       branch_regs[i].regmap[hr]=f_regmap[hr];
9866                       branch_regs[i].dirty&=~(1<<hr);
9867                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9868                       branch_regs[i].wasconst&=~(1<<hr);
9869                       branch_regs[i].isconst&=~(1<<hr);
9870                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9871                         regmap_pre[i+2][hr]=f_regmap[hr];
9872                         regs[i+2].wasdirty&=~(1<<hr);
9873                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9874                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9875                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9876                       }
9877                     }
9878                   }
9879                   for(k=t;k<j;k++) {
9880                     regs[k].regmap_entry[hr]=f_regmap[hr];
9881                     regs[k].regmap[hr]=f_regmap[hr];
9882                     regmap_pre[k+1][hr]=f_regmap[hr];
9883                     regs[k+1].wasdirty&=~(1<<hr);
9884                     regs[k].dirty&=~(1<<hr);
9885                     regs[k].wasconst&=~(1<<hr);
9886                     regs[k].isconst&=~(1<<hr);
9887                   }
9888                   if(regs[j].regmap[hr]==f_regmap[hr])
9889                     regs[j].regmap_entry[hr]=f_regmap[hr];
9890                   break;
9891                 }
9892                 if(j==i) break;
9893                 if(regs[j].regmap[hr]>=0)
9894                   break;
9895                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9896                   //printf("no-match due to different register\n");
9897                   break;
9898                 }
9899                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9900                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9901                   break;
9902                 }
9903                 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9904                 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9905                 ||itype[j]==FCOMP||itype[j]==FCONV
9906                 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9907                   if(count_free_regs(regs[j].regmap)<2) {
9908                     //printf("No free regs for store %x\n",start+j*4);
9909                     break;
9910                   }
9911                 }
9912                 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9913                 if(f_regmap[hr]>=64) {
9914                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9915                     break;
9916                   }
9917                   else
9918                   {
9919                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9920                       break;
9921                     }
9922                   }
9923                 }
9924               }
9925             }
9926           }
9927         }
9928       }
9929     }else{
9930       int count=0;
9931       for(hr=0;hr<HOST_REGS;hr++)
9932       {
9933         if(hr!=EXCLUDE_REG) {
9934           if(regs[i].regmap[hr]>64) {
9935             if(!((regs[i].dirty>>hr)&1))
9936               f_regmap[hr]=regs[i].regmap[hr];
9937           }
9938           else if(regs[i].regmap[hr]>=0) {
9939             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9940               // dealloc old register
9941               int n;
9942               for(n=0;n<HOST_REGS;n++)
9943               {
9944                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9945               }
9946               // and alloc new one
9947               f_regmap[hr]=regs[i].regmap[hr];
9948             }
9949           }
9950           else if(regs[i].regmap[hr]<0) count++;
9951         }
9952       }
9953       // Try to restore cycle count at branch targets
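      // If the cycle count will be live in HOST_CCREG at the next point it is
      // touched, keep it allocated from this branch target up to there (and
      // try to extend the allocation backwards as well) so it does not have
      // to be spilled and reloaded around the loop.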
9954       if(bt[i]) {
9955         for(j=i;j<slen-1;j++) {
9956           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9957           if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9958           ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9959           ||itype[j]==FCOMP||itype[j]==FCONV
9960           ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9961             if(count_free_regs(regs[j].regmap)<2) {
9962               //printf("no free regs for store %x\n",start+j*4);
9963               break;
9964             }
9965           }
9966           else
9967           if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9968         }
9969         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9970           int k=i;
9971           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9972           while(k<j) {
9973             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9974             regs[k].regmap[HOST_CCREG]=CCREG;
9975             regmap_pre[k+1][HOST_CCREG]=CCREG;
9976             regs[k+1].wasdirty|=1<<HOST_CCREG;
9977             regs[k].dirty|=1<<HOST_CCREG;
9978             regs[k].wasconst&=~(1<<HOST_CCREG);
9979             regs[k].isconst&=~(1<<HOST_CCREG);
9980             k++;
9981           }
9982           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
9983         }
9984         // Work backwards from the branch target
9985         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9986         {
9987           //printf("Extend backwards\n");
9988           int k;
9989           k=i;
9990           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9991             if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
9992             ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
9993             ||itype[k-1]==FCONV||itype[k-1]==FCOMP
9994             ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9995               if(count_free_regs(regs[k-1].regmap)<2) {
9996                 //printf("no free regs for store %x\n",start+(k-1)*4);
9997                 break;
9998               }
9999             }
10000             else
10001             if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
10002             k--;
10003           }
10004           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10005             //printf("Extend CC, %x ->\n",start+k*4);
10006             while(k<=i) {
10007               regs[k].regmap_entry[HOST_CCREG]=CCREG;
10008               regs[k].regmap[HOST_CCREG]=CCREG;
10009               regmap_pre[k+1][HOST_CCREG]=CCREG;
10010               regs[k+1].wasdirty|=1<<HOST_CCREG;
10011               regs[k].dirty|=1<<HOST_CCREG;
10012               regs[k].wasconst&=~(1<<HOST_CCREG);
10013               regs[k].isconst&=~(1<<HOST_CCREG);
10014               k++;
10015             }
10016           }
10017           else {
10018             //printf("Fail Extend CC, %x ->\n",start+k*4);
10019           }
10020         }
10021       }
10022       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10023          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10024          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10025          itype[i]!=FCONV&&itype[i]!=FCOMP&&
10026          itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
10027       {
10028         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10029       }
10030     }
10031   }
10032   
10033   // This allocates registers (if possible) one instruction prior
10034   // to use, which can avoid a load-use penalty on certain CPUs.
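  // For example, if the instruction at i+1 reads a guest register that is not
  // yet in a host register, mapping it during instruction i lets the load be
  // emitted one slot early, so the value is ready when i+1 consumes it.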
10035   for(i=0;i<slen-1;i++)
10036   {
10037     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10038     {
10039       if(!bt[i+1])
10040       {
10041         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10042            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10043         {
10044           if(rs1[i+1]) {
10045             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10046             {
10047               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10048               {
10049                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10050                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10051                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10052                 regs[i].isconst&=~(1<<hr);
10053                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10054                 constmap[i][hr]=constmap[i+1][hr];
10055                 regs[i+1].wasdirty&=~(1<<hr);
10056                 regs[i].dirty&=~(1<<hr);
10057               }
10058             }
10059           }
10060           if(rs2[i+1]) {
10061             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10062             {
10063               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10064               {
10065                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10066                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10067                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10068                 regs[i].isconst&=~(1<<hr);
10069                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10070                 constmap[i][hr]=constmap[i+1][hr];
10071                 regs[i+1].wasdirty&=~(1<<hr);
10072                 regs[i].dirty&=~(1<<hr);
10073               }
10074             }
10075           }
10076           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10077             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10078             {
10079               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10080               {
10081                 regs[i].regmap[hr]=rs1[i+1];
10082                 regmap_pre[i+1][hr]=rs1[i+1];
10083                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10084                 regs[i].isconst&=~(1<<hr);
10085                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10086                 constmap[i][hr]=constmap[i+1][hr];
10087                 regs[i+1].wasdirty&=~(1<<hr);
10088                 regs[i].dirty&=~(1<<hr);
10089               }
10090             }
10091           }
10092           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10093             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10094             {
10095               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10096               {
10097                 regs[i].regmap[hr]=rs1[i+1];
10098                 regmap_pre[i+1][hr]=rs1[i+1];
10099                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10100                 regs[i].isconst&=~(1<<hr);
10101                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10102                 constmap[i][hr]=constmap[i+1][hr];
10103                 regs[i+1].wasdirty&=~(1<<hr);
10104                 regs[i].dirty&=~(1<<hr);
10105               }
10106             }
10107           }
10108           #ifndef HOST_IMM_ADDR32
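          // If the next load/store has a constant address, the memory-map
          // (TLREG) lookup can be started one instruction early using the
          // per-slot scratch register MGEN1+((i+1)&1); if the host register
          // holding TLREG is busy here, TLREG is first moved to another
          // register that is free in both slots.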
10109           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10110             hr=get_reg(regs[i+1].regmap,TLREG);
10111             if(hr>=0) {
10112               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10113               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10114                 int nr;
10115                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10116                 {
10117                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10118                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10119                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10120                   regs[i].isconst&=~(1<<hr);
10121                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10122                   constmap[i][hr]=constmap[i+1][hr];
10123                   regs[i+1].wasdirty&=~(1<<hr);
10124                   regs[i].dirty&=~(1<<hr);
10125                 }
10126                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10127                 {
10128                   // move it to another register
10129                   regs[i+1].regmap[hr]=-1;
10130                   regmap_pre[i+2][hr]=-1;
10131                   regs[i+1].regmap[nr]=TLREG;
10132                   regmap_pre[i+2][nr]=TLREG;
10133                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10134                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10135                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10136                   regs[i].isconst&=~(1<<nr);
10137                   regs[i+1].isconst&=~(1<<nr);
10138                   regs[i].dirty&=~(1<<nr);
10139                   regs[i+1].wasdirty&=~(1<<nr);
10140                   regs[i+1].dirty&=~(1<<nr);
10141                   regs[i+2].wasdirty&=~(1<<nr);
10142                 }
10143               }
10144             }
10145           }
10146           #endif
10147           if(itype[i+1]==STORE||itype[i+1]==STORELR
10148              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10149             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10150               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10151               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10152               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10153               assert(hr>=0);
10154               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10155               {
10156                 regs[i].regmap[hr]=rs1[i+1];
10157                 regmap_pre[i+1][hr]=rs1[i+1];
10158                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10159                 regs[i].isconst&=~(1<<hr);
10160                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10161                 constmap[i][hr]=constmap[i+1][hr];
10162                 regs[i+1].wasdirty&=~(1<<hr);
10163                 regs[i].dirty&=~(1<<hr);
10164               }
10165             }
10166           }
10167           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10168             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10169               int nr;
10170               hr=get_reg(regs[i+1].regmap,FTEMP);
10171               assert(hr>=0);
10172               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10173               {
10174                 regs[i].regmap[hr]=rs1[i+1];
10175                 regmap_pre[i+1][hr]=rs1[i+1];
10176                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10177                 regs[i].isconst&=~(1<<hr);
10178                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10179                 constmap[i][hr]=constmap[i+1][hr];
10180                 regs[i+1].wasdirty&=~(1<<hr);
10181                 regs[i].dirty&=~(1<<hr);
10182               }
10183               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10184               {
10185                 // move it to another register
10186                 regs[i+1].regmap[hr]=-1;
10187                 regmap_pre[i+2][hr]=-1;
10188                 regs[i+1].regmap[nr]=FTEMP;
10189                 regmap_pre[i+2][nr]=FTEMP;
10190                 regs[i].regmap[nr]=rs1[i+1];
10191                 regmap_pre[i+1][nr]=rs1[i+1];
10192                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10193                 regs[i].isconst&=~(1<<nr);
10194                 regs[i+1].isconst&=~(1<<nr);
10195                 regs[i].dirty&=~(1<<nr);
10196                 regs[i+1].wasdirty&=~(1<<nr);
10197                 regs[i+1].dirty&=~(1<<nr);
10198                 regs[i+2].wasdirty&=~(1<<nr);
10199               }
10200             }
10201           }
10202           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10203             if(itype[i+1]==LOAD) 
10204               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10205             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10206               hr=get_reg(regs[i+1].regmap,FTEMP);
10207             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10208               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10209               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10210             }
10211             if(hr>=0&&regs[i].regmap[hr]<0) {
10212               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10213               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10214                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10215                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10216                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10217                 regs[i].isconst&=~(1<<hr);
10218                 regs[i+1].wasdirty&=~(1<<hr);
10219                 regs[i].dirty&=~(1<<hr);
10220               }
10221             }
10222           }
10223         }
10224       }
10225     }
10226   }
10227   
10228   /* Pass 6 - Optimize clean/dirty state */
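  // Propagate the clean/dirty state across the block so that modified
  // registers are only written back where they actually need to be.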
10229   clean_registers(0,slen-1,1);
10230   
10231   /* Pass 7 - Identify 32-bit registers */
10232   
10233   provisional_r32();
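  // r32 is a per-guest-register bitmask of values that later code relies on
  // being proper 32-bit (sign-extended) values.  As in pass 4 the scan runs
  // backwards, and the per-instruction result is stored in requires_32bit[].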
10234
10235   uint64_t r32=0;
10236   
10237   for (i=slen-1;i>=0;i--)
10238   {
10239     int hr;
10240     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10241     {
10242       if(ba[i]<start || ba[i]>=(start+slen*4))
10243       {
10244         // Branch out of this block, don't need anything
10245         r32=0;
10246       }
10247       else
10248       {
10249         // Internal branch
10250         // Need whatever matches the target
10251         // (and doesn't get overwritten by the delay slot instruction)
10252         r32=0;
10253         int t=(ba[i]-start)>>2;
10254         if(ba[i]>start+i*4) {
10255           // Forward branch
10256           if(!(requires_32bit[t]&~regs[i].was32))
10257             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10258         }else{
10259           // Backward branch
10260           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10261           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10262           if(!(pr32[t]&~regs[i].was32))
10263             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10264         }
10265       }
10266       // Conditional branch may need registers for following instructions
10267       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10268       {
10269         if(i<slen-2) {
10270           r32|=requires_32bit[i+2];
10271           r32&=regs[i].was32;
10272           // Mark this address as a branch target since it may be called
10273           // upon return from interrupt
10274           bt[i+2]=1;
10275         }
10276       }
10277       // Merge in delay slot
10278       if(!likely[i]) {
10279         // These are overwritten unless the branch is "likely"
10280         // and the delay slot is nullified if not taken
10281         r32&=~(1LL<<rt1[i+1]);
10282         r32&=~(1LL<<rt2[i+1]);
10283       }
10284       // Assume these are needed (delay slot)
10285       if(us1[i+1]>0)
10286       {
10287         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10288       }
10289       if(us2[i+1]>0)
10290       {
10291         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10292       }
10293       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10294       {
10295         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10296       }
10297       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10298       {
10299         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10300       }
10301     }
10302     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10303     {
10304       // SYSCALL instruction (software interrupt)
10305       r32=0;
10306     }
10307     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10308     {
10309       // ERET instruction (return from interrupt)
10310       r32=0;
10311     }
10312     // Check 32 bits
10313     r32&=~(1LL<<rt1[i]);
10314     r32&=~(1LL<<rt2[i]);
10315     if(us1[i]>0)
10316     {
10317       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10318     }
10319     if(us2[i]>0)
10320     {
10321       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10322     }
10323     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10324     {
10325       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10326     }
10327     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10328     {
10329       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10330     }
10331     requires_32bit[i]=r32;
10332     
10333     // Dirty registers which are 32-bit, require 32-bit input
10334     // as they will be written as 32-bit values
10335     for(hr=0;hr<HOST_REGS;hr++)
10336     {
10337       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10338         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10339           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10340           requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10341         }
10342       }
10343     }
10344     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10345   }
10346
10347   if(itype[slen-1]==SPAN) {
10348     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10349   }
10350   
10351   /* Debug/disassembly */
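  // Per-instruction dump of the allocator state (unneeded/needed sets,
  // 32-bit tracking, register maps, dirty bits, known constants); only
  // produced when assem_debug is routed to printf.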
10352   if((void*)assem_debug==(void*)printf) 
10353   for(i=0;i<slen;i++)
10354   {
10355     printf("U:");
10356     int r;
10357     for(r=1;r<=CCREG;r++) {
10358       if((unneeded_reg[i]>>r)&1) {
10359         if(r==HIREG) printf(" HI");
10360         else if(r==LOREG) printf(" LO");
10361         else printf(" r%d",r);
10362       }
10363     }
10364 #ifndef FORCE32
10365     printf(" UU:");
10366     for(r=1;r<=CCREG;r++) {
10367       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10368         if(r==HIREG) printf(" HI");
10369         else if(r==LOREG) printf(" LO");
10370         else printf(" r%d",r);
10371       }
10372     }
10373     printf(" 32:");
10374     for(r=0;r<=CCREG;r++) {
10375       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10376       if((regs[i].was32>>r)&1) {
10377         if(r==CCREG) printf(" CC");
10378         else if(r==HIREG) printf(" HI");
10379         else if(r==LOREG) printf(" LO");
10380         else printf(" r%d",r);
10381       }
10382     }
10383 #endif
10384     printf("\n");
10385     #if defined(__i386__) || defined(__x86_64__)
10386     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10387     #endif
10388     #ifdef __arm__
10389     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10390     #endif
10391     printf("needs: ");
10392     if(needed_reg[i]&1) printf("eax ");
10393     if((needed_reg[i]>>1)&1) printf("ecx ");
10394     if((needed_reg[i]>>2)&1) printf("edx ");
10395     if((needed_reg[i]>>3)&1) printf("ebx ");
10396     if((needed_reg[i]>>5)&1) printf("ebp ");
10397     if((needed_reg[i]>>6)&1) printf("esi ");
10398     if((needed_reg[i]>>7)&1) printf("edi ");
10399     printf("r:");
10400     for(r=0;r<=CCREG;r++) {
10401       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10402       if((requires_32bit[i]>>r)&1) {
10403         if(r==CCREG) printf(" CC");
10404         else if(r==HIREG) printf(" HI");
10405         else if(r==LOREG) printf(" LO");
10406         else printf(" r%d",r);
10407       }
10408     }
10409     printf("\n");
10410     /*printf("pr:");
10411     for(r=0;r<=CCREG;r++) {
10412       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10413       if((pr32[i]>>r)&1) {
10414         if(r==CCREG) printf(" CC");
10415         else if(r==HIREG) printf(" HI");
10416         else if(r==LOREG) printf(" LO");
10417         else printf(" r%d",r);
10418       }
10419     }
10420     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10421     printf("\n");*/
10422     #if defined(__i386__) || defined(__x86_64__)
10423     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10424     printf("dirty: ");
10425     if(regs[i].wasdirty&1) printf("eax ");
10426     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10427     if((regs[i].wasdirty>>2)&1) printf("edx ");
10428     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10429     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10430     if((regs[i].wasdirty>>6)&1) printf("esi ");
10431     if((regs[i].wasdirty>>7)&1) printf("edi ");
10432     #endif
10433     #ifdef __arm__
10434     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10435     printf("dirty: ");
10436     if(regs[i].wasdirty&1) printf("r0 ");
10437     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10438     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10439     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10440     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10441     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10442     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10443     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10444     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10445     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10446     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10447     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10448     #endif
10449     printf("\n");
10450     disassemble_inst(i);
10451     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10452     #if defined(__i386__) || defined(__x86_64__)
10453     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10454     if(regs[i].dirty&1) printf("eax ");
10455     if((regs[i].dirty>>1)&1) printf("ecx ");
10456     if((regs[i].dirty>>2)&1) printf("edx ");
10457     if((regs[i].dirty>>3)&1) printf("ebx ");
10458     if((regs[i].dirty>>5)&1) printf("ebp ");
10459     if((regs[i].dirty>>6)&1) printf("esi ");
10460     if((regs[i].dirty>>7)&1) printf("edi ");
10461     #endif
10462     #ifdef __arm__
10463     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10464     if(regs[i].dirty&1) printf("r0 ");
10465     if((regs[i].dirty>>1)&1) printf("r1 ");
10466     if((regs[i].dirty>>2)&1) printf("r2 ");
10467     if((regs[i].dirty>>3)&1) printf("r3 ");
10468     if((regs[i].dirty>>4)&1) printf("r4 ");
10469     if((regs[i].dirty>>5)&1) printf("r5 ");
10470     if((regs[i].dirty>>6)&1) printf("r6 ");
10471     if((regs[i].dirty>>7)&1) printf("r7 ");
10472     if((regs[i].dirty>>8)&1) printf("r8 ");
10473     if((regs[i].dirty>>9)&1) printf("r9 ");
10474     if((regs[i].dirty>>10)&1) printf("r10 ");
10475     if((regs[i].dirty>>12)&1) printf("r12 ");
10476     #endif
10477     printf("\n");
10478     if(regs[i].isconst) {
10479       printf("constants: ");
10480       #if defined(__i386__) || defined(__x86_64__)
10481       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10482       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10483       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10484       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10485       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10486       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10487       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10488       #endif
10489       #ifdef __arm__
10490       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10491       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10492       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10493       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10494       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10495       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10496       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10497       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10498       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10499       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10500       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10501       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10502       #endif
10503       printf("\n");
10504     }
10505 #ifndef FORCE32
10506     printf(" 32:");
10507     for(r=0;r<=CCREG;r++) {
10508       if((regs[i].is32>>r)&1) {
10509         if(r==CCREG) printf(" CC");
10510         else if(r==HIREG) printf(" HI");
10511         else if(r==LOREG) printf(" LO");
10512         else printf(" r%d",r);
10513       }
10514     }
10515     printf("\n");
10516 #endif
10517     /*printf(" p32:");
10518     for(r=0;r<=CCREG;r++) {
10519       if((p32[i]>>r)&1) {
10520         if(r==CCREG) printf(" CC");
10521         else if(r==HIREG) printf(" HI");
10522         else if(r==LOREG) printf(" LO");
10523         else printf(" r%d",r);
10524       }
10525     }
10526     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10527     else printf("\n");*/
10528     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10529       #if defined(__i386__) || defined(__x86_64__)
10530       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10531       if(branch_regs[i].dirty&1) printf("eax ");
10532       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10533       if((branch_regs[i].dirty>>2)&1) printf("edx ");
10534       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10535       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10536       if((branch_regs[i].dirty>>6)&1) printf("esi ");
10537       if((branch_regs[i].dirty>>7)&1) printf("edi ");
10538       #endif
10539       #ifdef __arm__
10540       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10541       if(branch_regs[i].dirty&1) printf("r0 ");
10542       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10543       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10544       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10545       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10546       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10547       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10548       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10549       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10550       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10551       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10552       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10553       #endif
10554 #ifndef FORCE32
10555       printf(" 32:");
10556       for(r=0;r<=CCREG;r++) {
10557         if((branch_regs[i].is32>>r)&1) {
10558           if(r==CCREG) printf(" CC");
10559           else if(r==HIREG) printf(" HI");
10560           else if(r==LOREG) printf(" LO");
10561           else printf(" r%d",r);
10562         }
10563       }
10564       printf("\n");
10565 #endif
10566     }
10567   }
10568
10569   /* Pass 8 - Assembly */
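  // One pass over the decoded block: write back stale register state, load the
  // inputs and constants each instruction needs, then emit host code according
  // to itype[].  Branch types set ds so the delay slot that follows is skipped
  // here; it is assembled together with the branch itself.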
10570   linkcount=0;stubcount=0;
10571   ds=0;is_delayslot=0;
10572   cop1_usable=0;
10573   uint64_t is32_pre=0;
10574   u_int dirty_pre=0;
10575   u_int beginning=(u_int)out;
10576   if((u_int)addr&1) {
10577     ds=1;
10578     pagespan_ds();
10579   }
10580   u_int instr_addr0_override=0;
10581
10582 #ifdef PCSX
10583   if (start == 0x80030000) {
10584     // nasty hack for fastbios thing
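    // The emitted code resets pcaddr to the block start and bails out to the
    // dispatcher (new_dyna_leave) if pcaddr had been redirected elsewhere.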
10585     instr_addr0_override=(u_int)out;
10586     emit_movimm(start,0);
10587     emit_readword((int)&pcaddr,1);
10588     emit_writeword(0,(int)&pcaddr);
10589     emit_cmp(0,1);
10590     emit_jne((int)new_dyna_leave);
10591   }
10592 #endif
10593   for(i=0;i<slen;i++)
10594   {
10595     //if(ds) printf("ds: ");
10596     if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10597     if(ds) {
10598       ds=0; // Skip delay slot
10599       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10600       instr_addr[i]=0;
10601     } else {
10602       #ifndef DESTRUCTIVE_WRITEBACK
10603       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10604       {
10605         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10606               unneeded_reg[i],unneeded_reg_upper[i]);
10607         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10608               unneeded_reg[i],unneeded_reg_upper[i]);
10609       }
10610       is32_pre=regs[i].is32;
10611       dirty_pre=regs[i].dirty;
10612       #endif
10613       // write back dirty registers that do not survive into this
10613       // instruction's entry register map
10614       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10615       {
10616         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10617                       unneeded_reg[i],unneeded_reg_upper[i]);
10618         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10619       }
10620       // branch target entry point
10621       instr_addr[i]=(u_int)out;
10622       assem_debug("<->\n");
10623       // load regs
10624       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10625         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10626       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10627       address_generation(i,&regs[i],regs[i].regmap_entry);
10628       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10629       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10630       {
10631         // Load the delay slot registers if necessary
10632         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10633           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10634         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10635           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10636         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10637           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10638       }
10639       else if(i+1<slen)
10640       {
10641         // Preload registers for following instruction
10642         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10643           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10644             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10645         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10646           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10647             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10648       }
10649       // TODO: if(is_ooo(i)) address_generation(i+1);
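      // Conditional branches need CCREG resident for the cycle-count test;
      // stores preload INVCP, the pointer used for the invalid_code[]
      // (self-modifying code) check.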
10650       if(itype[i]==CJUMP||itype[i]==FJUMP)
10651         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10652       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10653         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10654       if(bt[i]) cop1_usable=0;
10655       // assemble
10656       switch(itype[i]) {
10657         case ALU:
10658           alu_assemble(i,&regs[i]);break;
10659         case IMM16:
10660           imm16_assemble(i,&regs[i]);break;
10661         case SHIFT:
10662           shift_assemble(i,&regs[i]);break;
10663         case SHIFTIMM:
10664           shiftimm_assemble(i,&regs[i]);break;
10665         case LOAD:
10666           load_assemble(i,&regs[i]);break;
10667         case LOADLR:
10668           loadlr_assemble(i,&regs[i]);break;
10669         case STORE:
10670           store_assemble(i,&regs[i]);break;
10671         case STORELR:
10672           storelr_assemble(i,&regs[i]);break;
10673         case COP0:
10674           cop0_assemble(i,&regs[i]);break;
10675         case COP1:
10676           cop1_assemble(i,&regs[i]);break;
10677         case C1LS:
10678           c1ls_assemble(i,&regs[i]);break;
10679         case COP2:
10680           cop2_assemble(i,&regs[i]);break;
10681         case C2LS:
10682           c2ls_assemble(i,&regs[i]);break;
10683         case C2OP:
10684           c2op_assemble(i,&regs[i]);break;
10685         case FCONV:
10686           fconv_assemble(i,&regs[i]);break;
10687         case FLOAT:
10688           float_assemble(i,&regs[i]);break;
10689         case FCOMP:
10690           fcomp_assemble(i,&regs[i]);break;
10691         case MULTDIV:
10692           multdiv_assemble(i,&regs[i]);break;
10693         case MOV:
10694           mov_assemble(i,&regs[i]);break;
10695         case SYSCALL:
10696           syscall_assemble(i,&regs[i]);break;
10697         case HLECALL:
10698           hlecall_assemble(i,&regs[i]);break;
10699         case INTCALL:
10700           intcall_assemble(i,&regs[i]);break;
10701         case UJUMP:
10702           ujump_assemble(i,&regs[i]);ds=1;break;
10703         case RJUMP:
10704           rjump_assemble(i,&regs[i]);ds=1;break;
10705         case CJUMP:
10706           cjump_assemble(i,&regs[i]);ds=1;break;
10707         case SJUMP:
10708           sjump_assemble(i,&regs[i]);ds=1;break;
10709         case FJUMP:
10710           fjump_assemble(i,&regs[i]);ds=1;break;
10711         case SPAN:
10712           pagespan_assemble(i,&regs[i]);break;
10713       }
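      // Flush the (ARM) literal pool where it is cheap: after an unconditional
      // exit nothing follows, otherwise a jump over the pool is emitted if it
      // has to be dumped here.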
10714       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10715         literal_pool(1024);
10716       else
10717         literal_pool_jumpover(256);
10718     }
10719   }
10720   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10721   // If the block did not end with an unconditional branch,
10722   // add a jump to the next instruction.
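  // Three cases: the block tail is not a conditional-branch/delay-slot pair
  // (reload CCREG if needed and add the cycle count), the branch at i-2 was a
  // normal conditional branch (fall-through state is in branch_regs[i-2]), or
  // it was a branch-likely, whose delay slot is annulled on fall-through, so
  // the state to store is regs[i-2].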
10723   if(i>1) {
10724     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10725       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10726       assert(i==slen);
10727       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10728         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10729         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10730           emit_loadreg(CCREG,HOST_CCREG);
10731         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10732       }
10733       else if(!likely[i-2])
10734       {
10735         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10736         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10737       }
10738       else
10739       {
10740         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10741         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10742       }
10743       add_to_linker((int)out,start+i*4,0);
10744       emit_jmp(0);
10745     }
10746   }
10747   else
10748   {
10749     assert(i>0);
10750     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10751     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10752     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10753       emit_loadreg(CCREG,HOST_CCREG);
10754     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10755     add_to_linker((int)out,start+i*4,0);
10756     emit_jmp(0);
10757   }
10758
10759   // TODO: delay slot stubs?
10760   // Stubs
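  // Out-of-line code collected during assembly: slow memory-access paths,
  // cycle-count/interrupt checks, invalid-code checks, COP1-unusable
  // exceptions and unaligned store handlers.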
10761   for(i=0;i<stubcount;i++)
10762   {
10763     switch(stubs[i][0])
10764     {
10765       case LOADB_STUB:
10766       case LOADH_STUB:
10767       case LOADW_STUB:
10768       case LOADD_STUB:
10769       case LOADBU_STUB:
10770       case LOADHU_STUB:
10771         do_readstub(i);break;
10772       case STOREB_STUB:
10773       case STOREH_STUB:
10774       case STOREW_STUB:
10775       case STORED_STUB:
10776         do_writestub(i);break;
10777       case CC_STUB:
10778         do_ccstub(i);break;
10779       case INVCODE_STUB:
10780         do_invstub(i);break;
10781       case FP_STUB:
10782         do_cop1stub(i);break;
10783       case STORELR_STUB:
10784         do_unalignedwritestub(i);break;
10785     }
10786   }
10787
10788   if (instr_addr0_override)
10789     instr_addr[0] = instr_addr0_override;
10790
10791   /* Pass 9 - Linker */
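  // Resolve branches recorded by add_to_linker.  External targets get an
  // extjump trampoline and are patched straight to the compiled target if
  // check_addr already knows it; internal branches are patched to the
  // recorded instruction address.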
10792   for(i=0;i<linkcount;i++)
10793   {
10794     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10795     literal_pool(64);
10796     if(!link_addr[i][2])
10797     {
10798       void *stub=out;
10799       void *addr=check_addr(link_addr[i][1]);
10800       emit_extjump(link_addr[i][0],link_addr[i][1]);
10801       if(addr) {
10802         set_jump_target(link_addr[i][0],(int)addr);
10803         add_link(link_addr[i][1],stub);
10804       }
10805       else set_jump_target(link_addr[i][0],(int)stub);
10806     }
10807     else
10808     {
10809       // Internal branch
10810       int target=(link_addr[i][1]-start)>>2;
10811       assert(target>=0&&target<slen);
10812       assert(instr_addr[target]);
10813       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10814       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10815       //#else
10816       set_jump_target(link_addr[i][0],instr_addr[target]);
10817       //#endif
10818     }
10819   }
10820   // External Branch Targets (jump_in)
10821   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow; // wrap the shadow copy pointer when this block's source snapshot would overflow the buffer
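  // Register entry points: every branch target (and the block start) gets a
  // dirty-check stub which is added to jump_in/jump_dirty, and the hash table
  // entry is updated if this vaddr is already present.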
10822   for(i=0;i<slen;i++)
10823   {
10824     if(bt[i]||i==0)
10825     {
10826       if(instr_addr[i]) // TODO - delay slots (=null)
10827       {
10828         u_int vaddr=start+i*4;
10829         u_int page=get_page(vaddr);
10830         u_int vpage=get_vpage(vaddr);
10831         literal_pool(256);
10832         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10833         if(!requires_32bit[i])
10834         {
10835           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10836           assem_debug("jump_in: %x\n",start+i*4);
10837           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10838           int entry_point=do_dirty_stub(i);
10839           ll_add(jump_in+page,vaddr,(void *)entry_point);
10840           // If there was an existing entry in the hash table,
10841           // replace it with the new address.
10842           // Don't add new entries.  We'll insert the
10843           // ones that actually get used in check_addr().
10844           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10845           if(ht_bin[0]==vaddr) {
10846             ht_bin[1]=entry_point;
10847           }
10848           if(ht_bin[2]==vaddr) {
10849             ht_bin[3]=entry_point;
10850           }
10851         }
10852         else
10853         {
10854           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10855           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10856           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10857           //int entry_point=(int)out;
10858           ////assem_debug("entry_point: %x\n",entry_point);
10859           //load_regs_entry(i);
10860           //if(entry_point==(int)out)
10861           //  entry_point=instr_addr[i];
10862           //else
10863           //  emit_jmp(instr_addr[i]);
10864           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10865           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10866           int entry_point=do_dirty_stub(i);
10867           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10868         }
10869       }
10870     }
10871   }
10872   // Write out the literal pool if necessary
10873   literal_pool(0);
10874   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10875   // Align code
10876   if(((u_int)out)&7) emit_addnop(13);
10877   #endif
10878   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10879   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
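  // Keep a copy of the source MIPS code so the dirty-check stubs can later
  // verify that the original block has not been overwritten.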
10880   memcpy(copy,source,slen*4);
10881   copy+=slen*4;
10882   
10883   #ifdef __arm__
10884   __clear_cache((void *)beginning,out);
10885   #endif
10886   
10887   // If we're within 256K of the end of the buffer,
10888   // start over from the beginning. (Is 256K enough?)
10889   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10890   
10891   // Trap writes to any of the pages we compiled
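  // invalid_code[page]==0 marks the page as holding compiled code, so the
  // memory write handlers know to check for block invalidation.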
10892   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10893     invalid_code[i]=0;
10894 #ifndef DISABLE_TLB
10895     memory_map[i]|=0x40000000;
10896     if((signed int)start>=(signed int)0xC0000000) {
10897       assert(using_tlb);
10898       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
10899       invalid_code[j]=0;
10900       memory_map[j]|=0x40000000;
10901       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
10902     }
10903 #endif
10904   }
10905   
10906   /* Pass 10 - Free memory by expiring oldest blocks */
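  // The translation cache is treated as a ring buffer: expirep sweeps ahead of
  // the output pointer in 16-bit phase steps.  Bits 11-12 select which
  // structure to clear (jump_in/jump_dirty, jump_out pointers, hash table,
  // jump_out lists) and bits 13-15 select the 1/8th slice of the cache.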
10907   
10908   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10909   while(expirep!=end)
10910   {
10911     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10912     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10913     inv_debug("EXP: Phase %d\n",expirep);
10914     switch((expirep>>11)&3)
10915     {
10916       case 0:
10917         // Clear jump_in and jump_dirty
10918         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10919         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10920         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10921         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10922         break;
10923       case 1:
10924         // Clear pointers
10925         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10926         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10927         break;
10928       case 2:
10929         // Clear hash table
10930         for(i=0;i<32;i++) {
10931           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10932           if((ht_bin[3]>>shift)==(base>>shift) ||
10933              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10934             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10935             ht_bin[2]=ht_bin[3]=-1;
10936           }
10937           if((ht_bin[1]>>shift)==(base>>shift) ||
10938              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10939             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10940             ht_bin[0]=ht_bin[2];
10941             ht_bin[1]=ht_bin[3];
10942             ht_bin[2]=ht_bin[3]=-1;
10943           }
10944         }
10945         break;
10946       case 3:
10947         // Clear jump_out
10948         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10949         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10950         break;
10951     }
10952     expirep=(expirep+1)&65535;
10953   }
10954   return 0;
10955 }
10956
10957 // vim:shiftwidth=2:expandtab