fix some alignment issues
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2010 Ari64 *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
24
3d624f89 25#include "emu_if.h" //emulator interface
57871462 26
27#include <sys/mman.h>
28
29#ifdef __i386__
30#include "assem_x86.h"
31#endif
32#ifdef __x86_64__
33#include "assem_x64.h"
34#endif
35#ifdef __arm__
36#include "assem_arm.h"
37#endif
38
39#define MAXBLOCK 4096
40#define MAX_OUTPUT_BLOCK_SIZE 262144
41#define CLOCK_DIVIDER 2
42
43struct regstat
44{
45 signed char regmap_entry[HOST_REGS];
46 signed char regmap[HOST_REGS];
47 uint64_t was32;
48 uint64_t is32;
49 uint64_t wasdirty;
50 uint64_t dirty;
51 uint64_t u;
52 uint64_t uu;
53 u_int wasconst;
54 u_int isconst;
55 uint64_t constmap[HOST_REGS];
56};
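/*
 * Conventions used by struct regstat (illustrative sketch, not part of the
 * build): regmap[hr] names the guest (MIPS) register cached in host register
 * hr, with r|64 standing for the upper half of guest register r; dirty,
 * isconst and constmap are indexed by host register, while is32, u and uu
 * are bitmasks indexed by guest register number.
 */
#if 0
static int guest_reg_is_dirty(const struct regstat *st, int r)
{
  int hr;
  for (hr = 0; hr < HOST_REGS; hr++) {
    if (hr != EXCLUDE_REG && (st->regmap[hr] & 63) == r)
      return (st->dirty >> hr) & 1;  /* dirty bit follows the host register */
  }
  return 0;                          /* not cached in any host register */
}
#endif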
57
58struct ll_entry
59{
60 u_int vaddr;
61 u_int reg32;
62 void *addr;
63 struct ll_entry *next;
64};
65
66 u_int start;
67 u_int *source;
68 u_int pagelimit;
69 char insn[MAXBLOCK][10];
70 u_char itype[MAXBLOCK];
71 u_char opcode[MAXBLOCK];
72 u_char opcode2[MAXBLOCK];
73 u_char bt[MAXBLOCK];
74 u_char rs1[MAXBLOCK];
75 u_char rs2[MAXBLOCK];
76 u_char rt1[MAXBLOCK];
77 u_char rt2[MAXBLOCK];
78 u_char us1[MAXBLOCK];
79 u_char us2[MAXBLOCK];
80 u_char dep1[MAXBLOCK];
81 u_char dep2[MAXBLOCK];
82 u_char lt1[MAXBLOCK];
83 int imm[MAXBLOCK];
84 u_int ba[MAXBLOCK];
85 char likely[MAXBLOCK];
86 char is_ds[MAXBLOCK];
87 uint64_t unneeded_reg[MAXBLOCK];
88 uint64_t unneeded_reg_upper[MAXBLOCK];
89 uint64_t branch_unneeded_reg[MAXBLOCK];
90 uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91 uint64_t p32[MAXBLOCK];
92 uint64_t pr32[MAXBLOCK];
93 signed char regmap_pre[MAXBLOCK][HOST_REGS];
94 signed char regmap[MAXBLOCK][HOST_REGS];
95 signed char regmap_entry[MAXBLOCK][HOST_REGS];
96 uint64_t constmap[MAXBLOCK][HOST_REGS];
97 uint64_t known_value[HOST_REGS];
98 u_int known_reg;
99 struct regstat regs[MAXBLOCK];
100 struct regstat branch_regs[MAXBLOCK];
101 u_int needed_reg[MAXBLOCK];
102 uint64_t requires_32bit[MAXBLOCK];
103 u_int wont_dirty[MAXBLOCK];
104 u_int will_dirty[MAXBLOCK];
105 int ccadj[MAXBLOCK];
106 int slen;
107 u_int instr_addr[MAXBLOCK];
108 u_int link_addr[MAXBLOCK][3];
109 int linkcount;
110 u_int stubs[MAXBLOCK*3][8];
111 int stubcount;
112 u_int literals[1024][2];
113 int literalcount;
114 int is_delayslot;
115 int cop1_usable;
116 u_char *out;
117 struct ll_entry *jump_in[4096];
118 struct ll_entry *jump_out[4096];
119 struct ll_entry *jump_dirty[4096];
120 u_int hash_table[65536][4] __attribute__((aligned(16)));
121 char shadow[1048576] __attribute__((aligned(16)));
122 void *copy;
123 int expirep;
124 u_int using_tlb;
125 u_int stop_after_jal;
126 extern u_char restore_candidate[512];
127 extern int cycle_count;
128
129 /* registers that may be allocated */
130 /* 1-31 gpr */
131#define HIREG 32 // hi
132#define LOREG 33 // lo
133#define FSREG 34 // FPU status (FCSR)
134#define CSREG 35 // Coprocessor status
135#define CCREG 36 // Cycle count
136#define INVCP 37 // Pointer to invalid_code
137#define TEMPREG 38
b9b61529 138#define FTEMP 38 // FPU/LDL/LDR temporary register
57871462 139#define PTEMP 39 // Prefetch temporary register
140#define TLREG 40 // TLB mapping offset
141#define RHASH 41 // Return address hash
142#define RHTBL 42 // Return address hash table address
143#define RTEMP 43 // JR/JALR address register
144#define MAXREG 43
145#define AGEN1 44 // Address generation temporary register
146#define AGEN2 45 // Address generation temporary register
147#define MGEN1 46 // Maptable address generation temporary register
148#define MGEN2 47 // Maptable address generation temporary register
149#define BTREG 48 // Branch target temporary register
150
151 /* instruction types */
152#define NOP 0 // No operation
153#define LOAD 1 // Load
154#define STORE 2 // Store
155#define LOADLR 3 // Unaligned load
156#define STORELR 4 // Unaligned store
157#define MOV 5 // Move
158#define ALU 6 // Arithmetic/logic
159#define MULTDIV 7 // Multiply/divide
160#define SHIFT 8 // Shift by register
 161#define SHIFTIMM 9 // Shift by immediate
162#define IMM16 10 // 16-bit immediate
163#define RJUMP 11 // Unconditional jump to register
164#define UJUMP 12 // Unconditional jump
165#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166#define SJUMP 14 // Conditional branch (regimm format)
167#define COP0 15 // Coprocessor 0
168#define COP1 16 // Coprocessor 1
169#define C1LS 17 // Coprocessor 1 load/store
170#define FJUMP 18 // Conditional branch (floating point)
171#define FLOAT 19 // Floating point unit
172#define FCONV 20 // Convert integer to float
173#define FCOMP 21 // Floating point compare (sets FSREG)
 174#define SYSCALL 22 // SYSCALL
175#define OTHER 23 // Other
176#define SPAN 24 // Branch/delay slot spans 2 pages
177#define NI 25 // Not implemented
7139f3c8 178#define HLECALL 26 // PCSX fake opcodes for HLE
b9b61529 179#define COP2 27 // Coprocessor 2 move
180#define C2LS 28 // Coprocessor 2 load/store
181#define C2OP 29 // Coprocessor 2 operation
57871462 182
183 /* stubs */
184#define CC_STUB 1
185#define FP_STUB 2
186#define LOADB_STUB 3
187#define LOADH_STUB 4
188#define LOADW_STUB 5
189#define LOADD_STUB 6
190#define LOADBU_STUB 7
191#define LOADHU_STUB 8
192#define STOREB_STUB 9
193#define STOREH_STUB 10
194#define STOREW_STUB 11
195#define STORED_STUB 12
196#define STORELR_STUB 13
197#define INVCODE_STUB 14
198
199 /* branch codes */
200#define TAKEN 1
201#define NOTTAKEN 2
202#define NULLDS 3
203
204// asm linkage
205int new_recompile_block(int addr);
206void *get_addr_ht(u_int vaddr);
207void invalidate_block(u_int block);
208void invalidate_addr(u_int addr);
209void remove_hash(int vaddr);
210void jump_vaddr();
211void dyna_linker();
212void dyna_linker_ds();
213void verify_code();
214void verify_code_vm();
215void verify_code_ds();
216void cc_interrupt();
217void fp_exception();
218void fp_exception_ds();
219void jump_syscall();
7139f3c8 220void jump_syscall_hle();
57871462 221void jump_eret();
7139f3c8 222void jump_hlecall();
223void new_dyna_leave();
57871462 224
225// TLB
226void TLBWI_new();
227void TLBWR_new();
228void read_nomem_new();
229void read_nomemb_new();
230void read_nomemh_new();
231void read_nomemd_new();
232void write_nomem_new();
233void write_nomemb_new();
234void write_nomemh_new();
235void write_nomemd_new();
236void write_rdram_new();
237void write_rdramb_new();
238void write_rdramh_new();
239void write_rdramd_new();
240extern u_int memory_map[1048576];
241
242// Needed by assembler
243void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
244void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
245void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
246void load_all_regs(signed char i_regmap[]);
247void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
248void load_regs_entry(int t);
249void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
250
251int tracedebug=0;
252
253//#define DEBUG_CYCLE_COUNT 1
254
255void nullf() {}
256//#define assem_debug printf
257//#define inv_debug printf
258#define assem_debug nullf
259#define inv_debug nullf
260
94d23bb9 261static void tlb_hacks()
57871462 262{
94d23bb9 263#ifndef DISABLE_TLB
57871462 264 // Goldeneye hack
265 if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
266 {
267 u_int addr;
268 int n;
269 switch (ROM_HEADER->Country_code&0xFF)
270 {
271 case 0x45: // U
272 addr=0x34b30;
273 break;
274 case 0x4A: // J
275 addr=0x34b70;
276 break;
277 case 0x50: // E
278 addr=0x329f0;
279 break;
280 default:
281 // Unknown country code
282 addr=0;
283 break;
284 }
285 u_int rom_addr=(u_int)rom;
286 #ifdef ROM_COPY
287 // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
288 // in the lower 4G of memory to use this hack. Copy it if necessary.
289 if((void *)rom>(void *)0xffffffff) {
290 munmap(ROM_COPY, 67108864);
291 if(mmap(ROM_COPY, 12582912,
292 PROT_READ | PROT_WRITE,
293 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
294 -1, 0) <= 0) {printf("mmap() failed\n");}
295 memcpy(ROM_COPY,rom,12582912);
296 rom_addr=(u_int)ROM_COPY;
297 }
298 #endif
299 if(addr) {
300 for(n=0x7F000;n<0x80000;n++) {
301 memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
302 }
303 }
304 }
94d23bb9 305#endif
57871462 306}
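/*
 * How the memory_map[] entries written above are consumed elsewhere in this
 * file (illustrative sketch, not part of the build): each entry covers one
 * 4K virtual page and holds the host-minus-virtual offset in 4-byte units,
 * bit 30 (0x40000000) doubles as a write-protect flag, and -1 marks an
 * unmapped page.
 */
#if 0
static void *translate_vaddr(u_int vaddr)
{
  u_int entry = memory_map[vaddr >> 12];
  if (entry == (u_int)-1)
    return NULL;                         /* unmapped page */
  /* shifting the entry left by 2 restores the byte offset and drops the
     write-protect bit out of the top of the 32-bit value */
  return (void *)(vaddr + (entry << 2));
}
#endif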
307
94d23bb9 308static u_int get_page(u_int vaddr)
57871462 309{
310 u_int page=(vaddr^0x80000000)>>12;
94d23bb9 311#ifndef DISABLE_TLB
57871462 312 if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
94d23bb9 313#endif
57871462 314 if(page>2048) page=2048+(page&2047);
94d23bb9 315 return page;
316}
317
318static u_int get_vpage(u_int vaddr)
319{
320 u_int vpage=(vaddr^0x80000000)>>12;
321#ifndef DISABLE_TLB
57871462 322 if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
94d23bb9 323#endif
57871462 324 if(vpage>2048) vpage=2048+(vpage&2047);
94d23bb9 325 return vpage;
326}
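/*
 * Worked example for the page hashing above (illustrative sketch, not part
 * of the build): jump_in/jump_out/jump_dirty have 4096 entries, so pages
 * beyond the first 8MB of the KSEG0 view are folded into slots 2048..4095.
 */
#if 0
static void page_examples(void)
{
  assert(get_page(0x80030000) == 0x030);  /* KSEG0: page = offset>>12 */
  assert(get_page(0xa0030000) == 0x830);  /* KSEG1 mirror: 2048+(page&2047) */
}
#endif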
327
328// Get address from virtual address
329// This is called from the recompiled JR/JALR instructions
330void *get_addr(u_int vaddr)
331{
332 u_int page=get_page(vaddr);
333 u_int vpage=get_vpage(vaddr);
57871462 334 struct ll_entry *head;
335 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
336 head=jump_in[page];
337 while(head!=NULL) {
338 if(head->vaddr==vaddr&&head->reg32==0) {
339 //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
340 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
341 ht_bin[3]=ht_bin[1];
342 ht_bin[2]=ht_bin[0];
343 ht_bin[1]=(int)head->addr;
344 ht_bin[0]=vaddr;
345 return head->addr;
346 }
347 head=head->next;
348 }
349 head=jump_dirty[vpage];
350 while(head!=NULL) {
351 if(head->vaddr==vaddr&&head->reg32==0) {
352 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
353 // Don't restore blocks which are about to expire from the cache
354 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
355 if(verify_dirty(head->addr)) {
356 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
357 invalid_code[vaddr>>12]=0;
358 memory_map[vaddr>>12]|=0x40000000;
359 if(vpage<2048) {
94d23bb9 360#ifndef DISABLE_TLB
57871462 361 if(tlb_LUT_r[vaddr>>12]) {
362 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
363 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
364 }
94d23bb9 365#endif
57871462 366 restore_candidate[vpage>>3]|=1<<(vpage&7);
367 }
368 else restore_candidate[page>>3]|=1<<(page&7);
369 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
370 if(ht_bin[0]==vaddr) {
371 ht_bin[1]=(int)head->addr; // Replace existing entry
372 }
373 else
374 {
375 ht_bin[3]=ht_bin[1];
376 ht_bin[2]=ht_bin[0];
377 ht_bin[1]=(int)head->addr;
378 ht_bin[0]=vaddr;
379 }
380 return head->addr;
381 }
382 }
383 head=head->next;
384 }
385 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
386 int r=new_recompile_block(vaddr);
387 if(r==0) return get_addr(vaddr);
 388 // Execute in unmapped page, generate pagefault exception
389 Status|=2;
390 Cause=(vaddr<<31)|0x8;
391 EPC=(vaddr&1)?vaddr-5:vaddr;
392 BadVAddr=(vaddr&~1);
393 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
394 EntryHi=BadVAddr&0xFFFFE000;
395 return get_addr_ht(0x80000000);
396}
397// Look up address in hash table first
398void *get_addr_ht(u_int vaddr)
399{
400 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
401 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
402 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
403 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
404 return get_addr(vaddr);
405}
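/*
 * Shape of the hash_table bins used by get_addr()/get_addr_ht() above
 * (illustrative sketch, not part of the build): each of the 65536 bins holds
 * two {vaddr, host address} pairs; slots 0/1 are the most recently used pair
 * and slots 2/3 hold the previous pair, which is evicted on the next insert.
 */
#if 0
static void ht_insert_mru(u_int vaddr, void *addr)
{
  u_int *ht_bin = hash_table[((vaddr >> 16) ^ vaddr) & 0xFFFF];
  ht_bin[3] = ht_bin[1];     /* demote the old MRU pair to the victim slot */
  ht_bin[2] = ht_bin[0];
  ht_bin[1] = (u_int)addr;   /* install the new MRU pair */
  ht_bin[0] = vaddr;
}
#endif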
406
407void *get_addr_32(u_int vaddr,u_int flags)
408{
7139f3c8 409#ifdef FORCE32
410 return get_addr(vaddr);
560e4a12 411#else
57871462 412 //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
413 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
414 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
415 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
94d23bb9 416 u_int page=get_page(vaddr);
417 u_int vpage=get_vpage(vaddr);
57871462 418 struct ll_entry *head;
419 head=jump_in[page];
420 while(head!=NULL) {
421 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
422 //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
423 if(head->reg32==0) {
424 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
425 if(ht_bin[0]==-1) {
426 ht_bin[1]=(int)head->addr;
427 ht_bin[0]=vaddr;
428 }else if(ht_bin[2]==-1) {
429 ht_bin[3]=(int)head->addr;
430 ht_bin[2]=vaddr;
431 }
432 //ht_bin[3]=ht_bin[1];
433 //ht_bin[2]=ht_bin[0];
434 //ht_bin[1]=(int)head->addr;
435 //ht_bin[0]=vaddr;
436 }
437 return head->addr;
438 }
439 head=head->next;
440 }
441 head=jump_dirty[vpage];
442 while(head!=NULL) {
443 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
444 //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
445 // Don't restore blocks which are about to expire from the cache
446 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
447 if(verify_dirty(head->addr)) {
448 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
449 invalid_code[vaddr>>12]=0;
450 memory_map[vaddr>>12]|=0x40000000;
451 if(vpage<2048) {
94d23bb9 452#ifndef DISABLE_TLB
57871462 453 if(tlb_LUT_r[vaddr>>12]) {
454 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
455 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
456 }
94d23bb9 457#endif
57871462 458 restore_candidate[vpage>>3]|=1<<(vpage&7);
459 }
460 else restore_candidate[page>>3]|=1<<(page&7);
461 if(head->reg32==0) {
462 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
463 if(ht_bin[0]==-1) {
464 ht_bin[1]=(int)head->addr;
465 ht_bin[0]=vaddr;
466 }else if(ht_bin[2]==-1) {
467 ht_bin[3]=(int)head->addr;
468 ht_bin[2]=vaddr;
469 }
470 //ht_bin[3]=ht_bin[1];
471 //ht_bin[2]=ht_bin[0];
472 //ht_bin[1]=(int)head->addr;
473 //ht_bin[0]=vaddr;
474 }
475 return head->addr;
476 }
477 }
478 head=head->next;
479 }
480 //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
481 int r=new_recompile_block(vaddr);
482 if(r==0) return get_addr(vaddr);
 483 // Execute in unmapped page, generate pagefault exception
484 Status|=2;
485 Cause=(vaddr<<31)|0x8;
486 EPC=(vaddr&1)?vaddr-5:vaddr;
487 BadVAddr=(vaddr&~1);
488 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
489 EntryHi=BadVAddr&0xFFFFE000;
490 return get_addr_ht(0x80000000);
560e4a12 491#endif
57871462 492}
493
494void clear_all_regs(signed char regmap[])
495{
496 int hr;
497 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
498}
499
500signed char get_reg(signed char regmap[],int r)
501{
502 int hr;
503 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
504 return -1;
505}
506
507// Find a register that is available for two consecutive cycles
508signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
509{
510 int hr;
511 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
512 return -1;
513}
514
515int count_free_regs(signed char regmap[])
516{
517 int count=0;
518 int hr;
519 for(hr=0;hr<HOST_REGS;hr++)
520 {
521 if(hr!=EXCLUDE_REG) {
522 if(regmap[hr]<0) count++;
523 }
524 }
525 return count;
526}
527
528void dirty_reg(struct regstat *cur,signed char reg)
529{
530 int hr;
531 if(!reg) return;
532 for (hr=0;hr<HOST_REGS;hr++) {
533 if((cur->regmap[hr]&63)==reg) {
534 cur->dirty|=1<<hr;
535 }
536 }
537}
538
539// If we dirty the lower half of a 64 bit register which is now being
540// sign-extended, we need to dump the upper half.
541// Note: Do this only after completion of the instruction, because
542// some instructions may need to read the full 64-bit value even if
 543// overwriting it (e.g. SLTI, DSRA32).
544static void flush_dirty_uppers(struct regstat *cur)
545{
546 int hr,reg;
547 for (hr=0;hr<HOST_REGS;hr++) {
548 if((cur->dirty>>hr)&1) {
549 reg=cur->regmap[hr];
550 if(reg>=64)
551 if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
552 }
553 }
554}
555
556void set_const(struct regstat *cur,signed char reg,uint64_t value)
557{
558 int hr;
559 if(!reg) return;
560 for (hr=0;hr<HOST_REGS;hr++) {
561 if(cur->regmap[hr]==reg) {
562 cur->isconst|=1<<hr;
563 cur->constmap[hr]=value;
564 }
565 else if((cur->regmap[hr]^64)==reg) {
566 cur->isconst|=1<<hr;
567 cur->constmap[hr]=value>>32;
568 }
569 }
570}
571
572void clear_const(struct regstat *cur,signed char reg)
573{
574 int hr;
575 if(!reg) return;
576 for (hr=0;hr<HOST_REGS;hr++) {
577 if((cur->regmap[hr]&63)==reg) {
578 cur->isconst&=~(1<<hr);
579 }
580 }
581}
582
583int is_const(struct regstat *cur,signed char reg)
584{
585 int hr;
586 if(!reg) return 1;
587 for (hr=0;hr<HOST_REGS;hr++) {
588 if((cur->regmap[hr]&63)==reg) {
589 return (cur->isconst>>hr)&1;
590 }
591 }
592 return 0;
593}
594uint64_t get_const(struct regstat *cur,signed char reg)
595{
596 int hr;
597 if(!reg) return 0;
598 for (hr=0;hr<HOST_REGS;hr++) {
599 if(cur->regmap[hr]==reg) {
600 return cur->constmap[hr];
601 }
602 }
603 printf("Unknown constant in r%d\n",reg);
604 exit(1);
605}
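/*
 * Typical use of the constant tracking above (illustrative sketch, not part
 * of the build, mirroring the ORI case in imm16_alloc() further down): when
 * the source register of an immediate op is a known constant, the result is
 * recorded as a constant too instead of being cleared.
 */
#if 0
static void fold_ori_example(struct regstat *cur, signed char rt,
                             signed char rs, int imm16)
{
  if (is_const(cur, rs))
    set_const(cur, rt, get_const(cur, rs) | (u_int)imm16);  /* fold ORI */
  else
    clear_const(cur, rt);                                   /* value unknown */
}
#endif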
606
607// Least soon needed registers
608// Look at the next ten instructions and see which registers
609// will be used. Try not to reallocate these.
610void lsn(u_char hsn[], int i, int *preferred_reg)
611{
612 int j;
613 int b=-1;
614 for(j=0;j<9;j++)
615 {
616 if(i+j>=slen) {
617 j=slen-i-1;
618 break;
619 }
620 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
621 {
 622 // Don't go past an unconditional jump
623 j++;
624 break;
625 }
626 }
627 for(;j>=0;j--)
628 {
629 if(rs1[i+j]) hsn[rs1[i+j]]=j;
630 if(rs2[i+j]) hsn[rs2[i+j]]=j;
631 if(rt1[i+j]) hsn[rt1[i+j]]=j;
632 if(rt2[i+j]) hsn[rt2[i+j]]=j;
633 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
634 // Stores can allocate zero
635 hsn[rs1[i+j]]=j;
636 hsn[rs2[i+j]]=j;
637 }
638 // On some architectures stores need invc_ptr
639 #if defined(HOST_IMM8)
b9b61529 640 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
57871462 641 hsn[INVCP]=j;
642 }
643 #endif
644 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
645 {
646 hsn[CCREG]=j;
647 b=j;
648 }
649 }
650 if(b>=0)
651 {
652 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
653 {
654 // Follow first branch
655 int t=(ba[i+b]-start)>>2;
656 j=7-b;if(t+j>=slen) j=slen-t-1;
657 for(;j>=0;j--)
658 {
659 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
660 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
661 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
662 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
663 }
664 }
665 // TODO: preferred register based on backward branch
666 }
667 // Delay slot should preferably not overwrite branch conditions or cycle count
668 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
669 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
670 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
671 hsn[CCREG]=1;
672 // ...or hash tables
673 hsn[RHASH]=1;
674 hsn[RHTBL]=1;
675 }
676 // Coprocessor load/store needs FTEMP, even if not declared
b9b61529 677 if(itype[i]==C1LS||itype[i]==C2LS) {
57871462 678 hsn[FTEMP]=0;
679 }
680 // Load L/R also uses FTEMP as a temporary register
681 if(itype[i]==LOADLR) {
682 hsn[FTEMP]=0;
683 }
b7918751 684 // Also SWL/SWR/SDL/SDR
685 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
57871462 686 hsn[FTEMP]=0;
687 }
688 // Don't remove the TLB registers either
b9b61529 689 if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
57871462 690 hsn[TLREG]=0;
691 }
692 // Don't remove the miniht registers
693 if(itype[i]==UJUMP||itype[i]==RJUMP)
694 {
695 hsn[RHASH]=0;
696 hsn[RHTBL]=0;
697 }
698}
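/*
 * Convention for the hsn[] array filled in by lsn() (illustrative sketch,
 * not part of the build): hsn[r] is the number of instructions until guest
 * register r is next used within the lookahead window; callers preset every
 * entry to 10 ("not needed soon"), and smaller values mean the register
 * should not be evicted.
 */
#if 0
static void lsn_usage_example(int i)
{
  u_char hsn[MAXREG + 1];
  int preferred_reg;
  memset(hsn, 10, sizeof(hsn));   /* 10 = not used in the lookahead window */
  lsn(hsn, i, &preferred_reg);
  /* e.g. hsn[CCREG]==1 means the cycle count is read one instruction from
     now (a branch), so CCREG should stay allocated. */
}
#endif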
699
700// We only want to allocate registers if we're going to use them again soon
701int needed_again(int r, int i)
702{
703 int j;
704 int b=-1;
705 int rn=10;
706 int hr;
707 u_char hsn[MAXREG+1];
708 int preferred_reg;
709
710 memset(hsn,10,sizeof(hsn));
711 lsn(hsn,i,&preferred_reg);
712
713 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
714 {
715 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
716 return 0; // Don't need any registers if exiting the block
717 }
718 for(j=0;j<9;j++)
719 {
720 if(i+j>=slen) {
721 j=slen-i-1;
722 break;
723 }
724 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
725 {
 726 // Don't go past an unconditional jump
727 j++;
728 break;
729 }
7139f3c8 730 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||((source[i+j]&0xfc00003f)==0x0d))
57871462 731 {
732 break;
733 }
734 }
735 for(;j>=1;j--)
736 {
737 if(rs1[i+j]==r) rn=j;
738 if(rs2[i+j]==r) rn=j;
739 if((unneeded_reg[i+j]>>r)&1) rn=10;
740 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
741 {
742 b=j;
743 }
744 }
745 /*
746 if(b>=0)
747 {
748 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
749 {
750 // Follow first branch
751 int o=rn;
752 int t=(ba[i+b]-start)>>2;
753 j=7-b;if(t+j>=slen) j=slen-t-1;
754 for(;j>=0;j--)
755 {
756 if(!((unneeded_reg[t+j]>>r)&1)) {
757 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
758 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
759 }
760 else rn=o;
761 }
762 }
763 }*/
764 for(hr=0;hr<HOST_REGS;hr++) {
765 if(hr!=EXCLUDE_REG) {
766 if(rn<hsn[hr]) return 1;
767 }
768 }
769 return 0;
770}
771
772// Try to match register allocations at the end of a loop with those
773// at the beginning
774int loop_reg(int i, int r, int hr)
775{
776 int j,k;
777 for(j=0;j<9;j++)
778 {
779 if(i+j>=slen) {
780 j=slen-i-1;
781 break;
782 }
783 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
784 {
 785 // Don't go past an unconditional jump
786 j++;
787 break;
788 }
789 }
790 k=0;
791 if(i>0){
792 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
793 k--;
794 }
795 for(;k<j;k++)
796 {
797 if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
798 if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
799 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
800 {
801 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
802 {
803 int t=(ba[i+k]-start)>>2;
804 int reg=get_reg(regs[t].regmap_entry,r);
805 if(reg>=0) return reg;
806 //reg=get_reg(regs[t+1].regmap_entry,r);
807 //if(reg>=0) return reg;
808 }
809 }
810 }
811 return hr;
812}
813
814
815// Allocate every register, preserving source/target regs
816void alloc_all(struct regstat *cur,int i)
817{
818 int hr;
819
820 for(hr=0;hr<HOST_REGS;hr++) {
821 if(hr!=EXCLUDE_REG) {
822 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
823 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
824 {
825 cur->regmap[hr]=-1;
826 cur->dirty&=~(1<<hr);
827 }
828 // Don't need zeros
829 if((cur->regmap[hr]&63)==0)
830 {
831 cur->regmap[hr]=-1;
832 cur->dirty&=~(1<<hr);
833 }
834 }
835 }
836}
837
838
839void div64(int64_t dividend,int64_t divisor)
840{
841 lo=dividend/divisor;
842 hi=dividend%divisor;
843 //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
844 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
845}
846void divu64(uint64_t dividend,uint64_t divisor)
847{
848 lo=dividend/divisor;
849 hi=dividend%divisor;
850 //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
851 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
852}
853
 854void mult64(int64_t m1,int64_t m2)
855{
856 unsigned long long int op1, op2, op3, op4;
857 unsigned long long int result1, result2, result3, result4;
858 unsigned long long int temp1, temp2, temp3, temp4;
859 int sign = 0;
860
861 if (m1 < 0)
862 {
863 op2 = -m1;
864 sign = 1 - sign;
865 }
866 else op2 = m1;
867 if (m2 < 0)
868 {
869 op4 = -m2;
870 sign = 1 - sign;
871 }
872 else op4 = m2;
873
874 op1 = op2 & 0xFFFFFFFF;
875 op2 = (op2 >> 32) & 0xFFFFFFFF;
876 op3 = op4 & 0xFFFFFFFF;
877 op4 = (op4 >> 32) & 0xFFFFFFFF;
878
879 temp1 = op1 * op3;
880 temp2 = (temp1 >> 32) + op1 * op4;
881 temp3 = op2 * op3;
882 temp4 = (temp3 >> 32) + op2 * op4;
883
884 result1 = temp1 & 0xFFFFFFFF;
885 result2 = temp2 + (temp3 & 0xFFFFFFFF);
886 result3 = (result2 >> 32) + temp4;
887 result4 = (result3 >> 32);
888
889 lo = result1 | (result2 << 32);
890 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
891 if (sign)
892 {
893 hi = ~hi;
894 if (!lo) hi++;
895 else lo = ~lo + 1;
896 }
897}
898
899void multu64(uint64_t m1,uint64_t m2)
900{
901 unsigned long long int op1, op2, op3, op4;
902 unsigned long long int result1, result2, result3, result4;
903 unsigned long long int temp1, temp2, temp3, temp4;
904
905 op1 = m1 & 0xFFFFFFFF;
906 op2 = (m1 >> 32) & 0xFFFFFFFF;
907 op3 = m2 & 0xFFFFFFFF;
908 op4 = (m2 >> 32) & 0xFFFFFFFF;
909
910 temp1 = op1 * op3;
911 temp2 = (temp1 >> 32) + op1 * op4;
912 temp3 = op2 * op3;
913 temp4 = (temp3 >> 32) + op2 * op4;
914
915 result1 = temp1 & 0xFFFFFFFF;
916 result2 = temp2 + (temp3 & 0xFFFFFFFF);
917 result3 = (result2 >> 32) + temp4;
918 result4 = (result3 >> 32);
919
920 lo = result1 | (result2 << 32);
921 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
922
923 //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
924 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
925}
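/*
 * Reference formulation of multu64() (illustrative sketch, not part of the
 * build; assumes a compiler that provides unsigned __int128): the four
 * 32x32 partial products above are an open-coded 64x64->128 bit multiply.
 */
#if 0
static void multu64_ref(uint64_t m1, uint64_t m2)
{
  unsigned __int128 p = (unsigned __int128)m1 * m2;
  lo = (uint64_t)p;          /* low 64 bits of the product */
  hi = (uint64_t)(p >> 64);  /* high 64 bits of the product */
}
#endif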
926
927uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
928{
929 if(bits) {
930 original<<=64-bits;
931 original>>=64-bits;
932 loaded<<=bits;
933 original|=loaded;
934 }
935 else original=loaded;
936 return original;
937}
938uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
939{
940 if(bits^56) {
941 original>>=64-(bits^56);
942 original<<=64-(bits^56);
943 loaded>>=bits^56;
944 original|=loaded;
945 }
946 else original=loaded;
947 return original;
948}
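/*
 * Worked example for the unaligned-load helpers above (illustrative sketch,
 * not part of the build): with bits=16, ldl_merge() keeps the low 16 bits of
 * the old register value and places the loaded doubleword, shifted up by 16,
 * in the remaining upper bits; ldr_merge() works the same way from the other
 * end of the register.
 */
#if 0
static void ldl_merge_example(void)
{
  uint64_t old_val = 0x1122334455667788ULL;
  uint64_t loaded  = 0xAABBCCDDEEFF0011ULL;
  assert(ldl_merge(old_val, loaded, 16) == 0xCCDDEEFF00117788ULL);
}
#endif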
949
950#ifdef __i386__
951#include "assem_x86.c"
952#endif
953#ifdef __x86_64__
954#include "assem_x64.c"
955#endif
956#ifdef __arm__
957#include "assem_arm.c"
958#endif
959
960// Add virtual address mapping to linked list
961void ll_add(struct ll_entry **head,int vaddr,void *addr)
962{
963 struct ll_entry *new_entry;
964 new_entry=malloc(sizeof(struct ll_entry));
965 assert(new_entry!=NULL);
966 new_entry->vaddr=vaddr;
967 new_entry->reg32=0;
968 new_entry->addr=addr;
969 new_entry->next=*head;
970 *head=new_entry;
971}
972
973// Add virtual address mapping for 32-bit compiled block
974void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
975{
7139f3c8 976 ll_add(head,vaddr,addr);
977#ifndef FORCE32
978 (*head)->reg32=reg32;
979#endif
57871462 980}
981
982// Check if an address is already compiled
983// but don't return addresses which are about to expire from the cache
984void *check_addr(u_int vaddr)
985{
986 u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
987 if(ht_bin[0]==vaddr) {
988 if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
989 if(isclean(ht_bin[1])) return (void *)ht_bin[1];
990 }
991 if(ht_bin[2]==vaddr) {
992 if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
993 if(isclean(ht_bin[3])) return (void *)ht_bin[3];
994 }
94d23bb9 995 u_int page=get_page(vaddr);
57871462 996 struct ll_entry *head;
997 head=jump_in[page];
998 while(head!=NULL) {
999 if(head->vaddr==vaddr&&head->reg32==0) {
1000 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1001 // Update existing entry with current address
1002 if(ht_bin[0]==vaddr) {
1003 ht_bin[1]=(int)head->addr;
1004 return head->addr;
1005 }
1006 if(ht_bin[2]==vaddr) {
1007 ht_bin[3]=(int)head->addr;
1008 return head->addr;
1009 }
1010 // Insert into hash table with low priority.
1011 // Don't evict existing entries, as they are probably
1012 // addresses that are being accessed frequently.
1013 if(ht_bin[0]==-1) {
1014 ht_bin[1]=(int)head->addr;
1015 ht_bin[0]=vaddr;
1016 }else if(ht_bin[2]==-1) {
1017 ht_bin[3]=(int)head->addr;
1018 ht_bin[2]=vaddr;
1019 }
1020 return head->addr;
1021 }
1022 }
1023 head=head->next;
1024 }
1025 return 0;
1026}
1027
1028void remove_hash(int vaddr)
1029{
1030 //printf("remove hash: %x\n",vaddr);
1031 int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1032 if(ht_bin[2]==vaddr) {
1033 ht_bin[2]=ht_bin[3]=-1;
1034 }
1035 if(ht_bin[0]==vaddr) {
1036 ht_bin[0]=ht_bin[2];
1037 ht_bin[1]=ht_bin[3];
1038 ht_bin[2]=ht_bin[3]=-1;
1039 }
1040}
1041
1042void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1043{
1044 struct ll_entry *next;
1045 while(*head) {
1046 if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1047 ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1048 {
1049 inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1050 remove_hash((*head)->vaddr);
1051 next=(*head)->next;
1052 free(*head);
1053 *head=next;
1054 }
1055 else
1056 {
1057 head=&((*head)->next);
1058 }
1059 }
1060}
1061
1062// Remove all entries from linked list
1063void ll_clear(struct ll_entry **head)
1064{
1065 struct ll_entry *cur;
1066 struct ll_entry *next;
1067 if(cur=*head) {
1068 *head=0;
1069 while(cur) {
1070 next=cur->next;
1071 free(cur);
1072 cur=next;
1073 }
1074 }
1075}
1076
 1077// Dereference the pointers and remove them if they match
1078void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1079{
f76eeef9 1080 u_int old_host_addr=0;
57871462 1081 while(head) {
1082 int ptr=get_pointer(head->addr);
1083 inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1084 if(((ptr>>shift)==(addr>>shift)) ||
1085 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1086 {
5088bb70 1087 inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
f76eeef9 1088 u_int host_addr=(u_int)kill_pointer(head->addr);
1089
1090 if((host_addr>>12)!=(old_host_addr>>12)) {
1091 #ifdef __arm__
1092 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1093 #endif
1094 old_host_addr=host_addr;
1095 }
57871462 1096 }
1097 head=head->next;
1098 }
f76eeef9 1099 #ifdef __arm__
1100 if (old_host_addr)
1101 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1102 #endif
57871462 1103}
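/*
 * The instruction-cache flush batching used above, in isolation
 * (illustrative sketch, not part of the build; __clear_cache is the GCC/
 * libgcc builtin used on ARM): patches landing in the same 4K host page are
 * folded into a single flush.
 */
#if 0
static void flush_if_new_page(u_int host_addr, u_int *old_host_addr)
{
  if ((host_addr >> 12) != (*old_host_addr >> 12)) {
#ifdef __arm__
    __clear_cache((void *)(*old_host_addr & ~0xfff),
                  (void *)(*old_host_addr | 0xfff));
#endif
    *old_host_addr = host_addr;
  }
}
#endif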
1104
1105// This is called when we write to a compiled block (see do_invstub)
f76eeef9 1106void invalidate_page(u_int page)
57871462 1107{
57871462 1108 struct ll_entry *head;
1109 struct ll_entry *next;
f76eeef9 1110 u_int old_host_addr=0;
57871462 1111 head=jump_in[page];
1112 jump_in[page]=0;
1113 while(head!=NULL) {
1114 inv_debug("INVALIDATE: %x\n",head->vaddr);
1115 remove_hash(head->vaddr);
1116 next=head->next;
1117 free(head);
1118 head=next;
1119 }
1120 head=jump_out[page];
1121 jump_out[page]=0;
1122 while(head!=NULL) {
1123 inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
f76eeef9 1124 u_int host_addr=(u_int)kill_pointer(head->addr);
1125
1126 if((host_addr>>12)!=(old_host_addr>>12)) {
1127 #ifdef __arm__
1128 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1129 #endif
1130 old_host_addr=host_addr;
1131 }
57871462 1132 next=head->next;
1133 free(head);
1134 head=next;
1135 }
f76eeef9 1136 #ifdef __arm__
1137 if (old_host_addr)
1138 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1139 #endif
57871462 1140}
1141void invalidate_block(u_int block)
1142{
94d23bb9 1143 u_int page=get_page(block<<12);
1144 u_int vpage=get_vpage(block<<12);
57871462 1145 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1146 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1147 u_int first,last;
1148 first=last=page;
1149 struct ll_entry *head;
1150 head=jump_dirty[vpage];
1151 //printf("page=%d vpage=%d\n",page,vpage);
1152 while(head!=NULL) {
1153 u_int start,end;
1154 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1155 get_bounds((int)head->addr,&start,&end);
1156 //printf("start: %x end: %x\n",start,end);
4cb76aa4 1157 if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
57871462 1158 if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1159 if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1160 if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1161 }
1162 }
90ae6d4e 1163#ifndef DISABLE_TLB
57871462 1164 if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1165 if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1166 if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1167 if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1168 }
1169 }
90ae6d4e 1170#endif
57871462 1171 }
1172 head=head->next;
1173 }
1174 //printf("first=%d last=%d\n",first,last);
f76eeef9 1175 invalidate_page(page);
57871462 1176 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1177 assert(last<page+5);
1178 // Invalidate the adjacent pages if a block crosses a 4K boundary
1179 while(first<page) {
1180 invalidate_page(first);
1181 first++;
1182 }
1183 for(first=page+1;first<last;first++) {
1184 invalidate_page(first);
1185 }
1186
1187 // Don't trap writes
1188 invalid_code[block]=1;
94d23bb9 1189#ifndef DISABLE_TLB
57871462 1190 // If there is a valid TLB entry for this page, remove write protect
1191 if(tlb_LUT_w[block]) {
1192 assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1193 // CHECK: Is this right?
1194 memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1195 u_int real_block=tlb_LUT_w[block]>>12;
1196 invalid_code[real_block]=1;
1197 if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1198 }
1199 else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
94d23bb9 1200#endif
f76eeef9 1201
57871462 1202 #ifdef USE_MINI_HT
1203 memset(mini_ht,-1,sizeof(mini_ht));
1204 #endif
1205}
1206void invalidate_addr(u_int addr)
1207{
1208 invalidate_block(addr>>12);
1209}
1210void invalidate_all_pages()
1211{
1212 u_int page,n;
1213 for(page=0;page<4096;page++)
1214 invalidate_page(page);
1215 for(page=0;page<1048576;page++)
1216 if(!invalid_code[page]) {
1217 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1218 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1219 }
1220 #ifdef __arm__
1221 __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1222 #endif
1223 #ifdef USE_MINI_HT
1224 memset(mini_ht,-1,sizeof(mini_ht));
1225 #endif
94d23bb9 1226 #ifndef DISABLE_TLB
57871462 1227 // TLB
1228 for(page=0;page<0x100000;page++) {
1229 if(tlb_LUT_r[page]) {
1230 memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1231 if(!tlb_LUT_w[page]||!invalid_code[page])
1232 memory_map[page]|=0x40000000; // Write protect
1233 }
1234 else memory_map[page]=-1;
1235 if(page==0x80000) page=0xC0000;
1236 }
1237 tlb_hacks();
94d23bb9 1238 #endif
57871462 1239}
1240
1241// Add an entry to jump_out after making a link
1242void add_link(u_int vaddr,void *src)
1243{
94d23bb9 1244 u_int page=get_page(vaddr);
57871462 1245 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1246 ll_add(jump_out+page,vaddr,src);
1247 //int ptr=get_pointer(src);
1248 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1249}
1250
1251// If a code block was found to be unmodified (bit was set in
1252// restore_candidate) and it remains unmodified (bit is clear
1253// in invalid_code) then move the entries for that 4K page from
1254// the dirty list to the clean list.
1255void clean_blocks(u_int page)
1256{
1257 struct ll_entry *head;
1258 inv_debug("INV: clean_blocks page=%d\n",page);
1259 head=jump_dirty[page];
1260 while(head!=NULL) {
1261 if(!invalid_code[head->vaddr>>12]) {
1262 // Don't restore blocks which are about to expire from the cache
1263 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1264 u_int start,end;
1265 if(verify_dirty((int)head->addr)) {
1266 //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1267 u_int i;
1268 u_int inv=0;
1269 get_bounds((int)head->addr,&start,&end);
4cb76aa4 1270 if(start-(u_int)rdram<RAM_SIZE) {
57871462 1271 for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1272 inv|=invalid_code[i];
1273 }
1274 }
1275 if((signed int)head->vaddr>=(signed int)0xC0000000) {
1276 u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1277 //printf("addr=%x start=%x end=%x\n",addr,start,end);
1278 if(addr<start||addr>=end) inv=1;
1279 }
4cb76aa4 1280 else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
57871462 1281 inv=1;
1282 }
1283 if(!inv) {
1284 void * clean_addr=(void *)get_clean_addr((int)head->addr);
1285 if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1286 u_int ppage=page;
94d23bb9 1287#ifndef DISABLE_TLB
57871462 1288 if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
94d23bb9 1289#endif
57871462 1290 inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1291 //printf("page=%x, addr=%x\n",page,head->vaddr);
1292 //assert(head->vaddr>>12==(page|0x80000));
1293 ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1294 int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1295 if(!head->reg32) {
1296 if(ht_bin[0]==head->vaddr) {
1297 ht_bin[1]=(int)clean_addr; // Replace existing entry
1298 }
1299 if(ht_bin[2]==head->vaddr) {
1300 ht_bin[3]=(int)clean_addr; // Replace existing entry
1301 }
1302 }
1303 }
1304 }
1305 }
1306 }
1307 }
1308 head=head->next;
1309 }
1310}
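/*
 * How restore_candidate and clean_blocks() fit together (illustrative
 * sketch, not part of the build; the sweep that consumes these bits lives
 * elsewhere in this file): each bit covers one 4K page and is set when a
 * dirty block passes verify_dirty(), and a later sweep calls clean_blocks()
 * for every flagged page. A minimal sweep could look like this:
 */
#if 0
static void restore_candidate_sweep(void)
{
  u_int i, j;
  for (i = 0; i < sizeof(restore_candidate); i++) {
    if (!restore_candidate[i]) continue;
    for (j = 0; j < 8; j++)
      if (restore_candidate[i] & (1 << j))
        clean_blocks(i * 8 + j);       /* page index = byte*8 + bit */
    restore_candidate[i] = 0;
  }
}
#endif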
1311
1312
1313void mov_alloc(struct regstat *current,int i)
1314{
1315 // Note: Don't need to actually alloc the source registers
1316 if((~current->is32>>rs1[i])&1) {
1317 //alloc_reg64(current,i,rs1[i]);
1318 alloc_reg64(current,i,rt1[i]);
1319 current->is32&=~(1LL<<rt1[i]);
1320 } else {
1321 //alloc_reg(current,i,rs1[i]);
1322 alloc_reg(current,i,rt1[i]);
1323 current->is32|=(1LL<<rt1[i]);
1324 }
1325 clear_const(current,rs1[i]);
1326 clear_const(current,rt1[i]);
1327 dirty_reg(current,rt1[i]);
1328}
1329
1330void shiftimm_alloc(struct regstat *current,int i)
1331{
1332 clear_const(current,rs1[i]);
1333 clear_const(current,rt1[i]);
1334 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1335 {
1336 if(rt1[i]) {
1337 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1338 else lt1[i]=rs1[i];
1339 alloc_reg(current,i,rt1[i]);
1340 current->is32|=1LL<<rt1[i];
1341 dirty_reg(current,rt1[i]);
1342 }
1343 }
1344 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1345 {
1346 if(rt1[i]) {
1347 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1348 alloc_reg64(current,i,rt1[i]);
1349 current->is32&=~(1LL<<rt1[i]);
1350 dirty_reg(current,rt1[i]);
1351 }
1352 }
1353 if(opcode2[i]==0x3c) // DSLL32
1354 {
1355 if(rt1[i]) {
1356 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1357 alloc_reg64(current,i,rt1[i]);
1358 current->is32&=~(1LL<<rt1[i]);
1359 dirty_reg(current,rt1[i]);
1360 }
1361 }
1362 if(opcode2[i]==0x3e) // DSRL32
1363 {
1364 if(rt1[i]) {
1365 alloc_reg64(current,i,rs1[i]);
1366 if(imm[i]==32) {
1367 alloc_reg64(current,i,rt1[i]);
1368 current->is32&=~(1LL<<rt1[i]);
1369 } else {
1370 alloc_reg(current,i,rt1[i]);
1371 current->is32|=1LL<<rt1[i];
1372 }
1373 dirty_reg(current,rt1[i]);
1374 }
1375 }
1376 if(opcode2[i]==0x3f) // DSRA32
1377 {
1378 if(rt1[i]) {
1379 alloc_reg64(current,i,rs1[i]);
1380 alloc_reg(current,i,rt1[i]);
1381 current->is32|=1LL<<rt1[i];
1382 dirty_reg(current,rt1[i]);
1383 }
1384 }
1385}
1386
1387void shift_alloc(struct regstat *current,int i)
1388{
1389 if(rt1[i]) {
1390 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1391 {
1392 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1393 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1394 alloc_reg(current,i,rt1[i]);
1395 if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1396 current->is32|=1LL<<rt1[i];
1397 } else { // DSLLV/DSRLV/DSRAV
1398 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1399 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1400 alloc_reg64(current,i,rt1[i]);
1401 current->is32&=~(1LL<<rt1[i]);
1402 if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1403 alloc_reg_temp(current,i,-1);
1404 }
1405 clear_const(current,rs1[i]);
1406 clear_const(current,rs2[i]);
1407 clear_const(current,rt1[i]);
1408 dirty_reg(current,rt1[i]);
1409 }
1410}
1411
1412void alu_alloc(struct regstat *current,int i)
1413{
1414 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1415 if(rt1[i]) {
1416 if(rs1[i]&&rs2[i]) {
1417 alloc_reg(current,i,rs1[i]);
1418 alloc_reg(current,i,rs2[i]);
1419 }
1420 else {
1421 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1422 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1423 }
1424 alloc_reg(current,i,rt1[i]);
1425 }
1426 current->is32|=1LL<<rt1[i];
1427 }
1428 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1429 if(rt1[i]) {
1430 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1431 {
1432 alloc_reg64(current,i,rs1[i]);
1433 alloc_reg64(current,i,rs2[i]);
1434 alloc_reg(current,i,rt1[i]);
1435 } else {
1436 alloc_reg(current,i,rs1[i]);
1437 alloc_reg(current,i,rs2[i]);
1438 alloc_reg(current,i,rt1[i]);
1439 }
1440 }
1441 current->is32|=1LL<<rt1[i];
1442 }
1443 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1444 if(rt1[i]) {
1445 if(rs1[i]&&rs2[i]) {
1446 alloc_reg(current,i,rs1[i]);
1447 alloc_reg(current,i,rs2[i]);
1448 }
1449 else
1450 {
1451 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1452 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1453 }
1454 alloc_reg(current,i,rt1[i]);
1455 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1456 {
1457 if(!((current->uu>>rt1[i])&1)) {
1458 alloc_reg64(current,i,rt1[i]);
1459 }
1460 if(get_reg(current->regmap,rt1[i]|64)>=0) {
1461 if(rs1[i]&&rs2[i]) {
1462 alloc_reg64(current,i,rs1[i]);
1463 alloc_reg64(current,i,rs2[i]);
1464 }
1465 else
1466 {
 1467 // Is it really worth it to keep 64-bit values in registers?
1468 #ifdef NATIVE_64BIT
1469 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1470 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1471 #endif
1472 }
1473 }
1474 current->is32&=~(1LL<<rt1[i]);
1475 } else {
1476 current->is32|=1LL<<rt1[i];
1477 }
1478 }
1479 }
1480 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1481 if(rt1[i]) {
1482 if(rs1[i]&&rs2[i]) {
1483 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1484 alloc_reg64(current,i,rs1[i]);
1485 alloc_reg64(current,i,rs2[i]);
1486 alloc_reg64(current,i,rt1[i]);
1487 } else {
1488 alloc_reg(current,i,rs1[i]);
1489 alloc_reg(current,i,rs2[i]);
1490 alloc_reg(current,i,rt1[i]);
1491 }
1492 }
1493 else {
1494 alloc_reg(current,i,rt1[i]);
1495 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1496 // DADD used as move, or zeroing
1497 // If we have a 64-bit source, then make the target 64 bits too
1498 if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1499 if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1500 alloc_reg64(current,i,rt1[i]);
1501 } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1502 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1503 alloc_reg64(current,i,rt1[i]);
1504 }
1505 if(opcode2[i]>=0x2e&&rs2[i]) {
1506 // DSUB used as negation - 64-bit result
1507 // If we have a 32-bit register, extend it to 64 bits
1508 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1509 alloc_reg64(current,i,rt1[i]);
1510 }
1511 }
1512 }
1513 if(rs1[i]&&rs2[i]) {
1514 current->is32&=~(1LL<<rt1[i]);
1515 } else if(rs1[i]) {
1516 current->is32&=~(1LL<<rt1[i]);
1517 if((current->is32>>rs1[i])&1)
1518 current->is32|=1LL<<rt1[i];
1519 } else if(rs2[i]) {
1520 current->is32&=~(1LL<<rt1[i]);
1521 if((current->is32>>rs2[i])&1)
1522 current->is32|=1LL<<rt1[i];
1523 } else {
1524 current->is32|=1LL<<rt1[i];
1525 }
1526 }
1527 }
1528 clear_const(current,rs1[i]);
1529 clear_const(current,rs2[i]);
1530 clear_const(current,rt1[i]);
1531 dirty_reg(current,rt1[i]);
1532}
1533
1534void imm16_alloc(struct regstat *current,int i)
1535{
1536 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1537 else lt1[i]=rs1[i];
1538 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1539 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1540 current->is32&=~(1LL<<rt1[i]);
1541 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1542 // TODO: Could preserve the 32-bit flag if the immediate is zero
1543 alloc_reg64(current,i,rt1[i]);
1544 alloc_reg64(current,i,rs1[i]);
1545 }
1546 clear_const(current,rs1[i]);
1547 clear_const(current,rt1[i]);
1548 }
1549 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1550 if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1551 current->is32|=1LL<<rt1[i];
1552 clear_const(current,rs1[i]);
1553 clear_const(current,rt1[i]);
1554 }
1555 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1556 if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1557 if(rs1[i]!=rt1[i]) {
1558 if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1559 alloc_reg64(current,i,rt1[i]);
1560 current->is32&=~(1LL<<rt1[i]);
1561 }
1562 }
1563 else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1564 if(is_const(current,rs1[i])) {
1565 int v=get_const(current,rs1[i]);
1566 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1567 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1568 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1569 }
1570 else clear_const(current,rt1[i]);
1571 }
1572 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1573 if(is_const(current,rs1[i])) {
1574 int v=get_const(current,rs1[i]);
1575 set_const(current,rt1[i],v+imm[i]);
1576 }
1577 else clear_const(current,rt1[i]);
1578 current->is32|=1LL<<rt1[i];
1579 }
1580 else {
1581 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1582 current->is32|=1LL<<rt1[i];
1583 }
1584 dirty_reg(current,rt1[i]);
1585}
1586
1587void load_alloc(struct regstat *current,int i)
1588{
1589 clear_const(current,rt1[i]);
1590 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1591 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1592 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1593 if(rt1[i]) {
1594 alloc_reg(current,i,rt1[i]);
1595 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1596 {
1597 current->is32&=~(1LL<<rt1[i]);
1598 alloc_reg64(current,i,rt1[i]);
1599 }
1600 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1601 {
1602 current->is32&=~(1LL<<rt1[i]);
1603 alloc_reg64(current,i,rt1[i]);
1604 alloc_all(current,i);
1605 alloc_reg64(current,i,FTEMP);
1606 }
1607 else current->is32|=1LL<<rt1[i];
1608 dirty_reg(current,rt1[i]);
1609 // If using TLB, need a register for pointer to the mapping table
1610 if(using_tlb) alloc_reg(current,i,TLREG);
1611 // LWL/LWR need a temporary register for the old value
1612 if(opcode[i]==0x22||opcode[i]==0x26)
1613 {
1614 alloc_reg(current,i,FTEMP);
1615 alloc_reg_temp(current,i,-1);
1616 }
1617 }
1618 else
1619 {
1620 // Load to r0 (dummy load)
1621 // but we still need a register to calculate the address
1622 alloc_reg_temp(current,i,-1);
1623 }
1624}
1625
1626void store_alloc(struct regstat *current,int i)
1627{
1628 clear_const(current,rs2[i]);
1629 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1630 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1631 alloc_reg(current,i,rs2[i]);
1632 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1633 alloc_reg64(current,i,rs2[i]);
1634 if(rs2[i]) alloc_reg(current,i,FTEMP);
1635 }
1636 // If using TLB, need a register for pointer to the mapping table
1637 if(using_tlb) alloc_reg(current,i,TLREG);
1638 #if defined(HOST_IMM8)
1639 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1640 else alloc_reg(current,i,INVCP);
1641 #endif
b7918751 1642 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
57871462 1643 alloc_reg(current,i,FTEMP);
1644 }
1645 // We need a temporary register for address generation
1646 alloc_reg_temp(current,i,-1);
1647}
1648
1649void c1ls_alloc(struct regstat *current,int i)
1650{
1651 //clear_const(current,rs1[i]); // FIXME
1652 clear_const(current,rt1[i]);
1653 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1654 alloc_reg(current,i,CSREG); // Status
1655 alloc_reg(current,i,FTEMP);
1656 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1657 alloc_reg64(current,i,FTEMP);
1658 }
1659 // If using TLB, need a register for pointer to the mapping table
1660 if(using_tlb) alloc_reg(current,i,TLREG);
1661 #if defined(HOST_IMM8)
1662 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1663 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1664 alloc_reg(current,i,INVCP);
1665 #endif
1666 // We need a temporary register for address generation
1667 alloc_reg_temp(current,i,-1);
1668}
1669
b9b61529 1670void c2ls_alloc(struct regstat *current,int i)
1671{
1672 clear_const(current,rt1[i]);
1673 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1674 alloc_reg(current,i,FTEMP);
1675 // If using TLB, need a register for pointer to the mapping table
1676 if(using_tlb) alloc_reg(current,i,TLREG);
1677 #if defined(HOST_IMM8)
1678 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1679 else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1680 alloc_reg(current,i,INVCP);
1681 #endif
1682 // We need a temporary register for address generation
1683 alloc_reg_temp(current,i,-1);
1684}
1685
57871462 1686#ifndef multdiv_alloc
1687void multdiv_alloc(struct regstat *current,int i)
1688{
1689 // case 0x18: MULT
1690 // case 0x19: MULTU
1691 // case 0x1A: DIV
1692 // case 0x1B: DIVU
1693 // case 0x1C: DMULT
1694 // case 0x1D: DMULTU
1695 // case 0x1E: DDIV
1696 // case 0x1F: DDIVU
1697 clear_const(current,rs1[i]);
1698 clear_const(current,rs2[i]);
1699 if(rs1[i]&&rs2[i])
1700 {
1701 if((opcode2[i]&4)==0) // 32-bit
1702 {
1703 current->u&=~(1LL<<HIREG);
1704 current->u&=~(1LL<<LOREG);
1705 alloc_reg(current,i,HIREG);
1706 alloc_reg(current,i,LOREG);
1707 alloc_reg(current,i,rs1[i]);
1708 alloc_reg(current,i,rs2[i]);
1709 current->is32|=1LL<<HIREG;
1710 current->is32|=1LL<<LOREG;
1711 dirty_reg(current,HIREG);
1712 dirty_reg(current,LOREG);
1713 }
1714 else // 64-bit
1715 {
1716 current->u&=~(1LL<<HIREG);
1717 current->u&=~(1LL<<LOREG);
1718 current->uu&=~(1LL<<HIREG);
1719 current->uu&=~(1LL<<LOREG);
1720 alloc_reg64(current,i,HIREG);
1721 //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1722 alloc_reg64(current,i,rs1[i]);
1723 alloc_reg64(current,i,rs2[i]);
1724 alloc_all(current,i);
1725 current->is32&=~(1LL<<HIREG);
1726 current->is32&=~(1LL<<LOREG);
1727 dirty_reg(current,HIREG);
1728 dirty_reg(current,LOREG);
1729 }
1730 }
1731 else
1732 {
1733 // Multiply by zero is zero.
1734 // MIPS does not have a divide by zero exception.
1735 // The result is undefined, we return zero.
1736 alloc_reg(current,i,HIREG);
1737 alloc_reg(current,i,LOREG);
1738 current->is32|=1LL<<HIREG;
1739 current->is32|=1LL<<LOREG;
1740 dirty_reg(current,HIREG);
1741 dirty_reg(current,LOREG);
1742 }
1743}
1744#endif
1745
1746void cop0_alloc(struct regstat *current,int i)
1747{
1748 if(opcode2[i]==0) // MFC0
1749 {
1750 if(rt1[i]) {
1751 clear_const(current,rt1[i]);
1752 alloc_all(current,i);
1753 alloc_reg(current,i,rt1[i]);
1754 current->is32|=1LL<<rt1[i];
1755 dirty_reg(current,rt1[i]);
1756 }
1757 }
1758 else if(opcode2[i]==4) // MTC0
1759 {
1760 if(rs1[i]){
1761 clear_const(current,rs1[i]);
1762 alloc_reg(current,i,rs1[i]);
1763 alloc_all(current,i);
1764 }
1765 else {
1766 alloc_all(current,i); // FIXME: Keep r0
1767 current->u&=~1LL;
1768 alloc_reg(current,i,0);
1769 }
1770 }
1771 else
1772 {
1773 // TLBR/TLBWI/TLBWR/TLBP/ERET
1774 assert(opcode2[i]==0x10);
1775 alloc_all(current,i);
1776 }
1777}
1778
1779void cop1_alloc(struct regstat *current,int i)
1780{
1781 alloc_reg(current,i,CSREG); // Load status
1782 if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1783 {
1784 assert(rt1[i]);
1785 clear_const(current,rt1[i]);
1786 if(opcode2[i]==1) {
1787 alloc_reg64(current,i,rt1[i]); // DMFC1
1788 current->is32&=~(1LL<<rt1[i]);
1789 }else{
1790 alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1791 current->is32|=1LL<<rt1[i];
1792 }
1793 dirty_reg(current,rt1[i]);
1794 alloc_reg_temp(current,i,-1);
1795 }
1796 else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1797 {
1798 if(rs1[i]){
1799 clear_const(current,rs1[i]);
1800 if(opcode2[i]==5)
1801 alloc_reg64(current,i,rs1[i]); // DMTC1
1802 else
1803 alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1804 alloc_reg_temp(current,i,-1);
1805 }
1806 else {
1807 current->u&=~1LL;
1808 alloc_reg(current,i,0);
1809 alloc_reg_temp(current,i,-1);
1810 }
1811 }
1812}
1813void fconv_alloc(struct regstat *current,int i)
1814{
1815 alloc_reg(current,i,CSREG); // Load status
1816 alloc_reg_temp(current,i,-1);
1817}
1818void float_alloc(struct regstat *current,int i)
1819{
1820 alloc_reg(current,i,CSREG); // Load status
1821 alloc_reg_temp(current,i,-1);
1822}
b9b61529 1823void c2op_alloc(struct regstat *current,int i)
1824{
1825 alloc_reg_temp(current,i,-1);
1826}
57871462 1827void fcomp_alloc(struct regstat *current,int i)
1828{
1829 alloc_reg(current,i,CSREG); // Load status
1830 alloc_reg(current,i,FSREG); // Load flags
1831 dirty_reg(current,FSREG); // Flag will be modified
1832 alloc_reg_temp(current,i,-1);
1833}
1834
1835void syscall_alloc(struct regstat *current,int i)
1836{
1837 alloc_cc(current,i);
1838 dirty_reg(current,CCREG);
1839 alloc_all(current,i);
1840 current->isconst=0;
1841}
1842
1843void delayslot_alloc(struct regstat *current,int i)
1844{
1845 switch(itype[i]) {
1846 case UJUMP:
1847 case CJUMP:
1848 case SJUMP:
1849 case RJUMP:
1850 case FJUMP:
1851 case SYSCALL:
7139f3c8 1852 case HLECALL:
57871462 1853 case SPAN:
1854 assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
1855 printf("Disabled speculative precompilation\n");
1856 stop_after_jal=1;
1857 break;
1858 case IMM16:
1859 imm16_alloc(current,i);
1860 break;
1861 case LOAD:
1862 case LOADLR:
1863 load_alloc(current,i);
1864 break;
1865 case STORE:
1866 case STORELR:
1867 store_alloc(current,i);
1868 break;
1869 case ALU:
1870 alu_alloc(current,i);
1871 break;
1872 case SHIFT:
1873 shift_alloc(current,i);
1874 break;
1875 case MULTDIV:
1876 multdiv_alloc(current,i);
1877 break;
1878 case SHIFTIMM:
1879 shiftimm_alloc(current,i);
1880 break;
1881 case MOV:
1882 mov_alloc(current,i);
1883 break;
1884 case COP0:
1885 cop0_alloc(current,i);
1886 break;
1887 case COP1:
b9b61529 1888 case COP2:
57871462 1889 cop1_alloc(current,i);
1890 break;
1891 case C1LS:
1892 c1ls_alloc(current,i);
1893 break;
b9b61529 1894 case C2LS:
1895 c2ls_alloc(current,i);
1896 break;
57871462 1897 case FCONV:
1898 fconv_alloc(current,i);
1899 break;
1900 case FLOAT:
1901 float_alloc(current,i);
1902 break;
1903 case FCOMP:
1904 fcomp_alloc(current,i);
1905 break;
b9b61529 1906 case C2OP:
1907 c2op_alloc(current,i);
1908 break;
57871462 1909 }
1910}
1911
1912// Special case where a branch and delay slot span two pages in virtual memory
1913static void pagespan_alloc(struct regstat *current,int i)
1914{
1915 current->isconst=0;
1916 current->wasconst=0;
1917 regs[i].wasconst=0;
1918 alloc_all(current,i);
1919 alloc_cc(current,i);
1920 dirty_reg(current,CCREG);
1921 if(opcode[i]==3) // JAL
1922 {
1923 alloc_reg(current,i,31);
1924 dirty_reg(current,31);
1925 }
1926 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1927 {
1928 alloc_reg(current,i,rs1[i]);
5067f341 1929 if (rt1[i]!=0) {
1930 alloc_reg(current,i,rt1[i]);
1931 dirty_reg(current,rt1[i]);
57871462 1932 }
1933 }
1934 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1935 {
1936 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1937 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1938 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1939 {
1940 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1941 if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1942 }
1943 }
1944 else
1945 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1946 {
1947 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1948 if(!((current->is32>>rs1[i])&1))
1949 {
1950 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1951 }
1952 }
1953 else
1954 if(opcode[i]==0x11) // BC1
1955 {
1956 alloc_reg(current,i,FSREG);
1957 alloc_reg(current,i,CSREG);
1958 }
1959 //else ...
1960}
1961
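// Queue an out-of-line stub to be generated after the main block. Each
// stubs[] entry holds the stub type, the address of the branch to patch,
// the return address inside the compiled block, and five type-specific
// arguments (for the load/store stubs: instruction index, address
// register, register state, cycle adjustment and live register list).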
 1962void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1963{
1964 stubs[stubcount][0]=type;
1965 stubs[stubcount][1]=addr;
1966 stubs[stubcount][2]=retaddr;
1967 stubs[stubcount][3]=a;
1968 stubs[stubcount][4]=b;
1969 stubs[stubcount][5]=c;
1970 stubs[stubcount][6]=d;
1971 stubs[stubcount][7]=e;
1972 stubcount++;
1973}
1974
1975// Write out a single register
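// A 64-bit guest register is split across two host registers: r holds the
// low word, r|64 the high word. If the value is known to be 32-bit (is32),
// the high word is not stored separately but regenerated by sign-extending
// the low word (sarimm by 31) before the second store.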
1976void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1977{
1978 int hr;
1979 for(hr=0;hr<HOST_REGS;hr++) {
1980 if(hr!=EXCLUDE_REG) {
1981 if((regmap[hr]&63)==r) {
1982 if((dirty>>hr)&1) {
1983 if(regmap[hr]<64) {
1984 emit_storereg(r,hr);
24385cae 1985#ifndef FORCE32
57871462 1986 if((is32>>regmap[hr])&1) {
1987 emit_sarimm(hr,31,hr);
1988 emit_storereg(r|64,hr);
1989 }
24385cae 1990#endif
57871462 1991 }else{
1992 emit_storereg(r|64,hr);
1993 }
1994 }
1995 }
1996 }
1997 }
1998}
1999
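// Debug helpers used by the tracing code in memdebug() below:
// mchecksum() folds 2097152 words of rdram into one value by shifting the
// running sum left, feeding the inverted top bit back into bit 0 and
// XORing in each word; rchecksum() simply XORs the 64 words of the
// register file.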
2000int mchecksum()
2001{
2002 //if(!tracedebug) return 0;
2003 int i;
2004 int sum=0;
2005 for(i=0;i<2097152;i++) {
2006 unsigned int temp=sum;
2007 sum<<=1;
2008 sum|=(~temp)>>31;
2009 sum^=((u_int *)rdram)[i];
2010 }
2011 return sum;
2012}
2013int rchecksum()
2014{
2015 int i;
2016 int sum=0;
2017 for(i=0;i<64;i++)
2018 sum^=((u_int *)reg)[i];
2019 return sum;
2020}
57871462 2021void rlist()
2022{
2023 int i;
2024 printf("TRACE: ");
2025 for(i=0;i<32;i++)
2026 printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2027 printf("\n");
3d624f89 2028#ifndef DISABLE_COP1
57871462 2029 printf("TRACE: ");
2030 for(i=0;i<32;i++)
2031 printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2032 printf("\n");
3d624f89 2033#endif
57871462 2034}
2035
2036void enabletrace()
2037{
2038 tracedebug=1;
2039}
2040
2041void memdebug(int i)
2042{
2043 //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2044 //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2045 //rlist();
2046 //if(tracedebug) {
2047 //if(Count>=-2084597794) {
2048 if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2049 //if(0) {
2050 printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2051 //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2052 //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2053 rlist();
2054 #ifdef __i386__
2055 printf("TRACE: %x\n",(&i)[-1]);
2056 #endif
2057 #ifdef __arm__
2058 int j;
2059 printf("TRACE: %x \n",(&j)[10]);
2060 printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2061 #endif
2062 //fflush(stdout);
2063 }
2064 //printf("TRACE: %x\n",(&i)[-1]);
2065}
2066
2067void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2068{
2069 printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2070}
2071
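// Emit code for register-register ALU instructions. Dispatch is on
// opcode2: 0x20-0x23 ADD/ADDU/SUB/SUBU, 0x2c-0x2f the 64-bit doubleword
// forms (low/high halves live in separate host registers), 0x2a/0x2b
// SLT/SLTU, 0x24-0x27 AND/OR/XOR/NOR. Operands that are r0 are folded
// into moves, negates or zeroing of the destination.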
2072void alu_assemble(int i,struct regstat *i_regs)
2073{
2074 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2075 if(rt1[i]) {
2076 signed char s1,s2,t;
2077 t=get_reg(i_regs->regmap,rt1[i]);
2078 if(t>=0) {
2079 s1=get_reg(i_regs->regmap,rs1[i]);
2080 s2=get_reg(i_regs->regmap,rs2[i]);
2081 if(rs1[i]&&rs2[i]) {
2082 assert(s1>=0);
2083 assert(s2>=0);
2084 if(opcode2[i]&2) emit_sub(s1,s2,t);
2085 else emit_add(s1,s2,t);
2086 }
2087 else if(rs1[i]) {
2088 if(s1>=0) emit_mov(s1,t);
2089 else emit_loadreg(rs1[i],t);
2090 }
2091 else if(rs2[i]) {
2092 if(s2>=0) {
2093 if(opcode2[i]&2) emit_neg(s2,t);
2094 else emit_mov(s2,t);
2095 }
2096 else {
2097 emit_loadreg(rs2[i],t);
2098 if(opcode2[i]&2) emit_neg(t,t);
2099 }
2100 }
2101 else emit_zeroreg(t);
2102 }
2103 }
2104 }
2105 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2106 if(rt1[i]) {
2107 signed char s1l,s2l,s1h,s2h,tl,th;
2108 tl=get_reg(i_regs->regmap,rt1[i]);
2109 th=get_reg(i_regs->regmap,rt1[i]|64);
2110 if(tl>=0) {
2111 s1l=get_reg(i_regs->regmap,rs1[i]);
2112 s2l=get_reg(i_regs->regmap,rs2[i]);
2113 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2114 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2115 if(rs1[i]&&rs2[i]) {
2116 assert(s1l>=0);
2117 assert(s2l>=0);
2118 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2119 else emit_adds(s1l,s2l,tl);
2120 if(th>=0) {
2121 #ifdef INVERTED_CARRY
2122 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2123 #else
2124 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2125 #endif
2126 else emit_add(s1h,s2h,th);
2127 }
2128 }
2129 else if(rs1[i]) {
2130 if(s1l>=0) emit_mov(s1l,tl);
2131 else emit_loadreg(rs1[i],tl);
2132 if(th>=0) {
2133 if(s1h>=0) emit_mov(s1h,th);
2134 else emit_loadreg(rs1[i]|64,th);
2135 }
2136 }
2137 else if(rs2[i]) {
2138 if(s2l>=0) {
2139 if(opcode2[i]&2) emit_negs(s2l,tl);
2140 else emit_mov(s2l,tl);
2141 }
2142 else {
2143 emit_loadreg(rs2[i],tl);
2144 if(opcode2[i]&2) emit_negs(tl,tl);
2145 }
2146 if(th>=0) {
2147 #ifdef INVERTED_CARRY
2148 if(s2h>=0) emit_mov(s2h,th);
2149 else emit_loadreg(rs2[i]|64,th);
2150 if(opcode2[i]&2) {
2151 emit_adcimm(-1,th); // x86 has inverted carry flag
2152 emit_not(th,th);
2153 }
2154 #else
2155 if(opcode2[i]&2) {
2156 if(s2h>=0) emit_rscimm(s2h,0,th);
2157 else {
2158 emit_loadreg(rs2[i]|64,th);
2159 emit_rscimm(th,0,th);
2160 }
2161 }else{
2162 if(s2h>=0) emit_mov(s2h,th);
2163 else emit_loadreg(rs2[i]|64,th);
2164 }
2165 #endif
2166 }
2167 }
2168 else {
2169 emit_zeroreg(tl);
2170 if(th>=0) emit_zeroreg(th);
2171 }
2172 }
2173 }
2174 }
2175 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2176 if(rt1[i]) {
2177 signed char s1l,s1h,s2l,s2h,t;
2178 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2179 {
2180 t=get_reg(i_regs->regmap,rt1[i]);
2181 //assert(t>=0);
2182 if(t>=0) {
2183 s1l=get_reg(i_regs->regmap,rs1[i]);
2184 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2185 s2l=get_reg(i_regs->regmap,rs2[i]);
2186 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2187 if(rs2[i]==0) // rx<r0
2188 {
2189 assert(s1h>=0);
2190 if(opcode2[i]==0x2a) // SLT
2191 emit_shrimm(s1h,31,t);
2192 else // SLTU (unsigned can not be less than zero)
2193 emit_zeroreg(t);
2194 }
2195 else if(rs1[i]==0) // r0<rx
2196 {
2197 assert(s2h>=0);
2198 if(opcode2[i]==0x2a) // SLT
2199 emit_set_gz64_32(s2h,s2l,t);
2200 else // SLTU (set if not zero)
2201 emit_set_nz64_32(s2h,s2l,t);
2202 }
2203 else {
2204 assert(s1l>=0);assert(s1h>=0);
2205 assert(s2l>=0);assert(s2h>=0);
2206 if(opcode2[i]==0x2a) // SLT
2207 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2208 else // SLTU
2209 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2210 }
2211 }
2212 } else {
2213 t=get_reg(i_regs->regmap,rt1[i]);
2214 //assert(t>=0);
2215 if(t>=0) {
2216 s1l=get_reg(i_regs->regmap,rs1[i]);
2217 s2l=get_reg(i_regs->regmap,rs2[i]);
2218 if(rs2[i]==0) // rx<r0
2219 {
2220 assert(s1l>=0);
2221 if(opcode2[i]==0x2a) // SLT
2222 emit_shrimm(s1l,31,t);
2223 else // SLTU (unsigned can not be less than zero)
2224 emit_zeroreg(t);
2225 }
2226 else if(rs1[i]==0) // r0<rx
2227 {
2228 assert(s2l>=0);
2229 if(opcode2[i]==0x2a) // SLT
2230 emit_set_gz32(s2l,t);
2231 else // SLTU (set if not zero)
2232 emit_set_nz32(s2l,t);
2233 }
2234 else{
2235 assert(s1l>=0);assert(s2l>=0);
2236 if(opcode2[i]==0x2a) // SLT
2237 emit_set_if_less32(s1l,s2l,t);
2238 else // SLTU
2239 emit_set_if_carry32(s1l,s2l,t);
2240 }
2241 }
2242 }
2243 }
2244 }
2245 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2246 if(rt1[i]) {
2247 signed char s1l,s1h,s2l,s2h,th,tl;
2248 tl=get_reg(i_regs->regmap,rt1[i]);
2249 th=get_reg(i_regs->regmap,rt1[i]|64);
2250 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2251 {
2252 assert(tl>=0);
2253 if(tl>=0) {
2254 s1l=get_reg(i_regs->regmap,rs1[i]);
2255 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2256 s2l=get_reg(i_regs->regmap,rs2[i]);
2257 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2258 if(rs1[i]&&rs2[i]) {
2259 assert(s1l>=0);assert(s1h>=0);
2260 assert(s2l>=0);assert(s2h>=0);
2261 if(opcode2[i]==0x24) { // AND
2262 emit_and(s1l,s2l,tl);
2263 emit_and(s1h,s2h,th);
2264 } else
2265 if(opcode2[i]==0x25) { // OR
2266 emit_or(s1l,s2l,tl);
2267 emit_or(s1h,s2h,th);
2268 } else
2269 if(opcode2[i]==0x26) { // XOR
2270 emit_xor(s1l,s2l,tl);
2271 emit_xor(s1h,s2h,th);
2272 } else
2273 if(opcode2[i]==0x27) { // NOR
2274 emit_or(s1l,s2l,tl);
2275 emit_or(s1h,s2h,th);
2276 emit_not(tl,tl);
2277 emit_not(th,th);
2278 }
2279 }
2280 else
2281 {
2282 if(opcode2[i]==0x24) { // AND
2283 emit_zeroreg(tl);
2284 emit_zeroreg(th);
2285 } else
2286 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2287 if(rs1[i]){
2288 if(s1l>=0) emit_mov(s1l,tl);
2289 else emit_loadreg(rs1[i],tl);
2290 if(s1h>=0) emit_mov(s1h,th);
2291 else emit_loadreg(rs1[i]|64,th);
2292 }
2293 else
2294 if(rs2[i]){
2295 if(s2l>=0) emit_mov(s2l,tl);
2296 else emit_loadreg(rs2[i],tl);
2297 if(s2h>=0) emit_mov(s2h,th);
2298 else emit_loadreg(rs2[i]|64,th);
2299 }
2300 else{
2301 emit_zeroreg(tl);
2302 emit_zeroreg(th);
2303 }
2304 } else
2305 if(opcode2[i]==0x27) { // NOR
2306 if(rs1[i]){
2307 if(s1l>=0) emit_not(s1l,tl);
2308 else{
2309 emit_loadreg(rs1[i],tl);
2310 emit_not(tl,tl);
2311 }
2312 if(s1h>=0) emit_not(s1h,th);
2313 else{
2314 emit_loadreg(rs1[i]|64,th);
2315 emit_not(th,th);
2316 }
2317 }
2318 else
2319 if(rs2[i]){
2320 if(s2l>=0) emit_not(s2l,tl);
2321 else{
2322 emit_loadreg(rs2[i],tl);
2323 emit_not(tl,tl);
2324 }
2325 if(s2h>=0) emit_not(s2h,th);
2326 else{
2327 emit_loadreg(rs2[i]|64,th);
2328 emit_not(th,th);
2329 }
2330 }
2331 else {
2332 emit_movimm(-1,tl);
2333 emit_movimm(-1,th);
2334 }
2335 }
2336 }
2337 }
2338 }
2339 else
2340 {
2341 // 32 bit
2342 if(tl>=0) {
2343 s1l=get_reg(i_regs->regmap,rs1[i]);
2344 s2l=get_reg(i_regs->regmap,rs2[i]);
2345 if(rs1[i]&&rs2[i]) {
2346 assert(s1l>=0);
2347 assert(s2l>=0);
2348 if(opcode2[i]==0x24) { // AND
2349 emit_and(s1l,s2l,tl);
2350 } else
2351 if(opcode2[i]==0x25) { // OR
2352 emit_or(s1l,s2l,tl);
2353 } else
2354 if(opcode2[i]==0x26) { // XOR
2355 emit_xor(s1l,s2l,tl);
2356 } else
2357 if(opcode2[i]==0x27) { // NOR
2358 emit_or(s1l,s2l,tl);
2359 emit_not(tl,tl);
2360 }
2361 }
2362 else
2363 {
2364 if(opcode2[i]==0x24) { // AND
2365 emit_zeroreg(tl);
2366 } else
2367 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2368 if(rs1[i]){
2369 if(s1l>=0) emit_mov(s1l,tl);
2370 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2371 }
2372 else
2373 if(rs2[i]){
2374 if(s2l>=0) emit_mov(s2l,tl);
2375 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2376 }
2377 else emit_zeroreg(tl);
2378 } else
2379 if(opcode2[i]==0x27) { // NOR
2380 if(rs1[i]){
2381 if(s1l>=0) emit_not(s1l,tl);
2382 else {
2383 emit_loadreg(rs1[i],tl);
2384 emit_not(tl,tl);
2385 }
2386 }
2387 else
2388 if(rs2[i]){
2389 if(s2l>=0) emit_not(s2l,tl);
2390 else {
2391 emit_loadreg(rs2[i],tl);
2392 emit_not(tl,tl);
2393 }
2394 }
2395 else emit_movimm(-1,tl);
2396 }
2397 }
2398 }
2399 }
2400 }
2401 }
2402}
2403
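// Emit code for immediate-operand instructions (LUI, ADDI/ADDIU, DADDI,
// SLTI/SLTIU, ANDI/ORI/XORI). When the source register holds a known
// constant (wasconst/constmap), the operation is folded at compile time
// and a single emit_movimm of the result is generated instead.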
2404void imm16_assemble(int i,struct regstat *i_regs)
2405{
2406 if (opcode[i]==0x0f) { // LUI
2407 if(rt1[i]) {
2408 signed char t;
2409 t=get_reg(i_regs->regmap,rt1[i]);
2410 //assert(t>=0);
2411 if(t>=0) {
2412 if(!((i_regs->isconst>>t)&1))
2413 emit_movimm(imm[i]<<16,t);
2414 }
2415 }
2416 }
2417 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2418 if(rt1[i]) {
2419 signed char s,t;
2420 t=get_reg(i_regs->regmap,rt1[i]);
2421 s=get_reg(i_regs->regmap,rs1[i]);
2422 if(rs1[i]) {
2423 //assert(t>=0);
2424 //assert(s>=0);
2425 if(t>=0) {
2426 if(!((i_regs->isconst>>t)&1)) {
2427 if(s<0) {
2428 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2429 emit_addimm(t,imm[i],t);
2430 }else{
2431 if(!((i_regs->wasconst>>s)&1))
2432 emit_addimm(s,imm[i],t);
2433 else
2434 emit_movimm(constmap[i][s]+imm[i],t);
2435 }
2436 }
2437 }
2438 } else {
2439 if(t>=0) {
2440 if(!((i_regs->isconst>>t)&1))
2441 emit_movimm(imm[i],t);
2442 }
2443 }
2444 }
2445 }
2446 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2447 if(rt1[i]) {
2448 signed char sh,sl,th,tl;
2449 th=get_reg(i_regs->regmap,rt1[i]|64);
2450 tl=get_reg(i_regs->regmap,rt1[i]);
2451 sh=get_reg(i_regs->regmap,rs1[i]|64);
2452 sl=get_reg(i_regs->regmap,rs1[i]);
2453 if(tl>=0) {
2454 if(rs1[i]) {
2455 assert(sh>=0);
2456 assert(sl>=0);
2457 if(th>=0) {
2458 emit_addimm64_32(sh,sl,imm[i],th,tl);
2459 }
2460 else {
2461 emit_addimm(sl,imm[i],tl);
2462 }
2463 } else {
2464 emit_movimm(imm[i],tl);
2465 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2466 }
2467 }
2468 }
2469 }
2470 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2471 if(rt1[i]) {
2472 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2473 signed char sh,sl,t;
2474 t=get_reg(i_regs->regmap,rt1[i]);
2475 sh=get_reg(i_regs->regmap,rs1[i]|64);
2476 sl=get_reg(i_regs->regmap,rs1[i]);
2477 //assert(t>=0);
2478 if(t>=0) {
2479 if(rs1[i]>0) {
2480 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2481 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2482 if(opcode[i]==0x0a) { // SLTI
2483 if(sl<0) {
2484 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2485 emit_slti32(t,imm[i],t);
2486 }else{
2487 emit_slti32(sl,imm[i],t);
2488 }
2489 }
2490 else { // SLTIU
2491 if(sl<0) {
2492 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2493 emit_sltiu32(t,imm[i],t);
2494 }else{
2495 emit_sltiu32(sl,imm[i],t);
2496 }
2497 }
2498 }else{ // 64-bit
2499 assert(sl>=0);
2500 if(opcode[i]==0x0a) // SLTI
2501 emit_slti64_32(sh,sl,imm[i],t);
2502 else // SLTIU
2503 emit_sltiu64_32(sh,sl,imm[i],t);
2504 }
2505 }else{
2506 // SLTI(U) with r0 is just stupid,
2507 // nonetheless examples can be found
 2508 if(opcode[i]==0x0a) { // SLTI
2509 if(0<imm[i]) emit_movimm(1,t);
2510 else emit_zeroreg(t);
 2511 } else // SLTIU
2512 {
2513 if(imm[i]) emit_movimm(1,t);
2514 else emit_zeroreg(t);
2515 }
2516 }
2517 }
2518 }
2519 }
2520 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2521 if(rt1[i]) {
2522 signed char sh,sl,th,tl;
2523 th=get_reg(i_regs->regmap,rt1[i]|64);
2524 tl=get_reg(i_regs->regmap,rt1[i]);
2525 sh=get_reg(i_regs->regmap,rs1[i]|64);
2526 sl=get_reg(i_regs->regmap,rs1[i]);
2527 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2528 if(opcode[i]==0x0c) //ANDI
2529 {
2530 if(rs1[i]) {
2531 if(sl<0) {
2532 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2533 emit_andimm(tl,imm[i],tl);
2534 }else{
2535 if(!((i_regs->wasconst>>sl)&1))
2536 emit_andimm(sl,imm[i],tl);
2537 else
2538 emit_movimm(constmap[i][sl]&imm[i],tl);
2539 }
2540 }
2541 else
2542 emit_zeroreg(tl);
2543 if(th>=0) emit_zeroreg(th);
2544 }
2545 else
2546 {
2547 if(rs1[i]) {
2548 if(sl<0) {
2549 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2550 }
2551 if(th>=0) {
2552 if(sh<0) {
2553 emit_loadreg(rs1[i]|64,th);
2554 }else{
2555 emit_mov(sh,th);
2556 }
2557 }
2558 if(opcode[i]==0x0d) //ORI
2559 if(sl<0) {
2560 emit_orimm(tl,imm[i],tl);
2561 }else{
2562 if(!((i_regs->wasconst>>sl)&1))
2563 emit_orimm(sl,imm[i],tl);
2564 else
2565 emit_movimm(constmap[i][sl]|imm[i],tl);
2566 }
2567 if(opcode[i]==0x0e) //XORI
2568 if(sl<0) {
2569 emit_xorimm(tl,imm[i],tl);
2570 }else{
2571 if(!((i_regs->wasconst>>sl)&1))
2572 emit_xorimm(sl,imm[i],tl);
2573 else
2574 emit_movimm(constmap[i][sl]^imm[i],tl);
2575 }
2576 }
2577 else {
2578 emit_movimm(imm[i],tl);
2579 if(th>=0) emit_zeroreg(th);
2580 }
2581 }
2582 }
2583 }
2584 }
2585}
2586
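// Shift by immediate. A shift amount of zero degenerates to a plain move;
// the DSLL32/DSRL32/DSRA32 forms first move a whole 32-bit half between
// the low and high host registers, then shift by the remaining imm&31
// bits.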
2587void shiftimm_assemble(int i,struct regstat *i_regs)
2588{
2589 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2590 {
2591 if(rt1[i]) {
2592 signed char s,t;
2593 t=get_reg(i_regs->regmap,rt1[i]);
2594 s=get_reg(i_regs->regmap,rs1[i]);
2595 //assert(t>=0);
2596 if(t>=0){
2597 if(rs1[i]==0)
2598 {
2599 emit_zeroreg(t);
2600 }
2601 else
2602 {
2603 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2604 if(imm[i]) {
2605 if(opcode2[i]==0) // SLL
2606 {
2607 emit_shlimm(s<0?t:s,imm[i],t);
2608 }
2609 if(opcode2[i]==2) // SRL
2610 {
2611 emit_shrimm(s<0?t:s,imm[i],t);
2612 }
2613 if(opcode2[i]==3) // SRA
2614 {
2615 emit_sarimm(s<0?t:s,imm[i],t);
2616 }
2617 }else{
2618 // Shift by zero
2619 if(s>=0 && s!=t) emit_mov(s,t);
2620 }
2621 }
2622 }
2623 //emit_storereg(rt1[i],t); //DEBUG
2624 }
2625 }
2626 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2627 {
2628 if(rt1[i]) {
2629 signed char sh,sl,th,tl;
2630 th=get_reg(i_regs->regmap,rt1[i]|64);
2631 tl=get_reg(i_regs->regmap,rt1[i]);
2632 sh=get_reg(i_regs->regmap,rs1[i]|64);
2633 sl=get_reg(i_regs->regmap,rs1[i]);
2634 if(tl>=0) {
2635 if(rs1[i]==0)
2636 {
2637 emit_zeroreg(tl);
2638 if(th>=0) emit_zeroreg(th);
2639 }
2640 else
2641 {
2642 assert(sl>=0);
2643 assert(sh>=0);
2644 if(imm[i]) {
2645 if(opcode2[i]==0x38) // DSLL
2646 {
2647 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2648 emit_shlimm(sl,imm[i],tl);
2649 }
2650 if(opcode2[i]==0x3a) // DSRL
2651 {
2652 emit_shrdimm(sl,sh,imm[i],tl);
2653 if(th>=0) emit_shrimm(sh,imm[i],th);
2654 }
2655 if(opcode2[i]==0x3b) // DSRA
2656 {
2657 emit_shrdimm(sl,sh,imm[i],tl);
2658 if(th>=0) emit_sarimm(sh,imm[i],th);
2659 }
2660 }else{
2661 // Shift by zero
2662 if(sl!=tl) emit_mov(sl,tl);
2663 if(th>=0&&sh!=th) emit_mov(sh,th);
2664 }
2665 }
2666 }
2667 }
2668 }
2669 if(opcode2[i]==0x3c) // DSLL32
2670 {
2671 if(rt1[i]) {
2672 signed char sl,tl,th;
2673 tl=get_reg(i_regs->regmap,rt1[i]);
2674 th=get_reg(i_regs->regmap,rt1[i]|64);
2675 sl=get_reg(i_regs->regmap,rs1[i]);
2676 if(th>=0||tl>=0){
2677 assert(tl>=0);
2678 assert(th>=0);
2679 assert(sl>=0);
2680 emit_mov(sl,th);
2681 emit_zeroreg(tl);
2682 if(imm[i]>32)
2683 {
2684 emit_shlimm(th,imm[i]&31,th);
2685 }
2686 }
2687 }
2688 }
2689 if(opcode2[i]==0x3e) // DSRL32
2690 {
2691 if(rt1[i]) {
2692 signed char sh,tl,th;
2693 tl=get_reg(i_regs->regmap,rt1[i]);
2694 th=get_reg(i_regs->regmap,rt1[i]|64);
2695 sh=get_reg(i_regs->regmap,rs1[i]|64);
2696 if(tl>=0){
2697 assert(sh>=0);
2698 emit_mov(sh,tl);
2699 if(th>=0) emit_zeroreg(th);
2700 if(imm[i]>32)
2701 {
2702 emit_shrimm(tl,imm[i]&31,tl);
2703 }
2704 }
2705 }
2706 }
2707 if(opcode2[i]==0x3f) // DSRA32
2708 {
2709 if(rt1[i]) {
2710 signed char sh,tl;
2711 tl=get_reg(i_regs->regmap,rt1[i]);
2712 sh=get_reg(i_regs->regmap,rs1[i]|64);
2713 if(tl>=0){
2714 assert(sh>=0);
2715 emit_mov(sh,tl);
2716 if(imm[i]>32)
2717 {
2718 emit_sarimm(tl,imm[i]&31,tl);
2719 }
2720 }
2721 }
2722 }
2723}
2724
2725#ifndef shift_assemble
2726void shift_assemble(int i,struct regstat *i_regs)
2727{
2728 printf("Need shift_assemble for this architecture.\n");
2729 exit(1);
2730}
2731#endif
2732
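// Emit code for LB/LH/LW/LBU/LHU/LWU/LD. The fast path compares the
// address against RAM_SIZE and falls through to a direct indexed read;
// the out-of-range case branches to a LOAD*_STUB queued with add_stub().
// Loads from a constant address that cannot hit RAM are emitted as an
// inline_readstub call instead.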
2733void load_assemble(int i,struct regstat *i_regs)
2734{
2735 int s,th,tl,addr,map=-1;
2736 int offset;
2737 int jaddr=0;
5bf843dc 2738 int memtarget=0,c=0;
57871462 2739 u_int hr,reglist=0;
2740 th=get_reg(i_regs->regmap,rt1[i]|64);
2741 tl=get_reg(i_regs->regmap,rt1[i]);
2742 s=get_reg(i_regs->regmap,rs1[i]);
2743 offset=imm[i];
2744 for(hr=0;hr<HOST_REGS;hr++) {
2745 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2746 }
2747 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2748 if(s>=0) {
2749 c=(i_regs->wasconst>>s)&1;
4cb76aa4 2750 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 2751 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2752 }
57871462 2753 //printf("load_assemble: c=%d\n",c);
2754 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2755 // FIXME: Even if the load is a NOP, we should check for pagefaults...
5bf843dc 2756#ifdef PCSX
f18c0f46 2757 if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2758 ||rt1[i]==0) {
5bf843dc 2759 // could be FIFO, must perform the read
f18c0f46 2760 // ||dummy read
5bf843dc 2761 assem_debug("(forced read)\n");
2762 tl=get_reg(i_regs->regmap,-1);
2763 assert(tl>=0);
5bf843dc 2764 }
f18c0f46 2765#endif
5bf843dc 2766 if(offset||s<0||c) addr=tl;
2767 else addr=s;
57871462 2768 if(tl>=0) {
2769 //assert(tl>=0);
2770 //assert(rt1[i]);
2771 reglist&=~(1<<tl);
2772 if(th>=0) reglist&=~(1<<th);
2773 if(!using_tlb) {
2774 if(!c) {
2775//#define R29_HACK 1
2776 #ifdef R29_HACK
2777 // Strmnnrmn's speed hack
4cb76aa4 2778 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 2779 #endif
2780 {
4cb76aa4 2781 emit_cmpimm(addr,RAM_SIZE);
57871462 2782 jaddr=(int)out;
2783 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2784 // Hint to branch predictor that the branch is unlikely to be taken
2785 if(rs1[i]>=28)
2786 emit_jno_unlikely(0);
2787 else
2788 #endif
2789 emit_jno(0);
2790 }
2791 }
2792 }else{ // using tlb
2793 int x=0;
2794 if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2795 if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2796 map=get_reg(i_regs->regmap,TLREG);
2797 assert(map>=0);
2798 map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2799 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2800 }
2801 if (opcode[i]==0x20) { // LB
2802 if(!c||memtarget) {
2803 #ifdef HOST_IMM_ADDR32
2804 if(c)
2805 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2806 else
2807 #endif
2808 {
2809 //emit_xorimm(addr,3,tl);
2810 //gen_tlb_addr_r(tl,map);
2811 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2812 int x=0;
2002a1db 2813#ifdef BIG_ENDIAN_MIPS
57871462 2814 if(!c) emit_xorimm(addr,3,tl);
2815 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2816#else
2817 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2818 else if (tl!=addr) emit_mov(addr,tl);
2819#endif
57871462 2820 emit_movsbl_indexed_tlb(x,tl,map,tl);
2821 }
2822 if(jaddr)
2823 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2824 }
2825 else
2826 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2827 }
2828 if (opcode[i]==0x21) { // LH
2829 if(!c||memtarget) {
2830 #ifdef HOST_IMM_ADDR32
2831 if(c)
2832 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2833 else
2834 #endif
2835 {
2836 int x=0;
2002a1db 2837#ifdef BIG_ENDIAN_MIPS
57871462 2838 if(!c) emit_xorimm(addr,2,tl);
2839 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2840#else
2841 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2842 else if (tl!=addr) emit_mov(addr,tl);
2843#endif
57871462 2844 //#ifdef
2845 //emit_movswl_indexed_tlb(x,tl,map,tl);
2846 //else
2847 if(map>=0) {
2848 gen_tlb_addr_r(tl,map);
2849 emit_movswl_indexed(x,tl,tl);
2850 }else
2851 emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
2852 }
2853 if(jaddr)
2854 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2855 }
2856 else
2857 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2858 }
2859 if (opcode[i]==0x23) { // LW
2860 if(!c||memtarget) {
2861 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2862 #ifdef HOST_IMM_ADDR32
2863 if(c)
2864 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2865 else
2866 #endif
2867 emit_readword_indexed_tlb(0,addr,map,tl);
2868 if(jaddr)
2869 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2870 }
2871 else
2872 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2873 }
2874 if (opcode[i]==0x24) { // LBU
2875 if(!c||memtarget) {
2876 #ifdef HOST_IMM_ADDR32
2877 if(c)
2878 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2879 else
2880 #endif
2881 {
2882 //emit_xorimm(addr,3,tl);
2883 //gen_tlb_addr_r(tl,map);
2884 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2885 int x=0;
2002a1db 2886#ifdef BIG_ENDIAN_MIPS
57871462 2887 if(!c) emit_xorimm(addr,3,tl);
2888 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2889#else
2890 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2891 else if (tl!=addr) emit_mov(addr,tl);
2892#endif
57871462 2893 emit_movzbl_indexed_tlb(x,tl,map,tl);
2894 }
2895 if(jaddr)
2896 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2897 }
2898 else
2899 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2900 }
2901 if (opcode[i]==0x25) { // LHU
2902 if(!c||memtarget) {
2903 #ifdef HOST_IMM_ADDR32
2904 if(c)
2905 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2906 else
2907 #endif
2908 {
2909 int x=0;
2002a1db 2910#ifdef BIG_ENDIAN_MIPS
57871462 2911 if(!c) emit_xorimm(addr,2,tl);
2912 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2913#else
2914 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2915 else if (tl!=addr) emit_mov(addr,tl);
2916#endif
57871462 2917 //#ifdef
2918 //emit_movzwl_indexed_tlb(x,tl,map,tl);
2919 //#else
2920 if(map>=0) {
2921 gen_tlb_addr_r(tl,map);
2922 emit_movzwl_indexed(x,tl,tl);
2923 }else
2924 emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
2925 if(jaddr)
2926 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2927 }
2928 }
2929 else
2930 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2931 }
2932 if (opcode[i]==0x27) { // LWU
2933 assert(th>=0);
2934 if(!c||memtarget) {
2935 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2936 #ifdef HOST_IMM_ADDR32
2937 if(c)
2938 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2939 else
2940 #endif
2941 emit_readword_indexed_tlb(0,addr,map,tl);
2942 if(jaddr)
2943 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2944 }
2945 else {
2946 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2947 }
2948 emit_zeroreg(th);
2949 }
2950 if (opcode[i]==0x37) { // LD
2951 if(!c||memtarget) {
2952 //gen_tlb_addr_r(tl,map);
2953 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2954 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2955 #ifdef HOST_IMM_ADDR32
2956 if(c)
2957 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2958 else
2959 #endif
2960 emit_readdword_indexed_tlb(0,addr,map,th,tl);
2961 if(jaddr)
2962 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2963 }
2964 else
2965 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2966 }
2967 //emit_storereg(rt1[i],tl); // DEBUG
2968 }
2969 //if(opcode[i]==0x23)
2970 //if(opcode[i]==0x24)
2971 //if(opcode[i]==0x23||opcode[i]==0x24)
2972 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2973 {
2974 //emit_pusha();
2975 save_regs(0x100f);
2976 emit_readword((int)&last_count,ECX);
2977 #ifdef __i386__
2978 if(get_reg(i_regs->regmap,CCREG)<0)
2979 emit_loadreg(CCREG,HOST_CCREG);
2980 emit_add(HOST_CCREG,ECX,HOST_CCREG);
2981 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2982 emit_writeword(HOST_CCREG,(int)&Count);
2983 #endif
2984 #ifdef __arm__
2985 if(get_reg(i_regs->regmap,CCREG)<0)
2986 emit_loadreg(CCREG,0);
2987 else
2988 emit_mov(HOST_CCREG,0);
2989 emit_add(0,ECX,0);
2990 emit_addimm(0,2*ccadj[i],0);
2991 emit_writeword(0,(int)&Count);
2992 #endif
2993 emit_call((int)memdebug);
2994 //emit_popa();
2995 restore_regs(0x100f);
2996 }/**/
2997}
2998
2999#ifndef loadlr_assemble
3000void loadlr_assemble(int i,struct regstat *i_regs)
3001{
3002 printf("Need loadlr_assemble for this architecture.\n");
3003 exit(1);
3004}
3005#endif
3006
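// Emit code for SB/SH/SW/SD. Structured like load_assemble, but after the
// write the target page is checked against invalid_code (the
// cmpmem_indexedsr12 test) and an INVCODE_STUB is taken so that any
// previously compiled code at that address gets invalidated.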
3007void store_assemble(int i,struct regstat *i_regs)
3008{
3009 int s,th,tl,map=-1;
3010 int addr,temp;
3011 int offset;
3012 int jaddr=0,jaddr2,type;
666a299d 3013 int memtarget=0,c=0;
57871462 3014 int agr=AGEN1+(i&1);
3015 u_int hr,reglist=0;
3016 th=get_reg(i_regs->regmap,rs2[i]|64);
3017 tl=get_reg(i_regs->regmap,rs2[i]);
3018 s=get_reg(i_regs->regmap,rs1[i]);
3019 temp=get_reg(i_regs->regmap,agr);
3020 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3021 offset=imm[i];
3022 if(s>=0) {
3023 c=(i_regs->wasconst>>s)&1;
4cb76aa4 3024 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 3025 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3026 }
3027 assert(tl>=0);
3028 assert(temp>=0);
3029 for(hr=0;hr<HOST_REGS;hr++) {
3030 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3031 }
3032 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3033 if(offset||s<0||c) addr=temp;
3034 else addr=s;
3035 if(!using_tlb) {
3036 if(!c) {
3037 #ifdef R29_HACK
3038 // Strmnnrmn's speed hack
3039 memtarget=1;
4cb76aa4 3040 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3041 #endif
4cb76aa4 3042 emit_cmpimm(addr,RAM_SIZE);
57871462 3043 #ifdef DESTRUCTIVE_SHIFT
3044 if(s==addr) emit_mov(s,temp);
3045 #endif
3046 #ifdef R29_HACK
4cb76aa4 3047 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3048 #endif
3049 {
3050 jaddr=(int)out;
3051 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3052 // Hint to branch predictor that the branch is unlikely to be taken
3053 if(rs1[i]>=28)
3054 emit_jno_unlikely(0);
3055 else
3056 #endif
3057 emit_jno(0);
3058 }
3059 }
3060 }else{ // using tlb
3061 int x=0;
3062 if (opcode[i]==0x28) x=3; // SB
3063 if (opcode[i]==0x29) x=2; // SH
3064 map=get_reg(i_regs->regmap,TLREG);
3065 assert(map>=0);
3066 map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3067 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3068 }
3069
3070 if (opcode[i]==0x28) { // SB
3071 if(!c||memtarget) {
3072 int x=0;
2002a1db 3073#ifdef BIG_ENDIAN_MIPS
57871462 3074 if(!c) emit_xorimm(addr,3,temp);
3075 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 3076#else
3077 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3078 else if (addr!=temp) emit_mov(addr,temp);
3079#endif
57871462 3080 //gen_tlb_addr_w(temp,map);
3081 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3082 emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3083 }
3084 type=STOREB_STUB;
3085 }
3086 if (opcode[i]==0x29) { // SH
3087 if(!c||memtarget) {
3088 int x=0;
2002a1db 3089#ifdef BIG_ENDIAN_MIPS
57871462 3090 if(!c) emit_xorimm(addr,2,temp);
3091 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 3092#else
3093 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3094 else if (addr!=temp) emit_mov(addr,temp);
3095#endif
57871462 3096 //#ifdef
3097 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3098 //#else
3099 if(map>=0) {
3100 gen_tlb_addr_w(temp,map);
3101 emit_writehword_indexed(tl,x,temp);
3102 }else
3103 emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3104 }
3105 type=STOREH_STUB;
3106 }
3107 if (opcode[i]==0x2B) { // SW
3108 if(!c||memtarget)
3109 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3110 emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3111 type=STOREW_STUB;
3112 }
3113 if (opcode[i]==0x3F) { // SD
3114 if(!c||memtarget) {
3115 if(rs2[i]) {
3116 assert(th>=0);
3117 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3118 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3119 emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3120 }else{
3121 // Store zero
3122 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3123 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3124 emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3125 }
3126 }
3127 type=STORED_STUB;
3128 }
666a299d 3129 if(!using_tlb&&(!c||memtarget))
3130 // addr could be a temp, make sure it survives STORE*_STUB
3131 reglist|=1<<addr;
57871462 3132 if(jaddr) {
3133 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3134 } else if(!memtarget) {
3135 inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3136 }
3137 if(!using_tlb) {
3138 if(!c||memtarget) {
3139 #ifdef DESTRUCTIVE_SHIFT
3140 // The x86 shift operation is 'destructive'; it overwrites the
3141 // source register, so we need to make a copy first and use that.
3142 addr=temp;
3143 #endif
3144 #if defined(HOST_IMM8)
3145 int ir=get_reg(i_regs->regmap,INVCP);
3146 assert(ir>=0);
3147 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3148 #else
3149 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3150 #endif
3151 jaddr2=(int)out;
3152 emit_jne(0);
3153 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3154 }
3155 }
3156 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3157 //if(opcode[i]==0x2B || opcode[i]==0x28)
3158 //if(opcode[i]==0x2B || opcode[i]==0x29)
3159 //if(opcode[i]==0x2B)
3160 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3161 {
3162 //emit_pusha();
3163 save_regs(0x100f);
3164 emit_readword((int)&last_count,ECX);
3165 #ifdef __i386__
3166 if(get_reg(i_regs->regmap,CCREG)<0)
3167 emit_loadreg(CCREG,HOST_CCREG);
3168 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3169 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3170 emit_writeword(HOST_CCREG,(int)&Count);
3171 #endif
3172 #ifdef __arm__
3173 if(get_reg(i_regs->regmap,CCREG)<0)
3174 emit_loadreg(CCREG,0);
3175 else
3176 emit_mov(HOST_CCREG,0);
3177 emit_add(0,ECX,0);
3178 emit_addimm(0,2*ccadj[i],0);
3179 emit_writeword(0,(int)&Count);
3180 #endif
3181 emit_call((int)memdebug);
3182 //emit_popa();
3183 restore_regs(0x100f);
3184 }/**/
3185}
3186
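// Unaligned stores (SWL/SWR/SDL/SDR). The low two address bits are tested
// at run time and the code branches to one of four cases (byte offset
// 0-3), each writing the appropriate mix of bytes, halfwords and words;
// the doubleword forms then write the second word depending on address
// bit 2.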
3187void storelr_assemble(int i,struct regstat *i_regs)
3188{
3189 int s,th,tl;
3190 int temp;
3191 int temp2;
3192 int offset;
3193 int jaddr=0,jaddr2;
3194 int case1,case2,case3;
3195 int done0,done1,done2;
 3196 int memtarget=0,c=0;
fab5d06d 3197 int agr=AGEN1+(i&1);
57871462 3198 u_int hr,reglist=0;
3199 th=get_reg(i_regs->regmap,rs2[i]|64);
3200 tl=get_reg(i_regs->regmap,rs2[i]);
3201 s=get_reg(i_regs->regmap,rs1[i]);
fab5d06d 3202 temp=get_reg(i_regs->regmap,agr);
3203 if(temp<0) temp=get_reg(i_regs->regmap,-1);
57871462 3204 offset=imm[i];
3205 if(s>=0) {
3206 c=(i_regs->isconst>>s)&1;
4cb76aa4 3207 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 3208 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3209 }
3210 assert(tl>=0);
3211 for(hr=0;hr<HOST_REGS;hr++) {
3212 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3213 }
3214 if(tl>=0) {
3215 assert(temp>=0);
3216 if(!using_tlb) {
3217 if(!c) {
4cb76aa4 3218 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
57871462 3219 if(!offset&&s!=temp) emit_mov(s,temp);
3220 jaddr=(int)out;
3221 emit_jno(0);
3222 }
3223 else
3224 {
3225 if(!memtarget||!rs1[i]) {
3226 jaddr=(int)out;
3227 emit_jmp(0);
3228 }
3229 }
3230 if((u_int)rdram!=0x80000000)
3231 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3232 }else{ // using tlb
3233 int map=get_reg(i_regs->regmap,TLREG);
3234 assert(map>=0);
3235 map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3236 if(!c&&!offset&&s>=0) emit_mov(s,temp);
3237 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3238 if(!jaddr&&!memtarget) {
3239 jaddr=(int)out;
3240 emit_jmp(0);
3241 }
3242 gen_tlb_addr_w(temp,map);
3243 }
3244
3245 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3246 temp2=get_reg(i_regs->regmap,FTEMP);
3247 if(!rs2[i]) temp2=th=tl;
3248 }
3249
2002a1db 3250#ifndef BIG_ENDIAN_MIPS
3251 emit_xorimm(temp,3,temp);
3252#endif
57871462 3253 emit_testimm(temp,2);
3254 case2=(int)out;
3255 emit_jne(0);
3256 emit_testimm(temp,1);
3257 case1=(int)out;
3258 emit_jne(0);
3259 // 0
3260 if (opcode[i]==0x2A) { // SWL
3261 emit_writeword_indexed(tl,0,temp);
3262 }
3263 if (opcode[i]==0x2E) { // SWR
3264 emit_writebyte_indexed(tl,3,temp);
3265 }
3266 if (opcode[i]==0x2C) { // SDL
3267 emit_writeword_indexed(th,0,temp);
3268 if(rs2[i]) emit_mov(tl,temp2);
3269 }
3270 if (opcode[i]==0x2D) { // SDR
3271 emit_writebyte_indexed(tl,3,temp);
3272 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3273 }
3274 done0=(int)out;
3275 emit_jmp(0);
3276 // 1
3277 set_jump_target(case1,(int)out);
3278 if (opcode[i]==0x2A) { // SWL
3279 // Write 3 msb into three least significant bytes
3280 if(rs2[i]) emit_rorimm(tl,8,tl);
3281 emit_writehword_indexed(tl,-1,temp);
3282 if(rs2[i]) emit_rorimm(tl,16,tl);
3283 emit_writebyte_indexed(tl,1,temp);
3284 if(rs2[i]) emit_rorimm(tl,8,tl);
3285 }
3286 if (opcode[i]==0x2E) { // SWR
3287 // Write two lsb into two most significant bytes
3288 emit_writehword_indexed(tl,1,temp);
3289 }
3290 if (opcode[i]==0x2C) { // SDL
3291 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3292 // Write 3 msb into three least significant bytes
3293 if(rs2[i]) emit_rorimm(th,8,th);
3294 emit_writehword_indexed(th,-1,temp);
3295 if(rs2[i]) emit_rorimm(th,16,th);
3296 emit_writebyte_indexed(th,1,temp);
3297 if(rs2[i]) emit_rorimm(th,8,th);
3298 }
3299 if (opcode[i]==0x2D) { // SDR
3300 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3301 // Write two lsb into two most significant bytes
3302 emit_writehword_indexed(tl,1,temp);
3303 }
3304 done1=(int)out;
3305 emit_jmp(0);
3306 // 2
3307 set_jump_target(case2,(int)out);
3308 emit_testimm(temp,1);
3309 case3=(int)out;
3310 emit_jne(0);
3311 if (opcode[i]==0x2A) { // SWL
3312 // Write two msb into two least significant bytes
3313 if(rs2[i]) emit_rorimm(tl,16,tl);
3314 emit_writehword_indexed(tl,-2,temp);
3315 if(rs2[i]) emit_rorimm(tl,16,tl);
3316 }
3317 if (opcode[i]==0x2E) { // SWR
3318 // Write 3 lsb into three most significant bytes
3319 emit_writebyte_indexed(tl,-1,temp);
3320 if(rs2[i]) emit_rorimm(tl,8,tl);
3321 emit_writehword_indexed(tl,0,temp);
3322 if(rs2[i]) emit_rorimm(tl,24,tl);
3323 }
3324 if (opcode[i]==0x2C) { // SDL
3325 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3326 // Write two msb into two least significant bytes
3327 if(rs2[i]) emit_rorimm(th,16,th);
3328 emit_writehword_indexed(th,-2,temp);
3329 if(rs2[i]) emit_rorimm(th,16,th);
3330 }
3331 if (opcode[i]==0x2D) { // SDR
3332 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3333 // Write 3 lsb into three most significant bytes
3334 emit_writebyte_indexed(tl,-1,temp);
3335 if(rs2[i]) emit_rorimm(tl,8,tl);
3336 emit_writehword_indexed(tl,0,temp);
3337 if(rs2[i]) emit_rorimm(tl,24,tl);
3338 }
3339 done2=(int)out;
3340 emit_jmp(0);
3341 // 3
3342 set_jump_target(case3,(int)out);
3343 if (opcode[i]==0x2A) { // SWL
3344 // Write msb into least significant byte
3345 if(rs2[i]) emit_rorimm(tl,24,tl);
3346 emit_writebyte_indexed(tl,-3,temp);
3347 if(rs2[i]) emit_rorimm(tl,8,tl);
3348 }
3349 if (opcode[i]==0x2E) { // SWR
3350 // Write entire word
3351 emit_writeword_indexed(tl,-3,temp);
3352 }
3353 if (opcode[i]==0x2C) { // SDL
3354 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3355 // Write msb into least significant byte
3356 if(rs2[i]) emit_rorimm(th,24,th);
3357 emit_writebyte_indexed(th,-3,temp);
3358 if(rs2[i]) emit_rorimm(th,8,th);
3359 }
3360 if (opcode[i]==0x2D) { // SDR
3361 if(rs2[i]) emit_mov(th,temp2);
3362 // Write entire word
3363 emit_writeword_indexed(tl,-3,temp);
3364 }
3365 set_jump_target(done0,(int)out);
3366 set_jump_target(done1,(int)out);
3367 set_jump_target(done2,(int)out);
3368 if (opcode[i]==0x2C) { // SDL
3369 emit_testimm(temp,4);
3370 done0=(int)out;
3371 emit_jne(0);
3372 emit_andimm(temp,~3,temp);
3373 emit_writeword_indexed(temp2,4,temp);
3374 set_jump_target(done0,(int)out);
3375 }
3376 if (opcode[i]==0x2D) { // SDR
3377 emit_testimm(temp,4);
3378 done0=(int)out;
3379 emit_jeq(0);
3380 emit_andimm(temp,~3,temp);
3381 emit_writeword_indexed(temp2,-4,temp);
3382 set_jump_target(done0,(int)out);
3383 }
3384 if(!c||!memtarget)
b7918751 3385 add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
57871462 3386 }
3387 if(!using_tlb) {
3388 emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3389 #if defined(HOST_IMM8)
3390 int ir=get_reg(i_regs->regmap,INVCP);
3391 assert(ir>=0);
3392 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3393 #else
3394 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3395 #endif
3396 jaddr2=(int)out;
3397 emit_jne(0);
3398 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3399 }
3400 /*
3401 emit_pusha();
3402 //save_regs(0x100f);
3403 emit_readword((int)&last_count,ECX);
3404 if(get_reg(i_regs->regmap,CCREG)<0)
3405 emit_loadreg(CCREG,HOST_CCREG);
3406 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3407 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3408 emit_writeword(HOST_CCREG,(int)&Count);
3409 emit_call((int)memdebug);
3410 emit_popa();
3411 //restore_regs(0x100f);
3412 /**/
3413}
3414
3415void c1ls_assemble(int i,struct regstat *i_regs)
3416{
3d624f89 3417#ifndef DISABLE_COP1
57871462 3418 int s,th,tl;
3419 int temp,ar;
3420 int map=-1;
3421 int offset;
3422 int c=0;
3423 int jaddr,jaddr2=0,jaddr3,type;
3424 int agr=AGEN1+(i&1);
3425 u_int hr,reglist=0;
3426 th=get_reg(i_regs->regmap,FTEMP|64);
3427 tl=get_reg(i_regs->regmap,FTEMP);
3428 s=get_reg(i_regs->regmap,rs1[i]);
3429 temp=get_reg(i_regs->regmap,agr);
3430 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3431 offset=imm[i];
3432 assert(tl>=0);
3433 assert(rs1[i]>0);
3434 assert(temp>=0);
3435 for(hr=0;hr<HOST_REGS;hr++) {
3436 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3437 }
3438 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3439 if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3440 {
3441 // Loads use a temporary register which we need to save
3442 reglist|=1<<temp;
3443 }
3444 if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3445 ar=temp;
3446 else // LWC1/LDC1
3447 ar=tl;
3448 //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3449 //else c=(i_regs->wasconst>>s)&1;
3450 if(s>=0) c=(i_regs->wasconst>>s)&1;
3451 // Check cop1 unusable
3452 if(!cop1_usable) {
3453 signed char rs=get_reg(i_regs->regmap,CSREG);
3454 assert(rs>=0);
3455 emit_testimm(rs,0x20000000);
3456 jaddr=(int)out;
3457 emit_jeq(0);
3458 add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3459 cop1_usable=1;
3460 }
3461 if (opcode[i]==0x39) { // SWC1 (get float address)
3462 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3463 }
3464 if (opcode[i]==0x3D) { // SDC1 (get double address)
3465 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3466 }
3467 // Generate address + offset
3468 if(!using_tlb) {
3469 if(!c)
4cb76aa4 3470 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
57871462 3471 }
3472 else
3473 {
3474 map=get_reg(i_regs->regmap,TLREG);
3475 assert(map>=0);
3476 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3477 map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3478 }
3479 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3480 map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3481 }
3482 }
3483 if (opcode[i]==0x39) { // SWC1 (read float)
3484 emit_readword_indexed(0,tl,tl);
3485 }
3486 if (opcode[i]==0x3D) { // SDC1 (read double)
3487 emit_readword_indexed(4,tl,th);
3488 emit_readword_indexed(0,tl,tl);
3489 }
3490 if (opcode[i]==0x31) { // LWC1 (get target address)
3491 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3492 }
3493 if (opcode[i]==0x35) { // LDC1 (get target address)
3494 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3495 }
3496 if(!using_tlb) {
3497 if(!c) {
3498 jaddr2=(int)out;
3499 emit_jno(0);
3500 }
4cb76aa4 3501 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
57871462 3502 jaddr2=(int)out;
3503 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3504 }
3505 #ifdef DESTRUCTIVE_SHIFT
3506 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3507 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3508 }
3509 #endif
3510 }else{
3511 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3512 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3513 }
3514 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3515 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3516 }
3517 }
3518 if (opcode[i]==0x31) { // LWC1
3519 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3520 //gen_tlb_addr_r(ar,map);
3521 //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3522 #ifdef HOST_IMM_ADDR32
3523 if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3524 else
3525 #endif
3526 emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3527 type=LOADW_STUB;
3528 }
3529 if (opcode[i]==0x35) { // LDC1
3530 assert(th>=0);
3531 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3532 //gen_tlb_addr_r(ar,map);
3533 //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3534 //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3535 #ifdef HOST_IMM_ADDR32
3536 if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3537 else
3538 #endif
3539 emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3540 type=LOADD_STUB;
3541 }
3542 if (opcode[i]==0x39) { // SWC1
3543 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3544 emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3545 type=STOREW_STUB;
3546 }
3547 if (opcode[i]==0x3D) { // SDC1
3548 assert(th>=0);
3549 //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3550 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3551 emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3552 type=STORED_STUB;
3553 }
3554 if(!using_tlb) {
3555 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3556 #ifndef DESTRUCTIVE_SHIFT
3557 temp=offset||c||s<0?ar:s;
3558 #endif
3559 #if defined(HOST_IMM8)
3560 int ir=get_reg(i_regs->regmap,INVCP);
3561 assert(ir>=0);
3562 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3563 #else
3564 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3565 #endif
3566 jaddr3=(int)out;
3567 emit_jne(0);
3568 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3569 }
3570 }
3571 if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3572 if (opcode[i]==0x31) { // LWC1 (write float)
3573 emit_writeword_indexed(tl,0,temp);
3574 }
3575 if (opcode[i]==0x35) { // LDC1 (write double)
3576 emit_writeword_indexed(th,4,temp);
3577 emit_writeword_indexed(tl,0,temp);
3578 }
3579 //if(opcode[i]==0x39)
3580 /*if(opcode[i]==0x39||opcode[i]==0x31)
3581 {
3582 emit_pusha();
3583 emit_readword((int)&last_count,ECX);
3584 if(get_reg(i_regs->regmap,CCREG)<0)
3585 emit_loadreg(CCREG,HOST_CCREG);
3586 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3587 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3588 emit_writeword(HOST_CCREG,(int)&Count);
3589 emit_call((int)memdebug);
3590 emit_popa();
3591 }/**/
3d624f89 3592#else
3593 cop1_unusable(i, i_regs);
3594#endif
57871462 3595}
3596
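// LWC2/SWC2: load/store of a GTE (COP2) data register. The value is moved
// between the FTEMP host register and the coprocessor file with
// cop2_get_dreg/cop2_put_dreg; the memory access itself uses the same
// RAM_SIZE check, stub and invalidation scheme as ordinary loads/stores.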
b9b61529 3597void c2ls_assemble(int i,struct regstat *i_regs)
3598{
3599 int s,tl;
3600 int ar;
3601 int offset;
1fd1aceb 3602 int memtarget=0,c=0;
b9b61529 3603 int jaddr,jaddr2=0,jaddr3,type;
3604 int agr=AGEN1+(i&1);
3605 u_int hr,reglist=0;
3606 u_int copr=(source[i]>>16)&0x1f;
3607 s=get_reg(i_regs->regmap,rs1[i]);
3608 tl=get_reg(i_regs->regmap,FTEMP);
3609 offset=imm[i];
3610 assert(rs1[i]>0);
3611 assert(tl>=0);
3612 assert(!using_tlb);
3613
3614 for(hr=0;hr<HOST_REGS;hr++) {
3615 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3616 }
3617 if(i_regs->regmap[HOST_CCREG]==CCREG)
3618 reglist&=~(1<<HOST_CCREG);
3619
3620 // get the address
3621 if (opcode[i]==0x3a) { // SWC2
3622 ar=get_reg(i_regs->regmap,agr);
3623 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3624 reglist|=1<<ar;
3625 } else { // LWC2
3626 ar=tl;
3627 }
1fd1aceb 3628 if(s>=0) c=(i_regs->wasconst>>s)&1;
3629 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
b9b61529 3630 if (!offset&&!c&&s>=0) ar=s;
3631 assert(ar>=0);
3632
3633 if (opcode[i]==0x3a) { // SWC2
3634 cop2_get_dreg(copr,tl,HOST_TEMPREG);
1fd1aceb 3635 type=STOREW_STUB;
b9b61529 3636 }
1fd1aceb 3637 else
b9b61529 3638 type=LOADW_STUB;
1fd1aceb 3639
3640 if(c&&!memtarget) {
3641 jaddr2=(int)out;
3642 emit_jmp(0); // inline_readstub/inline_writestub?
b9b61529 3643 }
1fd1aceb 3644 else {
3645 if(!c) {
3646 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3647 jaddr2=(int)out;
3648 emit_jno(0);
3649 }
3650 if (opcode[i]==0x32) { // LWC2
3651 #ifdef HOST_IMM_ADDR32
3652 if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3653 else
3654 #endif
3655 emit_readword_indexed(0,ar,tl);
3656 }
3657 if (opcode[i]==0x3a) { // SWC2
3658 #ifdef DESTRUCTIVE_SHIFT
3659 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3660 #endif
3661 emit_writeword_indexed(tl,0,ar);
3662 }
b9b61529 3663 }
3664 if(jaddr2)
3665 add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3666 if (opcode[i]==0x3a) { // SWC2
3667#if defined(HOST_IMM8)
3668 int ir=get_reg(i_regs->regmap,INVCP);
3669 assert(ir>=0);
3670 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3671#else
3672 emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3673#endif
3674 jaddr3=(int)out;
3675 emit_jne(0);
3676 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3677 }
3678 if (opcode[i]==0x32) { // LWC2
3679 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3680 }
3681}
3682
57871462 3683#ifndef multdiv_assemble
3684void multdiv_assemble(int i,struct regstat *i_regs)
3685{
3686 printf("Need multdiv_assemble for this architecture.\n");
3687 exit(1);
3688}
3689#endif
3690
3691void mov_assemble(int i,struct regstat *i_regs)
3692{
3693 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3694 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
f5b13bdc 3695 //assert(rt1[i]>0);
57871462 3696 if(rt1[i]) {
3697 signed char sh,sl,th,tl;
3698 th=get_reg(i_regs->regmap,rt1[i]|64);
3699 tl=get_reg(i_regs->regmap,rt1[i]);
3700 //assert(tl>=0);
3701 if(tl>=0) {
3702 sh=get_reg(i_regs->regmap,rs1[i]|64);
3703 sl=get_reg(i_regs->regmap,rs1[i]);
3704 if(sl>=0) emit_mov(sl,tl);
3705 else emit_loadreg(rs1[i],tl);
3706 if(th>=0) {
3707 if(sh>=0) emit_mov(sh,th);
3708 else emit_loadreg(rs1[i]|64,th);
3709 }
3710 }
3711 }
3712}
3713
3714#ifndef fconv_assemble
3715void fconv_assemble(int i,struct regstat *i_regs)
3716{
3717 printf("Need fconv_assemble for this architecture.\n");
3718 exit(1);
3719}
3720#endif
3721
3722#if 0
3723void float_assemble(int i,struct regstat *i_regs)
3724{
3725 printf("Need float_assemble for this architecture.\n");
3726 exit(1);
3727}
3728#endif
3729
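// SYSCALL and HLE BIOS calls leave the translated block entirely: the
// current PC and the adjusted cycle count are loaded into fixed registers
// and control jumps to the jump_syscall_hle / jump_hlecall handlers.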
3730void syscall_assemble(int i,struct regstat *i_regs)
3731{
3732 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3733 assert(ccreg==HOST_CCREG);
3734 assert(!is_delayslot);
3735 emit_movimm(start+i*4,EAX); // Get PC
3736 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
7139f3c8 3737 emit_jmp((int)jump_syscall_hle); // XXX
3738}
3739
3740void hlecall_assemble(int i,struct regstat *i_regs)
3741{
3742 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3743 assert(ccreg==HOST_CCREG);
3744 assert(!is_delayslot);
3745 emit_movimm(start+i*4+4,0); // Get PC
67ba0fb4 3746 emit_movimm((int)psxHLEt[source[i]&7],1);
7139f3c8 3747 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
67ba0fb4 3748 emit_jmp((int)jump_hlecall);
57871462 3749}
3750
3751void ds_assemble(int i,struct regstat *i_regs)
3752{
3753 is_delayslot=1;
3754 switch(itype[i]) {
3755 case ALU:
3756 alu_assemble(i,i_regs);break;
3757 case IMM16:
3758 imm16_assemble(i,i_regs);break;
3759 case SHIFT:
3760 shift_assemble(i,i_regs);break;
3761 case SHIFTIMM:
3762 shiftimm_assemble(i,i_regs);break;
3763 case LOAD:
3764 load_assemble(i,i_regs);break;
3765 case LOADLR:
3766 loadlr_assemble(i,i_regs);break;
3767 case STORE:
3768 store_assemble(i,i_regs);break;
3769 case STORELR:
3770 storelr_assemble(i,i_regs);break;
3771 case COP0:
3772 cop0_assemble(i,i_regs);break;
3773 case COP1:
3774 cop1_assemble(i,i_regs);break;
3775 case C1LS:
3776 c1ls_assemble(i,i_regs);break;
b9b61529 3777 case COP2:
3778 cop2_assemble(i,i_regs);break;
3779 case C2LS:
3780 c2ls_assemble(i,i_regs);break;
3781 case C2OP:
3782 c2op_assemble(i,i_regs);break;
57871462 3783 case FCONV:
3784 fconv_assemble(i,i_regs);break;
3785 case FLOAT:
3786 float_assemble(i,i_regs);break;
3787 case FCOMP:
3788 fcomp_assemble(i,i_regs);break;
3789 case MULTDIV:
3790 multdiv_assemble(i,i_regs);break;
3791 case MOV:
3792 mov_assemble(i,i_regs);break;
3793 case SYSCALL:
7139f3c8 3794 case HLECALL:
57871462 3795 case SPAN:
3796 case UJUMP:
3797 case RJUMP:
3798 case CJUMP:
3799 case SJUMP:
3800 case FJUMP:
3801 printf("Jump in the delay slot. This is probably a bug.\n");
3802 }
3803 is_delayslot=0;
3804}
3805
3806// Is the branch target a valid internal jump?
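// The target qualifies only if it is not an indirect (register) jump, lies
// within the current block, and every register it requires in 32-bit form
// is already 32-bit at the branch point (requires_32bit vs. i_is32).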
3807int internal_branch(uint64_t i_is32,int addr)
3808{
3809 if(addr&1) return 0; // Indirect (register) jump
3810 if(addr>=start && addr<start+slen*4-4)
3811 {
3812 int t=(addr-start)>>2;
3813 // Delay slots are not valid branch targets
3814 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3815 // 64 -> 32 bit transition requires a recompile
3816 /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3817 {
3818 if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3819 else printf("optimizable: yes\n");
3820 }*/
3821 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3822 if(requires_32bit[t]&~i_is32) return 0;
3823 else return 1;
3824 }
3825 return 0;
3826}
3827
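// Write back dirty registers whose host-register assignment is lost
// between the previous map (pre[]) and the next entry map (entry[]),
// skipping values marked unneeded (u/uu); registers still present in the
// new map, just in a different host register, are copied across with
// emit_mov.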
3828#ifndef wb_invalidate
3829void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3830 uint64_t u,uint64_t uu)
3831{
3832 int hr;
3833 for(hr=0;hr<HOST_REGS;hr++) {
3834 if(hr!=EXCLUDE_REG) {
3835 if(pre[hr]!=entry[hr]) {
3836 if(pre[hr]>=0) {
3837 if((dirty>>hr)&1) {
3838 if(get_reg(entry,pre[hr])<0) {
3839 if(pre[hr]<64) {
3840 if(!((u>>pre[hr])&1)) {
3841 emit_storereg(pre[hr],hr);
3842 if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3843 emit_sarimm(hr,31,hr);
3844 emit_storereg(pre[hr]|64,hr);
3845 }
3846 }
3847 }else{
3848 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3849 emit_storereg(pre[hr],hr);
3850 }
3851 }
3852 }
3853 }
3854 }
3855 }
3856 }
3857 }
3858 // Move from one register to another (no writeback)
3859 for(hr=0;hr<HOST_REGS;hr++) {
3860 if(hr!=EXCLUDE_REG) {
3861 if(pre[hr]!=entry[hr]) {
3862 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3863 int nr;
3864 if((nr=get_reg(entry,pre[hr]))>=0) {
3865 emit_mov(hr,nr);
3866 }
3867 }
3868 }
3869 }
3870 }
3871}
3872#endif
3873
3874// Load the specified registers
3875// This only loads the registers given as arguments because
3876// we don't want to load things that will be overwritten
3877void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3878{
3879 int hr;
3880 // Load 32-bit regs
3881 for(hr=0;hr<HOST_REGS;hr++) {
3882 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3883 if(entry[hr]!=regmap[hr]) {
3884 if(regmap[hr]==rs1||regmap[hr]==rs2)
3885 {
3886 if(regmap[hr]==0) {
3887 emit_zeroreg(hr);
3888 }
3889 else
3890 {
3891 emit_loadreg(regmap[hr],hr);
3892 }
3893 }
3894 }
3895 }
3896 }
3897 //Load 64-bit regs
3898 for(hr=0;hr<HOST_REGS;hr++) {
3899 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3900 if(entry[hr]!=regmap[hr]) {
3901 if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3902 {
3903 assert(regmap[hr]!=64);
3904 if((is32>>(regmap[hr]&63))&1) {
3905 int lr=get_reg(regmap,regmap[hr]-64);
3906 if(lr>=0)
3907 emit_sarimm(lr,31,hr);
3908 else
3909 emit_loadreg(regmap[hr],hr);
3910 }
3911 else
3912 {
3913 emit_loadreg(regmap[hr],hr);
3914 }
3915 }
3916 }
3917 }
3918 }
3919}
3920
3921// Load registers prior to the start of a loop
3922// so that they are not loaded within the loop
3923static void loop_preload(signed char pre[],signed char entry[])
3924{
3925 int hr;
3926 for(hr=0;hr<HOST_REGS;hr++) {
3927 if(hr!=EXCLUDE_REG) {
3928 if(pre[hr]!=entry[hr]) {
3929 if(entry[hr]>=0) {
3930 if(get_reg(pre,entry[hr])<0) {
3931 assem_debug("loop preload:\n");
3932 //printf("loop preload: %d\n",hr);
3933 if(entry[hr]==0) {
3934 emit_zeroreg(hr);
3935 }
3936 else if(entry[hr]<TEMPREG)
3937 {
3938 emit_loadreg(entry[hr],hr);
3939 }
3940 else if(entry[hr]-64<TEMPREG)
3941 {
3942 emit_loadreg(entry[hr],hr);
3943 }
3944 }
3945 }
3946 }
3947 }
3948 }
3949}
3950
3951// Generate address for load/store instruction
b9b61529 3952// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
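// Editorial summary: the effective address (base register + 16-bit
// immediate) is computed ahead of the memory instruction into a scratch
// register; the scratch slot alternates with the instruction index
// (AGEN1+(i&1)) so the address for instruction i+1 can be preloaded while i
// is being handled, and MGEN1+(i&1) holds the memory-map constant used by
// stores (and TLB paths) to catch self-modifying code.  When the base is a
// known constant the final address is emitted as an immediate, masked to
// 4/8-byte alignment for LWL/LWR (0x22/0x26) and LDL/LDR (0x1a/0x1b).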
57871462 3953void address_generation(int i,struct regstat *i_regs,signed char entry[])
3954{
b9b61529 3955 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
57871462 3956 int ra;
3957 int agr=AGEN1+(i&1);
3958 int mgr=MGEN1+(i&1);
3959 if(itype[i]==LOAD) {
3960 ra=get_reg(i_regs->regmap,rt1[i]);
3961 //if(rt1[i]) assert(ra>=0);
3962 }
3963 if(itype[i]==LOADLR) {
3964 ra=get_reg(i_regs->regmap,FTEMP);
3965 }
3966 if(itype[i]==STORE||itype[i]==STORELR) {
3967 ra=get_reg(i_regs->regmap,agr);
3968 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3969 }
b9b61529 3970 if(itype[i]==C1LS||itype[i]==C2LS) {
3971 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
57871462 3972 ra=get_reg(i_regs->regmap,FTEMP);
1fd1aceb 3973 else { // SWC1/SDC1/SWC2/SDC2
57871462 3974 ra=get_reg(i_regs->regmap,agr);
3975 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3976 }
3977 }
3978 int rs=get_reg(i_regs->regmap,rs1[i]);
3979 int rm=get_reg(i_regs->regmap,TLREG);
3980 if(ra>=0) {
3981 int offset=imm[i];
3982 int c=(i_regs->wasconst>>rs)&1;
3983 if(rs1[i]==0) {
3984 // Using r0 as a base address
3985 /*if(rm>=0) {
3986 if(!entry||entry[rm]!=mgr) {
3987 generate_map_const(offset,rm);
3988 } // else did it in the previous cycle
3989 }*/
3990 if(!entry||entry[ra]!=agr) {
3991 if (opcode[i]==0x22||opcode[i]==0x26) {
3992 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3993 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3994 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3995 }else{
3996 emit_movimm(offset,ra);
3997 }
3998 } // else did it in the previous cycle
3999 }
4000 else if(rs<0) {
4001 if(!entry||entry[ra]!=rs1[i])
4002 emit_loadreg(rs1[i],ra);
4003 //if(!entry||entry[ra]!=rs1[i])
4004 // printf("poor load scheduling!\n");
4005 }
4006 else if(c) {
4007 if(rm>=0) {
4008 if(!entry||entry[rm]!=mgr) {
b9b61529 4009 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
57871462 4010 // Stores to memory go thru the mapper to detect self-modifying
4011 // code, loads don't.
4012 if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4cb76aa4 4013 (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4014 generate_map_const(constmap[i][rs]+offset,rm);
4015 }else{
4016 if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4017 generate_map_const(constmap[i][rs]+offset,rm);
4018 }
4019 }
4020 }
4021 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4022 if(!entry||entry[ra]!=agr) {
4023 if (opcode[i]==0x22||opcode[i]==0x26) {
4024 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4025 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4026 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4027 }else{
4028 #ifdef HOST_IMM_ADDR32
b9b61529 4029 if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4030 (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4031 #endif
4032 emit_movimm(constmap[i][rs]+offset,ra);
4033 }
4034 } // else did it in the previous cycle
4035 } // else load_consts already did it
4036 }
4037 if(offset&&!c&&rs1[i]) {
4038 if(rs>=0) {
4039 emit_addimm(rs,offset,ra);
4040 }else{
4041 emit_addimm(ra,offset,ra);
4042 }
4043 }
4044 }
4045 }
4046 // Preload constants for next instruction
b9b61529 4047 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 4048 int agr,ra;
4049 #ifndef HOST_IMM_ADDR32
4050 // Mapper entry
4051 agr=MGEN1+((i+1)&1);
4052 ra=get_reg(i_regs->regmap,agr);
4053 if(ra>=0) {
4054 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4055 int offset=imm[i+1];
4056 int c=(regs[i+1].wasconst>>rs)&1;
4057 if(c) {
b9b61529 4058 if(itype[i+1]==STORE||itype[i+1]==STORELR
4059 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
57871462 4060 // Stores to memory go thru the mapper to detect self-modifying
4061 // code, loads don't.
4062 if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4cb76aa4 4063 (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4064 generate_map_const(constmap[i+1][rs]+offset,ra);
4065 }else{
4066 if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4067 generate_map_const(constmap[i+1][rs]+offset,ra);
4068 }
4069 }
4070 /*else if(rs1[i]==0) {
4071 generate_map_const(offset,ra);
4072 }*/
4073 }
4074 #endif
4075 // Actual address
4076 agr=AGEN1+((i+1)&1);
4077 ra=get_reg(i_regs->regmap,agr);
4078 if(ra>=0) {
4079 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4080 int offset=imm[i+1];
4081 int c=(regs[i+1].wasconst>>rs)&1;
4082 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4083 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4084 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4085 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4086 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4087 }else{
4088 #ifdef HOST_IMM_ADDR32
b9b61529 4089 if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4090 (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4091 #endif
4092 emit_movimm(constmap[i+1][rs]+offset,ra);
4093 }
4094 }
4095 else if(rs1[i+1]==0) {
4096 // Using r0 as a base address
4097 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4098 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4099 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4100 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4101 }else{
4102 emit_movimm(offset,ra);
4103 }
4104 }
4105 }
4106 }
4107}
4108
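// Editorial note: get_final_value() walks forward from instruction i while
// host register hr keeps the same guest register, stays constant and no
// branch target (bt[]) intervenes, and reports the last constant in that
// run, so a register rewritten with several constants in a row is only
// materialized once.  If the run ends at a load that overwrites its own base
// register (rs1==rt1), base+offset is returned instead so the address
// computation is folded into the constant; a return of 0 tells the caller
// not to emit the immediate at all (value unneeded, or the address can be
// encoded directly on HOST_IMM_ADDR32 targets).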
4109int get_final_value(int hr, int i, int *value)
4110{
4111 int reg=regs[i].regmap[hr];
4112 while(i<slen-1) {
4113 if(regs[i+1].regmap[hr]!=reg) break;
4114 if(!((regs[i+1].isconst>>hr)&1)) break;
4115 if(bt[i+1]) break;
4116 i++;
4117 }
4118 if(i<slen-1) {
4119 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4120 *value=constmap[i][hr];
4121 return 1;
4122 }
4123 if(!bt[i+1]) {
4124 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4125 // Load in delay slot, out-of-order execution
4126 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4127 {
4128 #ifdef HOST_IMM_ADDR32
4129 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4130 #endif
4131 // Precompute load address
4132 *value=constmap[i][hr]+imm[i+2];
4133 return 1;
4134 }
4135 }
4136 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4137 {
4138 #ifdef HOST_IMM_ADDR32
4139 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4140 #endif
4141 // Precompute load address
4142 *value=constmap[i][hr]+imm[i+1];
4143 //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4144 return 1;
4145 }
4146 }
4147 }
4148 *value=constmap[i][hr];
4149 //printf("c=%x\n",(int)constmap[i][hr]);
4150 if(i==slen-1) return 1;
4151 if(reg<64) {
4152 return !((unneeded_reg[i+1]>>reg)&1);
4153 }else{
4154 return !((unneeded_reg_upper[i+1]>>reg)&1);
4155 }
4156}
4157
4158// Load registers with known constants
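// Editorial note: regs[i].isconst marks host registers whose guest register
// holds a compile-time constant (value tracked in constmap[i][hr]); the
// constant is only materialized when it first becomes live on this path
// (previous instruction not constant, mapping changed, or a branch target),
// and get_final_value() picks the value that will ultimately be needed.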
4159void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4160{
4161 int hr;
4162 // Load 32-bit regs
4163 for(hr=0;hr<HOST_REGS;hr++) {
4164 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4165 //if(entry[hr]!=regmap[hr]) {
4166 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4167 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4168 int value;
4169 if(get_final_value(hr,i,&value)) {
4170 if(value==0) {
4171 emit_zeroreg(hr);
4172 }
4173 else {
4174 emit_movimm(value,hr);
4175 }
4176 }
4177 }
4178 }
4179 }
4180 }
4181 // Load 64-bit regs
4182 for(hr=0;hr<HOST_REGS;hr++) {
4183 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4184 //if(entry[hr]!=regmap[hr]) {
4185 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4186 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4187 if((is32>>(regmap[hr]&63))&1) {
4188 int lr=get_reg(regmap,regmap[hr]-64);
4189 assert(lr>=0);
4190 emit_sarimm(lr,31,hr);
4191 }
4192 else
4193 {
4194 int value;
4195 if(get_final_value(hr,i,&value)) {
4196 if(value==0) {
4197 emit_zeroreg(hr);
4198 }
4199 else {
4200 emit_movimm(value,hr);
4201 }
4202 }
4203 }
4204 }
4205 }
4206 }
4207 }
4208}
4209void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4210{
4211 int hr;
4212 // Load 32-bit regs
4213 for(hr=0;hr<HOST_REGS;hr++) {
4214 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4215 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4216 int value=constmap[i][hr];
4217 if(value==0) {
4218 emit_zeroreg(hr);
4219 }
4220 else {
4221 emit_movimm(value,hr);
4222 }
4223 }
4224 }
4225 }
4226 // Load 64-bit regs
4227 for(hr=0;hr<HOST_REGS;hr++) {
4228 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4229 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4230 if((is32>>(regmap[hr]&63))&1) {
4231 int lr=get_reg(regmap,regmap[hr]-64);
4232 assert(lr>=0);
4233 emit_sarimm(lr,31,hr);
4234 }
4235 else
4236 {
4237 int value=constmap[i][hr];
4238 if(value==0) {
4239 emit_zeroreg(hr);
4240 }
4241 else {
4242 emit_movimm(value,hr);
4243 }
4244 }
4245 }
4246 }
4247 }
4248}
4249
4250// Write out all dirty registers (except cycle count)
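// Editorial note: "dirty" means the host register holds a newer value than
// the in-memory guest register file, so it must be stored before control
// leaves the compiled code.  CCREG is excluded (the cycle count is written
// by the callers as needed); on non-FORCE32 builds the upper word is stored
// either as the real upper half or as the sign extension of a 32-bit value,
// depending on i_is32.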
4251void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4252{
4253 int hr;
4254 for(hr=0;hr<HOST_REGS;hr++) {
4255 if(hr!=EXCLUDE_REG) {
4256 if(i_regmap[hr]>0) {
4257 if(i_regmap[hr]!=CCREG) {
4258 if((i_dirty>>hr)&1) {
4259 if(i_regmap[hr]<64) {
4260 emit_storereg(i_regmap[hr],hr);
24385cae 4261#ifndef FORCE32
57871462 4262 if( ((i_is32>>i_regmap[hr])&1) ) {
4263 #ifdef DESTRUCTIVE_WRITEBACK
4264 emit_sarimm(hr,31,hr);
4265 emit_storereg(i_regmap[hr]|64,hr);
4266 #else
4267 emit_sarimm(hr,31,HOST_TEMPREG);
4268 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4269 #endif
4270 }
24385cae 4271#endif
57871462 4272 }else{
4273 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4274 emit_storereg(i_regmap[hr],hr);
4275 }
4276 }
4277 }
4278 }
4279 }
4280 }
4281 }
4282}
4283// Write out dirty registers that we need to reload (pair with load_needed_regs)
4284// This writes the registers not written by store_regs_bt
4285void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4286{
4287 int hr;
4288 int t=(addr-start)>>2;
4289 for(hr=0;hr<HOST_REGS;hr++) {
4290 if(hr!=EXCLUDE_REG) {
4291 if(i_regmap[hr]>0) {
4292 if(i_regmap[hr]!=CCREG) {
4293 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4294 if((i_dirty>>hr)&1) {
4295 if(i_regmap[hr]<64) {
4296 emit_storereg(i_regmap[hr],hr);
24385cae 4297#ifndef FORCE32
57871462 4298 if( ((i_is32>>i_regmap[hr])&1) ) {
4299 #ifdef DESTRUCTIVE_WRITEBACK
4300 emit_sarimm(hr,31,hr);
4301 emit_storereg(i_regmap[hr]|64,hr);
4302 #else
4303 emit_sarimm(hr,31,HOST_TEMPREG);
4304 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4305 #endif
4306 }
24385cae 4307#endif
57871462 4308 }else{
4309 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4310 emit_storereg(i_regmap[hr],hr);
4311 }
4312 }
4313 }
4314 }
4315 }
4316 }
4317 }
4318 }
4319}
4320
4321// Load all registers (except cycle count)
4322void load_all_regs(signed char i_regmap[])
4323{
4324 int hr;
4325 for(hr=0;hr<HOST_REGS;hr++) {
4326 if(hr!=EXCLUDE_REG) {
4327 if(i_regmap[hr]==0) {
4328 emit_zeroreg(hr);
4329 }
4330 else
4331 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4332 {
4333 emit_loadreg(i_regmap[hr],hr);
4334 }
4335 }
4336 }
4337}
4338
4339// Load all current registers also needed by next instruction
4340void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4341{
4342 int hr;
4343 for(hr=0;hr<HOST_REGS;hr++) {
4344 if(hr!=EXCLUDE_REG) {
4345 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4346 if(i_regmap[hr]==0) {
4347 emit_zeroreg(hr);
4348 }
4349 else
4350 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4351 {
4352 emit_loadreg(i_regmap[hr],hr);
4353 }
4354 }
4355 }
4356 }
4357}
4358
4359// Load all regs, storing cycle count if necessary
4360void load_regs_entry(int t)
4361{
4362 int hr;
4363 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4364 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4365 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4366 emit_storereg(CCREG,HOST_CCREG);
4367 }
4368 // Load 32-bit regs
4369 for(hr=0;hr<HOST_REGS;hr++) {
4370 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4371 if(regs[t].regmap_entry[hr]==0) {
4372 emit_zeroreg(hr);
4373 }
4374 else if(regs[t].regmap_entry[hr]!=CCREG)
4375 {
4376 emit_loadreg(regs[t].regmap_entry[hr],hr);
4377 }
4378 }
4379 }
4380 // Load 64-bit regs
4381 for(hr=0;hr<HOST_REGS;hr++) {
4382 if(regs[t].regmap_entry[hr]>=64) {
4383 assert(regs[t].regmap_entry[hr]!=64);
4384 if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4385 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4386 if(lr<0) {
4387 emit_loadreg(regs[t].regmap_entry[hr],hr);
4388 }
4389 else
4390 {
4391 emit_sarimm(lr,31,hr);
4392 }
4393 }
4394 else
4395 {
4396 emit_loadreg(regs[t].regmap_entry[hr],hr);
4397 }
4398 }
4399 }
4400}
4401
4402// Store dirty registers prior to branch
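// Editorial note: for a branch that stays inside this block only the dirty
// registers the target entry cannot accept as-is, and that it still needs
// (unneeded_reg/unneeded_reg_upper), are written back; a branch out of the
// block flushes everything through wb_dirtys() since the target's register
// state is unknown here.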
4403void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4404{
4405 if(internal_branch(i_is32,addr))
4406 {
4407 int t=(addr-start)>>2;
4408 int hr;
4409 for(hr=0;hr<HOST_REGS;hr++) {
4410 if(hr!=EXCLUDE_REG) {
4411 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4412 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4413 if((i_dirty>>hr)&1) {
4414 if(i_regmap[hr]<64) {
4415 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4416 emit_storereg(i_regmap[hr],hr);
4417 if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4418 #ifdef DESTRUCTIVE_WRITEBACK
4419 emit_sarimm(hr,31,hr);
4420 emit_storereg(i_regmap[hr]|64,hr);
4421 #else
4422 emit_sarimm(hr,31,HOST_TEMPREG);
4423 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4424 #endif
4425 }
4426 }
4427 }else{
4428 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4429 emit_storereg(i_regmap[hr],hr);
4430 }
4431 }
4432 }
4433 }
4434 }
4435 }
4436 }
4437 }
4438 else
4439 {
4440 // Branch out of this block, write out all dirty regs
4441 wb_dirtys(i_regmap,i_is32,i_dirty);
4442 }
4443}
4444
4445// Load all needed registers for branch target
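// Editorial note: the mirror image of store_regs_bt(): for an internal
// branch the registers the target entry expects in a host register we do not
// currently have there are (re)loaded, with upper halves either loaded or
// recreated by sign-extending the low half when i_is32 says the value is
// 32-bit; nothing is loaded here for branches that leave the block.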
4446void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4447{
4448 //if(addr>=start && addr<(start+slen*4))
4449 if(internal_branch(i_is32,addr))
4450 {
4451 int t=(addr-start)>>2;
4452 int hr;
4453 // Store the cycle count before loading something else
4454 if(i_regmap[HOST_CCREG]!=CCREG) {
4455 assert(i_regmap[HOST_CCREG]==-1);
4456 }
4457 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4458 emit_storereg(CCREG,HOST_CCREG);
4459 }
4460 // Load 32-bit regs
4461 for(hr=0;hr<HOST_REGS;hr++) {
4462 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4463 #ifdef DESTRUCTIVE_WRITEBACK
4464 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4465 #else
4466 if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4467 #endif
4468 if(regs[t].regmap_entry[hr]==0) {
4469 emit_zeroreg(hr);
4470 }
4471 else if(regs[t].regmap_entry[hr]!=CCREG)
4472 {
4473 emit_loadreg(regs[t].regmap_entry[hr],hr);
4474 }
4475 }
4476 }
4477 }
4478 // Load 64-bit regs
4479 for(hr=0;hr<HOST_REGS;hr++) {
4480 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4481 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4482 assert(regs[t].regmap_entry[hr]!=64);
4483 if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4484 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4485 if(lr<0) {
4486 emit_loadreg(regs[t].regmap_entry[hr],hr);
4487 }
4488 else
4489 {
4490 emit_sarimm(lr,31,hr);
4491 }
4492 }
4493 else
4494 {
4495 emit_loadreg(regs[t].regmap_entry[hr],hr);
4496 }
4497 }
4498 else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4499 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4500 assert(lr>=0);
4501 emit_sarimm(lr,31,hr);
4502 }
4503 }
4504 }
4505 }
4506}
4507
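// Does the current register state already match what the branch target
// expects?  (editorial summary) A match means the target can be entered with
// no fix-up code: same guest-to-host mapping, nothing dirty that the target
// does not also expect dirty (unless it is unneeded there), compatible
// 32/64-bit assumptions, and the target is not itself a delay slot.  For
// branches that leave the block only a clean state (nothing dirty except the
// cycle count) matches.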
4508int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4509{
4510 if(addr>=start && addr<start+slen*4-4)
4511 {
4512 int t=(addr-start)>>2;
4513 int hr;
4514 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4515 for(hr=0;hr<HOST_REGS;hr++)
4516 {
4517 if(hr!=EXCLUDE_REG)
4518 {
4519 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4520 {
4521 if(regs[t].regmap_entry[hr]!=-1)
4522 {
4523 return 0;
4524 }
4525 else
4526 if((i_dirty>>hr)&1)
4527 {
4528 if(i_regmap[hr]<64)
4529 {
4530 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4531 return 0;
4532 }
4533 else
4534 {
4535 if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4536 return 0;
4537 }
4538 }
4539 }
4540 else // Same register but is it 32-bit or dirty?
4541 if(i_regmap[hr]>=0)
4542 {
4543 if(!((regs[t].dirty>>hr)&1))
4544 {
4545 if((i_dirty>>hr)&1)
4546 {
4547 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4548 {
4549 //printf("%x: dirty no match\n",addr);
4550 return 0;
4551 }
4552 }
4553 }
4554 if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4555 {
4556 //printf("%x: is32 no match\n",addr);
4557 return 0;
4558 }
4559 }
4560 }
4561 }
4562 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4563 if(requires_32bit[t]&~i_is32) return 0;
4564 // Delay slots are not valid branch targets
4565 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4566 // Delay slots require additional processing, so do not match
4567 if(is_ds[t]) return 0;
4568 }
4569 else
4570 {
4571 int hr;
4572 for(hr=0;hr<HOST_REGS;hr++)
4573 {
4574 if(hr!=EXCLUDE_REG)
4575 {
4576 if(i_regmap[hr]>=0)
4577 {
4578 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4579 {
4580 if((i_dirty>>hr)&1)
4581 {
4582 return 0;
4583 }
4584 }
4585 }
4586 }
4587 }
4588 }
4589 return 1;
4590}
4591
4592// Used when a branch jumps into the delay slot of another branch
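// Editorial note: the delay-slot instruction at the target is re-assembled
// here as a stand-alone entry point using its recorded entry register map,
// then control continues at ba[i]+4, which is required to be internal (see
// the assert below); branch itypes only get a warning because a branch
// inside a delay slot is not supported.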
4593void ds_assemble_entry(int i)
4594{
4595 int t=(ba[i]-start)>>2;
4596 if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4597 assem_debug("Assemble delay slot at %x\n",ba[i]);
4598 assem_debug("<->\n");
4599 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4600 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4601 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4602 address_generation(t,&regs[t],regs[t].regmap_entry);
b9b61529 4603 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
57871462 4604 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4605 cop1_usable=0;
4606 is_delayslot=0;
4607 switch(itype[t]) {
4608 case ALU:
4609 alu_assemble(t,&regs[t]);break;
4610 case IMM16:
4611 imm16_assemble(t,&regs[t]);break;
4612 case SHIFT:
4613 shift_assemble(t,&regs[t]);break;
4614 case SHIFTIMM:
4615 shiftimm_assemble(t,&regs[t]);break;
4616 case LOAD:
4617 load_assemble(t,&regs[t]);break;
4618 case LOADLR:
4619 loadlr_assemble(t,&regs[t]);break;
4620 case STORE:
4621 store_assemble(t,&regs[t]);break;
4622 case STORELR:
4623 storelr_assemble(t,&regs[t]);break;
4624 case COP0:
4625 cop0_assemble(t,&regs[t]);break;
4626 case COP1:
4627 cop1_assemble(t,&regs[t]);break;
4628 case C1LS:
4629 c1ls_assemble(t,&regs[t]);break;
b9b61529 4630 case COP2:
4631 cop2_assemble(t,&regs[t]);break;
4632 case C2LS:
4633 c2ls_assemble(t,&regs[t]);break;
4634 case C2OP:
4635 c2op_assemble(t,&regs[t]);break;
57871462 4636 case FCONV:
4637 fconv_assemble(t,&regs[t]);break;
4638 case FLOAT:
4639 float_assemble(t,&regs[t]);break;
4640 case FCOMP:
4641 fcomp_assemble(t,&regs[t]);break;
4642 case MULTDIV:
4643 multdiv_assemble(t,&regs[t]);break;
4644 case MOV:
4645 mov_assemble(t,&regs[t]);break;
4646 case SYSCALL:
7139f3c8 4647 case HLECALL:
57871462 4648 case SPAN:
4649 case UJUMP:
4650 case RJUMP:
4651 case CJUMP:
4652 case SJUMP:
4653 case FJUMP:
4654 printf("Jump in the delay slot. This is probably a bug.\n");
4655 }
4656 store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4657 load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4658 if(internal_branch(regs[t].is32,ba[i]+4))
4659 assem_debug("branch: internal\n");
4660 else
4661 assem_debug("branch: external\n");
4662 assert(internal_branch(regs[t].is32,ba[i]+4));
4663 add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4664 emit_jmp(0);
4665}
4666
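// Editorial summary: do_cc() emits the cycle-count check for a branch.  The
// counter in HOST_CCREG is kept negative until the next event; the emitted
// add/compare jumps to a CC_STUB once it reaches zero (emit_jns).  *adj
// returns the cycles the target block already accounts for (ccadj[t]) so the
// caller avoids double-counting, and a branch to itself with a NOP delay
// slot is treated as an idle loop: the remaining count is collapsed (ANDed
// with 3) and control always goes to the stub instead of spinning.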
4667void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4668{
4669 int count;
4670 int jaddr;
4671 int idle=0;
4672 if(itype[i]==RJUMP)
4673 {
4674 *adj=0;
4675 }
4676 //if(ba[i]>=start && ba[i]<(start+slen*4))
4677 if(internal_branch(branch_regs[i].is32,ba[i]))
4678 {
4679 int t=(ba[i]-start)>>2;
4680 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4681 else *adj=ccadj[t];
4682 }
4683 else
4684 {
4685 *adj=0;
4686 }
4687 count=ccadj[i];
4688 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4689 // Idle loop
4690 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4691 idle=(int)out;
4692 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4693 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4694 jaddr=(int)out;
4695 emit_jmp(0);
4696 }
4697 else if(*adj==0||invert) {
4698 emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4699 jaddr=(int)out;
4700 emit_jns(0);
4701 }
4702 else
4703 {
4704 emit_cmpimm(HOST_CCREG,-2*(count+2));
4705 jaddr=(int)out;
4706 emit_jns(0);
4707 }
4708 add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4709}
4710
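// Editorial summary: do_ccstub() is the out-of-line path taken when the
// cycle count expired in do_cc().  It writes back the register state the
// branch direction requires, stores the resulting PC in pcaddr (computed
// with cmov sequences when the branch outcome is only known here), calls
// cc_interrupt(), reloads registers for the taken / not-taken / nullified
// case, and jumps back to the recorded return address inside the block.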
4711void do_ccstub(int n)
4712{
4713 literal_pool(256);
4714 assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4715 set_jump_target(stubs[n][1],(int)out);
4716 int i=stubs[n][4];
4717 if(stubs[n][6]==NULLDS) {
4718 // Delay slot instruction is nullified ("likely" branch)
4719 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4720 }
4721 else if(stubs[n][6]!=TAKEN) {
4722 wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4723 }
4724 else {
4725 if(internal_branch(branch_regs[i].is32,ba[i]))
4726 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4727 }
4728 if(stubs[n][5]!=-1)
4729 {
4730 // Save PC as return address
4731 emit_movimm(stubs[n][5],EAX);
4732 emit_writeword(EAX,(int)&pcaddr);
4733 }
4734 else
4735 {
4736 // Return address depends on which way the branch goes
4737 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4738 {
4739 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4740 int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4741 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4742 int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4743 if(rs1[i]==0)
4744 {
4745 s1l=s2l;s1h=s2h;
4746 s2l=s2h=-1;
4747 }
4748 else if(rs2[i]==0)
4749 {
4750 s2l=s2h=-1;
4751 }
4752 if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4753 s1h=s2h=-1;
4754 }
4755 assert(s1l>=0);
4756 #ifdef DESTRUCTIVE_WRITEBACK
4757 if(rs1[i]) {
4758 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4759 emit_loadreg(rs1[i],s1l);
4760 }
4761 else {
4762 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4763 emit_loadreg(rs2[i],s1l);
4764 }
4765 if(s2l>=0)
4766 if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4767 emit_loadreg(rs2[i],s2l);
4768 #endif
4769 int hr=0;
4770 int addr,alt,ntaddr;
4771 while(hr<HOST_REGS)
4772 {
4773 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4774 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4775 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4776 {
4777 addr=hr++;break;
4778 }
4779 hr++;
4780 }
4781 while(hr<HOST_REGS)
4782 {
4783 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4784 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4785 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4786 {
4787 alt=hr++;break;
4788 }
4789 hr++;
4790 }
4791 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4792 {
4793 while(hr<HOST_REGS)
4794 {
4795 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4796 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4797 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4798 {
4799 ntaddr=hr;break;
4800 }
4801 hr++;
4802 }
4803 assert(hr<HOST_REGS);
4804 }
4805 if((opcode[i]&0x2f)==4) // BEQ
4806 {
4807 #ifdef HAVE_CMOV_IMM
4808 if(s1h<0) {
4809 if(s2l>=0) emit_cmp(s1l,s2l);
4810 else emit_test(s1l,s1l);
4811 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4812 }
4813 else
4814 #endif
4815 {
4816 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4817 if(s1h>=0) {
4818 if(s2h>=0) emit_cmp(s1h,s2h);
4819 else emit_test(s1h,s1h);
4820 emit_cmovne_reg(alt,addr);
4821 }
4822 if(s2l>=0) emit_cmp(s1l,s2l);
4823 else emit_test(s1l,s1l);
4824 emit_cmovne_reg(alt,addr);
4825 }
4826 }
4827 if((opcode[i]&0x2f)==5) // BNE
4828 {
4829 #ifdef HAVE_CMOV_IMM
4830 if(s1h<0) {
4831 if(s2l>=0) emit_cmp(s1l,s2l);
4832 else emit_test(s1l,s1l);
4833 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4834 }
4835 else
4836 #endif
4837 {
4838 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4839 if(s1h>=0) {
4840 if(s2h>=0) emit_cmp(s1h,s2h);
4841 else emit_test(s1h,s1h);
4842 emit_cmovne_reg(alt,addr);
4843 }
4844 if(s2l>=0) emit_cmp(s1l,s2l);
4845 else emit_test(s1l,s1l);
4846 emit_cmovne_reg(alt,addr);
4847 }
4848 }
4849 if((opcode[i]&0x2f)==6) // BLEZ
4850 {
4851 //emit_movimm(ba[i],alt);
4852 //emit_movimm(start+i*4+8,addr);
4853 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4854 emit_cmpimm(s1l,1);
4855 if(s1h>=0) emit_mov(addr,ntaddr);
4856 emit_cmovl_reg(alt,addr);
4857 if(s1h>=0) {
4858 emit_test(s1h,s1h);
4859 emit_cmovne_reg(ntaddr,addr);
4860 emit_cmovs_reg(alt,addr);
4861 }
4862 }
4863 if((opcode[i]&0x2f)==7) // BGTZ
4864 {
4865 //emit_movimm(ba[i],addr);
4866 //emit_movimm(start+i*4+8,ntaddr);
4867 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4868 emit_cmpimm(s1l,1);
4869 if(s1h>=0) emit_mov(addr,alt);
4870 emit_cmovl_reg(ntaddr,addr);
4871 if(s1h>=0) {
4872 emit_test(s1h,s1h);
4873 emit_cmovne_reg(alt,addr);
4874 emit_cmovs_reg(ntaddr,addr);
4875 }
4876 }
4877 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4878 {
4879 //emit_movimm(ba[i],alt);
4880 //emit_movimm(start+i*4+8,addr);
4881 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4882 if(s1h>=0) emit_test(s1h,s1h);
4883 else emit_test(s1l,s1l);
4884 emit_cmovs_reg(alt,addr);
4885 }
4886 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4887 {
4888 //emit_movimm(ba[i],addr);
4889 //emit_movimm(start+i*4+8,alt);
4890 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4891 if(s1h>=0) emit_test(s1h,s1h);
4892 else emit_test(s1l,s1l);
4893 emit_cmovs_reg(alt,addr);
4894 }
4895 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4896 if(source[i]&0x10000) // BC1T
4897 {
4898 //emit_movimm(ba[i],alt);
4899 //emit_movimm(start+i*4+8,addr);
4900 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4901 emit_testimm(s1l,0x800000);
4902 emit_cmovne_reg(alt,addr);
4903 }
4904 else // BC1F
4905 {
4906 //emit_movimm(ba[i],addr);
4907 //emit_movimm(start+i*4+8,alt);
4908 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4909 emit_testimm(s1l,0x800000);
4910 emit_cmovne_reg(alt,addr);
4911 }
4912 }
4913 emit_writeword(addr,(int)&pcaddr);
4914 }
4915 else
4916 if(itype[i]==RJUMP)
4917 {
4918 int r=get_reg(branch_regs[i].regmap,rs1[i]);
4919 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4920 r=get_reg(branch_regs[i].regmap,RTEMP);
4921 }
4922 emit_writeword(r,(int)&pcaddr);
4923 }
4924 else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4925 }
4926 // Update cycle count
4927 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4928 if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4929 emit_call((int)cc_interrupt);
4930 if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4931 if(stubs[n][6]==TAKEN) {
4932 if(internal_branch(branch_regs[i].is32,ba[i]))
4933 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4934 else if(itype[i]==RJUMP) {
4935 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4936 emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4937 else
4938 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4939 }
4940 }else if(stubs[n][6]==NOTTAKEN) {
4941 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4942 else load_all_regs(branch_regs[i].regmap);
4943 }else if(stubs[n][6]==NULLDS) {
4944 // Delay slot instruction is nullified ("likely" branch)
4945 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4946 else load_all_regs(regs[i].regmap);
4947 }else{
4948 load_all_regs(branch_regs[i].regmap);
4949 }
4950 emit_jmp(stubs[n][2]); // return address
4951
4952 /* This works but uses a lot of memory...
4953 emit_readword((int)&last_count,ECX);
4954 emit_add(HOST_CCREG,ECX,EAX);
4955 emit_writeword(EAX,(int)&Count);
4956 emit_call((int)gen_interupt);
4957 emit_readword((int)&Count,HOST_CCREG);
4958 emit_readword((int)&next_interupt,EAX);
4959 emit_readword((int)&pending_exception,EBX);
4960 emit_writeword(EAX,(int)&last_count);
4961 emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4962 emit_test(EBX,EBX);
4963 int jne_instr=(int)out;
4964 emit_jne(0);
4965 if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4966 load_all_regs(branch_regs[i].regmap);
4967 emit_jmp(stubs[n][2]); // return address
4968 set_jump_target(jne_instr,(int)out);
4969 emit_readword((int)&pcaddr,EAX);
4970 // Call get_addr_ht instead of doing the hash table here.
4971 // This code is executed infrequently and takes up a lot of space
4972 // so smaller is better.
4973 emit_storereg(CCREG,HOST_CCREG);
4974 emit_pushreg(EAX);
4975 emit_call((int)get_addr_ht);
4976 emit_loadreg(CCREG,HOST_CCREG);
4977 emit_addimm(ESP,4,ESP);
4978 emit_jmpreg(EAX);*/
4979}
4980
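// Editorial note: records a (code address, guest target, internal flag)
// triple; the jump itself is patched later, once the target's entry point is
// known.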
4981int add_to_linker(int addr,int target,int ext)
4982{
4983 link_addr[linkcount][0]=addr;
4984 link_addr[linkcount][1]=target;
4985 link_addr[linkcount][2]=ext;
4986 linkcount++;
4987}
4988
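// Editorial note: unconditional jumps (J/JAL).  The delay slot is assembled
// first, the return address start+i*4+8 is written to $31 when linking, the
// cycle check is emitted, and the branch is either chained directly into a
// delay-slot entry (ds_assemble_entry) or left for the linker to patch.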
4989void ujump_assemble(int i,struct regstat *i_regs)
4990{
4991 signed char *i_regmap=i_regs->regmap;
4992 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4993 address_generation(i+1,i_regs,regs[i].regmap_entry);
4994 #ifdef REG_PREFETCH
4995 int temp=get_reg(branch_regs[i].regmap,PTEMP);
4996 if(rt1[i]==31&&temp>=0)
4997 {
4998 int return_address=start+i*4+8;
4999 if(get_reg(branch_regs[i].regmap,31)>0)
5000 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5001 }
5002 #endif
5003 ds_assemble(i+1,i_regs);
5004 uint64_t bc_unneeded=branch_regs[i].u;
5005 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5006 bc_unneeded|=1|(1LL<<rt1[i]);
5007 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5008 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5009 bc_unneeded,bc_unneeded_upper);
5010 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5011 if(rt1[i]==31) {
5012 int rt;
5013 unsigned int return_address;
5014 assert(rt1[i+1]!=31);
5015 assert(rt2[i+1]!=31);
5016 rt=get_reg(branch_regs[i].regmap,31);
5017 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5018 //assert(rt>=0);
5019 return_address=start+i*4+8;
5020 if(rt>=0) {
5021 #ifdef USE_MINI_HT
5022 if(internal_branch(branch_regs[i].is32,return_address)) {
5023 int temp=rt+1;
5024 if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5025 branch_regs[i].regmap[temp]>=0)
5026 {
5027 temp=get_reg(branch_regs[i].regmap,-1);
5028 }
5029 #ifdef HOST_TEMPREG
5030 if(temp<0) temp=HOST_TEMPREG;
5031 #endif
5032 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5033 else emit_movimm(return_address,rt);
5034 }
5035 else
5036 #endif
5037 {
5038 #ifdef REG_PREFETCH
5039 if(temp>=0)
5040 {
5041 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5042 }
5043 #endif
5044 emit_movimm(return_address,rt); // PC into link register
5045 #ifdef IMM_PREFETCH
5046 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5047 #endif
5048 }
5049 }
5050 }
5051 int cc,adj;
5052 cc=get_reg(branch_regs[i].regmap,CCREG);
5053 assert(cc==HOST_CCREG);
5054 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5055 #ifdef REG_PREFETCH
5056 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5057 #endif
5058 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5059 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5060 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5061 if(internal_branch(branch_regs[i].is32,ba[i]))
5062 assem_debug("branch: internal\n");
5063 else
5064 assem_debug("branch: external\n");
5065 if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5066 ds_assemble_entry(i);
5067 }
5068 else {
5069 add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5070 emit_jmp(0);
5071 }
5072}
5073
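// Editorial note: register jumps (JR/JALR).  If the delay slot overwrites
// the jump register the target is first copied to RTEMP; the link register
// is written when the instruction links (rt1!=0); since the destination is
// only known at run time, control goes through jump_vaddr_reg[rs] (or the
// mini hash table when jumping through $31) to find the target block.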
5074void rjump_assemble(int i,struct regstat *i_regs)
5075{
5076 signed char *i_regmap=i_regs->regmap;
5077 int temp;
5078 int rs,cc,adj;
5079 rs=get_reg(branch_regs[i].regmap,rs1[i]);
5080 assert(rs>=0);
5081 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5082 // Delay slot abuse, make a copy of the branch address register
5083 temp=get_reg(branch_regs[i].regmap,RTEMP);
5084 assert(temp>=0);
5085 assert(regs[i].regmap[temp]==RTEMP);
5086 emit_mov(rs,temp);
5087 rs=temp;
5088 }
5089 address_generation(i+1,i_regs,regs[i].regmap_entry);
5090 #ifdef REG_PREFETCH
5091 if(rt1[i]==31)
5092 {
5093 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5094 int return_address=start+i*4+8;
5095 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5096 }
5097 }
5098 #endif
5099 #ifdef USE_MINI_HT
5100 if(rs1[i]==31) {
5101 int rh=get_reg(regs[i].regmap,RHASH);
5102 if(rh>=0) do_preload_rhash(rh);
5103 }
5104 #endif
5105 ds_assemble(i+1,i_regs);
5106 uint64_t bc_unneeded=branch_regs[i].u;
5107 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5108 bc_unneeded|=1|(1LL<<rt1[i]);
5109 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5110 bc_unneeded&=~(1LL<<rs1[i]);
5111 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5112 bc_unneeded,bc_unneeded_upper);
5113 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5067f341 5114 if(rt1[i]!=0) {
57871462 5115 int rt,return_address;
5067f341 5116 assert(rt1[i+1]!=rt1[i]);
5117 assert(rt2[i+1]!=rt1[i]);
5118 rt=get_reg(branch_regs[i].regmap,rt1[i]);
57871462 5119 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5120 assert(rt>=0);
5121 return_address=start+i*4+8;
5122 #ifdef REG_PREFETCH
5123 if(temp>=0)
5124 {
5125 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5126 }
5127 #endif
5128 emit_movimm(return_address,rt); // PC into link register
5129 #ifdef IMM_PREFETCH
5130 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5131 #endif
5132 }
5133 cc=get_reg(branch_regs[i].regmap,CCREG);
5134 assert(cc==HOST_CCREG);
5135 #ifdef USE_MINI_HT
5136 int rh=get_reg(branch_regs[i].regmap,RHASH);
5137 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5138 if(rs1[i]==31) {
5139 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5140 do_preload_rhtbl(ht);
5141 do_rhash(rs,rh);
5142 }
5143 #endif
5144 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5145 #ifdef DESTRUCTIVE_WRITEBACK
5146 if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5147 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5148 emit_loadreg(rs1[i],rs);
5149 }
5150 }
5151 #endif
5152 #ifdef REG_PREFETCH
5153 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5154 #endif
5155 #ifdef USE_MINI_HT
5156 if(rs1[i]==31) {
5157 do_miniht_load(ht,rh);
5158 }
5159 #endif
5160 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5161 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5162 //assert(adj==0);
5163 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5164 add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5165 emit_jns(0);
5166 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5167 #ifdef USE_MINI_HT
5168 if(rs1[i]==31) {
5169 do_miniht_jump(rs,rh,ht);
5170 }
5171 else
5172 #endif
5173 {
5174 //if(rs!=EAX) emit_mov(rs,EAX);
5175 //emit_jmp((int)jump_vaddr_eax);
5176 emit_jmp(jump_vaddr_reg[rs]);
5177 }
5178 /* Check hash table
5179 temp=!rs;
5180 emit_mov(rs,temp);
5181 emit_shrimm(rs,16,rs);
5182 emit_xor(temp,rs,rs);
5183 emit_movzwl_reg(rs,rs);
5184 emit_shlimm(rs,4,rs);
5185 emit_cmpmem_indexed((int)hash_table,rs,temp);
5186 emit_jne((int)out+14);
5187 emit_readword_indexed((int)hash_table+4,rs,rs);
5188 emit_jmpreg(rs);
5189 emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5190 emit_addimm_no_flags(8,rs);
5191 emit_jeq((int)out-17);
5192 // No hit on hash table, call compiler
5193 emit_pushreg(temp);
5194//DEBUG >
5195#ifdef DEBUG_CYCLE_COUNT
5196 emit_readword((int)&last_count,ECX);
5197 emit_add(HOST_CCREG,ECX,HOST_CCREG);
5198 emit_readword((int)&next_interupt,ECX);
5199 emit_writeword(HOST_CCREG,(int)&Count);
5200 emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5201 emit_writeword(ECX,(int)&last_count);
5202#endif
5203//DEBUG <
5204 emit_storereg(CCREG,HOST_CCREG);
5205 emit_call((int)get_addr);
5206 emit_loadreg(CCREG,HOST_CCREG);
5207 emit_addimm(ESP,4,ESP);
5208 emit_jmpreg(EAX);*/
5209 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5210 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5211 #endif
5212}
5213
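// Editorial note: BEQ/BNE/BLEZ/BGTZ and their "likely" forms.  Unless the
// branch is "likely" or the delay slot writes a register the branch reads,
// the delay slot is executed first (out of order) and a single conditional
// jump follows; otherwise the condition is tested first and the delay slot
// is assembled separately on the taken and not-taken paths, with likely
// branches nullifying it on the not-taken path.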
5214void cjump_assemble(int i,struct regstat *i_regs)
5215{
5216 signed char *i_regmap=i_regs->regmap;
5217 int cc;
5218 int match;
5219 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5220 assem_debug("match=%d\n",match);
5221 int s1h,s1l,s2h,s2l;
5222 int prev_cop1_usable=cop1_usable;
5223 int unconditional=0,nop=0;
5224 int only32=0;
5225 int ooo=1;
5226 int invert=0;
5227 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5228 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5229 if(likely[i]) ooo=0;
5230 if(!match) invert=1;
5231 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5232 if(i>(ba[i]-start)>>2) invert=1;
5233 #endif
5234
5235 if(ooo)
5236 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5237 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5238 {
5239 // Write-after-read dependency prevents out of order execution
5240 // First test branch condition, then execute delay slot, then branch
5241 ooo=0;
5242 }
5243
5244 if(ooo) {
5245 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5246 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5247 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5248 s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5249 }
5250 else {
5251 s1l=get_reg(i_regmap,rs1[i]);
5252 s1h=get_reg(i_regmap,rs1[i]|64);
5253 s2l=get_reg(i_regmap,rs2[i]);
5254 s2h=get_reg(i_regmap,rs2[i]|64);
5255 }
5256 if(rs1[i]==0&&rs2[i]==0)
5257 {
5258 if(opcode[i]&1) nop=1;
5259 else unconditional=1;
5260 //assert(opcode[i]!=5);
5261 //assert(opcode[i]!=7);
5262 //assert(opcode[i]!=0x15);
5263 //assert(opcode[i]!=0x17);
5264 }
5265 else if(rs1[i]==0)
5266 {
5267 s1l=s2l;s1h=s2h;
5268 s2l=s2h=-1;
5269 only32=(regs[i].was32>>rs2[i])&1;
5270 }
5271 else if(rs2[i]==0)
5272 {
5273 s2l=s2h=-1;
5274 only32=(regs[i].was32>>rs1[i])&1;
5275 }
5276 else {
5277 only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5278 }
5279
5280 if(ooo) {
5281 // Out of order execution (delay slot first)
5282 //printf("OOOE\n");
5283 address_generation(i+1,i_regs,regs[i].regmap_entry);
5284 ds_assemble(i+1,i_regs);
5285 int adj;
5286 uint64_t bc_unneeded=branch_regs[i].u;
5287 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5288 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5289 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5290 bc_unneeded|=1;
5291 bc_unneeded_upper|=1;
5292 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5293 bc_unneeded,bc_unneeded_upper);
5294 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5295 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5296 cc=get_reg(branch_regs[i].regmap,CCREG);
5297 assert(cc==HOST_CCREG);
5298 if(unconditional)
5299 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5300 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5301 //assem_debug("cycle count (adj)\n");
5302 if(unconditional) {
5303 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5304 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5305 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5306 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5307 if(internal)
5308 assem_debug("branch: internal\n");
5309 else
5310 assem_debug("branch: external\n");
5311 if(internal&&is_ds[(ba[i]-start)>>2]) {
5312 ds_assemble_entry(i);
5313 }
5314 else {
5315 add_to_linker((int)out,ba[i],internal);
5316 emit_jmp(0);
5317 }
5318 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5319 if(((u_int)out)&7) emit_addnop(0);
5320 #endif
5321 }
5322 }
5323 else if(nop) {
5324 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5325 int jaddr=(int)out;
5326 emit_jns(0);
5327 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5328 }
5329 else {
5330 int taken=0,nottaken=0,nottaken1=0;
5331 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5332 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5333 if(!only32)
5334 {
5335 assert(s1h>=0);
5336 if(opcode[i]==4) // BEQ
5337 {
5338 if(s2h>=0) emit_cmp(s1h,s2h);
5339 else emit_test(s1h,s1h);
5340 nottaken1=(int)out;
5341 emit_jne(1);
5342 }
5343 if(opcode[i]==5) // BNE
5344 {
5345 if(s2h>=0) emit_cmp(s1h,s2h);
5346 else emit_test(s1h,s1h);
5347 if(invert) taken=(int)out;
5348 else add_to_linker((int)out,ba[i],internal);
5349 emit_jne(0);
5350 }
5351 if(opcode[i]==6) // BLEZ
5352 {
5353 emit_test(s1h,s1h);
5354 if(invert) taken=(int)out;
5355 else add_to_linker((int)out,ba[i],internal);
5356 emit_js(0);
5357 nottaken1=(int)out;
5358 emit_jne(1);
5359 }
5360 if(opcode[i]==7) // BGTZ
5361 {
5362 emit_test(s1h,s1h);
5363 nottaken1=(int)out;
5364 emit_js(1);
5365 if(invert) taken=(int)out;
5366 else add_to_linker((int)out,ba[i],internal);
5367 emit_jne(0);
5368 }
5369 } // if(!only32)
5370
5371 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5372 assert(s1l>=0);
5373 if(opcode[i]==4) // BEQ
5374 {
5375 if(s2l>=0) emit_cmp(s1l,s2l);
5376 else emit_test(s1l,s1l);
5377 if(invert){
5378 nottaken=(int)out;
5379 emit_jne(1);
5380 }else{
5381 add_to_linker((int)out,ba[i],internal);
5382 emit_jeq(0);
5383 }
5384 }
5385 if(opcode[i]==5) // BNE
5386 {
5387 if(s2l>=0) emit_cmp(s1l,s2l);
5388 else emit_test(s1l,s1l);
5389 if(invert){
5390 nottaken=(int)out;
5391 emit_jeq(1);
5392 }else{
5393 add_to_linker((int)out,ba[i],internal);
5394 emit_jne(0);
5395 }
5396 }
5397 if(opcode[i]==6) // BLEZ
5398 {
5399 emit_cmpimm(s1l,1);
5400 if(invert){
5401 nottaken=(int)out;
5402 emit_jge(1);
5403 }else{
5404 add_to_linker((int)out,ba[i],internal);
5405 emit_jl(0);
5406 }
5407 }
5408 if(opcode[i]==7) // BGTZ
5409 {
5410 emit_cmpimm(s1l,1);
5411 if(invert){
5412 nottaken=(int)out;
5413 emit_jl(1);
5414 }else{
5415 add_to_linker((int)out,ba[i],internal);
5416 emit_jge(0);
5417 }
5418 }
5419 if(invert) {
5420 if(taken) set_jump_target(taken,(int)out);
5421 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5422 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5423 if(adj) {
5424 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5425 add_to_linker((int)out,ba[i],internal);
5426 }else{
5427 emit_addnop(13);
5428 add_to_linker((int)out,ba[i],internal*2);
5429 }
5430 emit_jmp(0);
5431 }else
5432 #endif
5433 {
5434 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5435 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5436 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5437 if(internal)
5438 assem_debug("branch: internal\n");
5439 else
5440 assem_debug("branch: external\n");
5441 if(internal&&is_ds[(ba[i]-start)>>2]) {
5442 ds_assemble_entry(i);
5443 }
5444 else {
5445 add_to_linker((int)out,ba[i],internal);
5446 emit_jmp(0);
5447 }
5448 }
5449 set_jump_target(nottaken,(int)out);
5450 }
5451
5452 if(nottaken1) set_jump_target(nottaken1,(int)out);
5453 if(adj) {
5454 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5455 }
5456 } // (!unconditional)
5457 } // if(ooo)
5458 else
5459 {
5460 // In-order execution (branch first)
5461 //if(likely[i]) printf("IOL\n");
5462 //else
5463 //printf("IOE\n");
5464 int taken=0,nottaken=0,nottaken1=0;
5465 if(!unconditional&&!nop) {
5466 if(!only32)
5467 {
5468 assert(s1h>=0);
5469 if((opcode[i]&0x2f)==4) // BEQ
5470 {
5471 if(s2h>=0) emit_cmp(s1h,s2h);
5472 else emit_test(s1h,s1h);
5473 nottaken1=(int)out;
5474 emit_jne(2);
5475 }
5476 if((opcode[i]&0x2f)==5) // BNE
5477 {
5478 if(s2h>=0) emit_cmp(s1h,s2h);
5479 else emit_test(s1h,s1h);
5480 taken=(int)out;
5481 emit_jne(1);
5482 }
5483 if((opcode[i]&0x2f)==6) // BLEZ
5484 {
5485 emit_test(s1h,s1h);
5486 taken=(int)out;
5487 emit_js(1);
5488 nottaken1=(int)out;
5489 emit_jne(2);
5490 }
5491 if((opcode[i]&0x2f)==7) // BGTZ
5492 {
5493 emit_test(s1h,s1h);
5494 nottaken1=(int)out;
5495 emit_js(2);
5496 taken=(int)out;
5497 emit_jne(1);
5498 }
5499 } // if(!only32)
5500
5501 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5502 assert(s1l>=0);
5503 if((opcode[i]&0x2f)==4) // BEQ
5504 {
5505 if(s2l>=0) emit_cmp(s1l,s2l);
5506 else emit_test(s1l,s1l);
5507 nottaken=(int)out;
5508 emit_jne(2);
5509 }
5510 if((opcode[i]&0x2f)==5) // BNE
5511 {
5512 if(s2l>=0) emit_cmp(s1l,s2l);
5513 else emit_test(s1l,s1l);
5514 nottaken=(int)out;
5515 emit_jeq(2);
5516 }
5517 if((opcode[i]&0x2f)==6) // BLEZ
5518 {
5519 emit_cmpimm(s1l,1);
5520 nottaken=(int)out;
5521 emit_jge(2);
5522 }
5523 if((opcode[i]&0x2f)==7) // BGTZ
5524 {
5525 emit_cmpimm(s1l,1);
5526 nottaken=(int)out;
5527 emit_jl(2);
5528 }
5529 } // if(!unconditional)
5530 int adj;
5531 uint64_t ds_unneeded=branch_regs[i].u;
5532 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5533 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5534 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5535 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5536 ds_unneeded|=1;
5537 ds_unneeded_upper|=1;
5538 // branch taken
5539 if(!nop) {
5540 if(taken) set_jump_target(taken,(int)out);
5541 assem_debug("1:\n");
5542 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5543 ds_unneeded,ds_unneeded_upper);
5544 // load regs
5545 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5546 address_generation(i+1,&branch_regs[i],0);
5547 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5548 ds_assemble(i+1,&branch_regs[i]);
5549 cc=get_reg(branch_regs[i].regmap,CCREG);
5550 if(cc==-1) {
5551 emit_loadreg(CCREG,cc=HOST_CCREG);
5552 // CHECK: Is the following instruction (fall thru) allocated ok?
5553 }
5554 assert(cc==HOST_CCREG);
5555 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5556 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5557 assem_debug("cycle count (adj)\n");
5558 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5559 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5560 if(internal)
5561 assem_debug("branch: internal\n");
5562 else
5563 assem_debug("branch: external\n");
5564 if(internal&&is_ds[(ba[i]-start)>>2]) {
5565 ds_assemble_entry(i);
5566 }
5567 else {
5568 add_to_linker((int)out,ba[i],internal);
5569 emit_jmp(0);
5570 }
5571 }
5572 // branch not taken
5573 cop1_usable=prev_cop1_usable;
5574 if(!unconditional) {
5575 if(nottaken1) set_jump_target(nottaken1,(int)out);
5576 set_jump_target(nottaken,(int)out);
5577 assem_debug("2:\n");
5578 if(!likely[i]) {
5579 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5580 ds_unneeded,ds_unneeded_upper);
5581 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5582 address_generation(i+1,&branch_regs[i],0);
5583 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5584 ds_assemble(i+1,&branch_regs[i]);
5585 }
5586 cc=get_reg(branch_regs[i].regmap,CCREG);
5587 if(cc==-1&&!likely[i]) {
5588 // Cycle count isn't in a register, temporarily load it then write it out
5589 emit_loadreg(CCREG,HOST_CCREG);
5590 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5591 int jaddr=(int)out;
5592 emit_jns(0);
5593 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5594 emit_storereg(CCREG,HOST_CCREG);
5595 }
5596 else{
5597 cc=get_reg(i_regmap,CCREG);
5598 assert(cc==HOST_CCREG);
5599 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5600 int jaddr=(int)out;
5601 emit_jns(0);
5602 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5603 }
5604 }
5605 }
5606}
5607
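// Editorial note: REGIMM branches (BLTZ/BGEZ/BLTZAL/BGEZAL and the likely
// variants).  The structure mirrors cjump_assemble(), except that the AL
// forms write the return address to $31 even when the branch is not taken.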
5608void sjump_assemble(int i,struct regstat *i_regs)
5609{
5610 signed char *i_regmap=i_regs->regmap;
5611 int cc;
5612 int match;
5613 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5614 assem_debug("smatch=%d\n",match);
5615 int s1h,s1l;
5616 int prev_cop1_usable=cop1_usable;
5617 int unconditional=0,nevertaken=0;
5618 int only32=0;
5619 int ooo=1;
5620 int invert=0;
5621 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5622 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5623 if(likely[i]) ooo=0;
5624 if(!match) invert=1;
5625 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5626 if(i>(ba[i]-start)>>2) invert=1;
5627 #endif
5628
5629 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
df894a3a 5630 //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
57871462 5631
5632 if(ooo)
5633 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5634 {
5635 // Write-after-read dependency prevents out of order execution
5636 // First test branch condition, then execute delay slot, then branch
5637 ooo=0;
5638 }
df894a3a 5639 assert(opcode2[i]<0x10||ooo); // FIXME (BxxZALL)
57871462 5640
5641 if(ooo) {
5642 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5643 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5644 }
5645 else {
5646 s1l=get_reg(i_regmap,rs1[i]);
5647 s1h=get_reg(i_regmap,rs1[i]|64);
5648 }
5649 if(rs1[i]==0)
5650 {
5651 if(opcode2[i]&1) unconditional=1;
5652 else nevertaken=1;
5653 // These are never taken (r0 is never less than zero)
5654 //assert(opcode2[i]!=0);
5655 //assert(opcode2[i]!=2);
5656 //assert(opcode2[i]!=0x10);
5657 //assert(opcode2[i]!=0x12);
5658 }
5659 else {
5660 only32=(regs[i].was32>>rs1[i])&1;
5661 }
5662
5663 if(ooo) {
5664 // Out of order execution (delay slot first)
5665 //printf("OOOE\n");
5666 address_generation(i+1,i_regs,regs[i].regmap_entry);
5667 ds_assemble(i+1,i_regs);
5668 int adj;
5669 uint64_t bc_unneeded=branch_regs[i].u;
5670 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5671 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5672 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5673 bc_unneeded|=1;
5674 bc_unneeded_upper|=1;
5675 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5676 bc_unneeded,bc_unneeded_upper);
5677 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5678 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5679 if(rt1[i]==31) {
5680 int rt,return_address;
5681 assert(rt1[i+1]!=31);
5682 assert(rt2[i+1]!=31);
5683 rt=get_reg(branch_regs[i].regmap,31);
5684 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5685 if(rt>=0) {
5686 // Save the PC even if the branch is not taken
5687 return_address=start+i*4+8;
5688 emit_movimm(return_address,rt); // PC into link register
5689 #ifdef IMM_PREFETCH
5690 if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5691 #endif
5692 }
5693 }
5694 cc=get_reg(branch_regs[i].regmap,CCREG);
5695 assert(cc==HOST_CCREG);
5696 if(unconditional)
5697 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5698 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5699 assem_debug("cycle count (adj)\n");
5700 if(unconditional) {
5701 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5702 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5703 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5704 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5705 if(internal)
5706 assem_debug("branch: internal\n");
5707 else
5708 assem_debug("branch: external\n");
5709 if(internal&&is_ds[(ba[i]-start)>>2]) {
5710 ds_assemble_entry(i);
5711 }
5712 else {
5713 add_to_linker((int)out,ba[i],internal);
5714 emit_jmp(0);
5715 }
5716 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5717 if(((u_int)out)&7) emit_addnop(0);
5718 #endif
5719 }
5720 }
5721 else if(nevertaken) {
5722 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5723 int jaddr=(int)out;
5724 emit_jns(0);
5725 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5726 }
5727 else {
5728 int nottaken=0;
5729 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5730 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5731 if(!only32)
5732 {
5733 assert(s1h>=0);
df894a3a 5734 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5735 {
5736 emit_test(s1h,s1h);
5737 if(invert){
5738 nottaken=(int)out;
5739 emit_jns(1);
5740 }else{
5741 add_to_linker((int)out,ba[i],internal);
5742 emit_js(0);
5743 }
5744 }
df894a3a 5745      if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5746 {
5747 emit_test(s1h,s1h);
5748 if(invert){
5749 nottaken=(int)out;
5750 emit_js(1);
5751 }else{
5752 add_to_linker((int)out,ba[i],internal);
5753 emit_jns(0);
5754 }
5755 }
5756 } // if(!only32)
5757 else
5758 {
5759 assert(s1l>=0);
df894a3a 5760 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5761 {
5762 emit_test(s1l,s1l);
5763 if(invert){
5764 nottaken=(int)out;
5765 emit_jns(1);
5766 }else{
5767 add_to_linker((int)out,ba[i],internal);
5768 emit_js(0);
5769 }
5770 }
df894a3a 5771      if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5772 {
5773 emit_test(s1l,s1l);
5774 if(invert){
5775 nottaken=(int)out;
5776 emit_js(1);
5777 }else{
5778 add_to_linker((int)out,ba[i],internal);
5779 emit_jns(0);
5780 }
5781 }
5782 } // if(!only32)
5783
5784 if(invert) {
5785 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5786 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5787 if(adj) {
5788 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5789 add_to_linker((int)out,ba[i],internal);
5790 }else{
5791 emit_addnop(13);
5792 add_to_linker((int)out,ba[i],internal*2);
5793 }
5794 emit_jmp(0);
5795 }else
5796 #endif
5797 {
5798 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5799 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5800 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5801 if(internal)
5802 assem_debug("branch: internal\n");
5803 else
5804 assem_debug("branch: external\n");
5805 if(internal&&is_ds[(ba[i]-start)>>2]) {
5806 ds_assemble_entry(i);
5807 }
5808 else {
5809 add_to_linker((int)out,ba[i],internal);
5810 emit_jmp(0);
5811 }
5812 }
5813 set_jump_target(nottaken,(int)out);
5814 }
5815
5816 if(adj) {
5817 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5818 }
5819 } // (!unconditional)
5820 } // if(ooo)
5821 else
5822 {
5823 // In-order execution (branch first)
5824 //printf("IOE\n");
5825 int nottaken=0;
5826 if(!unconditional) {
5827 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5828 if(!only32)
5829 {
5830 assert(s1h>=0);
5831 if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5832 {
5833 emit_test(s1h,s1h);
5834 nottaken=(int)out;
5835 emit_jns(1);
5836 }
5837 if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5838 {
5839 emit_test(s1h,s1h);
5840 nottaken=(int)out;
5841 emit_js(1);
5842 }
5843 } // if(!only32)
5844 else
5845 {
5846 assert(s1l>=0);
5847 if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5848 {
5849 emit_test(s1l,s1l);
5850 nottaken=(int)out;
5851 emit_jns(1);
5852 }
5853 if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5854 {
5855 emit_test(s1l,s1l);
5856 nottaken=(int)out;
5857 emit_js(1);
5858 }
5859 }
5860 } // if(!unconditional)
5861 int adj;
5862 uint64_t ds_unneeded=branch_regs[i].u;
5863 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5864 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5865 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5866 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5867 ds_unneeded|=1;
5868 ds_unneeded_upper|=1;
5869 // branch taken
5870 if(!nevertaken) {
5871 //assem_debug("1:\n");
5872 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5873 ds_unneeded,ds_unneeded_upper);
5874 // load regs
5875 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5876 address_generation(i+1,&branch_regs[i],0);
5877 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5878 ds_assemble(i+1,&branch_regs[i]);
5879 cc=get_reg(branch_regs[i].regmap,CCREG);
5880 if(cc==-1) {
5881 emit_loadreg(CCREG,cc=HOST_CCREG);
5882 // CHECK: Is the following instruction (fall thru) allocated ok?
5883 }
5884 assert(cc==HOST_CCREG);
5885 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5886 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5887 assem_debug("cycle count (adj)\n");
5888 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5889 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5890 if(internal)
5891 assem_debug("branch: internal\n");
5892 else
5893 assem_debug("branch: external\n");
5894 if(internal&&is_ds[(ba[i]-start)>>2]) {
5895 ds_assemble_entry(i);
5896 }
5897 else {
5898 add_to_linker((int)out,ba[i],internal);
5899 emit_jmp(0);
5900 }
5901 }
5902 // branch not taken
5903 cop1_usable=prev_cop1_usable;
5904 if(!unconditional) {
5905 set_jump_target(nottaken,(int)out);
5906 assem_debug("1:\n");
5907 if(!likely[i]) {
5908 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5909 ds_unneeded,ds_unneeded_upper);
5910 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5911 address_generation(i+1,&branch_regs[i],0);
5912 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5913 ds_assemble(i+1,&branch_regs[i]);
5914 }
5915 cc=get_reg(branch_regs[i].regmap,CCREG);
5916 if(cc==-1&&!likely[i]) {
5917 // Cycle count isn't in a register, temporarily load it then write it out
5918 emit_loadreg(CCREG,HOST_CCREG);
5919 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5920 int jaddr=(int)out;
5921 emit_jns(0);
5922 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5923 emit_storereg(CCREG,HOST_CCREG);
5924 }
5925 else{
5926 cc=get_reg(i_regmap,CCREG);
5927 assert(cc==HOST_CCREG);
5928 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5929 int jaddr=(int)out;
5930 emit_jns(0);
5931 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5932 }
5933 }
5934 }
5935}
5936
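// fjump_assemble emits the FP condition branches (BC1F/BC1T and their
// "likely" forms): the condition is bit 0x800000 of the FP status word held
// in FSREG, bit 16 of the instruction selects BC1T vs BC1F, and the
// cop1-usable check and delay-slot handling follow the same pattern as the
// integer branch assemblers above.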
5937void fjump_assemble(int i,struct regstat *i_regs)
5938{
5939 signed char *i_regmap=i_regs->regmap;
5940 int cc;
5941 int match;
5942 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5943 assem_debug("fmatch=%d\n",match);
5944 int fs,cs;
5945 int eaddr;
5946 int ooo=1;
5947 int invert=0;
5948 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5949 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5950 if(likely[i]) ooo=0;
5951 if(!match) invert=1;
5952 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5953 if(i>(ba[i]-start)>>2) invert=1;
5954 #endif
5955
5956 if(ooo)
5957 if(itype[i+1]==FCOMP)
5958 {
5959 // Write-after-read dependency prevents out of order execution
5960 // First test branch condition, then execute delay slot, then branch
5961 ooo=0;
5962 }
5963
5964 if(ooo) {
5965 fs=get_reg(branch_regs[i].regmap,FSREG);
5966 address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5967 }
5968 else {
5969 fs=get_reg(i_regmap,FSREG);
5970 }
5971
5972 // Check cop1 unusable
5973 if(!cop1_usable) {
5974 cs=get_reg(i_regmap,CSREG);
5975 assert(cs>=0);
5976 emit_testimm(cs,0x20000000);
5977 eaddr=(int)out;
5978 emit_jeq(0);
5979 add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5980 cop1_usable=1;
5981 }
5982
5983 if(ooo) {
5984 // Out of order execution (delay slot first)
5985 //printf("OOOE\n");
5986 ds_assemble(i+1,i_regs);
5987 int adj;
5988 uint64_t bc_unneeded=branch_regs[i].u;
5989 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5990 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5991 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5992 bc_unneeded|=1;
5993 bc_unneeded_upper|=1;
5994 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5995 bc_unneeded,bc_unneeded_upper);
5996 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5997 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5998 cc=get_reg(branch_regs[i].regmap,CCREG);
5999 assert(cc==HOST_CCREG);
6000 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6001 assem_debug("cycle count (adj)\n");
6002 if(1) {
6003 int nottaken=0;
6004 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6005 if(1) {
6006 assert(fs>=0);
6007 emit_testimm(fs,0x800000);
6008 if(source[i]&0x10000) // BC1T
6009 {
6010 if(invert){
6011 nottaken=(int)out;
6012 emit_jeq(1);
6013 }else{
6014 add_to_linker((int)out,ba[i],internal);
6015 emit_jne(0);
6016 }
6017 }
 6018 else // BC1F
 6019 {
 6020 if(invert){
 6021 nottaken=(int)out;
 6022 emit_jne(1);
 6023 }else{
 6024 add_to_linker((int)out,ba[i],internal);
 6025 emit_jeq(0);
 6026 }
 6027 }
6028 } // if(!only32)
6029
6030 if(invert) {
6031 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6032 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6033 else if(match) emit_addnop(13);
6034 #endif
6035 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6036 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6037 if(internal)
6038 assem_debug("branch: internal\n");
6039 else
6040 assem_debug("branch: external\n");
6041 if(internal&&is_ds[(ba[i]-start)>>2]) {
6042 ds_assemble_entry(i);
6043 }
6044 else {
6045 add_to_linker((int)out,ba[i],internal);
6046 emit_jmp(0);
6047 }
6048 set_jump_target(nottaken,(int)out);
6049 }
6050
6051 if(adj) {
6052 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6053 }
6054 } // (!unconditional)
6055 } // if(ooo)
6056 else
6057 {
6058 // In-order execution (branch first)
6059 //printf("IOE\n");
6060 int nottaken=0;
6061 if(1) {
6062 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6063 if(1) {
6064 assert(fs>=0);
6065 emit_testimm(fs,0x800000);
6066 if(source[i]&0x10000) // BC1T
6067 {
6068 nottaken=(int)out;
6069 emit_jeq(1);
6070 }
6071 else // BC1F
6072 {
6073 nottaken=(int)out;
6074 emit_jne(1);
6075 }
6076 }
6077 } // if(!unconditional)
6078 int adj;
6079 uint64_t ds_unneeded=branch_regs[i].u;
6080 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6081 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6082 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6083 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6084 ds_unneeded|=1;
6085 ds_unneeded_upper|=1;
6086 // branch taken
6087 //assem_debug("1:\n");
6088 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6089 ds_unneeded,ds_unneeded_upper);
6090 // load regs
6091 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6092 address_generation(i+1,&branch_regs[i],0);
6093 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6094 ds_assemble(i+1,&branch_regs[i]);
6095 cc=get_reg(branch_regs[i].regmap,CCREG);
6096 if(cc==-1) {
6097 emit_loadreg(CCREG,cc=HOST_CCREG);
6098 // CHECK: Is the following instruction (fall thru) allocated ok?
6099 }
6100 assert(cc==HOST_CCREG);
6101 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6102 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6103 assem_debug("cycle count (adj)\n");
6104 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6105 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6106 if(internal)
6107 assem_debug("branch: internal\n");
6108 else
6109 assem_debug("branch: external\n");
6110 if(internal&&is_ds[(ba[i]-start)>>2]) {
6111 ds_assemble_entry(i);
6112 }
6113 else {
6114 add_to_linker((int)out,ba[i],internal);
6115 emit_jmp(0);
6116 }
6117
6118 // branch not taken
6119 if(1) { // <- FIXME (don't need this)
6120 set_jump_target(nottaken,(int)out);
6121 assem_debug("1:\n");
6122 if(!likely[i]) {
6123 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6124 ds_unneeded,ds_unneeded_upper);
6125 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6126 address_generation(i+1,&branch_regs[i],0);
6127 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6128 ds_assemble(i+1,&branch_regs[i]);
6129 }
6130 cc=get_reg(branch_regs[i].regmap,CCREG);
6131 if(cc==-1&&!likely[i]) {
6132 // Cycle count isn't in a register, temporarily load it then write it out
6133 emit_loadreg(CCREG,HOST_CCREG);
6134 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6135 int jaddr=(int)out;
6136 emit_jns(0);
6137 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6138 emit_storereg(CCREG,HOST_CCREG);
6139 }
6140 else{
6141 cc=get_reg(i_regmap,CCREG);
6142 assert(cc==HOST_CCREG);
6143 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6144 int jaddr=(int)out;
6145 emit_jns(0);
6146 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6147 }
6148 }
6149 }
6150}
6151
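// pagespan_assemble handles a branch whose delay slot falls outside the
// current page/block: the chosen target address is left in HOST_BTREG and
// the block exits through an external jump, while the delay slot itself is
// assembled separately by pagespan_ds() below.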
6152static void pagespan_assemble(int i,struct regstat *i_regs)
6153{
6154 int s1l=get_reg(i_regs->regmap,rs1[i]);
6155 int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6156 int s2l=get_reg(i_regs->regmap,rs2[i]);
6157 int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6158 void *nt_branch=NULL;
6159 int taken=0;
6160 int nottaken=0;
6161 int unconditional=0;
6162 if(rs1[i]==0)
6163 {
6164 s1l=s2l;s1h=s2h;
6165 s2l=s2h=-1;
6166 }
6167 else if(rs2[i]==0)
6168 {
6169 s2l=s2h=-1;
6170 }
6171 if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6172 s1h=s2h=-1;
6173 }
6174 int hr=0;
6175 int addr,alt,ntaddr;
6176 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6177 else {
6178 while(hr<HOST_REGS)
6179 {
6180 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6181 (i_regs->regmap[hr]&63)!=rs1[i] &&
6182 (i_regs->regmap[hr]&63)!=rs2[i] )
6183 {
6184 addr=hr++;break;
6185 }
6186 hr++;
6187 }
6188 }
6189 while(hr<HOST_REGS)
6190 {
6191 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6192 (i_regs->regmap[hr]&63)!=rs1[i] &&
6193 (i_regs->regmap[hr]&63)!=rs2[i] )
6194 {
6195 alt=hr++;break;
6196 }
6197 hr++;
6198 }
6199 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6200 {
6201 while(hr<HOST_REGS)
6202 {
6203 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6204 (i_regs->regmap[hr]&63)!=rs1[i] &&
6205 (i_regs->regmap[hr]&63)!=rs2[i] )
6206 {
6207 ntaddr=hr;break;
6208 }
6209 hr++;
6210 }
6211 }
6212 assert(hr<HOST_REGS);
6213 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6214 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6215 }
6216 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6217 if(opcode[i]==2) // J
6218 {
6219 unconditional=1;
6220 }
6221 if(opcode[i]==3) // JAL
6222 {
6223 // TODO: mini_ht
6224 int rt=get_reg(i_regs->regmap,31);
6225 emit_movimm(start+i*4+8,rt);
6226 unconditional=1;
6227 }
6228 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6229 {
6230 emit_mov(s1l,addr);
6231 if(opcode2[i]==9) // JALR
6232 {
5067f341 6233 int rt=get_reg(i_regs->regmap,rt1[i]);
57871462 6234 emit_movimm(start+i*4+8,rt);
6235 }
6236 }
6237 if((opcode[i]&0x3f)==4) // BEQ
6238 {
6239 if(rs1[i]==rs2[i])
6240 {
6241 unconditional=1;
6242 }
6243 else
6244 #ifdef HAVE_CMOV_IMM
6245 if(s1h<0) {
6246 if(s2l>=0) emit_cmp(s1l,s2l);
6247 else emit_test(s1l,s1l);
6248 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6249 }
6250 else
6251 #endif
6252 {
6253 assert(s1l>=0);
6254 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6255 if(s1h>=0) {
6256 if(s2h>=0) emit_cmp(s1h,s2h);
6257 else emit_test(s1h,s1h);
6258 emit_cmovne_reg(alt,addr);
6259 }
6260 if(s2l>=0) emit_cmp(s1l,s2l);
6261 else emit_test(s1l,s1l);
6262 emit_cmovne_reg(alt,addr);
6263 }
6264 }
6265 if((opcode[i]&0x3f)==5) // BNE
6266 {
6267 #ifdef HAVE_CMOV_IMM
6268 if(s1h<0) {
6269 if(s2l>=0) emit_cmp(s1l,s2l);
6270 else emit_test(s1l,s1l);
6271 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6272 }
6273 else
6274 #endif
6275 {
6276 assert(s1l>=0);
6277 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6278 if(s1h>=0) {
6279 if(s2h>=0) emit_cmp(s1h,s2h);
6280 else emit_test(s1h,s1h);
6281 emit_cmovne_reg(alt,addr);
6282 }
6283 if(s2l>=0) emit_cmp(s1l,s2l);
6284 else emit_test(s1l,s1l);
6285 emit_cmovne_reg(alt,addr);
6286 }
6287 }
6288 if((opcode[i]&0x3f)==0x14) // BEQL
6289 {
6290 if(s1h>=0) {
6291 if(s2h>=0) emit_cmp(s1h,s2h);
6292 else emit_test(s1h,s1h);
6293 nottaken=(int)out;
6294 emit_jne(0);
6295 }
6296 if(s2l>=0) emit_cmp(s1l,s2l);
6297 else emit_test(s1l,s1l);
6298 if(nottaken) set_jump_target(nottaken,(int)out);
6299 nottaken=(int)out;
6300 emit_jne(0);
6301 }
6302 if((opcode[i]&0x3f)==0x15) // BNEL
6303 {
6304 if(s1h>=0) {
6305 if(s2h>=0) emit_cmp(s1h,s2h);
6306 else emit_test(s1h,s1h);
6307 taken=(int)out;
6308 emit_jne(0);
6309 }
6310 if(s2l>=0) emit_cmp(s1l,s2l);
6311 else emit_test(s1l,s1l);
6312 nottaken=(int)out;
6313 emit_jeq(0);
6314 if(taken) set_jump_target(taken,(int)out);
6315 }
6316 if((opcode[i]&0x3f)==6) // BLEZ
6317 {
6318 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6319 emit_cmpimm(s1l,1);
6320 if(s1h>=0) emit_mov(addr,ntaddr);
6321 emit_cmovl_reg(alt,addr);
6322 if(s1h>=0) {
6323 emit_test(s1h,s1h);
6324 emit_cmovne_reg(ntaddr,addr);
6325 emit_cmovs_reg(alt,addr);
6326 }
6327 }
6328 if((opcode[i]&0x3f)==7) // BGTZ
6329 {
6330 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6331 emit_cmpimm(s1l,1);
6332 if(s1h>=0) emit_mov(addr,alt);
6333 emit_cmovl_reg(ntaddr,addr);
6334 if(s1h>=0) {
6335 emit_test(s1h,s1h);
6336 emit_cmovne_reg(alt,addr);
6337 emit_cmovs_reg(ntaddr,addr);
6338 }
6339 }
6340 if((opcode[i]&0x3f)==0x16) // BLEZL
6341 {
6342 assert((opcode[i]&0x3f)!=0x16);
6343 }
6344 if((opcode[i]&0x3f)==0x17) // BGTZL
6345 {
6346 assert((opcode[i]&0x3f)!=0x17);
6347 }
6348 assert(opcode[i]!=1); // BLTZ/BGEZ
6349
6350 //FIXME: Check CSREG
6351 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6352 if((source[i]&0x30000)==0) // BC1F
6353 {
6354 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6355 emit_testimm(s1l,0x800000);
6356 emit_cmovne_reg(alt,addr);
6357 }
6358 if((source[i]&0x30000)==0x10000) // BC1T
6359 {
6360 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6361 emit_testimm(s1l,0x800000);
6362 emit_cmovne_reg(alt,addr);
6363 }
6364 if((source[i]&0x30000)==0x20000) // BC1FL
6365 {
6366 emit_testimm(s1l,0x800000);
6367 nottaken=(int)out;
6368 emit_jne(0);
6369 }
6370 if((source[i]&0x30000)==0x30000) // BC1TL
6371 {
6372 emit_testimm(s1l,0x800000);
6373 nottaken=(int)out;
6374 emit_jeq(0);
6375 }
6376 }
6377
6378 assert(i_regs->regmap[HOST_CCREG]==CCREG);
6379 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6380 if(likely[i]||unconditional)
6381 {
6382 emit_movimm(ba[i],HOST_BTREG);
6383 }
6384 else if(addr!=HOST_BTREG)
6385 {
6386 emit_mov(addr,HOST_BTREG);
6387 }
6388 void *branch_addr=out;
6389 emit_jmp(0);
6390 int target_addr=start+i*4+5;
6391 void *stub=out;
6392 void *compiled_target_addr=check_addr(target_addr);
6393 emit_extjump_ds((int)branch_addr,target_addr);
6394 if(compiled_target_addr) {
6395 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6396 add_link(target_addr,stub);
6397 }
6398 else set_jump_target((int)branch_addr,(int)stub);
6399 if(likely[i]) {
6400 // Not-taken path
6401 set_jump_target((int)nottaken,(int)out);
6402 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6403 void *branch_addr=out;
6404 emit_jmp(0);
6405 int target_addr=start+i*4+8;
6406 void *stub=out;
6407 void *compiled_target_addr=check_addr(target_addr);
6408 emit_extjump_ds((int)branch_addr,target_addr);
6409 if(compiled_target_addr) {
6410 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6411 add_link(target_addr,stub);
6412 }
6413 else set_jump_target((int)branch_addr,(int)stub);
6414 }
6415}
6416
6417// Assemble the delay slot for the above
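// The entry point is registered at start+1 (an odd address no aligned MIPS
// instruction can have), presumably to keep it distinct from the normal
// block entry at 'start'; the compare of btaddr against start+4 below then
// decides whether execution can simply fall through into the next block.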
6418static void pagespan_ds()
6419{
6420 assem_debug("initial delay slot:\n");
6421 u_int vaddr=start+1;
94d23bb9 6422 u_int page=get_page(vaddr);
6423 u_int vpage=get_vpage(vaddr);
57871462 6424 ll_add(jump_dirty+vpage,vaddr,(void *)out);
6425 do_dirty_stub_ds();
6426 ll_add(jump_in+page,vaddr,(void *)out);
6427 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6428 if(regs[0].regmap[HOST_CCREG]!=CCREG)
6429 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6430 if(regs[0].regmap[HOST_BTREG]!=BTREG)
6431 emit_writeword(HOST_BTREG,(int)&branch_target);
6432 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6433 address_generation(0,&regs[0],regs[0].regmap_entry);
b9b61529 6434 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
57871462 6435 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6436 cop1_usable=0;
6437 is_delayslot=0;
6438 switch(itype[0]) {
6439 case ALU:
6440 alu_assemble(0,&regs[0]);break;
6441 case IMM16:
6442 imm16_assemble(0,&regs[0]);break;
6443 case SHIFT:
6444 shift_assemble(0,&regs[0]);break;
6445 case SHIFTIMM:
6446 shiftimm_assemble(0,&regs[0]);break;
6447 case LOAD:
6448 load_assemble(0,&regs[0]);break;
6449 case LOADLR:
6450 loadlr_assemble(0,&regs[0]);break;
6451 case STORE:
6452 store_assemble(0,&regs[0]);break;
6453 case STORELR:
6454 storelr_assemble(0,&regs[0]);break;
6455 case COP0:
6456 cop0_assemble(0,&regs[0]);break;
6457 case COP1:
6458 cop1_assemble(0,&regs[0]);break;
6459 case C1LS:
6460 c1ls_assemble(0,&regs[0]);break;
b9b61529 6461 case COP2:
6462 cop2_assemble(0,&regs[0]);break;
6463 case C2LS:
6464 c2ls_assemble(0,&regs[0]);break;
6465 case C2OP:
6466 c2op_assemble(0,&regs[0]);break;
57871462 6467 case FCONV:
6468 fconv_assemble(0,&regs[0]);break;
6469 case FLOAT:
6470 float_assemble(0,&regs[0]);break;
6471 case FCOMP:
6472 fcomp_assemble(0,&regs[0]);break;
6473 case MULTDIV:
6474 multdiv_assemble(0,&regs[0]);break;
6475 case MOV:
6476 mov_assemble(0,&regs[0]);break;
6477 case SYSCALL:
7139f3c8 6478 case HLECALL:
57871462 6479 case SPAN:
6480 case UJUMP:
6481 case RJUMP:
6482 case CJUMP:
6483 case SJUMP:
6484 case FJUMP:
6485 printf("Jump in the delay slot. This is probably a bug.\n");
6486 }
6487 int btaddr=get_reg(regs[0].regmap,BTREG);
6488 if(btaddr<0) {
6489 btaddr=get_reg(regs[0].regmap,-1);
6490 emit_readword((int)&branch_target,btaddr);
6491 }
6492 assert(btaddr!=HOST_CCREG);
6493 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6494#ifdef HOST_IMM8
6495 emit_movimm(start+4,HOST_TEMPREG);
6496 emit_cmp(btaddr,HOST_TEMPREG);
6497#else
6498 emit_cmpimm(btaddr,start+4);
6499#endif
6500 int branch=(int)out;
6501 emit_jeq(0);
6502 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6503 emit_jmp(jump_vaddr_reg[btaddr]);
6504 set_jump_target(branch,(int)out);
6505 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6506 load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6507}
6508
6509// Basic liveness analysis for MIPS registers
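// Bit r set in unneeded_reg[i] means the value register r holds when
// instruction i is reached will never be read again (unneeded_reg_upper[]
// tracks the upper 32 bits separately).  Worked example for the backwards
// scan: at "addu $t0,$a0,$a1" bit 8 ($t0) is set, because its incoming
// value is overwritten, while bits 4 and 5 ($a0,$a1) are cleared, because
// they are read here; bit 0 ($zero) is always set.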
6510void unneeded_registers(int istart,int iend,int r)
6511{
6512 int i;
6513 uint64_t u,uu,b,bu;
6514 uint64_t temp_u,temp_uu;
6515 uint64_t tdep;
6516 if(iend==slen-1) {
6517 u=1;uu=1;
6518 }else{
6519 u=unneeded_reg[iend+1];
6520 uu=unneeded_reg_upper[iend+1];
6521 u=1;uu=1;
6522 }
6523 for (i=iend;i>=istart;i--)
6524 {
6525 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6526 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6527 {
6528 // If subroutine call, flag return address as a possible branch target
6529 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6530
6531 if(ba[i]<start || ba[i]>=(start+slen*4))
6532 {
6533 // Branch out of this block, flush all regs
6534 u=1;
6535 uu=1;
6536 /* Hexagon hack
6537 if(itype[i]==UJUMP&&rt1[i]==31)
6538 {
6539 uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6540 }
6541 if(itype[i]==RJUMP&&rs1[i]==31)
6542 {
6543 uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6544 }
4cb76aa4 6545 if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
57871462 6546 if(itype[i]==UJUMP&&rt1[i]==31)
6547 {
6548 //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6549 uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6550 }
6551 if(itype[i]==RJUMP&&rs1[i]==31)
6552 {
6553 //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6554 uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6555 }
6556 }*/
6557 branch_unneeded_reg[i]=u;
6558 branch_unneeded_reg_upper[i]=uu;
6559 // Merge in delay slot
6560 tdep=(~uu>>rt1[i+1])&1;
6561 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6562 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6563 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6564 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6565 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6566 u|=1;uu|=1;
6567 // If branch is "likely" (and conditional)
6568 // then we skip the delay slot on the fall-thru path
6569 if(likely[i]) {
6570 if(i<slen-1) {
6571 u&=unneeded_reg[i+2];
6572 uu&=unneeded_reg_upper[i+2];
6573 }
6574 else
6575 {
6576 u=1;
6577 uu=1;
6578 }
6579 }
6580 }
6581 else
6582 {
6583 // Internal branch, flag target
6584 bt[(ba[i]-start)>>2]=1;
6585 if(ba[i]<=start+i*4) {
6586 // Backward branch
6587 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6588 {
6589 // Unconditional branch
6590 temp_u=1;temp_uu=1;
6591 } else {
6592 // Conditional branch (not taken case)
6593 temp_u=unneeded_reg[i+2];
6594 temp_uu=unneeded_reg_upper[i+2];
6595 }
6596 // Merge in delay slot
6597 tdep=(~temp_uu>>rt1[i+1])&1;
6598 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6599 temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6600 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6601 temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6602 temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6603 temp_u|=1;temp_uu|=1;
6604 // If branch is "likely" (and conditional)
6605 // then we skip the delay slot on the fall-thru path
6606 if(likely[i]) {
6607 if(i<slen-1) {
6608 temp_u&=unneeded_reg[i+2];
6609 temp_uu&=unneeded_reg_upper[i+2];
6610 }
6611 else
6612 {
6613 temp_u=1;
6614 temp_uu=1;
6615 }
6616 }
6617 tdep=(~temp_uu>>rt1[i])&1;
6618 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6619 temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6620 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6621 temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6622 temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6623 temp_u|=1;temp_uu|=1;
6624 unneeded_reg[i]=temp_u;
6625 unneeded_reg_upper[i]=temp_uu;
6626 // Only go three levels deep. This recursion can take an
6627 // excessive amount of time if there are a lot of nested loops.
6628 if(r<2) {
6629 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6630 }else{
6631 unneeded_reg[(ba[i]-start)>>2]=1;
6632 unneeded_reg_upper[(ba[i]-start)>>2]=1;
6633 }
6634 } /*else*/ if(1) {
6635 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6636 {
6637 // Unconditional branch
6638 u=unneeded_reg[(ba[i]-start)>>2];
6639 uu=unneeded_reg_upper[(ba[i]-start)>>2];
6640 branch_unneeded_reg[i]=u;
6641 branch_unneeded_reg_upper[i]=uu;
6642 //u=1;
6643 //uu=1;
6644 //branch_unneeded_reg[i]=u;
6645 //branch_unneeded_reg_upper[i]=uu;
6646 // Merge in delay slot
6647 tdep=(~uu>>rt1[i+1])&1;
6648 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6649 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6650 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6651 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6652 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6653 u|=1;uu|=1;
6654 } else {
6655 // Conditional branch
6656 b=unneeded_reg[(ba[i]-start)>>2];
6657 bu=unneeded_reg_upper[(ba[i]-start)>>2];
6658 branch_unneeded_reg[i]=b;
6659 branch_unneeded_reg_upper[i]=bu;
6660 //b=1;
6661 //bu=1;
6662 //branch_unneeded_reg[i]=b;
6663 //branch_unneeded_reg_upper[i]=bu;
6664 // Branch delay slot
6665 tdep=(~uu>>rt1[i+1])&1;
6666 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6667 bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6668 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6669 bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6670 bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6671 b|=1;bu|=1;
6672 // If branch is "likely" then we skip the
6673 // delay slot on the fall-thru path
6674 if(likely[i]) {
6675 u=b;
6676 uu=bu;
6677 if(i<slen-1) {
6678 u&=unneeded_reg[i+2];
6679 uu&=unneeded_reg_upper[i+2];
6680 //u=1;
6681 //uu=1;
6682 }
6683 } else {
6684 u&=b;
6685 uu&=bu;
6686 //u=1;
6687 //uu=1;
6688 }
6689 if(i<slen-1) {
6690 branch_unneeded_reg[i]&=unneeded_reg[i+2];
6691 branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6692 //branch_unneeded_reg[i]=1;
6693 //branch_unneeded_reg_upper[i]=1;
6694 } else {
6695 branch_unneeded_reg[i]=1;
6696 branch_unneeded_reg_upper[i]=1;
6697 }
6698 }
6699 }
6700 }
6701 }
7139f3c8 6702 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
57871462 6703 {
6704 // SYSCALL instruction (software interrupt)
6705 u=1;
6706 uu=1;
6707 }
6708 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6709 {
6710 // ERET instruction (return from interrupt)
6711 u=1;
6712 uu=1;
6713 }
6714 //u=uu=1; // DEBUG
6715 tdep=(~uu>>rt1[i])&1;
6716 // Written registers are unneeded
6717 u|=1LL<<rt1[i];
6718 u|=1LL<<rt2[i];
6719 uu|=1LL<<rt1[i];
6720 uu|=1LL<<rt2[i];
6721 // Accessed registers are needed
6722 u&=~(1LL<<rs1[i]);
6723 u&=~(1LL<<rs2[i]);
6724 uu&=~(1LL<<us1[i]);
6725 uu&=~(1LL<<us2[i]);
6726 // Source-target dependencies
6727 uu&=~(tdep<<dep1[i]);
6728 uu&=~(tdep<<dep2[i]);
6729 // R0 is always unneeded
6730 u|=1;uu|=1;
6731 // Save it
6732 unneeded_reg[i]=u;
6733 unneeded_reg_upper[i]=uu;
6734 /*
6735 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6736 printf("U:");
6737 int r;
6738 for(r=1;r<=CCREG;r++) {
6739 if((unneeded_reg[i]>>r)&1) {
6740 if(r==HIREG) printf(" HI");
6741 else if(r==LOREG) printf(" LO");
6742 else printf(" r%d",r);
6743 }
6744 }
6745 printf(" UU:");
6746 for(r=1;r<=CCREG;r++) {
6747 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6748 if(r==HIREG) printf(" HI");
6749 else if(r==LOREG) printf(" LO");
6750 else printf(" r%d",r);
6751 }
6752 }
6753 printf("\n");*/
6754 }
252c20fc 6755#ifdef FORCE32
6756 for (i=iend;i>=istart;i--)
6757 {
6758 unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6759 }
6760#endif
57871462 6761}
6762
6763// Identify registers which are likely to contain 32-bit values
6764// This is used to predict whether any branches will jump to a
6765// location with 64-bit values in registers.
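// A minimal illustrative sketch (hypothetical helper, not called anywhere):
// how the is32 bitmask below is updated for a move-like operation whose
// result is 32-bit exactly when its source is (e.g. ORI/XORI or MOV).
static inline uint64_t example_is32_move(uint64_t is32,int rt,int s1)
{
  uint64_t sr=(is32>>s1)&1LL; // is the source known to hold a 32-bit value?
  is32&=~(1LL<<rt);           // clear the destination's previous state
  is32|=sr<<rt;               // the destination inherits the source's state
  return is32;
}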
6766static void provisional_32bit()
6767{
6768 int i,j;
6769 uint64_t is32=1;
6770 uint64_t lastbranch=1;
6771
6772 for(i=0;i<slen;i++)
6773 {
6774 if(i>0) {
6775 if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6776 if(i>1) is32=lastbranch;
6777 else is32=1;
6778 }
6779 }
6780 if(i>1)
6781 {
6782 if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6783 if(likely[i-2]) {
6784 if(i>2) is32=lastbranch;
6785 else is32=1;
6786 }
6787 }
6788 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6789 {
6790 if(rs1[i-2]==0||rs2[i-2]==0)
6791 {
6792 if(rs1[i-2]) {
6793 is32|=1LL<<rs1[i-2];
6794 }
6795 if(rs2[i-2]) {
6796 is32|=1LL<<rs2[i-2];
6797 }
6798 }
6799 }
6800 }
6801 // If something jumps here with 64-bit values
6802 // then promote those registers to 64 bits
6803 if(bt[i])
6804 {
6805 uint64_t temp_is32=is32;
6806 for(j=i-1;j>=0;j--)
6807 {
6808 if(ba[j]==start+i*4)
6809 //temp_is32&=branch_regs[j].is32;
6810 temp_is32&=p32[j];
6811 }
6812 for(j=i;j<slen;j++)
6813 {
6814 if(ba[j]==start+i*4)
6815 temp_is32=1;
6816 }
6817 is32=temp_is32;
6818 }
6819 int type=itype[i];
6820 int op=opcode[i];
6821 int op2=opcode2[i];
6822 int rt=rt1[i];
6823 int s1=rs1[i];
6824 int s2=rs2[i];
6825 if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6826 // Branches don't write registers, consider the delay slot instead.
6827 type=itype[i+1];
6828 op=opcode[i+1];
6829 op2=opcode2[i+1];
6830 rt=rt1[i+1];
6831 s1=rs1[i+1];
6832 s2=rs2[i+1];
6833 lastbranch=is32;
6834 }
6835 switch(type) {
6836 case LOAD:
6837 if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6838 opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6839 is32&=~(1LL<<rt);
6840 else
6841 is32|=1LL<<rt;
6842 break;
6843 case STORE:
6844 case STORELR:
6845 break;
6846 case LOADLR:
6847 if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6848 if(op==0x22) is32|=1LL<<rt; // LWL
6849 break;
6850 case IMM16:
6851 if (op==0x08||op==0x09|| // ADDI/ADDIU
6852 op==0x0a||op==0x0b|| // SLTI/SLTIU
6853 op==0x0c|| // ANDI
6854 op==0x0f) // LUI
6855 {
6856 is32|=1LL<<rt;
6857 }
6858 if(op==0x18||op==0x19) { // DADDI/DADDIU
6859 is32&=~(1LL<<rt);
6860 //if(imm[i]==0)
6861 // is32|=((is32>>s1)&1LL)<<rt;
6862 }
6863 if(op==0x0d||op==0x0e) { // ORI/XORI
6864 uint64_t sr=((is32>>s1)&1LL);
6865 is32&=~(1LL<<rt);
6866 is32|=sr<<rt;
6867 }
6868 break;
6869 case UJUMP:
6870 break;
6871 case RJUMP:
6872 break;
6873 case CJUMP:
6874 break;
6875 case SJUMP:
6876 break;
6877 case FJUMP:
6878 break;
6879 case ALU:
6880 if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6881 is32|=1LL<<rt;
6882 }
6883 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6884 is32|=1LL<<rt;
6885 }
6886 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6887 uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6888 is32&=~(1LL<<rt);
6889 is32|=sr<<rt;
6890 }
6891 else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6892 if(s1==0&&s2==0) {
6893 is32|=1LL<<rt;
6894 }
6895 else if(s2==0) {
6896 uint64_t sr=((is32>>s1)&1LL);
6897 is32&=~(1LL<<rt);
6898 is32|=sr<<rt;
6899 }
6900 else if(s1==0) {
6901 uint64_t sr=((is32>>s2)&1LL);
6902 is32&=~(1LL<<rt);
6903 is32|=sr<<rt;
6904 }
6905 else {
6906 is32&=~(1LL<<rt);
6907 }
6908 }
6909 else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6910 if(s1==0&&s2==0) {
6911 is32|=1LL<<rt;
6912 }
6913 else if(s2==0) {
6914 uint64_t sr=((is32>>s1)&1LL);
6915 is32&=~(1LL<<rt);
6916 is32|=sr<<rt;
6917 }
6918 else {
6919 is32&=~(1LL<<rt);
6920 }
6921 }
6922 break;
6923 case MULTDIV:
6924 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6925 is32&=~((1LL<<HIREG)|(1LL<<LOREG));
6926 }
6927 else {
6928 is32|=(1LL<<HIREG)|(1LL<<LOREG);
6929 }
6930 break;
6931 case MOV:
6932 {
6933 uint64_t sr=((is32>>s1)&1LL);
6934 is32&=~(1LL<<rt);
6935 is32|=sr<<rt;
6936 }
6937 break;
6938 case SHIFT:
6939 if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
6940 else is32|=1LL<<rt; // SLLV/SRLV/SRAV
6941 break;
6942 case SHIFTIMM:
6943 is32|=1LL<<rt;
6944 // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
6945 if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
6946 break;
6947 case COP0:
6948 if(op2==0) is32|=1LL<<rt; // MFC0
6949 break;
6950 case COP1:
b9b61529 6951 case COP2:
57871462 6952 if(op2==0) is32|=1LL<<rt; // MFC1
6953 if(op2==1) is32&=~(1LL<<rt); // DMFC1
6954 if(op2==2) is32|=1LL<<rt; // CFC1
6955 break;
6956 case C1LS:
b9b61529 6957 case C2LS:
57871462 6958 break;
6959 case FLOAT:
6960 case FCONV:
6961 break;
6962 case FCOMP:
6963 break;
b9b61529 6964 case C2OP:
57871462 6965 case SYSCALL:
7139f3c8 6966 case HLECALL:
57871462 6967 break;
6968 default:
6969 break;
6970 }
6971 is32|=1;
6972 p32[i]=is32;
6973
6974 if(i>0)
6975 {
6976 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
6977 {
6978 if(rt1[i-1]==31) // JAL/JALR
6979 {
6980 // Subroutine call will return here, don't alloc any registers
6981 is32=1;
6982 }
6983 else if(i+1<slen)
6984 {
6985 // Internal branch will jump here, match registers to caller
6986 is32=0x3FFFFFFFFLL;
6987 }
6988 }
6989 }
6990 }
6991}
6992
6993// Identify registers which may be assumed to contain 32-bit values
6994// and where optimizations will rely on this.
6995// This is used to determine whether backward branches can safely
6996// jump to a location with 64-bit values in registers.
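// Roughly: pr32[i] collects the registers whose 32-bit (sign-extended)
// state instruction i relies on.  Scanning backwards, sources used by the
// branch delay slot and dirty 32-bit registers are added to the mask, while
// registers written by instruction i (rt1/rt2) are dropped, since their
// previous width no longer matters.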
6997static void provisional_r32()
6998{
6999 u_int r32=0;
7000 int i;
7001
7002 for (i=slen-1;i>=0;i--)
7003 {
7004 int hr;
7005 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7006 {
7007 if(ba[i]<start || ba[i]>=(start+slen*4))
7008 {
7009 // Branch out of this block, don't need anything
7010 r32=0;
7011 }
7012 else
7013 {
7014 // Internal branch
7015 // Need whatever matches the target
7016 // (and doesn't get overwritten by the delay slot instruction)
7017 r32=0;
7018 int t=(ba[i]-start)>>2;
7019 if(ba[i]>start+i*4) {
7020 // Forward branch
7021 //if(!(requires_32bit[t]&~regs[i].was32))
7022 // r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7023 if(!(pr32[t]&~regs[i].was32))
7024 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7025 }else{
7026 // Backward branch
7027 if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7028 r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7029 }
7030 }
7031 // Conditional branch may need registers for following instructions
7032 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7033 {
7034 if(i<slen-2) {
7035 //r32|=requires_32bit[i+2];
7036 r32|=pr32[i+2];
7037 r32&=regs[i].was32;
7038 // Mark this address as a branch target since it may be called
7039 // upon return from interrupt
7040 //bt[i+2]=1;
7041 }
7042 }
7043 // Merge in delay slot
7044 if(!likely[i]) {
7045 // These are overwritten unless the branch is "likely"
7046 // and the delay slot is nullified if not taken
7047 r32&=~(1LL<<rt1[i+1]);
7048 r32&=~(1LL<<rt2[i+1]);
7049 }
7050 // Assume these are needed (delay slot)
7051 if(us1[i+1]>0)
7052 {
7053 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7054 }
7055 if(us2[i+1]>0)
7056 {
7057 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7058 }
7059 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7060 {
7061 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7062 }
7063 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7064 {
7065 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7066 }
7067 }
7139f3c8 7068 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
57871462 7069 {
7070 // SYSCALL instruction (software interrupt)
7071 r32=0;
7072 }
7073 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7074 {
7075 // ERET instruction (return from interrupt)
7076 r32=0;
7077 }
7078 // Check 32 bits
7079 r32&=~(1LL<<rt1[i]);
7080 r32&=~(1LL<<rt2[i]);
7081 if(us1[i]>0)
7082 {
7083 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7084 }
7085 if(us2[i]>0)
7086 {
7087 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7088 }
7089 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7090 {
7091 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7092 }
7093 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7094 {
7095 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7096 }
7097 //requires_32bit[i]=r32;
7098 pr32[i]=r32;
7099
7100 // Dirty registers which are 32-bit, require 32-bit input
7101 // as they will be written as 32-bit values
7102 for(hr=0;hr<HOST_REGS;hr++)
7103 {
7104 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7105 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7106 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7107 pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7108 //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7109 }
7110 }
7111 }
7112 }
7113}
7114
7115// Write back dirty registers as soon as we will no longer modify them,
7116// so that we don't end up with lots of writes at the branches.
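// Roughly: walking backwards, two host-register bitmaps are kept per
// instruction -- will_dirty for registers that are still going to be
// written before the block is left, wont_dirty for registers that will not
// be written again -- merged across delay slots and branch targets and used
// below to trim regs[i].dirty and regs[i].wasdirty.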
7117void clean_registers(int istart,int iend,int wr)
7118{
7119 int i;
7120 int r;
7121 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7122 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7123 if(iend==slen-1) {
7124 will_dirty_i=will_dirty_next=0;
7125 wont_dirty_i=wont_dirty_next=0;
7126 }else{
7127 will_dirty_i=will_dirty_next=will_dirty[iend+1];
7128 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7129 }
7130 for (i=iend;i>=istart;i--)
7131 {
7132 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7133 {
7134 if(ba[i]<start || ba[i]>=(start+slen*4))
7135 {
7136 // Branch out of this block, flush all regs
7137 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7138 {
7139 // Unconditional branch
7140 will_dirty_i=0;
7141 wont_dirty_i=0;
7142 // Merge in delay slot (will dirty)
7143 for(r=0;r<HOST_REGS;r++) {
7144 if(r!=EXCLUDE_REG) {
7145 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7146 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7147 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7148 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7149 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7150 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7151 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7152 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7153 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7154 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7155 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7156 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7157 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7158 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7159 }
7160 }
7161 }
7162 else
7163 {
7164 // Conditional branch
7165 will_dirty_i=0;
7166 wont_dirty_i=wont_dirty_next;
7167 // Merge in delay slot (will dirty)
7168 for(r=0;r<HOST_REGS;r++) {
7169 if(r!=EXCLUDE_REG) {
7170 if(!likely[i]) {
7171 // Might not dirty if likely branch is not taken
7172 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7173 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7174 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7175 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7176 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7177 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7178 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7179 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7180 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7181 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7182 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7183 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7184 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7185 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7186 }
7187 }
7188 }
7189 }
7190 // Merge in delay slot (wont dirty)
7191 for(r=0;r<HOST_REGS;r++) {
7192 if(r!=EXCLUDE_REG) {
7193 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7194 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7195 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7196 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7197 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7198 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7199 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7200 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7201 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7202 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7203 }
7204 }
7205 if(wr) {
7206 #ifndef DESTRUCTIVE_WRITEBACK
7207 branch_regs[i].dirty&=wont_dirty_i;
7208 #endif
7209 branch_regs[i].dirty|=will_dirty_i;
7210 }
7211 }
7212 else
7213 {
7214 // Internal branch
7215 if(ba[i]<=start+i*4) {
7216 // Backward branch
7217 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7218 {
7219 // Unconditional branch
7220 temp_will_dirty=0;
7221 temp_wont_dirty=0;
7222 // Merge in delay slot (will dirty)
7223 for(r=0;r<HOST_REGS;r++) {
7224 if(r!=EXCLUDE_REG) {
7225 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7226 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7227 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7228 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7229 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7230 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7231 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7232 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7233 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7234 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7235 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7236 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7237 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7238 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7239 }
7240 }
7241 } else {
7242 // Conditional branch (not taken case)
7243 temp_will_dirty=will_dirty_next;
7244 temp_wont_dirty=wont_dirty_next;
7245 // Merge in delay slot (will dirty)
7246 for(r=0;r<HOST_REGS;r++) {
7247 if(r!=EXCLUDE_REG) {
7248 if(!likely[i]) {
7249 // Will not dirty if likely branch is not taken
7250 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7251 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7252 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7253 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7254 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7255 if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7256 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7257 //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7258 //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7259 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7260 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7261 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7262 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7263 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7264 }
7265 }
7266 }
7267 }
7268 // Merge in delay slot (wont dirty)
7269 for(r=0;r<HOST_REGS;r++) {
7270 if(r!=EXCLUDE_REG) {
7271 if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7272 if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7273 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7274 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7275 if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7276 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7277 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7278 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7279 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7280 if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7281 }
7282 }
7283 // Deal with changed mappings
7284 if(i<iend) {
7285 for(r=0;r<HOST_REGS;r++) {
7286 if(r!=EXCLUDE_REG) {
7287 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7288 temp_will_dirty&=~(1<<r);
7289 temp_wont_dirty&=~(1<<r);
7290 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7291 temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7292 temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7293 } else {
7294 temp_will_dirty|=1<<r;
7295 temp_wont_dirty|=1<<r;
7296 }
7297 }
7298 }
7299 }
7300 }
7301 if(wr) {
7302 will_dirty[i]=temp_will_dirty;
7303 wont_dirty[i]=temp_wont_dirty;
7304 clean_registers((ba[i]-start)>>2,i-1,0);
7305 }else{
7306 // Limit recursion. It can take an excessive amount
7307 // of time if there are a lot of nested loops.
7308 will_dirty[(ba[i]-start)>>2]=0;
7309 wont_dirty[(ba[i]-start)>>2]=-1;
7310 }
7311 }
7312 /*else*/ if(1)
7313 {
7314 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7315 {
7316 // Unconditional branch
7317 will_dirty_i=0;
7318 wont_dirty_i=0;
7319 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7320 for(r=0;r<HOST_REGS;r++) {
7321 if(r!=EXCLUDE_REG) {
7322 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7323 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7324 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7325 }
7326 }
7327 }
7328 //}
7329 // Merge in delay slot
7330 for(r=0;r<HOST_REGS;r++) {
7331 if(r!=EXCLUDE_REG) {
7332 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7333 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7334 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7335 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7336 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7337 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7338 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7339 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7340 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7341 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7342 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7343 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7344 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7345 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7346 }
7347 }
7348 } else {
7349 // Conditional branch
7350 will_dirty_i=will_dirty_next;
7351 wont_dirty_i=wont_dirty_next;
7352 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7353 for(r=0;r<HOST_REGS;r++) {
7354 if(r!=EXCLUDE_REG) {
7355 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7356 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7357 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7358 }
7359 else
7360 {
7361 will_dirty_i&=~(1<<r);
7362 }
7363 // Treat delay slot as part of branch too
7364 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7365 will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7366 wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7367 }
7368 else
7369 {
7370 will_dirty[i+1]&=~(1<<r);
7371 }*/
7372 }
7373 }
7374 //}
7375 // Merge in delay slot
7376 for(r=0;r<HOST_REGS;r++) {
7377 if(r!=EXCLUDE_REG) {
7378 if(!likely[i]) {
7379 // Might not dirty if likely branch is not taken
7380 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7381 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7382 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7383 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7384 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7385 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7386 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7387 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7388 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7389 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7390 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7391 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7392 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7393 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7394 }
7395 }
7396 }
7397 }
7398 // Merge in delay slot
7399 for(r=0;r<HOST_REGS;r++) {
7400 if(r!=EXCLUDE_REG) {
7401 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7402 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7403 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7404 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7405 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7406 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7407 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7408 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7409 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7410 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7411 }
7412 }
7413 if(wr) {
7414 #ifndef DESTRUCTIVE_WRITEBACK
7415 branch_regs[i].dirty&=wont_dirty_i;
7416 #endif
7417 branch_regs[i].dirty|=will_dirty_i;
7418 }
7419 }
7420 }
7421 }
7139f3c8 7422 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
57871462 7423 {
7424 // SYSCALL instruction (software interrupt)
7425 will_dirty_i=0;
7426 wont_dirty_i=0;
7427 }
7428 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7429 {
7430 // ERET instruction (return from interrupt)
7431 will_dirty_i=0;
7432 wont_dirty_i=0;
7433 }
7434 will_dirty_next=will_dirty_i;
7435 wont_dirty_next=wont_dirty_i;
7436 for(r=0;r<HOST_REGS;r++) {
7437 if(r!=EXCLUDE_REG) {
7438 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7439 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7440 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7441 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7442 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7443 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7444 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7445 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7446 if(i>istart) {
7447 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7448 {
7449 // Don't store a register immediately after writing it,
7450 // may prevent dual-issue.
7451 if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7452 if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7453 }
7454 }
7455 }
7456 }
7457 // Save it
7458 will_dirty[i]=will_dirty_i;
7459 wont_dirty[i]=wont_dirty_i;
7460 // Mark registers that won't be dirtied as not dirty
7461 if(wr) {
7462 /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7463 for(r=0;r<HOST_REGS;r++) {
7464 if((will_dirty_i>>r)&1) {
7465 printf(" r%d",r);
7466 }
7467 }
7468 printf("\n");*/
7469
7470 //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7471 regs[i].dirty|=will_dirty_i;
7472 #ifndef DESTRUCTIVE_WRITEBACK
7473 regs[i].dirty&=wont_dirty_i;
7474 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7475 {
7476 if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7477 for(r=0;r<HOST_REGS;r++) {
7478 if(r!=EXCLUDE_REG) {
7479 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7480 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
 7481 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*//*assert(!((wont_dirty_i>>r)&1));*/}
7482 }
7483 }
7484 }
7485 }
7486 else
7487 {
7488 if(i<iend) {
7489 for(r=0;r<HOST_REGS;r++) {
7490 if(r!=EXCLUDE_REG) {
7491 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7492 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
 7493 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*//*assert(!((wont_dirty_i>>r)&1));*/}
7494 }
7495 }
7496 }
7497 }
7498 #endif
7499 //}
7500 }
7501 // Deal with changed mappings
7502 temp_will_dirty=will_dirty_i;
7503 temp_wont_dirty=wont_dirty_i;
7504 for(r=0;r<HOST_REGS;r++) {
7505 if(r!=EXCLUDE_REG) {
7506 int nr;
7507 if(regs[i].regmap[r]==regmap_pre[i][r]) {
7508 if(wr) {
7509 #ifndef DESTRUCTIVE_WRITEBACK
7510 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7511 #endif
7512 regs[i].wasdirty|=will_dirty_i&(1<<r);
7513 }
7514 }
7515 else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7516 // Register moved to a different register
7517 will_dirty_i&=~(1<<r);
7518 wont_dirty_i&=~(1<<r);
7519 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7520 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7521 if(wr) {
7522 #ifndef DESTRUCTIVE_WRITEBACK
7523 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7524 #endif
7525 regs[i].wasdirty|=will_dirty_i&(1<<r);
7526 }
7527 }
7528 else {
7529 will_dirty_i&=~(1<<r);
7530 wont_dirty_i&=~(1<<r);
7531 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7532 will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7533 wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7534 } else {
7535 wont_dirty_i|=1<<r;
 7536 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*//*assert(!((will_dirty>>r)&1));*/
7537 }
7538 }
7539 }
7540 }
7541 }
7542}
7543
7544 /* disassembly */
7545void disassemble_inst(int i)
7546{
7547 if (bt[i]) printf("*"); else printf(" ");
7548 switch(itype[i]) {
7549 case UJUMP:
7550 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7551 case CJUMP:
7552 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7553 case SJUMP:
7554 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7555 case FJUMP:
7556 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7557 case RJUMP:
74426039 7558 if (opcode[i]==0x9&&rt1[i]!=31)
5067f341 7559 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7560 else
7561 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7562 break;
57871462 7563 case SPAN:
7564 printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7565 case IMM16:
7566 if(opcode[i]==0xf) //LUI
7567 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7568 else
7569 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7570 break;
7571 case LOAD:
7572 case LOADLR:
7573 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7574 break;
7575 case STORE:
7576 case STORELR:
7577 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7578 break;
7579 case ALU:
7580 case SHIFT:
7581 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7582 break;
7583 case MULTDIV:
7584 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7585 break;
7586 case SHIFTIMM:
7587 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7588 break;
7589 case MOV:
7590 if((opcode2[i]&0x1d)==0x10)
7591 printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7592 else if((opcode2[i]&0x1d)==0x11)
7593 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7594 else
7595 printf (" %x: %s\n",start+i*4,insn[i]);
7596 break;
7597 case COP0:
7598 if(opcode2[i]==0)
7599 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7600 else if(opcode2[i]==4)
7601 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7602 else printf (" %x: %s\n",start+i*4,insn[i]);
7603 break;
7604 case COP1:
7605 if(opcode2[i]<3)
7606 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7607 else if(opcode2[i]>3)
7608 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7609 else printf (" %x: %s\n",start+i*4,insn[i]);
7610 break;
b9b61529 7611 case COP2:
7612 if(opcode2[i]<3)
7613 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7614 else if(opcode2[i]>3)
7615 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7616 else printf (" %x: %s\n",start+i*4,insn[i]);
7617 break;
57871462 7618 case C1LS:
7619 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7620 break;
b9b61529 7621 case C2LS:
7622 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7623 break;
57871462 7624 default:
7625 //printf (" %s %8x\n",insn[i],source[i]);
7626 printf (" %x: %s\n",start+i*4,insn[i]);
7627 }
7628}
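/* NOTE (editorial, illustrative only, not part of the original source): once
 * pass 1 of new_recompile_block() below has filled insn[]/itype[]/ba[] for
 * slen instructions, the whole block can be listed one line per instruction,
 * with bt[i] shown as '*' to mark branch targets.  A minimal driver using
 * only globals already declared in this file might look like:
 *
 *   static void dump_block(void)
 *   {
 *     int i;
 *     for(i=0;i<slen;i++)
 *       disassemble_inst(i);   // e.g. " 8001000c: ADDIU r29,r29,-24"
 *   }
 */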
7629
7630void new_dynarec_init()
7631{
7632 printf("Init new dynarec\n");
7633 out=(u_char *)BASE_ADDR;
7634 if (mmap (out, 1<<TARGET_SIZE_2,
7635 PROT_READ | PROT_WRITE | PROT_EXEC,
7636 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7637 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
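  /* NOTE (editorial, hedged): this reserves the translation cache, a fixed
     read/write/execute region of 1<<TARGET_SIZE_2 bytes at BASE_ADDR; `out`
     advances through it as code is emitted (see the "addr -> out" debug
     print in new_recompile_block below). */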
3d624f89 7638#ifdef MUPEN64
57871462 7639 rdword=&readmem_dword;
7640 fake_pc.f.r.rs=&readmem_dword;
7641 fake_pc.f.r.rt=&readmem_dword;
7642 fake_pc.f.r.rd=&readmem_dword;
3d624f89 7643#endif
57871462 7644 int n;
7645 for(n=0x80000;n<0x80800;n++)
7646 invalid_code[n]=1;
7647 for(n=0;n<65536;n++)
7648 hash_table[n][0]=hash_table[n][2]=-1;
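  /* NOTE (editorial; structure inferred from this initialization, treat the
     details as assumptions): each hash_table bucket appears to hold two
     {vaddr tag, compiled-code pointer} pairs, with slots [0]/[2] as the tags
     (-1 meaning "empty") and [1]/[3] as the matching code pointers.  A lookup
     would hash the virtual address to one of the 65536 buckets and compare
     both tags, roughly:

       u_int *bin = hash_table[hash_of(vaddr)];  // hash_of() is hypothetical
       if (bin[0] == vaddr) return (void *)bin[1];
       if (bin[2] == vaddr) return (void *)bin[3];
  */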
7649 memset(mini_ht,-1,sizeof(mini_ht));
7650 memset(restore_candidate,0,sizeof(restore_candidate));
7651 copy=shadow;
7652 expirep=16384; // Expiry pointer, +2 blocks
7653 pending_exception=0;
7654 literalcount=0;
7655#ifdef HOST_IMM8
7656 // Copy this into local area so we don't have to put it in every literal pool
7657 invc_ptr=invalid_code;
7658#endif
7659 stop_after_jal=0;
7660 // TLB
7661 using_tlb=0;
7662 for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7663 memory_map[n]=-1;
7664 for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7665 memory_map[n]=((u_int)rdram-0x80000000)>>2;
7666 for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7667 memory_map[n]=-1;
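  /* NOTE (editorial, illustrative sketch): memory_map[] stores, per 4KB page,
     a word offset such that host = vaddr + (memory_map[vaddr>>12]<<2), with
     -1 marking an unmapped page; the same arithmetic is used when picking
     `source` in new_recompile_block() below.  A hypothetical helper:

       static void *vaddr_to_host(u_int vaddr)
       {
         int map=memory_map[vaddr>>12];
         if(map<0) return NULL;              // unmapped page
         return (void *)(vaddr+(map<<2));    // RDRAM pages use
       }                                     // ((u_int)rdram-0x80000000)>>2
  */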
24385cae 7668#ifdef MUPEN64
57871462 7669 for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7670 writemem[n] = write_nomem_new;
7671 writememb[n] = write_nomemb_new;
7672 writememh[n] = write_nomemh_new;
24385cae 7673#ifndef FORCE32
57871462 7674 writememd[n] = write_nomemd_new;
24385cae 7675#endif
57871462 7676 readmem[n] = read_nomem_new;
7677 readmemb[n] = read_nomemb_new;
7678 readmemh[n] = read_nomemh_new;
24385cae 7679#ifndef FORCE32
57871462 7680 readmemd[n] = read_nomemd_new;
24385cae 7681#endif
57871462 7682 }
7683 for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7684 writemem[n] = write_rdram_new;
7685 writememb[n] = write_rdramb_new;
7686 writememh[n] = write_rdramh_new;
24385cae 7687#ifndef FORCE32
57871462 7688 writememd[n] = write_rdramd_new;
24385cae 7689#endif
57871462 7690 }
7691 for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7692 writemem[n] = write_nomem_new;
7693 writememb[n] = write_nomemb_new;
7694 writememh[n] = write_nomemh_new;
24385cae 7695#ifndef FORCE32
57871462 7696 writememd[n] = write_nomemd_new;
24385cae 7697#endif
57871462 7698 readmem[n] = read_nomem_new;
7699 readmemb[n] = read_nomemb_new;
7700 readmemh[n] = read_nomemh_new;
24385cae 7701#ifndef FORCE32
57871462 7702 readmemd[n] = read_nomemd_new;
24385cae 7703#endif
57871462 7704 }
24385cae 7705#endif
57871462 7706 tlb_hacks();
7707 arch_init();
7708}
7709
7710void new_dynarec_cleanup()
7711{
7712 int n;
7713 if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7714 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7715 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7716 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7717 #ifdef ROM_COPY
7718 if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7719 #endif
7720}
7721
7722int new_recompile_block(int addr)
7723{
7724/*
7725 if(addr==0x800cd050) {
7726 int block;
7727 for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7728 int n;
7729 for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7730 }
7731*/
7732 //if(Count==365117028) tracedebug=1;
7733 assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7734 //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7735 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7736 //if(debug)
7737 //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7738 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7739 /*if(Count>=312978186) {
7740 rlist();
7741 }*/
7742 //rlist();
7743 start = (u_int)addr&~3;
7744 //assert(((u_int)addr&1)==0);
7139f3c8 7745#ifdef PCSX
9ad4d757 7746 if (Config.HLE && start == 0x80001000) // hlecall
560e4a12 7747 {
7139f3c8 7748 // XXX: is this enough? Maybe check hleSoftCall?
bb5285ef 7749 u_int beginning=(u_int)out;
7139f3c8 7750 u_int page=get_page(start);
7139f3c8 7751 invalid_code[start>>12]=0;
7752 emit_movimm(start,0);
7753 emit_writeword(0,(int)&pcaddr);
bb5285ef 7754 emit_jmp((int)new_dyna_leave);
7755#ifdef __arm__
7756 __clear_cache((void *)beginning,out);
7757#endif
9ad4d757 7758 ll_add(jump_in+page,start,(void *)beginning);
7139f3c8 7759 return 0;
7760 }
560e4a12 7761 else if ((u_int)addr < 0x00200000 ||
7762 (0xa0000000 <= addr && addr < 0xa0200000)) {
7139f3c8 7763 // used for BIOS calls mostly?
560e4a12 7764 source = (u_int *)((u_int)rdram+(start&0x1fffff));
7765 pagelimit = (addr&0xa0000000)|0x00200000;
7766 }
7767 else if (!Config.HLE && (
7768/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7769 (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7770 // BIOS
7771 source = (u_int *)((u_int)psxR+(start&0x7ffff));
7772 pagelimit = (addr&0xfff00000)|0x80000;
7139f3c8 7773 }
7774 else
7775#endif
3d624f89 7776#ifdef MUPEN64
57871462 7777 if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7778 source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7779 pagelimit = 0xa4001000;
7780 }
3d624f89 7781 else
7782#endif
4cb76aa4 7783 if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
57871462 7784 source = (u_int *)((u_int)rdram+start-0x80000000);
4cb76aa4 7785 pagelimit = 0x80000000+RAM_SIZE;
57871462 7786 }
90ae6d4e 7787#ifndef DISABLE_TLB
57871462 7788 else if ((signed int)addr >= (signed int)0xC0000000) {
7789 //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7790 //if(tlb_LUT_r[start>>12])
7791 //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7792 if((signed int)memory_map[start>>12]>=0) {
7793 source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7794 pagelimit=(start+4096)&0xFFFFF000;
7795 int map=memory_map[start>>12];
7796 int i;
7797 for(i=0;i<5;i++) {
7798 //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7799 if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7800 }
7801 assem_debug("pagelimit=%x\n",pagelimit);
7802 assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7803 }
7804 else {
7805 assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7806 //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
560e4a12 7807 return -1; // Caller will invoke exception handler
57871462 7808 }
7809 //printf("source= %x\n",(int)source);
7810 }
90ae6d4e 7811#endif
57871462 7812 else {
7813 printf("Compile at bogus memory address: %x \n", (int)addr);
7814 exit(1);
7815 }
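  /* NOTE (editorial): whichever branch above was taken, `source` now points
     at the host copy of the guest code starting at `start`, and `pagelimit`
     is the first guest address the recompiler must not read past (pass 1
     below asserts start+i*4<pagelimit).  For example (values invented),
     addr == 0x80010008 in the RAM case gives source == rdram+0x10008 and
     pagelimit == 0x80000000+RAM_SIZE. */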
7816
7817 /* Pass 1: disassemble */
7818 /* Pass 2: register dependencies, branch targets */
7819 /* Pass 3: register allocation */
7820 /* Pass 4: branch dependencies */
7821 /* Pass 5: pre-alloc */
7822 /* Pass 6: optimize clean/dirty state */
7823 /* Pass 7: flag 32-bit registers */
7824 /* Pass 8: assembly */
7825 /* Pass 9: linker */
7826 /* Pass 10: garbage collection / free memory */
7827
7828 int i,j;
7829 int done=0;
7830 unsigned int type,op,op2;
7831
7832 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7833
7834 /* Pass 1 disassembly */
7835
7836 for(i=0;!done;i++) {
7837 bt[i]=0;likely[i]=0;op2=0;
7838 opcode[i]=op=source[i]>>26;
7839 switch(op)
7840 {
7841 case 0x00: strcpy(insn[i],"special"); type=NI;
7842 op2=source[i]&0x3f;
7843 switch(op2)
7844 {
7845 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7846 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7847 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7848 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7849 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7850 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7851 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7852 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7853 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7854 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7855 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7856 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7857 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7858 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7859 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7860 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7861 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7862 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7863 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7864 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7865 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7866 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7867 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7868 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7869 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7870 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7871 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7872 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7873 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7874 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7875 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7876 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7877 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7878 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7879 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7880 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7881 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7882 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7883 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7884 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7885 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7886 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7887 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7888 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7889 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7890 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7891 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7892 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7893 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7894 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7895 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7896 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7897 }
7898 break;
7899 case 0x01: strcpy(insn[i],"regimm"); type=NI;
7900 op2=(source[i]>>16)&0x1f;
7901 switch(op2)
7902 {
7903 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7904 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7905 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7906 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7907 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7908 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7909 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7910 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7911 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7912 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7913 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7914 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7915 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7916 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7917 }
7918 break;
7919 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7920 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7921 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7922 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7923 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7924 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7925 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7926 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7927 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7928 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7929 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7930 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7931 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7932 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7933 case 0x10: strcpy(insn[i],"cop0"); type=NI;
7934 op2=(source[i]>>21)&0x1f;
7935 switch(op2)
7936 {
7937 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7938 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7939 case 0x10: strcpy(insn[i],"tlb"); type=NI;
7940 switch(source[i]&0x3f)
7941 {
7942 case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7943 case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7944 case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7945 case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
576bbd8f 7946#ifdef PCSX
7947 case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7948#else
57871462 7949 case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
576bbd8f 7950#endif
57871462 7951 }
7952 }
7953 break;
7954 case 0x11: strcpy(insn[i],"cop1"); type=NI;
7955 op2=(source[i]>>21)&0x1f;
7956 switch(op2)
7957 {
7958 case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7959 case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7960 case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7961 case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7962 case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7963 case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7964 case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7965 switch((source[i]>>16)&0x3)
7966 {
7967 case 0x00: strcpy(insn[i],"BC1F"); break;
7968 case 0x01: strcpy(insn[i],"BC1T"); break;
7969 case 0x02: strcpy(insn[i],"BC1FL"); break;
7970 case 0x03: strcpy(insn[i],"BC1TL"); break;
7971 }
7972 break;
7973 case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7974 switch(source[i]&0x3f)
7975 {
7976 case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7977 case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7978 case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7979 case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7980 case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7981 case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7982 case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7983 case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7984 case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7985 case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7986 case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7987 case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7988 case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7989 case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7990 case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7991 case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7992 case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7993 case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7994 case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7995 case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7996 case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7997 case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7998 case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7999 case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8000 case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8001 case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8002 case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8003 case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8004 case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8005 case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8006 case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8007 case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8008 case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8009 case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8010 case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8011 }
8012 break;
8013 case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8014 switch(source[i]&0x3f)
8015 {
8016 case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8017 case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8018 case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8019 case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8020 case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8021 case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8022 case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8023 case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8024 case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8025 case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8026 case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8027 case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8028 case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8029 case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8030 case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8031 case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8032 case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8033 case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8034 case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8035 case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8036 case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8037 case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8038 case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8039 case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8040 case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8041 case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8042 case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8043 case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8044 case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8045 case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8046 case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8047 case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8048 case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8049 case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8050 case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8051 }
8052 break;
8053 case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8054 switch(source[i]&0x3f)
8055 {
8056 case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8057 case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8058 }
8059 break;
8060 case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8061 switch(source[i]&0x3f)
8062 {
8063 case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8064 case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8065 }
8066 break;
8067 }
8068 break;
8069 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8070 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8071 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8072 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
996cc15d 8073#ifndef FORCE32
57871462 8074 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8075 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8076 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8077 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
996cc15d 8078#endif
57871462 8079 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8080 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8081 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8082 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8083 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8084 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8085 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8086 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8087 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8088 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8089 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8090 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
996cc15d 8091#ifndef FORCE32
57871462 8092 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8093 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
996cc15d 8094#endif
57871462 8095 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8096 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8097 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8098 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
996cc15d 8099#ifndef FORCE32
57871462 8100 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8101 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8102 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
996cc15d 8103#endif
57871462 8104 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8105 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
996cc15d 8106#ifndef FORCE32
57871462 8107 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8108 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8109 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
996cc15d 8110#endif
b9b61529 8111#ifdef PCSX
8112 case 0x12: strcpy(insn[i],"COP2"); type=NI;
8113 op2=(source[i]>>21)&0x1f;
8114 switch(op2)
8115 {
8116 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8117 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8118 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8119 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8120 default:
8121 if (gte_handlers[source[i]&0x3f]!=NULL) {
8122 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8123 type=C2OP;
8124 }
8125 break;
8126 }
8127 break;
8128 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8129 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8130 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8131#endif
90ae6d4e 8132 default: strcpy(insn[i],"???"); type=NI;
75dec299 8133 printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
90ae6d4e 8134 break;
57871462 8135 }
8136 itype[i]=type;
8137 opcode2[i]=op2;
8138 /* Get registers/immediates */
8139 lt1[i]=0;
8140 us1[i]=0;
8141 us2[i]=0;
8142 dep1[i]=0;
8143 dep2[i]=0;
8144 switch(type) {
8145 case LOAD:
8146 rs1[i]=(source[i]>>21)&0x1f;
8147 rs2[i]=0;
8148 rt1[i]=(source[i]>>16)&0x1f;
8149 rt2[i]=0;
8150 imm[i]=(short)source[i];
8151 break;
8152 case STORE:
8153 case STORELR:
8154 rs1[i]=(source[i]>>21)&0x1f;
8155 rs2[i]=(source[i]>>16)&0x1f;
8156 rt1[i]=0;
8157 rt2[i]=0;
8158 imm[i]=(short)source[i];
8159 if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8160 break;
8161 case LOADLR:
8162 // LWL/LWR only load part of the register,
8163 // therefore the target register must be treated as a source too
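      // NOTE (editorial, invented example): a classic unaligned load pairs
      // the two, e.g.
      //   LWL r4, x+3(r5)
      //   LWR r4, x(r5)
      // each writes only some bytes of r4, so the old r4 value must survive
      // (which offset goes with which opcode depends on endianness).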
8164 rs1[i]=(source[i]>>21)&0x1f;
8165 rs2[i]=(source[i]>>16)&0x1f;
8166 rt1[i]=(source[i]>>16)&0x1f;
8167 rt2[i]=0;
8168 imm[i]=(short)source[i];
8169 if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8170 if(op==0x26) dep1[i]=rt1[i]; // LWR
8171 break;
8172 case IMM16:
8173 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8174 else rs1[i]=(source[i]>>21)&0x1f;
8175 rs2[i]=0;
8176 rt1[i]=(source[i]>>16)&0x1f;
8177 rt2[i]=0;
8178 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8179 imm[i]=(unsigned short)source[i];
8180 }else{
8181 imm[i]=(short)source[i];
8182 }
8183 if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8184 if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8185 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8186 break;
8187 case UJUMP:
8188 rs1[i]=0;
8189 rs2[i]=0;
8190 rt1[i]=0;
8191 rt2[i]=0;
8192 // The JAL instruction writes to r31.
8193 if (op&1) {
8194 rt1[i]=31;
8195 }
8196 rs2[i]=CCREG;
8197 break;
8198 case RJUMP:
8199 rs1[i]=(source[i]>>21)&0x1f;
8200 rs2[i]=0;
8201 rt1[i]=0;
8202 rt2[i]=0;
5067f341 8203 // The JALR instruction writes to rd.
57871462 8204 if (op2&1) {
5067f341 8205 rt1[i]=(source[i]>>11)&0x1f;
57871462 8206 }
8207 rs2[i]=CCREG;
8208 break;
8209 case CJUMP:
8210 rs1[i]=(source[i]>>21)&0x1f;
8211 rs2[i]=(source[i]>>16)&0x1f;
8212 rt1[i]=0;
8213 rt2[i]=0;
8214 if(op&2) { // BGTZ/BLEZ
8215 rs2[i]=0;
8216 }
8217 us1[i]=rs1[i];
8218 us2[i]=rs2[i];
8219 likely[i]=op>>4;
8220 break;
8221 case SJUMP:
8222 rs1[i]=(source[i]>>21)&0x1f;
8223 rs2[i]=CCREG;
8224 rt1[i]=0;
8225 rt2[i]=0;
8226 us1[i]=rs1[i];
8227 if(op2&0x10) { // BxxAL
8228 rt1[i]=31;
8229 // NOTE: If the branch is not taken, r31 is still overwritten
8230 }
8231 likely[i]=(op2&2)>>1;
8232 break;
8233 case FJUMP:
8234 rs1[i]=FSREG;
8235 rs2[i]=CSREG;
8236 rt1[i]=0;
8237 rt2[i]=0;
8238 likely[i]=((source[i])>>17)&1;
8239 break;
8240 case ALU:
8241 rs1[i]=(source[i]>>21)&0x1f; // source
8242 rs2[i]=(source[i]>>16)&0x1f; // subtract amount
8243 rt1[i]=(source[i]>>11)&0x1f; // destination
8244 rt2[i]=0;
8245 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8246 us1[i]=rs1[i];us2[i]=rs2[i];
8247 }
8248 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8249 dep1[i]=rs1[i];dep2[i]=rs2[i];
8250 }
8251 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8252 dep1[i]=rs1[i];dep2[i]=rs2[i];
8253 }
8254 break;
8255 case MULTDIV:
8256 rs1[i]=(source[i]>>21)&0x1f; // source
8257 rs2[i]=(source[i]>>16)&0x1f; // divisor
8258 rt1[i]=HIREG;
8259 rt2[i]=LOREG;
8260 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8261 us1[i]=rs1[i];us2[i]=rs2[i];
8262 }
8263 break;
8264 case MOV:
8265 rs1[i]=0;
8266 rs2[i]=0;
8267 rt1[i]=0;
8268 rt2[i]=0;
8269 if(op2==0x10) rs1[i]=HIREG; // MFHI
8270 if(op2==0x11) rt1[i]=HIREG; // MTHI
8271 if(op2==0x12) rs1[i]=LOREG; // MFLO
8272 if(op2==0x13) rt1[i]=LOREG; // MTLO
8273 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8274 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8275 dep1[i]=rs1[i];
8276 break;
8277 case SHIFT:
8278 rs1[i]=(source[i]>>16)&0x1f; // target of shift
8279 rs2[i]=(source[i]>>21)&0x1f; // shift amount
8280 rt1[i]=(source[i]>>11)&0x1f; // destination
8281 rt2[i]=0;
8282 // DSLLV/DSRLV/DSRAV are 64-bit
8283 if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8284 break;
8285 case SHIFTIMM:
8286 rs1[i]=(source[i]>>16)&0x1f;
8287 rs2[i]=0;
8288 rt1[i]=(source[i]>>11)&0x1f;
8289 rt2[i]=0;
8290 imm[i]=(source[i]>>6)&0x1f;
8291 // DSxx32 instructions
8292 if(op2>=0x3c) imm[i]|=0x20;
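      // NOTE (editorial example): DSRA32 rd,rt,5 really shifts by 5+32=37;
      // folding the extra 32 into imm[] here lets later passes treat all
      // doubleword shift amounts uniformly.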
8293 // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8294 if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8295 break;
8296 case COP0:
8297 rs1[i]=0;
8298 rs2[i]=0;
8299 rt1[i]=0;
8300 rt2[i]=0;
8301 if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8302 if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8303 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8304 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8305 break;
8306 case COP1:
b9b61529 8307 case COP2:
57871462 8308 rs1[i]=0;
8309 rs2[i]=0;
8310 rt1[i]=0;
8311 rt2[i]=0;
8312 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8313 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8314 if(op2==5) us1[i]=rs1[i]; // DMTC1
8315 rs2[i]=CSREG;
8316 break;
8317 case C1LS:
8318 rs1[i]=(source[i]>>21)&0x1F;
8319 rs2[i]=CSREG;
8320 rt1[i]=0;
8321 rt2[i]=0;
8322 imm[i]=(short)source[i];
8323 break;
b9b61529 8324 case C2LS:
8325 rs1[i]=(source[i]>>21)&0x1F;
8326 rs2[i]=0;
8327 rt1[i]=0;
8328 rt2[i]=0;
8329 imm[i]=(short)source[i];
8330 break;
57871462 8331 case FLOAT:
8332 case FCONV:
8333 rs1[i]=0;
8334 rs2[i]=CSREG;
8335 rt1[i]=0;
8336 rt2[i]=0;
8337 break;
8338 case FCOMP:
8339 rs1[i]=FSREG;
8340 rs2[i]=CSREG;
8341 rt1[i]=FSREG;
8342 rt2[i]=0;
8343 break;
8344 case SYSCALL:
7139f3c8 8345 case HLECALL:
57871462 8346 rs1[i]=CCREG;
8347 rs2[i]=0;
8348 rt1[i]=0;
8349 rt2[i]=0;
8350 break;
8351 default:
8352 rs1[i]=0;
8353 rs2[i]=0;
8354 rt1[i]=0;
8355 rt2[i]=0;
8356 }
8357 /* Calculate branch target addresses */
8358 if(type==UJUMP)
8359 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8360 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8361 ba[i]=start+i*4+8; // Ignore never taken branch
8362 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8363 ba[i]=start+i*4+8; // Ignore never taken branch
8364 else if(type==CJUMP||type==SJUMP||type==FJUMP)
8365 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8366 else ba[i]=-1;
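    /* NOTE (editorial, worked example with invented addresses): for J/JAL the
       26-bit instr_index is recovered by ((unsigned)source[i]<<6)>>4, which
       equals instr_index<<2, and OR'd with the top 4 bits of the delay-slot
       address; a J at 0x8001000c with instr_index 0x004000 therefore targets
       (0x80010010&0xF0000000)|(0x004000<<2) == 0x80010000.  For conditional
       branches the 16-bit immediate is sign-extended, shifted left by 2 and
       added to the delay-slot address, so imm16 == 0xfffe branches to
       (pc+4)-8. */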
8367 /* Is this the end of the block? */
8368 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
5067f341 8369 if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
57871462 8370 done=1;
8371 // Does the block continue due to a branch?
8372 for(j=i-1;j>=0;j--)
8373 {
8374 if(ba[j]==start+i*4+4) done=j=0;
8375 if(ba[j]==start+i*4+8) done=j=0;
8376 }
8377 }
8378 else {
8379 if(stop_after_jal) done=1;
8380 // Stop on BREAK
8381 if((source[i+1]&0xfc00003f)==0x0d) done=1;
8382 }
8383 // Don't recompile stuff that's already compiled
8384 if(check_addr(start+i*4+4)) done=1;
8385 // Don't get too close to the limit
8386 if(i>MAXBLOCK/2) done=1;
8387 }
75dec299 8388 if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8389 if(itype[i]==HLECALL) done=1;
8390 //assert(i<MAXBLOCK-1);
57871462 8391 if(start+i*4==pagelimit-4) done=1;
8392 assert(start+i*4<pagelimit);
8393 if (i==MAXBLOCK-1) done=1;
8394 // Stop if we're compiling junk
8395 if(itype[i]==NI&&opcode[i]==0x11) {
8396 done=stop_after_jal=1;
8397 printf("Disabled speculative precompilation\n");
8398 }
8399 }
8400 slen=i;
8401 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8402 if(start+i*4==pagelimit) {
8403 itype[i-1]=SPAN;
8404 }
8405 }
8406 assert(slen>0);
8407
8408 /* Pass 2 - Register dependencies and branch targets */
8409
8410 unneeded_registers(0,slen-1,0);
8411
8412 /* Pass 3 - Register allocation */
8413
8414 struct regstat current; // Current register allocations/status
8415 current.is32=1;
8416 current.dirty=0;
8417 current.u=unneeded_reg[0];
8418 current.uu=unneeded_reg_upper[0];
8419 clear_all_regs(current.regmap);
8420 alloc_reg(&current,0,CCREG);
8421 dirty_reg(&current,CCREG);
8422 current.isconst=0;
8423 current.wasconst=0;
8424 int ds=0;
8425 int cc=0;
8426 int hr;
8427
8428 provisional_32bit();
8429
8430 if((u_int)addr&1) {
8431 // First instruction is delay slot
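    // NOTE (editorial, convention inferred from start=(u_int)addr&~3 above,
    // hedged accordingly): callers appear to request compilation beginning
    // at a branch delay slot by setting bit 0 of addr (e.g. passing
    // target|1); the low bits are stripped from `start` but tested here.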
8432 cc=-1;
8433 bt[1]=1;
8434 ds=1;
8435 unneeded_reg[0]=1;
8436 unneeded_reg_upper[0]=1;
8437 current.regmap[HOST_BTREG]=BTREG;
8438 }
8439
8440 for(i=0;i<slen;i++)
8441 {
8442 if(bt[i])
8443 {
8444 int hr;
8445 for(hr=0;hr<HOST_REGS;hr++)
8446 {
8447 // Is this really necessary?
8448 if(current.regmap[hr]==0) current.regmap[hr]=-1;
8449 }
8450 current.isconst=0;
8451 }
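  // NOTE (editorial): the block below appears to implement a small
  // zero-propagation trick: if instruction i-2 was BNE/BNEL with one operand
  // being r0 and we are on the fall-through path, the other operand compared
  // equal to zero, so it can be treated as 32-bit and its upper-half mapping
  // dropped.  Example (invented):
  //   bne  r5,r0,elsewhere
  //   nop
  //   ...            <- here r5 is known to be zero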
8452 if(i>1)
8453 {
8454 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8455 {
8456 if(rs1[i-2]==0||rs2[i-2]==0)
8457 {
8458 if(rs1[i-2]) {
8459 current.is32|=1LL<<rs1[i-2];
8460 int hr=get_reg(current.regmap,rs1[i-2]|64);
8461 if(hr>=0) current.regmap[hr]=-1;
8462 }
8463 if(rs2[i-2]) {
8464 current.is32|=1LL<<rs2[i-2];
8465 int hr=get_reg(current.regmap,rs2[i-2]|64);
8466 if(hr>=0) current.regmap[hr]=-1;
8467 }
8468 }
8469 }
8470 }
8471 // If something jumps here with 64-bit values
8472 // then promote those registers to 64 bits
8473 if(bt[i])
8474 {
8475 uint64_t temp_is32=current.is32;
8476 for(j=i-1;j>=0;j--)
8477 {
8478 if(ba[j]==start+i*4)
8479 temp_is32&=branch_regs[j].is32;
8480 }
8481 for(j=i;j<slen;j++)
8482 {
8483 if(ba[j]==start+i*4)
8484 //temp_is32=1;
8485 temp_is32&=p32[j];
8486 }
8487 if(temp_is32!=current.is32) {
8488 //printf("dumping 32-bit regs (%x)\n",start+i*4);
8489 #ifdef DESTRUCTIVE_WRITEBACK
8490 for(hr=0;hr<HOST_REGS;hr++)
8491 {
8492 int r=current.regmap[hr];
8493 if(r>0&&r<64)
8494 {
8495 if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8496 temp_is32|=1LL<<r;
8497 //printf("restore %d\n",r);
8498 }
8499 }
8500 }
8501 #endif
8502 current.is32=temp_is32;
8503 }
8504 }
24385cae 8505#ifdef FORCE32
8506 memset(p32, 0xff, sizeof(p32));
8507 current.is32=-1LL;
8508#endif
8509
57871462 8510 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8511 regs[i].wasconst=current.isconst;
8512 regs[i].was32=current.is32;
8513 regs[i].wasdirty=current.dirty;
8514 #ifdef DESTRUCTIVE_WRITEBACK
8515 // To change a dirty register from 32 to 64 bits, we must write
8516 // it out during the previous cycle (for branches, 2 cycles)
8517 if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8518 {
8519 uint64_t temp_is32=current.is32;
8520 for(j=i-1;j>=0;j--)
8521 {
8522 if(ba[j]==start+i*4+4)
8523 temp_is32&=branch_regs[j].is32;
8524 }
8525 for(j=i;j<slen;j++)
8526 {
8527 if(ba[j]==start+i*4+4)
8528 //temp_is32=1;
8529 temp_is32&=p32[j];
8530 }
8531 if(temp_is32!=current.is32) {
8532 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8533 for(hr=0;hr<HOST_REGS;hr++)
8534 {
8535 int r=current.regmap[hr];
8536 if(r>0)
8537 {
8538 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8539 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8540 {
8541 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8542 {
8543 //printf("dump %d/r%d\n",hr,r);
8544 current.regmap[hr]=-1;
8545 if(get_reg(current.regmap,r|64)>=0)
8546 current.regmap[get_reg(current.regmap,r|64)]=-1;
8547 }
8548 }
8549 }
8550 }
8551 }
8552 }
8553 }
8554 else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8555 {
8556 uint64_t temp_is32=current.is32;
8557 for(j=i-1;j>=0;j--)
8558 {
8559 if(ba[j]==start+i*4+8)
8560 temp_is32&=branch_regs[j].is32;
8561 }
8562 for(j=i;j<slen;j++)
8563 {
8564 if(ba[j]==start+i*4+8)
8565 //temp_is32=1;
8566 temp_is32&=p32[j];
8567 }
8568 if(temp_is32!=current.is32) {
8569 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8570 for(hr=0;hr<HOST_REGS;hr++)
8571 {
8572 int r=current.regmap[hr];
8573 if(r>0)
8574 {
8575 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8576 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8577 {
8578 //printf("dump %d/r%d\n",hr,r);
8579 current.regmap[hr]=-1;
8580 if(get_reg(current.regmap,r|64)>=0)
8581 current.regmap[get_reg(current.regmap,r|64)]=-1;
8582 }
8583 }
8584 }
8585 }
8586 }
8587 }
8588 #endif
8589 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8590 if(i+1<slen) {
8591 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8592 current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8593 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8594 current.u|=1;
8595 current.uu|=1;
8596 } else {
8597 current.u=1;
8598 current.uu=1;
8599 }
8600 } else {
8601 if(i+1<slen) {
8602 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8603 current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8604 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8605 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8606 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8607 current.u|=1;
8608 current.uu|=1;
8609 } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8610 }
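    /* NOTE (editorial): current.u / current.uu are "unneeded" bitmasks (bit r
       set means the value / upper half of register r is dead at this point).
       The updates above are backwards liveness: a register is unneeded before
       instruction i iff it is unneeded after i and i does not read it,
       roughly u_before = u_after & ~((1LL<<rs1[i])|(1LL<<rs2[i])), with bit 0
       always set because r0 never needs to be preserved. */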
8611 is_ds[i]=ds;
8612 if(ds) {
8613 ds=0; // Skip delay slot, already allocated as part of branch
8614 // ...but we need to alloc it in case something jumps here
8615 if(i+1<slen) {
8616 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8617 current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8618 }else{
8619 current.u=branch_unneeded_reg[i-1];
8620 current.uu=branch_unneeded_reg_upper[i-1];
8621 }
8622 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8623 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8624 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8625 current.u|=1;
8626 current.uu|=1;
8627 struct regstat temp;
8628 memcpy(&temp,&current,sizeof(current));
8629 temp.wasdirty=temp.dirty;
8630 temp.was32=temp.is32;
8631 // TODO: Take into account unconditional branches, as below
8632 delayslot_alloc(&temp,i);
8633 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8634 regs[i].wasdirty=temp.wasdirty;
8635 regs[i].was32=temp.was32;
8636 regs[i].dirty=temp.dirty;
8637 regs[i].is32=temp.is32;
8638 regs[i].isconst=0;
8639 regs[i].wasconst=0;
8640 current.isconst=0;
8641 // Create entry (branch target) regmap
8642 for(hr=0;hr<HOST_REGS;hr++)
8643 {
8644 int r=temp.regmap[hr];
8645 if(r>=0) {
8646 if(r!=regmap_pre[i][hr]) {
8647 regs[i].regmap_entry[hr]=-1;
8648 }
8649 else
8650 {
8651 if(r<64){
8652 if((current.u>>r)&1) {
8653 regs[i].regmap_entry[hr]=-1;
8654 regs[i].regmap[hr]=-1;
8655 //Don't clear regs in the delay slot as the branch might need them
8656 //current.regmap[hr]=-1;
8657 }else
8658 regs[i].regmap_entry[hr]=r;
8659 }
8660 else {
8661 if((current.uu>>(r&63))&1) {
8662 regs[i].regmap_entry[hr]=-1;
8663 regs[i].regmap[hr]=-1;
8664 //Don't clear regs in the delay slot as the branch might need them
8665 //current.regmap[hr]=-1;
8666 }else
8667 regs[i].regmap_entry[hr]=r;
8668 }
8669 }
8670 } else {
8671 // First instruction expects CCREG to be allocated
8672 if(i==0&&hr==HOST_CCREG)
8673 regs[i].regmap_entry[hr]=CCREG;
8674 else
8675 regs[i].regmap_entry[hr]=-1;
8676 }
8677 }
8678 }
8679 else { // Not delay slot
8680 switch(itype[i]) {
8681 case UJUMP:
8682 //current.isconst=0; // DEBUG
8683 //current.wasconst=0; // DEBUG
8684 //regs[i].wasconst=0; // DEBUG
8685 clear_const(&current,rt1[i]);
8686 alloc_cc(&current,i);
8687 dirty_reg(&current,CCREG);
8688 if (rt1[i]==31) {
8689 alloc_reg(&current,i,31);
8690 dirty_reg(&current,31);
076655d1 8691 //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8692 assert(rt1[i+1]!=rt1[i]);
57871462 8693 #ifdef REG_PREFETCH
8694 alloc_reg(&current,i,PTEMP);
8695 #endif
8696 //current.is32|=1LL<<rt1[i];
8697 }
8698 delayslot_alloc(&current,i+1);
8699 //current.isconst=0; // DEBUG
8700 ds=1;
8701 //printf("i=%d, isconst=%x\n",i,current.isconst);
8702 break;
8703 case RJUMP:
8704 //current.isconst=0;
8705 //current.wasconst=0;
8706 //regs[i].wasconst=0;
8707 clear_const(&current,rs1[i]);
8708 clear_const(&current,rt1[i]);
8709 alloc_cc(&current,i);
8710 dirty_reg(&current,CCREG);
8711 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8712 alloc_reg(&current,i,rs1[i]);
5067f341 8713 if (rt1[i]!=0) {
8714 alloc_reg(&current,i,rt1[i]);
8715 dirty_reg(&current,rt1[i]);
076655d1 8716 //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8717 assert(rt1[i+1]!=rt1[i]);
57871462 8718 #ifdef REG_PREFETCH
8719 alloc_reg(&current,i,PTEMP);
8720 #endif
8721 }
8722 #ifdef USE_MINI_HT
8723 if(rs1[i]==31) { // JALR
8724 alloc_reg(&current,i,RHASH);
8725 #ifndef HOST_IMM_ADDR32
8726 alloc_reg(&current,i,RHTBL);
8727 #endif
8728 }
8729 #endif
8730 delayslot_alloc(&current,i+1);
8731 } else {
8732 // The delay slot overwrites our source register,
8733 // allocate a temporary register to hold the old value.
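        // NOTE (editorial, invented sequence): this covers e.g.
        //   jr   r9
        //   lw   r9, 0(r29)   <- delay slot reloads the jump register
        // RTEMP preserves the old r9 so the jump still uses the pre-load
        // value.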
8734 current.isconst=0;
8735 current.wasconst=0;
8736 regs[i].wasconst=0;
8737 delayslot_alloc(&current,i+1);
8738 current.isconst=0;
8739 alloc_reg(&current,i,RTEMP);
8740 }
8741 //current.isconst=0; // DEBUG
8742 ds=1;
8743 break;
8744 case CJUMP:
8745 //current.isconst=0;
8746 //current.wasconst=0;
8747 //regs[i].wasconst=0;
8748 clear_const(&current,rs1[i]);
8749 clear_const(&current,rs2[i]);
8750 if((opcode[i]&0x3E)==4) // BEQ/BNE
8751 {
8752 alloc_cc(&current,i);
8753 dirty_reg(&current,CCREG);
8754 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8755 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8756 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8757 {
8758 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8759 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8760 }
8761 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8762 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8763 // The delay slot overwrites one of our conditions.
8764 // Allocate the branch condition registers instead.
8765 // Note that such a sequence of instructions could
8766 // be considered a bug since the branch can not be
8767 // re-executed if an exception occurs.
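        // NOTE (editorial, invented example of such a sequence):
        //   beq  r4,r5,target
        //   addu r4,r2,r3     <- delay slot clobbers a compared register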
8768 current.isconst=0;
8769 current.wasconst=0;
8770 regs[i].wasconst=0;
8771 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8772 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8773 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8774 {
8775 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8776 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8777 }
8778 }
8779 else delayslot_alloc(&current,i+1);
8780 }
8781 else
8782 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8783 {
8784 alloc_cc(&current,i);
8785 dirty_reg(&current,CCREG);
8786 alloc_reg(&current,i,rs1[i]);
8787 if(!(current.is32>>rs1[i]&1))
8788 {
8789 alloc_reg64(&current,i,rs1[i]);
8790 }
8791 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8792 // The delay slot overwrites one of our conditions.
8793 // Allocate the branch condition registers instead.
8794 // Note that such a sequence of instructions could
8795 // be considered a bug since the branch can not be
8796 // re-executed if an exception occurs.
8797 current.isconst=0;
8798 current.wasconst=0;
8799 regs[i].wasconst=0;
8800 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8801 if(!((current.is32>>rs1[i])&1))
8802 {
8803 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8804 }
8805 }
8806 else delayslot_alloc(&current,i+1);
8807 }
8808 else
8809 // Don't alloc the delay slot yet because we might not execute it
8810 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8811 {
8812 current.isconst=0;
8813 current.wasconst=0;
8814 regs[i].wasconst=0;
8815 alloc_cc(&current,i);
8816 dirty_reg(&current,CCREG);
8817 alloc_reg(&current,i,rs1[i]);
8818 alloc_reg(&current,i,rs2[i]);
8819 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8820 {
8821 alloc_reg64(&current,i,rs1[i]);
8822 alloc_reg64(&current,i,rs2[i]);
8823 }
8824 }
8825 else
8826 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8827 {
8828 current.isconst=0;
8829 current.wasconst=0;
8830 regs[i].wasconst=0;
8831 alloc_cc(&current,i);
8832 dirty_reg(&current,CCREG);
8833 alloc_reg(&current,i,rs1[i]);
8834 if(!(current.is32>>rs1[i]&1))
8835 {
8836 alloc_reg64(&current,i,rs1[i]);
8837 }
8838 }
8839 ds=1;
8840 //current.isconst=0;
8841 break;
8842 case SJUMP:
8843 //current.isconst=0;
8844 //current.wasconst=0;
8845 //regs[i].wasconst=0;
8846 clear_const(&current,rs1[i]);
8847 clear_const(&current,rt1[i]);
8848 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8849 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8850 {
8851 alloc_cc(&current,i);
8852 dirty_reg(&current,CCREG);
8853 alloc_reg(&current,i,rs1[i]);
8854 if(!(current.is32>>rs1[i]&1))
8855 {
8856 alloc_reg64(&current,i,rs1[i]);
8857 }
8858 if (rt1[i]==31) { // BLTZAL/BGEZAL
8859 alloc_reg(&current,i,31);
8860 dirty_reg(&current,31);
8861 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8862 //#ifdef REG_PREFETCH
8863 //alloc_reg(&current,i,PTEMP);
8864 //#endif
8865 //current.is32|=1LL<<rt1[i];
8866 }
8867 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8868 // The delay slot overwrites the branch condition.
8869 // Allocate the branch condition registers instead.
8870 // Note that such a sequence of instructions could
8871 // be considered a bug since the branch can not be
8872 // re-executed if an exception occurs.
8873 current.isconst=0;
8874 current.wasconst=0;
8875 regs[i].wasconst=0;
8876 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8877 if(!((current.is32>>rs1[i])&1))
8878 {
8879 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8880 }
8881 }
8882 else delayslot_alloc(&current,i+1);
8883 }
8884 else
8885 // Don't alloc the delay slot yet because we might not execute it
8886 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8887 {
8888 current.isconst=0;
8889 current.wasconst=0;
8890 regs[i].wasconst=0;
8891 alloc_cc(&current,i);
8892 dirty_reg(&current,CCREG);
8893 alloc_reg(&current,i,rs1[i]);
8894 if(!(current.is32>>rs1[i]&1))
8895 {
8896 alloc_reg64(&current,i,rs1[i]);
8897 }
8898 }
8899 ds=1;
8900 //current.isconst=0;
8901 break;
8902 case FJUMP:
8903 current.isconst=0;
8904 current.wasconst=0;
8905 regs[i].wasconst=0;
8906 if(likely[i]==0) // BC1F/BC1T
8907 {
8908 // TODO: Theoretically we can run out of registers here on x86.
8909 // The delay slot can allocate up to six, and we need to check
8910 // CSREG before executing the delay slot. Possibly we can drop
8911 // the cycle count and then reload it after checking that the
8912 // FPU is in a usable state, or don't do out-of-order execution.
8913 alloc_cc(&current,i);
8914 dirty_reg(&current,CCREG);
8915 alloc_reg(&current,i,FSREG);
8916 alloc_reg(&current,i,CSREG);
8917 if(itype[i+1]==FCOMP) {
8918 // The delay slot overwrites the branch condition.
8919 // Allocate the branch condition registers instead.
8920 // Note that such a sequence of instructions could
8921 // be considered a bug since the branch can not be
8922 // re-executed if an exception occurs.
8923 alloc_cc(&current,i);
8924 dirty_reg(&current,CCREG);
8925 alloc_reg(&current,i,CSREG);
8926 alloc_reg(&current,i,FSREG);
8927 }
8928 else {
8929 delayslot_alloc(&current,i+1);
8930 alloc_reg(&current,i+1,CSREG);
8931 }
8932 }
8933 else
8934 // Don't alloc the delay slot yet because we might not execute it
8935 if(likely[i]) // BC1FL/BC1TL
8936 {
8937 alloc_cc(&current,i);
8938 dirty_reg(&current,CCREG);
8939 alloc_reg(&current,i,CSREG);
8940 alloc_reg(&current,i,FSREG);
8941 }
8942 ds=1;
8943 current.isconst=0;
8944 break;
8945 case IMM16:
8946 imm16_alloc(&current,i);
8947 break;
8948 case LOAD:
8949 case LOADLR:
8950 load_alloc(&current,i);
8951 break;
8952 case STORE:
8953 case STORELR:
8954 store_alloc(&current,i);
8955 break;
8956 case ALU:
8957 alu_alloc(&current,i);
8958 break;
8959 case SHIFT:
8960 shift_alloc(&current,i);
8961 break;
8962 case MULTDIV:
8963 multdiv_alloc(&current,i);
8964 break;
8965 case SHIFTIMM:
8966 shiftimm_alloc(&current,i);
8967 break;
8968 case MOV:
8969 mov_alloc(&current,i);
8970 break;
8971 case COP0:
8972 cop0_alloc(&current,i);
8973 break;
8974 case COP1:
b9b61529 8975 case COP2:
57871462 8976 cop1_alloc(&current,i);
8977 break;
8978 case C1LS:
8979 c1ls_alloc(&current,i);
8980 break;
b9b61529 8981 case C2LS:
8982 c2ls_alloc(&current,i);
8983 break;
8984 case C2OP:
8985 c2op_alloc(&current,i);
8986 break;
57871462 8987 case FCONV:
8988 fconv_alloc(&current,i);
8989 break;
8990 case FLOAT:
8991 float_alloc(&current,i);
8992 break;
8993 case FCOMP:
8994 fcomp_alloc(&current,i);
8995 break;
8996 case SYSCALL:
7139f3c8 8997 case HLECALL:
57871462 8998 syscall_alloc(&current,i);
8999 break;
9000 case SPAN:
9001 pagespan_alloc(&current,i);
9002 break;
9003 }
9004
9005 // Drop the upper half of registers that have become 32-bit
9006 current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9007 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9008 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9009 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9010 current.uu|=1;
9011 } else {
9012 current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9013 current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9014 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9015 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9016 current.uu|=1;
9017 }
9018
9019 // Create entry (branch target) regmap
9020 for(hr=0;hr<HOST_REGS;hr++)
9021 {
9022 int r,or,er;
9023 r=current.regmap[hr];
9024 if(r>=0) {
9025 if(r!=regmap_pre[i][hr]) {
9026 // TODO: delay slot (?)
9027 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9028 if(or<0||(r&63)>=TEMPREG){
9029 regs[i].regmap_entry[hr]=-1;
9030 }
9031 else
9032 {
9033 // Just move it to a different register
9034 regs[i].regmap_entry[hr]=r;
9035 // If it was dirty before, it's still dirty
9036 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9037 }
9038 }
9039 else
9040 {
9041 // Unneeded
9042 if(r==0){
9043 regs[i].regmap_entry[hr]=0;
9044 }
9045 else
9046 if(r<64){
9047 if((current.u>>r)&1) {
9048 regs[i].regmap_entry[hr]=-1;
9049 //regs[i].regmap[hr]=-1;
9050 current.regmap[hr]=-1;
9051 }else
9052 regs[i].regmap_entry[hr]=r;
9053 }
9054 else {
9055 if((current.uu>>(r&63))&1) {
9056 regs[i].regmap_entry[hr]=-1;
9057 //regs[i].regmap[hr]=-1;
9058 current.regmap[hr]=-1;
9059 }else
9060 regs[i].regmap_entry[hr]=r;
9061 }
9062 }
9063 } else {
9064 // Branches expect CCREG to be allocated at the target
9065 if(regmap_pre[i][hr]==CCREG)
9066 regs[i].regmap_entry[hr]=CCREG;
9067 else
9068 regs[i].regmap_entry[hr]=-1;
9069 }
9070 }
9071 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9072 }
9073 /* Branch post-alloc */
9074 if(i>0)
9075 {
9076 current.was32=current.is32;
9077 current.wasdirty=current.dirty;
9078 switch(itype[i-1]) {
9079 case UJUMP:
9080 memcpy(&branch_regs[i-1],&current,sizeof(current));
9081 branch_regs[i-1].isconst=0;
9082 branch_regs[i-1].wasconst=0;
9083 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9084 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9085 alloc_cc(&branch_regs[i-1],i-1);
9086 dirty_reg(&branch_regs[i-1],CCREG);
9087 if(rt1[i-1]==31) { // JAL
9088 alloc_reg(&branch_regs[i-1],i-1,31);
9089 dirty_reg(&branch_regs[i-1],31);
9090 branch_regs[i-1].is32|=1LL<<31;
9091 }
9092 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9093 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9094 break;
9095 case RJUMP:
9096 memcpy(&branch_regs[i-1],&current,sizeof(current));
9097 branch_regs[i-1].isconst=0;
9098 branch_regs[i-1].wasconst=0;
9099 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9100 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9101 alloc_cc(&branch_regs[i-1],i-1);
9102 dirty_reg(&branch_regs[i-1],CCREG);
9103 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
5067f341 9104 if(rt1[i-1]!=0) { // JALR
9105 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9106 dirty_reg(&branch_regs[i-1],rt1[i-1]);
9107 branch_regs[i-1].is32|=1LL<<rt1[i-1];
57871462 9108 }
9109 #ifdef USE_MINI_HT
9110 if(rs1[i-1]==31) { // JALR
9111 alloc_reg(&branch_regs[i-1],i-1,RHASH);
9112 #ifndef HOST_IMM_ADDR32
9113 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9114 #endif
9115 }
9116 #endif
9117 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9118 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9119 break;
9120 case CJUMP:
9121 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9122 {
9123 alloc_cc(&current,i-1);
9124 dirty_reg(&current,CCREG);
9125 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9126 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9127 // The delay slot overwrote one of our conditions
9128 // Delay slot goes after the test (in order)
9129 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9130 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9131 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9132 current.u|=1;
9133 current.uu|=1;
9134 delayslot_alloc(&current,i);
9135 current.isconst=0;
9136 }
9137 else
9138 {
9139 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9140 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9141 // Alloc the branch condition registers
9142 if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9143 if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9144 if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9145 {
9146 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9147 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9148 }
9149 }
9150 memcpy(&branch_regs[i-1],&current,sizeof(current));
9151 branch_regs[i-1].isconst=0;
9152 branch_regs[i-1].wasconst=0;
9153 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9154 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9155 }
9156 else
9157 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9158 {
9159 alloc_cc(&current,i-1);
9160 dirty_reg(&current,CCREG);
9161 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9162 // The delay slot overwrote the branch condition
9163 // Delay slot goes after the test (in order)
9164 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9165 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9166 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9167 current.u|=1;
9168 current.uu|=1;
9169 delayslot_alloc(&current,i);
9170 current.isconst=0;
9171 }
9172 else
9173 {
9174 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9175 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9176 // Alloc the branch condition register
9177 alloc_reg(&current,i-1,rs1[i-1]);
9178 if(!(current.is32>>rs1[i-1]&1))
9179 {
9180 alloc_reg64(&current,i-1,rs1[i-1]);
9181 }
9182 }
9183 memcpy(&branch_regs[i-1],&current,sizeof(current));
9184 branch_regs[i-1].isconst=0;
9185 branch_regs[i-1].wasconst=0;
9186 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9187 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9188 }
9189 else
9190 // Alloc the delay slot in case the branch is taken
9191 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9192 {
9193 memcpy(&branch_regs[i-1],&current,sizeof(current));
9194 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9195 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9196 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9197 alloc_cc(&branch_regs[i-1],i);
9198 dirty_reg(&branch_regs[i-1],CCREG);
9199 delayslot_alloc(&branch_regs[i-1],i);
9200 branch_regs[i-1].isconst=0;
9201 alloc_reg(&current,i,CCREG); // Not taken path
9202 dirty_reg(&current,CCREG);
9203 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9204 }
9205 else
9206 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9207 {
9208 memcpy(&branch_regs[i-1],&current,sizeof(current));
9209 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9210 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9211 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9212 alloc_cc(&branch_regs[i-1],i);
9213 dirty_reg(&branch_regs[i-1],CCREG);
9214 delayslot_alloc(&branch_regs[i-1],i);
9215 branch_regs[i-1].isconst=0;
9216 alloc_reg(&current,i,CCREG); // Not taken path
9217 dirty_reg(&current,CCREG);
9218 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9219 }
9220 break;
9221 case SJUMP:
9222 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9223 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9224 {
9225 alloc_cc(&current,i-1);
9226 dirty_reg(&current,CCREG);
9227 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9228 // The delay slot overwrote the branch condition
9229 // Delay slot goes after the test (in order)
9230 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9231 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9232 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9233 current.u|=1;
9234 current.uu|=1;
9235 delayslot_alloc(&current,i);
9236 current.isconst=0;
9237 }
9238 else
9239 {
9240 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9241 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9242 // Alloc the branch condition register
9243 alloc_reg(&current,i-1,rs1[i-1]);
9244 if(!(current.is32>>rs1[i-1]&1))
9245 {
9246 alloc_reg64(&current,i-1,rs1[i-1]);
9247 }
9248 }
9249 memcpy(&branch_regs[i-1],&current,sizeof(current));
9250 branch_regs[i-1].isconst=0;
9251 branch_regs[i-1].wasconst=0;
9252 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9253 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9254 }
9255 else
9256 // Alloc the delay slot in case the branch is taken
9257 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9258 {
9259 memcpy(&branch_regs[i-1],&current,sizeof(current));
9260 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9261 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9262 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9263 alloc_cc(&branch_regs[i-1],i);
9264 dirty_reg(&branch_regs[i-1],CCREG);
9265 delayslot_alloc(&branch_regs[i-1],i);
9266 branch_regs[i-1].isconst=0;
9267 alloc_reg(&current,i,CCREG); // Not taken path
9268 dirty_reg(&current,CCREG);
9269 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9270 }
9271 // FIXME: BLTZAL/BGEZAL
9272 if(opcode2[i-1]&0x10) { // BxxZAL
9273 alloc_reg(&branch_regs[i-1],i-1,31);
9274 dirty_reg(&branch_regs[i-1],31);
9275 branch_regs[i-1].is32|=1LL<<31;
9276 }
9277 break;
9278 case FJUMP:
9279 if(likely[i-1]==0) // BC1F/BC1T
9280 {
9281 alloc_cc(&current,i-1);
9282 dirty_reg(&current,CCREG);
9283 if(itype[i]==FCOMP) {
9284 // The delay slot overwrote the branch condition
9285 // Delay slot goes after the test (in order)
9286 delayslot_alloc(&current,i);
9287 current.isconst=0;
9288 }
9289 else
9290 {
9291 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9292 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9293 // Alloc the branch condition register
9294 alloc_reg(&current,i-1,FSREG);
9295 }
9296 memcpy(&branch_regs[i-1],&current,sizeof(current));
9297 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9298 }
9299 else // BC1FL/BC1TL
9300 {
9301 // Alloc the delay slot in case the branch is taken
9302 memcpy(&branch_regs[i-1],&current,sizeof(current));
9303 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9304 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9305 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9306 alloc_cc(&branch_regs[i-1],i);
9307 dirty_reg(&branch_regs[i-1],CCREG);
9308 delayslot_alloc(&branch_regs[i-1],i);
9309 branch_regs[i-1].isconst=0;
9310 alloc_reg(&current,i,CCREG); // Not taken path
9311 dirty_reg(&current,CCREG);
9312 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9313 }
9314 break;
9315 }
9316
9317 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9318 {
9319 if(rt1[i-1]==31) // JAL/JALR
9320 {
9321 // Subroutine call will return here, don't alloc any registers
9322 current.is32=1;
9323 current.dirty=0;
9324 clear_all_regs(current.regmap);
9325 alloc_reg(&current,i,CCREG);
9326 dirty_reg(&current,CCREG);
9327 }
9328 else if(i+1<slen)
9329 {
9330 // Internal branch will jump here, match registers to caller
9331 current.is32=0x3FFFFFFFFLL;
9332 current.dirty=0;
9333 clear_all_regs(current.regmap);
9334 alloc_reg(&current,i,CCREG);
9335 dirty_reg(&current,CCREG);
9336 for(j=i-1;j>=0;j--)
9337 {
9338 if(ba[j]==start+i*4+4) {
9339 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9340 current.is32=branch_regs[j].is32;
9341 current.dirty=branch_regs[j].dirty;
9342 break;
9343 }
9344 }
9345 while(j>=0) {
9346 if(ba[j]==start+i*4+4) {
9347 for(hr=0;hr<HOST_REGS;hr++) {
9348 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9349 current.regmap[hr]=-1;
9350 }
9351 current.is32&=branch_regs[j].is32;
9352 current.dirty&=branch_regs[j].dirty;
9353 }
9354 }
9355 j--;
9356 }
9357 }
9358 }
9359 }
9360
9361 // Count cycles in between branches
9362 ccadj[i]=cc;
7139f3c8 9363 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
57871462 9364 {
9365 cc=0;
9366 }
9367 else
9368 {
9369 cc++;
9370 }
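      // Note: ccadj[i] ends up holding how many instructions have executed
      // since the last branch/SYSCALL/HLECALL boundary; the assembler later
      // charges CLOCK_DIVIDER*(ccadj[..]+1) cycles to HOST_CCREG in one go
      // (see the emit_addimm calls in Pass 8) instead of counting per instruction.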
9371
9372 flush_dirty_uppers(&current);
9373 if(!is_ds[i]) {
9374 regs[i].is32=current.is32;
9375 regs[i].dirty=current.dirty;
9376 regs[i].isconst=current.isconst;
9377 memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9378 }
9379 for(hr=0;hr<HOST_REGS;hr++) {
9380 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9381 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9382 regs[i].wasconst&=~(1<<hr);
9383 }
9384 }
9385 }
9386 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9387 }
9388
9389 /* Pass 4 - Cull unused host registers */
9390
9391 uint64_t nr=0;
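  // nr is a bitmask over host registers: bit hr is set while the value cached
  // in host register hr is still needed.  The block is walked backwards;
  // source registers set bits, overwritten/remapped registers clear them, and
  // the result is saved in needed_reg[i] so unused mappings can be dropped in
  // the "Deallocate unneeded registers" loop below.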
9392
9393 for (i=slen-1;i>=0;i--)
9394 {
9395 int hr;
9396 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9397 {
9398 if(ba[i]<start || ba[i]>=(start+slen*4))
9399 {
9400 // Branch out of this block, don't need anything
9401 nr=0;
9402 }
9403 else
9404 {
9405 // Internal branch
9406 // Need whatever matches the target
9407 nr=0;
9408 int t=(ba[i]-start)>>2;
9409 for(hr=0;hr<HOST_REGS;hr++)
9410 {
9411 if(regs[i].regmap_entry[hr]>=0) {
9412 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9413 }
9414 }
9415 }
9416 // Conditional branch may need registers for following instructions
9417 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9418 {
9419 if(i<slen-2) {
9420 nr|=needed_reg[i+2];
9421 for(hr=0;hr<HOST_REGS;hr++)
9422 {
9423 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9424 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9425 }
9426 }
9427 }
9428 // Don't need stuff which is overwritten
9429 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9430 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9431 // Merge in delay slot
9432 for(hr=0;hr<HOST_REGS;hr++)
9433 {
9434 if(!likely[i]) {
9435 // These are overwritten unless the branch is "likely"
9436 // and the delay slot is nullified if not taken
9437 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9438 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9439 }
9440 if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9441 if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9442 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9443 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9444 if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9445 if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9446 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9447 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9448 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9449 if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9450 if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9451 }
9452 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9453 if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9454 if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9455 }
b9b61529 9456 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
57871462 9457 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9458 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9459 }
9460 }
9461 }
7139f3c8 9462 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
57871462 9463 {
9464 // SYSCALL instruction (software interrupt)
9465 nr=0;
9466 }
9467 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9468 {
9469 // ERET instruction (return from interrupt)
9470 nr=0;
9471 }
9472 else // Non-branch
9473 {
9474 if(i<slen-1) {
9475 for(hr=0;hr<HOST_REGS;hr++) {
9476 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9477 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9478 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9479 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9480 }
9481 }
9482 }
9483 for(hr=0;hr<HOST_REGS;hr++)
9484 {
9485 // Overwritten registers are not needed
9486 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9487 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9488 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9489 // Source registers are needed
9490 if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9491 if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9492 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9493 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9494 if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9495 if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9496 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9497 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9498 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9499 if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9500 if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9501 }
9502 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9503 if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9504 if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9505 }
b9b61529 9506 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
57871462 9507 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9508 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9509 }
9510 // Don't store a register immediately after writing it,
9511 // may prevent dual-issue.
9512 // But do so if this is a branch target, otherwise we
9513 // might have to load the register before the branch.
9514 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9515 if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9516 (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9517 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9518 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9519 }
9520 if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9521 (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9522 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9523 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9524 }
9525 }
9526 }
9527 // Cycle count is needed at branches. Assume it is needed at the target too.
9528 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9529 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9530 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9531 }
9532 // Save it
9533 needed_reg[i]=nr;
9534
9535 // Deallocate unneeded registers
9536 for(hr=0;hr<HOST_REGS;hr++)
9537 {
9538 if(!((nr>>hr)&1)) {
9539 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9540 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9541 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9542 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9543 {
9544 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9545 {
9546 if(likely[i]) {
9547 regs[i].regmap[hr]=-1;
9548 regs[i].isconst&=~(1<<hr);
9549 if(i<slen-2) regmap_pre[i+2][hr]=-1;
9550 }
9551 }
9552 }
9553 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9554 {
9555 int d1=0,d2=0,map=0,temp=0;
9556 if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9557 {
9558 d1=dep1[i+1];
9559 d2=dep2[i+1];
9560 }
9561 if(using_tlb) {
9562 if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9563 itype[i+1]==STORE || itype[i+1]==STORELR ||
b9b61529 9564 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9565 map=TLREG;
9566 } else
b9b61529 9567 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9568 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 9569 map=INVCP;
9570 }
9571 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
b9b61529 9572 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9573 temp=FTEMP;
9574 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9575 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9576 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9577 (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9578 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9579 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9580 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9581 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9582 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9583 regs[i].regmap[hr]!=map )
9584 {
9585 regs[i].regmap[hr]=-1;
9586 regs[i].isconst&=~(1<<hr);
9587 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9588 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9589 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9590 (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9591 (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9592 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9593 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9594 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9595 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9596 branch_regs[i].regmap[hr]!=map)
9597 {
9598 branch_regs[i].regmap[hr]=-1;
9599 branch_regs[i].regmap_entry[hr]=-1;
9600 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9601 {
9602 if(!likely[i]&&i<slen-2) {
9603 regmap_pre[i+2][hr]=-1;
9604 }
9605 }
9606 }
9607 }
9608 }
9609 else
9610 {
9611 // Non-branch
9612 if(i>0)
9613 {
9614 int d1=0,d2=0,map=-1,temp=-1;
9615 if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9616 {
9617 d1=dep1[i];
9618 d2=dep2[i];
9619 }
9620 if(using_tlb) {
9621 if(itype[i]==LOAD || itype[i]==LOADLR ||
9622 itype[i]==STORE || itype[i]==STORELR ||
b9b61529 9623 itype[i]==C1LS || itype[i]==C2LS)
57871462 9624 map=TLREG;
b9b61529 9625 } else if(itype[i]==STORE || itype[i]==STORELR ||
9626 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 9627 map=INVCP;
9628 }
9629 if(itype[i]==LOADLR || itype[i]==STORELR ||
b9b61529 9630 itype[i]==C1LS || itype[i]==C2LS)
57871462 9631 temp=FTEMP;
9632 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9633 (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9634 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9635 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9636 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9637 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9638 {
9639 if(i<slen-1&&!is_ds[i]) {
9640 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9641 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9642 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9643 {
9644 printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9645 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9646 }
9647 regmap_pre[i+1][hr]=-1;
9648 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9649 }
9650 regs[i].regmap[hr]=-1;
9651 regs[i].isconst&=~(1<<hr);
9652 }
9653 }
9654 }
9655 }
9656 }
9657 }
9658
9659 /* Pass 5 - Pre-allocate registers */
9660
9661 // If a register is allocated during a loop, try to allocate it for the
9662 // entire loop, if possible. This avoids loading/storing registers
9663 // inside of the loop.
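  // Sketch of the scheme used below: f_regmap remembers which guest register
  // each host register should keep holding.  When a backwards branch to
  // target t is found, the instructions from t up to the branch are scanned;
  // if the same host register can carry the same guest register over that
  // whole range, regmap_entry/regmap/regmap_pre are filled in for k=t..j so
  // the value stays register-resident across the loop body.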
9664
9665 signed char f_regmap[HOST_REGS];
9666 clear_all_regs(f_regmap);
9667 for(i=0;i<slen-1;i++)
9668 {
9669 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9670 {
9671 if(ba[i]>=start && ba[i]<(start+i*4))
9672 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9673 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9674 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9675 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
b9b61529 9676 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9677 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
57871462 9678 {
9679 int t=(ba[i]-start)>>2;
9680 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9681 if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9682 for(hr=0;hr<HOST_REGS;hr++)
9683 {
9684 if(regs[i].regmap[hr]>64) {
9685 if(!((regs[i].dirty>>hr)&1))
9686 f_regmap[hr]=regs[i].regmap[hr];
9687 else f_regmap[hr]=-1;
9688 }
b372a952 9689 else if(regs[i].regmap[hr]>=0) {
9690 if(f_regmap[hr]!=regs[i].regmap[hr]) {
9691 // dealloc old register
9692 int n;
9693 for(n=0;n<HOST_REGS;n++)
9694 {
9695 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9696 }
9697 // and alloc new one
9698 f_regmap[hr]=regs[i].regmap[hr];
9699 }
9700 }
57871462 9701 if(branch_regs[i].regmap[hr]>64) {
9702 if(!((branch_regs[i].dirty>>hr)&1))
9703 f_regmap[hr]=branch_regs[i].regmap[hr];
9704 else f_regmap[hr]=-1;
9705 }
b372a952 9706 else if(branch_regs[i].regmap[hr]>=0) {
9707 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9708 // dealloc old register
9709 int n;
9710 for(n=0;n<HOST_REGS;n++)
9711 {
9712 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9713 }
9714 // and alloc new one
9715 f_regmap[hr]=branch_regs[i].regmap[hr];
9716 }
9717 }
57871462 9718 if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9719 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
b9b61529 9720 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9721 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
57871462 9722 {
 9723                   // Test both in case the delay slot is out of order (ooo),
9724 // could be done better...
9725 if(count_free_regs(branch_regs[i].regmap)<2
9726 ||count_free_regs(regs[i].regmap)<2)
9727 f_regmap[hr]=branch_regs[i].regmap[hr];
9728 }
9729 // Avoid dirty->clean transition
9730 // #ifdef DESTRUCTIVE_WRITEBACK here?
9731 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9732 if(f_regmap[hr]>0) {
9733 if(regs[t].regmap_entry[hr]<0) {
9734 int r=f_regmap[hr];
9735 for(j=t;j<=i;j++)
9736 {
9737 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9738 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9739 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9740 if(r>63) {
9741 // NB This can exclude the case where the upper-half
9742 // register is lower numbered than the lower-half
9743 // register. Not sure if it's worth fixing...
9744 if(get_reg(regs[j].regmap,r&63)<0) break;
9745 if(regs[j].is32&(1LL<<(r&63))) break;
9746 }
9747 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9748 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9749 int k;
9750 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9751 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9752 if(r>63) {
9753 if(get_reg(regs[i].regmap,r&63)<0) break;
9754 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9755 }
9756 k=i;
9757 while(k>1&&regs[k-1].regmap[hr]==-1) {
9758 if(itype[k-1]==STORE||itype[k-1]==STORELR
9759 ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
b9b61529 9760 ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9761 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
57871462 9762 if(count_free_regs(regs[k-1].regmap)<2) {
9763 //printf("no free regs for store %x\n",start+(k-1)*4);
9764 break;
9765 }
9766 }
9767 else
9768 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9769 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9770 //printf("no-match due to different register\n");
9771 break;
9772 }
9773 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9774 //printf("no-match due to branch\n");
9775 break;
9776 }
9777 // call/ret fast path assumes no registers allocated
9778 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9779 break;
9780 }
9781 if(r>63) {
9782 // NB This can exclude the case where the upper-half
9783 // register is lower numbered than the lower-half
9784 // register. Not sure if it's worth fixing...
9785 if(get_reg(regs[k-1].regmap,r&63)<0) break;
9786 if(regs[k-1].is32&(1LL<<(r&63))) break;
9787 }
9788 k--;
9789 }
9790 if(i<slen-1) {
9791 if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9792 (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9793 //printf("bad match after branch\n");
9794 break;
9795 }
9796 }
9797 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9798 //printf("Extend r%d, %x ->\n",hr,start+k*4);
9799 while(k<i) {
9800 regs[k].regmap_entry[hr]=f_regmap[hr];
9801 regs[k].regmap[hr]=f_regmap[hr];
9802 regmap_pre[k+1][hr]=f_regmap[hr];
9803 regs[k].wasdirty&=~(1<<hr);
9804 regs[k].dirty&=~(1<<hr);
9805 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9806 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9807 regs[k].wasconst&=~(1<<hr);
9808 regs[k].isconst&=~(1<<hr);
9809 k++;
9810 }
9811 }
9812 else {
9813 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9814 break;
9815 }
9816 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9817 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9818 //printf("OK fill %x (r%d)\n",start+i*4,hr);
9819 regs[i].regmap_entry[hr]=f_regmap[hr];
9820 regs[i].regmap[hr]=f_regmap[hr];
9821 regs[i].wasdirty&=~(1<<hr);
9822 regs[i].dirty&=~(1<<hr);
9823 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9824 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9825 regs[i].wasconst&=~(1<<hr);
9826 regs[i].isconst&=~(1<<hr);
9827 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9828 branch_regs[i].wasdirty&=~(1<<hr);
9829 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9830 branch_regs[i].regmap[hr]=f_regmap[hr];
9831 branch_regs[i].dirty&=~(1<<hr);
9832 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9833 branch_regs[i].wasconst&=~(1<<hr);
9834 branch_regs[i].isconst&=~(1<<hr);
9835 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9836 regmap_pre[i+2][hr]=f_regmap[hr];
9837 regs[i+2].wasdirty&=~(1<<hr);
9838 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9839 assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9840 (regs[i+2].was32&(1LL<<f_regmap[hr])));
9841 }
9842 }
9843 }
9844 for(k=t;k<j;k++) {
9845 regs[k].regmap_entry[hr]=f_regmap[hr];
9846 regs[k].regmap[hr]=f_regmap[hr];
9847 regmap_pre[k+1][hr]=f_regmap[hr];
9848 regs[k+1].wasdirty&=~(1<<hr);
9849 regs[k].dirty&=~(1<<hr);
9850 regs[k].wasconst&=~(1<<hr);
9851 regs[k].isconst&=~(1<<hr);
9852 }
9853 if(regs[j].regmap[hr]==f_regmap[hr])
9854 regs[j].regmap_entry[hr]=f_regmap[hr];
9855 break;
9856 }
9857 if(j==i) break;
9858 if(regs[j].regmap[hr]>=0)
9859 break;
9860 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9861 //printf("no-match due to different register\n");
9862 break;
9863 }
9864 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9865 //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9866 break;
9867 }
9868 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9869 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
b9b61529 9870 ||itype[j]==FCOMP||itype[j]==FCONV
9871 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
57871462 9872 if(count_free_regs(regs[j].regmap)<2) {
9873 //printf("No free regs for store %x\n",start+j*4);
9874 break;
9875 }
9876 }
9877 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9878 if(f_regmap[hr]>=64) {
9879 if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9880 break;
9881 }
9882 else
9883 {
9884 if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9885 break;
9886 }
9887 }
9888 }
9889 }
9890 }
9891 }
9892 }
9893 }
9894 }else{
9895 int count=0;
9896 for(hr=0;hr<HOST_REGS;hr++)
9897 {
9898 if(hr!=EXCLUDE_REG) {
9899 if(regs[i].regmap[hr]>64) {
9900 if(!((regs[i].dirty>>hr)&1))
9901 f_regmap[hr]=regs[i].regmap[hr];
9902 }
b372a952 9903 else if(regs[i].regmap[hr]>=0) {
9904 if(f_regmap[hr]!=regs[i].regmap[hr]) {
9905 // dealloc old register
9906 int n;
9907 for(n=0;n<HOST_REGS;n++)
9908 {
9909 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9910 }
9911 // and alloc new one
9912 f_regmap[hr]=regs[i].regmap[hr];
9913 }
9914 }
57871462 9915 else if(regs[i].regmap[hr]<0) count++;
9916 }
9917 }
9918 // Try to restore cycle count at branch targets
9919 if(bt[i]) {
9920 for(j=i;j<slen-1;j++) {
9921 if(regs[j].regmap[HOST_CCREG]!=-1) break;
9922 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9923 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
b9b61529 9924 ||itype[j]==FCOMP||itype[j]==FCONV
9925 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
57871462 9926 if(count_free_regs(regs[j].regmap)<2) {
9927 //printf("no free regs for store %x\n",start+j*4);
9928 break;
9929 }
9930 }
9931 else
9932 if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9933 }
9934 if(regs[j].regmap[HOST_CCREG]==CCREG) {
9935 int k=i;
9936 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9937 while(k<j) {
9938 regs[k].regmap_entry[HOST_CCREG]=CCREG;
9939 regs[k].regmap[HOST_CCREG]=CCREG;
9940 regmap_pre[k+1][HOST_CCREG]=CCREG;
9941 regs[k+1].wasdirty|=1<<HOST_CCREG;
9942 regs[k].dirty|=1<<HOST_CCREG;
9943 regs[k].wasconst&=~(1<<HOST_CCREG);
9944 regs[k].isconst&=~(1<<HOST_CCREG);
9945 k++;
9946 }
9947 regs[j].regmap_entry[HOST_CCREG]=CCREG;
9948 }
9949 // Work backwards from the branch target
9950 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9951 {
9952 //printf("Extend backwards\n");
9953 int k;
9954 k=i;
9955 while(regs[k-1].regmap[HOST_CCREG]==-1) {
9956 if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
9957 ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
b9b61529 9958 ||itype[k-1]==FCONV||itype[k-1]==FCOMP
9959 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
57871462 9960 if(count_free_regs(regs[k-1].regmap)<2) {
9961 //printf("no free regs for store %x\n",start+(k-1)*4);
9962 break;
9963 }
9964 }
9965 else
9966 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9967 k--;
9968 }
9969 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9970 //printf("Extend CC, %x ->\n",start+k*4);
9971 while(k<=i) {
9972 regs[k].regmap_entry[HOST_CCREG]=CCREG;
9973 regs[k].regmap[HOST_CCREG]=CCREG;
9974 regmap_pre[k+1][HOST_CCREG]=CCREG;
9975 regs[k+1].wasdirty|=1<<HOST_CCREG;
9976 regs[k].dirty|=1<<HOST_CCREG;
9977 regs[k].wasconst&=~(1<<HOST_CCREG);
9978 regs[k].isconst&=~(1<<HOST_CCREG);
9979 k++;
9980 }
9981 }
9982 else {
9983 //printf("Fail Extend CC, %x ->\n",start+k*4);
9984 }
9985 }
9986 }
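      // The two loops above keep the cycle count in HOST_CCREG across a
      // branch target: CCREG's residency is first extended forward to the
      // next instruction that already maps it, then backwards over simple
      // instructions, so the counter does not have to be spilled and
      // reloaded around the target.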
9987 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9988 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9989 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
b9b61529 9990 itype[i]!=FCONV&&itype[i]!=FCOMP&&
9991 itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
57871462 9992 {
9993 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9994 }
9995 }
9996 }
9997
9998 // This allocates registers (if possible) one instruction prior
9999 // to use, which can avoid a load-use penalty on certain CPUs.
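  // Concretely: if the next instruction's source (rs1[i+1]/rs2[i+1]) is
  // already assigned a host register in regs[i+1] and that register is free
  // in regs[i], the mapping is copied back one slot (and into
  // regmap_pre[i+1]/regmap_entry[i+1]) so the load is issued an instruction
  // earlier; the same is done for the address/temporary registers
  // (TLREG, AGEN1, FTEMP) used by loads and stores.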
10000 for(i=0;i<slen-1;i++)
10001 {
10002 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10003 {
10004 if(!bt[i+1])
10005 {
b9b61529 10006 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10007 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
57871462 10008 {
10009 if(rs1[i+1]) {
10010 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10011 {
10012 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10013 {
10014 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10015 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10016 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10017 regs[i].isconst&=~(1<<hr);
10018 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10019 constmap[i][hr]=constmap[i+1][hr];
10020 regs[i+1].wasdirty&=~(1<<hr);
10021 regs[i].dirty&=~(1<<hr);
10022 }
10023 }
10024 }
10025 if(rs2[i+1]) {
10026 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10027 {
10028 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10029 {
10030 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10031 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10032 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10033 regs[i].isconst&=~(1<<hr);
10034 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10035 constmap[i][hr]=constmap[i+1][hr];
10036 regs[i+1].wasdirty&=~(1<<hr);
10037 regs[i].dirty&=~(1<<hr);
10038 }
10039 }
10040 }
10041 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10042 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10043 {
10044 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10045 {
10046 regs[i].regmap[hr]=rs1[i+1];
10047 regmap_pre[i+1][hr]=rs1[i+1];
10048 regs[i+1].regmap_entry[hr]=rs1[i+1];
10049 regs[i].isconst&=~(1<<hr);
10050 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10051 constmap[i][hr]=constmap[i+1][hr];
10052 regs[i+1].wasdirty&=~(1<<hr);
10053 regs[i].dirty&=~(1<<hr);
10054 }
10055 }
10056 }
10057 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10058 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10059 {
10060 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10061 {
10062 regs[i].regmap[hr]=rs1[i+1];
10063 regmap_pre[i+1][hr]=rs1[i+1];
10064 regs[i+1].regmap_entry[hr]=rs1[i+1];
10065 regs[i].isconst&=~(1<<hr);
10066 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10067 constmap[i][hr]=constmap[i+1][hr];
10068 regs[i+1].wasdirty&=~(1<<hr);
10069 regs[i].dirty&=~(1<<hr);
10070 }
10071 }
10072 }
10073 #ifndef HOST_IMM_ADDR32
b9b61529 10074 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 10075 hr=get_reg(regs[i+1].regmap,TLREG);
10076 if(hr>=0) {
10077 int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10078 if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10079 int nr;
10080 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10081 {
10082 regs[i].regmap[hr]=MGEN1+((i+1)&1);
10083 regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10084 regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10085 regs[i].isconst&=~(1<<hr);
10086 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10087 constmap[i][hr]=constmap[i+1][hr];
10088 regs[i+1].wasdirty&=~(1<<hr);
10089 regs[i].dirty&=~(1<<hr);
10090 }
10091 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10092 {
10093 // move it to another register
10094 regs[i+1].regmap[hr]=-1;
10095 regmap_pre[i+2][hr]=-1;
10096 regs[i+1].regmap[nr]=TLREG;
10097 regmap_pre[i+2][nr]=TLREG;
10098 regs[i].regmap[nr]=MGEN1+((i+1)&1);
10099 regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10100 regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10101 regs[i].isconst&=~(1<<nr);
10102 regs[i+1].isconst&=~(1<<nr);
10103 regs[i].dirty&=~(1<<nr);
10104 regs[i+1].wasdirty&=~(1<<nr);
10105 regs[i+1].dirty&=~(1<<nr);
10106 regs[i+2].wasdirty&=~(1<<nr);
10107 }
10108 }
10109 }
10110 }
10111 #endif
b9b61529 10112 if(itype[i+1]==STORE||itype[i+1]==STORELR
10113 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
57871462 10114 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10115 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10116 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10117 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10118 assert(hr>=0);
10119 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10120 {
10121 regs[i].regmap[hr]=rs1[i+1];
10122 regmap_pre[i+1][hr]=rs1[i+1];
10123 regs[i+1].regmap_entry[hr]=rs1[i+1];
10124 regs[i].isconst&=~(1<<hr);
10125 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10126 constmap[i][hr]=constmap[i+1][hr];
10127 regs[i+1].wasdirty&=~(1<<hr);
10128 regs[i].dirty&=~(1<<hr);
10129 }
10130 }
10131 }
b9b61529 10132 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
57871462 10133 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10134 int nr;
10135 hr=get_reg(regs[i+1].regmap,FTEMP);
10136 assert(hr>=0);
10137 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10138 {
10139 regs[i].regmap[hr]=rs1[i+1];
10140 regmap_pre[i+1][hr]=rs1[i+1];
10141 regs[i+1].regmap_entry[hr]=rs1[i+1];
10142 regs[i].isconst&=~(1<<hr);
10143 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10144 constmap[i][hr]=constmap[i+1][hr];
10145 regs[i+1].wasdirty&=~(1<<hr);
10146 regs[i].dirty&=~(1<<hr);
10147 }
10148 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10149 {
10150 // move it to another register
10151 regs[i+1].regmap[hr]=-1;
10152 regmap_pre[i+2][hr]=-1;
10153 regs[i+1].regmap[nr]=FTEMP;
10154 regmap_pre[i+2][nr]=FTEMP;
10155 regs[i].regmap[nr]=rs1[i+1];
10156 regmap_pre[i+1][nr]=rs1[i+1];
10157 regs[i+1].regmap_entry[nr]=rs1[i+1];
10158 regs[i].isconst&=~(1<<nr);
10159 regs[i+1].isconst&=~(1<<nr);
10160 regs[i].dirty&=~(1<<nr);
10161 regs[i+1].wasdirty&=~(1<<nr);
10162 regs[i+1].dirty&=~(1<<nr);
10163 regs[i+2].wasdirty&=~(1<<nr);
10164 }
10165 }
10166 }
b9b61529 10167         if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
57871462 10168 if(itype[i+1]==LOAD)
10169 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
b9b61529 10170 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
57871462 10171 hr=get_reg(regs[i+1].regmap,FTEMP);
b9b61529 10172 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
57871462 10173 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10174 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10175 }
10176 if(hr>=0&&regs[i].regmap[hr]<0) {
10177 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10178 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10179 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10180 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10181 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10182 regs[i].isconst&=~(1<<hr);
10183 regs[i+1].wasdirty&=~(1<<hr);
10184 regs[i].dirty&=~(1<<hr);
10185 }
10186 }
10187 }
10188 }
10189 }
10190 }
10191 }
10192
10193 /* Pass 6 - Optimize clean/dirty state */
10194 clean_registers(0,slen-1,1);
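  // clean_registers (not shown in this excerpt) propagates the clean/dirty
  // state: a "dirty" host register still has to be written back to the guest
  // register file, a "clean" one already matches it, which is what the
  // wasdirty/dirty bitmaps above and the wb_* calls in Pass 8 rely on.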
10195
10196 /* Pass 7 - Identify 32-bit registers */
10197
10198 provisional_r32();
10199
10200 u_int r32=0;
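  // r32 collects guest registers that the generated code assumes to be
  // 32-bit (sign-extended) values.  Like Pass 4 it is propagated backwards
  // from uses, recorded per instruction in requires_32bit[i], and later used
  // to restrict externally callable entry points (see the ll_add_32 calls in
  // the jump_in handling below).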
10201
10202 for (i=slen-1;i>=0;i--)
10203 {
10204 int hr;
10205 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10206 {
10207 if(ba[i]<start || ba[i]>=(start+slen*4))
10208 {
10209 // Branch out of this block, don't need anything
10210 r32=0;
10211 }
10212 else
10213 {
10214 // Internal branch
10215 // Need whatever matches the target
10216 // (and doesn't get overwritten by the delay slot instruction)
10217 r32=0;
10218 int t=(ba[i]-start)>>2;
10219 if(ba[i]>start+i*4) {
10220 // Forward branch
10221 if(!(requires_32bit[t]&~regs[i].was32))
10222 r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10223 }else{
10224 // Backward branch
10225 //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10226 // r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10227 if(!(pr32[t]&~regs[i].was32))
10228 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10229 }
10230 }
10231 // Conditional branch may need registers for following instructions
10232 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10233 {
10234 if(i<slen-2) {
10235 r32|=requires_32bit[i+2];
10236 r32&=regs[i].was32;
10237 // Mark this address as a branch target since it may be called
10238 // upon return from interrupt
10239 bt[i+2]=1;
10240 }
10241 }
10242 // Merge in delay slot
10243 if(!likely[i]) {
10244 // These are overwritten unless the branch is "likely"
10245 // and the delay slot is nullified if not taken
10246 r32&=~(1LL<<rt1[i+1]);
10247 r32&=~(1LL<<rt2[i+1]);
10248 }
10249 // Assume these are needed (delay slot)
10250 if(us1[i+1]>0)
10251 {
10252 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10253 }
10254 if(us2[i+1]>0)
10255 {
10256 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10257 }
10258 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10259 {
10260 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10261 }
10262 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10263 {
10264 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10265 }
10266 }
7139f3c8 10267 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
57871462 10268 {
10269 // SYSCALL instruction (software interrupt)
10270 r32=0;
10271 }
10272 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10273 {
10274 // ERET instruction (return from interrupt)
10275 r32=0;
10276 }
10277 // Check 32 bits
10278 r32&=~(1LL<<rt1[i]);
10279 r32&=~(1LL<<rt2[i]);
10280 if(us1[i]>0)
10281 {
10282 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10283 }
10284 if(us2[i]>0)
10285 {
10286 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10287 }
10288 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10289 {
10290 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10291 }
10292 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10293 {
10294 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10295 }
10296 requires_32bit[i]=r32;
10297
10298 // Dirty registers which are 32-bit, require 32-bit input
10299 // as they will be written as 32-bit values
10300 for(hr=0;hr<HOST_REGS;hr++)
10301 {
10302 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10303 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10304 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10305 requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10306 }
10307 }
10308 }
10309 //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10310 }
10311
10312 if(itype[slen-1]==SPAN) {
10313 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10314 }
10315
10316 /* Debug/disassembly */
10317 if((void*)assem_debug==(void*)printf)
10318 for(i=0;i<slen;i++)
10319 {
10320 printf("U:");
10321 int r;
10322 for(r=1;r<=CCREG;r++) {
10323 if((unneeded_reg[i]>>r)&1) {
10324 if(r==HIREG) printf(" HI");
10325 else if(r==LOREG) printf(" LO");
10326 else printf(" r%d",r);
10327 }
10328 }
90ae6d4e 10329#ifndef FORCE32
57871462 10330 printf(" UU:");
10331 for(r=1;r<=CCREG;r++) {
10332 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10333 if(r==HIREG) printf(" HI");
10334 else if(r==LOREG) printf(" LO");
10335 else printf(" r%d",r);
10336 }
10337 }
10338 printf(" 32:");
10339 for(r=0;r<=CCREG;r++) {
10340 //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10341 if((regs[i].was32>>r)&1) {
10342 if(r==CCREG) printf(" CC");
10343 else if(r==HIREG) printf(" HI");
10344 else if(r==LOREG) printf(" LO");
10345 else printf(" r%d",r);
10346 }
10347 }
90ae6d4e 10348#endif
57871462 10349 printf("\n");
10350 #if defined(__i386__) || defined(__x86_64__)
10351 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10352 #endif
10353 #ifdef __arm__
10354 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10355 #endif
10356 printf("needs: ");
10357 if(needed_reg[i]&1) printf("eax ");
10358 if((needed_reg[i]>>1)&1) printf("ecx ");
10359 if((needed_reg[i]>>2)&1) printf("edx ");
10360 if((needed_reg[i]>>3)&1) printf("ebx ");
10361 if((needed_reg[i]>>5)&1) printf("ebp ");
10362 if((needed_reg[i]>>6)&1) printf("esi ");
10363 if((needed_reg[i]>>7)&1) printf("edi ");
10364 printf("r:");
10365 for(r=0;r<=CCREG;r++) {
10366 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10367 if((requires_32bit[i]>>r)&1) {
10368 if(r==CCREG) printf(" CC");
10369 else if(r==HIREG) printf(" HI");
10370 else if(r==LOREG) printf(" LO");
10371 else printf(" r%d",r);
10372 }
10373 }
10374 printf("\n");
10375 /*printf("pr:");
10376 for(r=0;r<=CCREG;r++) {
10377 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10378 if((pr32[i]>>r)&1) {
10379 if(r==CCREG) printf(" CC");
10380 else if(r==HIREG) printf(" HI");
10381 else if(r==LOREG) printf(" LO");
10382 else printf(" r%d",r);
10383 }
10384 }
10385 if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10386 printf("\n");*/
10387 #if defined(__i386__) || defined(__x86_64__)
10388 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10389 printf("dirty: ");
10390 if(regs[i].wasdirty&1) printf("eax ");
10391 if((regs[i].wasdirty>>1)&1) printf("ecx ");
10392 if((regs[i].wasdirty>>2)&1) printf("edx ");
10393 if((regs[i].wasdirty>>3)&1) printf("ebx ");
10394 if((regs[i].wasdirty>>5)&1) printf("ebp ");
10395 if((regs[i].wasdirty>>6)&1) printf("esi ");
10396 if((regs[i].wasdirty>>7)&1) printf("edi ");
10397 #endif
10398 #ifdef __arm__
10399 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10400 printf("dirty: ");
10401 if(regs[i].wasdirty&1) printf("r0 ");
10402 if((regs[i].wasdirty>>1)&1) printf("r1 ");
10403 if((regs[i].wasdirty>>2)&1) printf("r2 ");
10404 if((regs[i].wasdirty>>3)&1) printf("r3 ");
10405 if((regs[i].wasdirty>>4)&1) printf("r4 ");
10406 if((regs[i].wasdirty>>5)&1) printf("r5 ");
10407 if((regs[i].wasdirty>>6)&1) printf("r6 ");
10408 if((regs[i].wasdirty>>7)&1) printf("r7 ");
10409 if((regs[i].wasdirty>>8)&1) printf("r8 ");
10410 if((regs[i].wasdirty>>9)&1) printf("r9 ");
10411 if((regs[i].wasdirty>>10)&1) printf("r10 ");
10412 if((regs[i].wasdirty>>12)&1) printf("r12 ");
10413 #endif
10414 printf("\n");
10415 disassemble_inst(i);
10416 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10417 #if defined(__i386__) || defined(__x86_64__)
10418 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10419 if(regs[i].dirty&1) printf("eax ");
10420 if((regs[i].dirty>>1)&1) printf("ecx ");
10421 if((regs[i].dirty>>2)&1) printf("edx ");
10422 if((regs[i].dirty>>3)&1) printf("ebx ");
10423 if((regs[i].dirty>>5)&1) printf("ebp ");
10424 if((regs[i].dirty>>6)&1) printf("esi ");
10425 if((regs[i].dirty>>7)&1) printf("edi ");
10426 #endif
10427 #ifdef __arm__
10428 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10429 if(regs[i].dirty&1) printf("r0 ");
10430 if((regs[i].dirty>>1)&1) printf("r1 ");
10431 if((regs[i].dirty>>2)&1) printf("r2 ");
10432 if((regs[i].dirty>>3)&1) printf("r3 ");
10433 if((regs[i].dirty>>4)&1) printf("r4 ");
10434 if((regs[i].dirty>>5)&1) printf("r5 ");
10435 if((regs[i].dirty>>6)&1) printf("r6 ");
10436 if((regs[i].dirty>>7)&1) printf("r7 ");
10437 if((regs[i].dirty>>8)&1) printf("r8 ");
10438 if((regs[i].dirty>>9)&1) printf("r9 ");
10439 if((regs[i].dirty>>10)&1) printf("r10 ");
10440 if((regs[i].dirty>>12)&1) printf("r12 ");
10441 #endif
10442 printf("\n");
10443 if(regs[i].isconst) {
10444 printf("constants: ");
10445 #if defined(__i386__) || defined(__x86_64__)
10446 if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10447 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10448 if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10449 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10450 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10451 if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10452 if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10453 #endif
10454 #ifdef __arm__
10455 if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10456 if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10457 if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10458 if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10459 if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10460 if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10461 if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10462 if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10463 if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10464 if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10465 if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10466 if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10467 #endif
10468 printf("\n");
10469 }
90ae6d4e 10470#ifndef FORCE32
57871462 10471 printf(" 32:");
10472 for(r=0;r<=CCREG;r++) {
10473 if((regs[i].is32>>r)&1) {
10474 if(r==CCREG) printf(" CC");
10475 else if(r==HIREG) printf(" HI");
10476 else if(r==LOREG) printf(" LO");
10477 else printf(" r%d",r);
10478 }
10479 }
10480 printf("\n");
90ae6d4e 10481#endif
57871462 10482 /*printf(" p32:");
10483 for(r=0;r<=CCREG;r++) {
10484 if((p32[i]>>r)&1) {
10485 if(r==CCREG) printf(" CC");
10486 else if(r==HIREG) printf(" HI");
10487 else if(r==LOREG) printf(" LO");
10488 else printf(" r%d",r);
10489 }
10490 }
10491 if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10492 else printf("\n");*/
10493 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10494 #if defined(__i386__) || defined(__x86_64__)
10495 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10496 if(branch_regs[i].dirty&1) printf("eax ");
10497 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10498 if((branch_regs[i].dirty>>2)&1) printf("edx ");
10499 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10500 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10501 if((branch_regs[i].dirty>>6)&1) printf("esi ");
10502 if((branch_regs[i].dirty>>7)&1) printf("edi ");
10503 #endif
10504 #ifdef __arm__
10505 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10506 if(branch_regs[i].dirty&1) printf("r0 ");
10507 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10508 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10509 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10510 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10511 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10512 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10513 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10514 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10515 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10516 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10517 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10518 #endif
90ae6d4e 10519#ifndef FORCE32
57871462 10520 printf(" 32:");
10521 for(r=0;r<=CCREG;r++) {
10522 if((branch_regs[i].is32>>r)&1) {
10523 if(r==CCREG) printf(" CC");
10524 else if(r==HIREG) printf(" HI");
10525 else if(r==LOREG) printf(" LO");
10526 else printf(" r%d",r);
10527 }
10528 }
10529 printf("\n");
90ae6d4e 10530#endif
57871462 10531 }
10532 }
10533
10534 /* Pass 8 - Assembly */
10535 linkcount=0;stubcount=0;
10536 ds=0;is_delayslot=0;
10537 cop1_usable=0;
10538 uint64_t is32_pre=0;
10539 u_int dirty_pre=0;
10540 u_int beginning=(u_int)out;
10541 if((u_int)addr&1) {
10542 ds=1;
10543 pagespan_ds();
10544 }
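  // Per-instruction flow for this pass: write back or invalidate host
  // registers whose mapping changes here (the wb_* helpers), record the
  // native entry point in instr_addr[i], load the registers the instruction
  // needs, then emit code through the itype switch below.  Delay slots are
  // emitted by the branch assemblers, so ds makes the loop skip them.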
9ad4d757 10545 u_int instr_addr0_override=0;
10546
10547#ifdef PCSX
10548 if (start == 0x80030000) {
10549 // nasty hack for fastbios thing
10550 instr_addr0_override=(u_int)out;
10551 emit_movimm(start,0);
10552 emit_readword((int)&pcaddr,1);
10553 emit_writeword(0,(int)&pcaddr);
10554 emit_cmp(0,1);
10555 emit_jne((int)new_dyna_leave);
10556 }
10557#endif
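  // The override above emits a small prologue for the block at 0x80030000:
  // it compares the saved pcaddr with the block start and bails out through
  // new_dyna_leave when they differ, presumably so the caller can intercept
  // execution at this address for the fast-boot (BIOS skip) path.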
57871462 10558 for(i=0;i<slen;i++)
10559 {
10560 //if(ds) printf("ds: ");
10561 if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10562 if(ds) {
10563 ds=0; // Skip delay slot
10564 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10565 instr_addr[i]=0;
10566 } else {
10567 #ifndef DESTRUCTIVE_WRITEBACK
10568 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10569 {
10570 wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10571 unneeded_reg[i],unneeded_reg_upper[i]);
10572 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10573 unneeded_reg[i],unneeded_reg_upper[i]);
10574 }
10575 is32_pre=regs[i].is32;
10576 dirty_pre=regs[i].dirty;
10577 #endif
10578 // write back
10579 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10580 {
10581 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10582 unneeded_reg[i],unneeded_reg_upper[i]);
10583 loop_preload(regmap_pre[i],regs[i].regmap_entry);
10584 }
10585 // branch target entry point
10586 instr_addr[i]=(u_int)out;
10587 assem_debug("<->\n");
10588 // load regs
10589 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10590 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10591 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10592 address_generation(i,&regs[i],regs[i].regmap_entry);
10593 load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10594 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10595 {
10596 // Load the delay slot registers if necessary
10597 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10598 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10599 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10600 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
b9b61529 10601 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
57871462 10602 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10603 }
10604 else if(i+1<slen)
10605 {
10606 // Preload registers for following instruction
10607 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10608 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10609 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10610 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10611 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10612 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10613 }
10614 // TODO: if(is_ooo(i)) address_generation(i+1);
10615 if(itype[i]==CJUMP||itype[i]==FJUMP)
10616 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
b9b61529 10617 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
57871462 10618 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10619 if(bt[i]) cop1_usable=0;
10620 // assemble
10621 switch(itype[i]) {
10622 case ALU:
10623 alu_assemble(i,&regs[i]);break;
10624 case IMM16:
10625 imm16_assemble(i,&regs[i]);break;
10626 case SHIFT:
10627 shift_assemble(i,&regs[i]);break;
10628 case SHIFTIMM:
10629 shiftimm_assemble(i,&regs[i]);break;
10630 case LOAD:
10631 load_assemble(i,&regs[i]);break;
10632 case LOADLR:
10633 loadlr_assemble(i,&regs[i]);break;
10634 case STORE:
10635 store_assemble(i,&regs[i]);break;
10636 case STORELR:
10637 storelr_assemble(i,&regs[i]);break;
10638 case COP0:
10639 cop0_assemble(i,&regs[i]);break;
10640 case COP1:
10641 cop1_assemble(i,&regs[i]);break;
10642 case C1LS:
10643 c1ls_assemble(i,&regs[i]);break;
b9b61529 10644 case COP2:
10645 cop2_assemble(i,&regs[i]);break;
10646 case C2LS:
10647 c2ls_assemble(i,&regs[i]);break;
10648 case C2OP:
10649 c2op_assemble(i,&regs[i]);break;
57871462 10650 case FCONV:
10651 fconv_assemble(i,&regs[i]);break;
10652 case FLOAT:
10653 float_assemble(i,&regs[i]);break;
10654 case FCOMP:
10655 fcomp_assemble(i,&regs[i]);break;
10656 case MULTDIV:
10657 multdiv_assemble(i,&regs[i]);break;
10658 case MOV:
10659 mov_assemble(i,&regs[i]);break;
10660 case SYSCALL:
10661 syscall_assemble(i,&regs[i]);break;
7139f3c8 10662 case HLECALL:
10663 hlecall_assemble(i,&regs[i]);break;
57871462 10664 case UJUMP:
10665 ujump_assemble(i,&regs[i]);ds=1;break;
10666 case RJUMP:
10667 rjump_assemble(i,&regs[i]);ds=1;break;
10668 case CJUMP:
10669 cjump_assemble(i,&regs[i]);ds=1;break;
10670 case SJUMP:
10671 sjump_assemble(i,&regs[i]);ds=1;break;
10672 case FJUMP:
10673 fjump_assemble(i,&regs[i]);ds=1;break;
10674 case SPAN:
10675 pagespan_assemble(i,&regs[i]);break;
10676 }
10677 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10678 literal_pool(1024);
10679 else
10680 literal_pool_jumpover(256);
10681 }
10682 }
10683 //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10684 // If the block did not end with an unconditional branch,
10685 // add a jump to the next instruction.
10686 if(i>1) {
10687 if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10688 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10689 assert(i==slen);
10690 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10691 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10692 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10693 emit_loadreg(CCREG,HOST_CCREG);
10694 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10695 }
10696 else if(!likely[i-2])
10697 {
10698 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10699 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10700 }
10701 else
10702 {
10703 store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10704 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10705 }
10706 add_to_linker((int)out,start+i*4,0);
10707 emit_jmp(0);
10708 }
10709 }
10710 else
10711 {
10712 assert(i>0);
10713 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10714 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10715 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10716 emit_loadreg(CCREG,HOST_CCREG);
10717 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10718 add_to_linker((int)out,start+i*4,0);
10719 emit_jmp(0);
10720 }
10721
10722 // TODO: delay slot stubs?
10723 // Stubs
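  // stubs[n][0] holds the stub type recorded while the main code was
  // assembled; the switch below hands each entry to the matching do_*stub()
  // helper, which emits the corresponding out-of-line slow path after the
  // block body.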
10724 for(i=0;i<stubcount;i++)
10725 {
10726 switch(stubs[i][0])
10727 {
10728 case LOADB_STUB:
10729 case LOADH_STUB:
10730 case LOADW_STUB:
10731 case LOADD_STUB:
10732 case LOADBU_STUB:
10733 case LOADHU_STUB:
10734 do_readstub(i);break;
10735 case STOREB_STUB:
10736 case STOREH_STUB:
10737 case STOREW_STUB:
10738 case STORED_STUB:
10739 do_writestub(i);break;
10740 case CC_STUB:
10741 do_ccstub(i);break;
10742 case INVCODE_STUB:
10743 do_invstub(i);break;
10744 case FP_STUB:
10745 do_cop1stub(i);break;
10746 case STORELR_STUB:
10747 do_unalignedwritestub(i);break;
10748 }
10749 }
10750
9ad4d757 10751 if (instr_addr0_override)
10752 instr_addr[0] = instr_addr0_override;
10753
57871462 10754 /* Pass 9 - Linker */
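  // Each link_addr[] entry records (branch location, target vaddr, internal
  // flag).  External targets get an emit_extjump stub and, when check_addr
  // finds the target already compiled, the branch is pointed straight at it;
  // internal targets are simply patched to instr_addr[target].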
10755 for(i=0;i<linkcount;i++)
10756 {
10757 assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10758 literal_pool(64);
10759 if(!link_addr[i][2])
10760 {
10761 void *stub=out;
10762 void *addr=check_addr(link_addr[i][1]);
10763 emit_extjump(link_addr[i][0],link_addr[i][1]);
10764 if(addr) {
10765 set_jump_target(link_addr[i][0],(int)addr);
10766 add_link(link_addr[i][1],stub);
10767 }
10768 else set_jump_target(link_addr[i][0],(int)stub);
10769 }
10770 else
10771 {
10772 // Internal branch
10773 int target=(link_addr[i][1]-start)>>2;
10774 assert(target>=0&&target<slen);
10775 assert(instr_addr[target]);
10776 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10777 //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10778 //#else
10779 set_jump_target(link_addr[i][0],instr_addr[target]);
10780 //#endif
10781 }
10782 }
10783 // External Branch Targets (jump_in)
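  // Every branch target (bt[i]) and the block start get an externally
  // callable entry: do_dirty_stub emits a revalidation stub which is
  // registered in jump_in, and a jump_dirty entry is kept so a stale block
  // can be re-validated later (the source words are copied into the shadow
  // buffer below for that purpose).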
10784 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10785 for(i=0;i<slen;i++)
10786 {
10787 if(bt[i]||i==0)
10788 {
10789 if(instr_addr[i]) // TODO - delay slots (=null)
10790 {
10791 u_int vaddr=start+i*4;
94d23bb9 10792 u_int page=get_page(vaddr);
10793 u_int vpage=get_vpage(vaddr);
57871462 10794 literal_pool(256);
10795 //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10796 if(!requires_32bit[i])
10797 {
10798 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10799 assem_debug("jump_in: %x\n",start+i*4);
10800 ll_add(jump_dirty+vpage,vaddr,(void *)out);
10801 int entry_point=do_dirty_stub(i);
10802 ll_add(jump_in+page,vaddr,(void *)entry_point);
10803 // If there was an existing entry in the hash table,
10804 // replace it with the new address.
10805 // Don't add new entries. We'll insert the
10806 // ones that actually get used in check_addr().
          int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
          if(ht_bin[0]==vaddr) {
            ht_bin[1]=entry_point;
          }
          if(ht_bin[2]==vaddr) {
            ht_bin[3]=entry_point;
          }
        }
        else
        {
          u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
          assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
          assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
          //int entry_point=(int)out;
          ////assem_debug("entry_point: %x\n",entry_point);
          //load_regs_entry(i);
          //if(entry_point==(int)out)
          //  entry_point=instr_addr[i];
          //else
          //  emit_jmp(instr_addr[i]);
          //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
          ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
          int entry_point=do_dirty_stub(i);
          ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
        }
      }
    }
  }
  // Write out the literal pool if necessary
  literal_pool(0);
  #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
  // Align code
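  // Pad the block end to an 8-byte boundary on Cortex-A8 builds; the hack
  // is presumably there to keep branches from aliasing in the A8's branch
  // predictor, but only the alignment itself is visible in this file.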
  if(((u_int)out)&7) emit_addnop(13);
  #endif
  assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
  //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
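  // Keep a copy of the source MIPS code in the shadow buffer.  The dirty
  // stubs registered above can then (presumably via the verify/dirty-check
  // path) compare current memory contents against this copy to detect
  // whether the block was overwritten and needs recompiling.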
  memcpy(copy,source,slen*4);
  copy+=slen*4;

  #ifdef __arm__
  __clear_cache((void *)beginning,out);
  #endif

  // If we're within 256K of the end of the buffer,
  // start over from the beginning. (Is 256K enough?)
  if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;

  // Trap writes to any of the pages we compiled
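  // invalid_code[] is cleared to mark each page as holding compiled code.
  // With TLB support enabled, bit 30 of memory_map[] is also set, which
  // (by convention elsewhere in the dynarec) diverts stores to these pages
  // onto the invalidation path; for addresses at or above 0xC0000000 the
  // backing physical page is protected as well.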
  for(i=start>>12;i<=(start+slen*4)>>12;i++) {
    invalid_code[i]=0;
#ifndef DISABLE_TLB
    memory_map[i]|=0x40000000;
    if((signed int)start>=(signed int)0xC0000000) {
      assert(using_tlb);
      j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
      invalid_code[j]=0;
      memory_map[j]|=0x40000000;
      //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
    }
#endif
  }

  /* Pass 10 - Free memory by expiring oldest blocks */

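  // expirep is a 16-bit cursor over the translation cache:
  //   bits 13-15 select which of the 8 output-space blocks is reclaimed,
  //   bits 11-12 select which structure to clean this step (cases below),
  //   bits 0-10  select the jump_* bucket / hash-table rows to scan.
  // 'end' places the cursor 16384 cursor steps (i.e. two of the eight
  // blocks) ahead of the current output pointer, so stale entries are
  // unlinked well before 'out' wraps around onto their code.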
  int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
  while(expirep!=end)
  {
    int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
    int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
    inv_debug("EXP: Phase %d\n",expirep);
    switch((expirep>>11)&3)
    {
      case 0:
        // Clear jump_in and jump_dirty
        ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
        ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
        ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
        ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
        break;
      case 1:
        // Clear pointers
        ll_kill_pointers(jump_out[expirep&2047],base,shift);
        ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
        break;
      case 2:
        // Clear hash table
        for(i=0;i<32;i++) {
          int *ht_bin=hash_table[((expirep&2047)<<5)+i];
          if((ht_bin[3]>>shift)==(base>>shift) ||
             ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
            inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
            ht_bin[2]=ht_bin[3]=-1;
          }
          if((ht_bin[1]>>shift)==(base>>shift) ||
             ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
            inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
            ht_bin[0]=ht_bin[2];
            ht_bin[1]=ht_bin[3];
            ht_bin[2]=ht_bin[3]=-1;
          }
        }
        break;
      case 3:
        // Clear jump_out
        ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
        ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
        break;
    }
    expirep=(expirep+1)&65535;
  }
  return 0;
}

// vim:shiftwidth=2:expandtab