frontend: fix valgrind errors
pcsx_rearmed.git: libpcsxcore/new_dynarec/new_dynarec.c
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2010 Ari64 *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
24
3d624f89 25#include "emu_if.h" //emulator interface
57871462 26
27#include <sys/mman.h>
28
29#ifdef __i386__
30#include "assem_x86.h"
31#endif
32#ifdef __x86_64__
33#include "assem_x64.h"
34#endif
35#ifdef __arm__
36#include "assem_arm.h"
37#endif
38
39#define MAXBLOCK 4096
40#define MAX_OUTPUT_BLOCK_SIZE 262144
41#define CLOCK_DIVIDER 2
42
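// Per-instruction register allocation state (inferred from how the fields are used below):
// regmap[hr] gives the guest register currently held in host register hr (-1 = free),
// regmap_entry is the mapping expected on entry to the instruction;
// dirty/wasdirty flag host registers whose value still has to be written back;
// is32/was32 track which guest registers hold 32-bit sign-extended values;
// u/uu mark guest registers (lower/upper 32 bits) whose value is not needed afterwards;
// isconst/constmap track host registers that hold a known constant.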
43struct regstat
44{
45 signed char regmap_entry[HOST_REGS];
46 signed char regmap[HOST_REGS];
47 uint64_t was32;
48 uint64_t is32;
49 uint64_t wasdirty;
50 uint64_t dirty;
51 uint64_t u;
52 uint64_t uu;
53 u_int wasconst;
54 u_int isconst;
55 uint64_t constmap[HOST_REGS];
56};
57
58struct ll_entry
59{
60 u_int vaddr;
61 u_int reg32;
62 void *addr;
63 struct ll_entry *next;
64};
65
66 u_int start;
67 u_int *source;
68 u_int pagelimit;
69 char insn[MAXBLOCK][10];
70 u_char itype[MAXBLOCK];
71 u_char opcode[MAXBLOCK];
72 u_char opcode2[MAXBLOCK];
73 u_char bt[MAXBLOCK];
74 u_char rs1[MAXBLOCK];
75 u_char rs2[MAXBLOCK];
76 u_char rt1[MAXBLOCK];
77 u_char rt2[MAXBLOCK];
78 u_char us1[MAXBLOCK];
79 u_char us2[MAXBLOCK];
80 u_char dep1[MAXBLOCK];
81 u_char dep2[MAXBLOCK];
82 u_char lt1[MAXBLOCK];
83 int imm[MAXBLOCK];
84 u_int ba[MAXBLOCK];
85 char likely[MAXBLOCK];
86 char is_ds[MAXBLOCK];
e1190b87 87 char ooo[MAXBLOCK];
57871462 88 uint64_t unneeded_reg[MAXBLOCK];
89 uint64_t unneeded_reg_upper[MAXBLOCK];
90 uint64_t branch_unneeded_reg[MAXBLOCK];
91 uint64_t branch_unneeded_reg_upper[MAXBLOCK];
92 uint64_t p32[MAXBLOCK];
93 uint64_t pr32[MAXBLOCK];
94 signed char regmap_pre[MAXBLOCK][HOST_REGS];
95 signed char regmap[MAXBLOCK][HOST_REGS];
96 signed char regmap_entry[MAXBLOCK][HOST_REGS];
97 uint64_t constmap[MAXBLOCK][HOST_REGS];
57871462 98 struct regstat regs[MAXBLOCK];
99 struct regstat branch_regs[MAXBLOCK];
e1190b87 100 signed char minimum_free_regs[MAXBLOCK];
57871462 101 u_int needed_reg[MAXBLOCK];
102 uint64_t requires_32bit[MAXBLOCK];
103 u_int wont_dirty[MAXBLOCK];
104 u_int will_dirty[MAXBLOCK];
105 int ccadj[MAXBLOCK];
106 int slen;
107 u_int instr_addr[MAXBLOCK];
108 u_int link_addr[MAXBLOCK][3];
109 int linkcount;
110 u_int stubs[MAXBLOCK*3][8];
111 int stubcount;
112 u_int literals[1024][2];
113 int literalcount;
114 int is_delayslot;
115 int cop1_usable;
116 u_char *out;
117 struct ll_entry *jump_in[4096];
118 struct ll_entry *jump_out[4096];
119 struct ll_entry *jump_dirty[4096];
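// hash_table: 65536 bins indexed by ((vaddr>>16)^vaddr)&0xFFFF; each bin caches two
// {vaddr, compiled address} pairs ([0]/[1] most recent, [2]/[3] second chance).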
120 u_int hash_table[65536][4] __attribute__((aligned(16)));
121 char shadow[1048576] __attribute__((aligned(16)));
122 void *copy;
123 int expirep;
124 u_int using_tlb;
125 u_int stop_after_jal;
126 extern u_char restore_candidate[512];
127 extern int cycle_count;
128
129 /* registers that may be allocated */
130 /* 1-31 gpr */
131#define HIREG 32 // hi
132#define LOREG 33 // lo
133#define FSREG 34 // FPU status (FCSR)
134#define CSREG 35 // Coprocessor status
135#define CCREG 36 // Cycle count
136#define INVCP 37 // Pointer to invalid_code
619e5ded 137#define MMREG 38 // Pointer to memory_map
138#define ROREG 39 // ram offset (if rdram!=0x80000000)
139#define TEMPREG 40
140#define FTEMP 40 // FPU temporary register
141#define PTEMP 41 // Prefetch temporary register
142#define TLREG 42 // TLB mapping offset
143#define RHASH 43 // Return address hash
144#define RHTBL 44 // Return address hash table address
145#define RTEMP 45 // JR/JALR address register
146#define MAXREG 45
147#define AGEN1 46 // Address generation temporary register
148#define AGEN2 47 // Address generation temporary register
149#define MGEN1 48 // Maptable address generation temporary register
150#define MGEN2 49 // Maptable address generation temporary register
151#define BTREG 50 // Branch target temporary register
57871462 152
153 /* instruction types */
154#define NOP 0 // No operation
155#define LOAD 1 // Load
156#define STORE 2 // Store
157#define LOADLR 3 // Unaligned load
158#define STORELR 4 // Unaligned store
159#define MOV 5 // Move
160#define ALU 6 // Arithmetic/logic
161#define MULTDIV 7 // Multiply/divide
162#define SHIFT 8 // Shift by register
163#define SHIFTIMM 9// Shift by immediate
164#define IMM16 10 // 16-bit immediate
165#define RJUMP 11 // Unconditional jump to register
166#define UJUMP 12 // Unconditional jump
167#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
168#define SJUMP 14 // Conditional branch (regimm format)
169#define COP0 15 // Coprocessor 0
170#define COP1 16 // Coprocessor 1
171#define C1LS 17 // Coprocessor 1 load/store
172#define FJUMP 18 // Conditional branch (floating point)
173#define FLOAT 19 // Floating point unit
174#define FCONV 20 // Convert integer to float
175#define FCOMP 21 // Floating point compare (sets FSREG)
176#define SYSCALL 22// SYSCALL
177#define OTHER 23 // Other
178#define SPAN 24 // Branch/delay slot spans 2 pages
179#define NI 25 // Not implemented
7139f3c8 180#define HLECALL 26// PCSX fake opcodes for HLE
b9b61529 181#define COP2 27 // Coprocessor 2 move
182#define C2LS 28 // Coprocessor 2 load/store
183#define C2OP 29 // Coprocessor 2 operation
1e973cb0 184#define INTCALL 30// Call interpreter to handle rare corner cases
57871462 185
186 /* stubs */
187#define CC_STUB 1
188#define FP_STUB 2
189#define LOADB_STUB 3
190#define LOADH_STUB 4
191#define LOADW_STUB 5
192#define LOADD_STUB 6
193#define LOADBU_STUB 7
194#define LOADHU_STUB 8
195#define STOREB_STUB 9
196#define STOREH_STUB 10
197#define STOREW_STUB 11
198#define STORED_STUB 12
199#define STORELR_STUB 13
200#define INVCODE_STUB 14
201
202 /* branch codes */
203#define TAKEN 1
204#define NOTTAKEN 2
205#define NULLDS 3
206
207// asm linkage
208int new_recompile_block(int addr);
209void *get_addr_ht(u_int vaddr);
210void invalidate_block(u_int block);
211void invalidate_addr(u_int addr);
212void remove_hash(int vaddr);
213void jump_vaddr();
214void dyna_linker();
215void dyna_linker_ds();
216void verify_code();
217void verify_code_vm();
218void verify_code_ds();
219void cc_interrupt();
220void fp_exception();
221void fp_exception_ds();
222void jump_syscall();
7139f3c8 223void jump_syscall_hle();
57871462 224void jump_eret();
7139f3c8 225void jump_hlecall();
1e973cb0 226void jump_intcall();
7139f3c8 227void new_dyna_leave();
57871462 228
229// TLB
230void TLBWI_new();
231void TLBWR_new();
232void read_nomem_new();
233void read_nomemb_new();
234void read_nomemh_new();
235void read_nomemd_new();
236void write_nomem_new();
237void write_nomemb_new();
238void write_nomemh_new();
239void write_nomemd_new();
240void write_rdram_new();
241void write_rdramb_new();
242void write_rdramh_new();
243void write_rdramd_new();
244extern u_int memory_map[1048576];
245
246// Needed by assembler
247void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
248void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
249void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
250void load_all_regs(signed char i_regmap[]);
251void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
252void load_regs_entry(int t);
253void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
254
255int tracedebug=0;
256
257//#define DEBUG_CYCLE_COUNT 1
258
259void nullf() {}
260//#define assem_debug printf
261//#define inv_debug printf
262#define assem_debug nullf
263#define inv_debug nullf
264
94d23bb9 265static void tlb_hacks()
57871462 266{
94d23bb9 267#ifndef DISABLE_TLB
57871462 268 // Goldeneye hack
269 if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
270 {
271 u_int addr;
272 int n;
273 switch (ROM_HEADER->Country_code&0xFF)
274 {
275 case 0x45: // U
276 addr=0x34b30;
277 break;
278 case 0x4A: // J
279 addr=0x34b70;
280 break;
281 case 0x50: // E
282 addr=0x329f0;
283 break;
284 default:
285 // Unknown country code
286 addr=0;
287 break;
288 }
289 u_int rom_addr=(u_int)rom;
290 #ifdef ROM_COPY
291 // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
292 // in the lower 4G of memory to use this hack. Copy it if necessary.
293 if((void *)rom>(void *)0xffffffff) {
294 munmap(ROM_COPY, 67108864);
295 if(mmap(ROM_COPY, 12582912,
296 PROT_READ | PROT_WRITE,
297 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 298 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
299 memcpy(ROM_COPY,rom,12582912);
300 rom_addr=(u_int)ROM_COPY;
301 }
302 #endif
303 if(addr) {
304 for(n=0x7F000;n<0x80000;n++) {
305 memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
306 }
307 }
308 }
94d23bb9 309#endif
57871462 310}
311
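// Map a guest virtual address to an index into the jump_in/jump_out/jump_dirty page lists.
// Page numbers above 2048 are folded into 2048+(page&2047) so the tables stay small.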
94d23bb9 312static u_int get_page(u_int vaddr)
57871462 313{
0ce47d46 314#ifndef PCSX
57871462 315 u_int page=(vaddr^0x80000000)>>12;
0ce47d46 316#else
317 u_int page=vaddr&~0xe0000000;
318 if (page < 0x1000000)
319 page &= ~0x0e00000; // RAM mirrors
320 page>>=12;
321#endif
94d23bb9 322#ifndef DISABLE_TLB
57871462 323 if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
94d23bb9 324#endif
57871462 325 if(page>2048) page=2048+(page&2047);
94d23bb9 326 return page;
327}
328
329static u_int get_vpage(u_int vaddr)
330{
331 u_int vpage=(vaddr^0x80000000)>>12;
332#ifndef DISABLE_TLB
57871462 333 if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
94d23bb9 334#endif
57871462 335 if(vpage>2048) vpage=2048+(vpage&2047);
94d23bb9 336 return vpage;
337}
338
339// Get address from virtual address
340// This is called from the recompiled JR/JALR instructions
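// Look for an already-compiled block in jump_in first; failing that, try jump_dirty and,
// if the code it was compiled from is unmodified (verify_dirty), revive that block.
// Otherwise compile the block now. Successful lookups are cached in hash_table.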
341void *get_addr(u_int vaddr)
342{
343 u_int page=get_page(vaddr);
344 u_int vpage=get_vpage(vaddr);
57871462 345 struct ll_entry *head;
346 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
347 head=jump_in[page];
348 while(head!=NULL) {
349 if(head->vaddr==vaddr&&head->reg32==0) {
350 //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
351 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
352 ht_bin[3]=ht_bin[1];
353 ht_bin[2]=ht_bin[0];
354 ht_bin[1]=(int)head->addr;
355 ht_bin[0]=vaddr;
356 return head->addr;
357 }
358 head=head->next;
359 }
360 head=jump_dirty[vpage];
361 while(head!=NULL) {
362 if(head->vaddr==vaddr&&head->reg32==0) {
363 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
364 // Don't restore blocks which are about to expire from the cache
365 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
366 if(verify_dirty(head->addr)) {
367 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
368 invalid_code[vaddr>>12]=0;
369 memory_map[vaddr>>12]|=0x40000000;
370 if(vpage<2048) {
94d23bb9 371#ifndef DISABLE_TLB
57871462 372 if(tlb_LUT_r[vaddr>>12]) {
373 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
374 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
375 }
94d23bb9 376#endif
57871462 377 restore_candidate[vpage>>3]|=1<<(vpage&7);
378 }
379 else restore_candidate[page>>3]|=1<<(page&7);
380 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
381 if(ht_bin[0]==vaddr) {
382 ht_bin[1]=(int)head->addr; // Replace existing entry
383 }
384 else
385 {
386 ht_bin[3]=ht_bin[1];
387 ht_bin[2]=ht_bin[0];
388 ht_bin[1]=(int)head->addr;
389 ht_bin[0]=vaddr;
390 }
391 return head->addr;
392 }
393 }
394 head=head->next;
395 }
396 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
397 int r=new_recompile_block(vaddr);
398 if(r==0) return get_addr(vaddr);
 399 // Execute in unmapped page, generate page fault exception
400 Status|=2;
401 Cause=(vaddr<<31)|0x8;
402 EPC=(vaddr&1)?vaddr-5:vaddr;
403 BadVAddr=(vaddr&~1);
404 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
405 EntryHi=BadVAddr&0xFFFFE000;
406 return get_addr_ht(0x80000000);
407}
408// Look up address in hash table first
409void *get_addr_ht(u_int vaddr)
410{
411 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
412 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
413 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
414 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
415 return get_addr(vaddr);
416}
417
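// Variant of get_addr used when some guest registers may hold 64-bit values:
// only blocks whose reg32 requirements don't conflict with 'flags' are returned.
// On 32-bit-only builds (FORCE32) this reduces to a plain get_addr call.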
418void *get_addr_32(u_int vaddr,u_int flags)
419{
7139f3c8 420#ifdef FORCE32
421 return get_addr(vaddr);
560e4a12 422#else
57871462 423 //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
424 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
425 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
426 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
94d23bb9 427 u_int page=get_page(vaddr);
428 u_int vpage=get_vpage(vaddr);
57871462 429 struct ll_entry *head;
430 head=jump_in[page];
431 while(head!=NULL) {
432 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
433 //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
434 if(head->reg32==0) {
435 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
436 if(ht_bin[0]==-1) {
437 ht_bin[1]=(int)head->addr;
438 ht_bin[0]=vaddr;
439 }else if(ht_bin[2]==-1) {
440 ht_bin[3]=(int)head->addr;
441 ht_bin[2]=vaddr;
442 }
443 //ht_bin[3]=ht_bin[1];
444 //ht_bin[2]=ht_bin[0];
445 //ht_bin[1]=(int)head->addr;
446 //ht_bin[0]=vaddr;
447 }
448 return head->addr;
449 }
450 head=head->next;
451 }
452 head=jump_dirty[vpage];
453 while(head!=NULL) {
454 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
455 //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
456 // Don't restore blocks which are about to expire from the cache
457 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
458 if(verify_dirty(head->addr)) {
459 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
460 invalid_code[vaddr>>12]=0;
461 memory_map[vaddr>>12]|=0x40000000;
462 if(vpage<2048) {
94d23bb9 463#ifndef DISABLE_TLB
57871462 464 if(tlb_LUT_r[vaddr>>12]) {
465 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
466 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
467 }
94d23bb9 468#endif
57871462 469 restore_candidate[vpage>>3]|=1<<(vpage&7);
470 }
471 else restore_candidate[page>>3]|=1<<(page&7);
472 if(head->reg32==0) {
473 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
474 if(ht_bin[0]==-1) {
475 ht_bin[1]=(int)head->addr;
476 ht_bin[0]=vaddr;
477 }else if(ht_bin[2]==-1) {
478 ht_bin[3]=(int)head->addr;
479 ht_bin[2]=vaddr;
480 }
481 //ht_bin[3]=ht_bin[1];
482 //ht_bin[2]=ht_bin[0];
483 //ht_bin[1]=(int)head->addr;
484 //ht_bin[0]=vaddr;
485 }
486 return head->addr;
487 }
488 }
489 head=head->next;
490 }
491 //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
492 int r=new_recompile_block(vaddr);
493 if(r==0) return get_addr(vaddr);
 494 // Execute in unmapped page, generate page fault exception
495 Status|=2;
496 Cause=(vaddr<<31)|0x8;
497 EPC=(vaddr&1)?vaddr-5:vaddr;
498 BadVAddr=(vaddr&~1);
499 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
500 EntryHi=BadVAddr&0xFFFFE000;
501 return get_addr_ht(0x80000000);
560e4a12 502#endif
57871462 503}
504
505void clear_all_regs(signed char regmap[])
506{
507 int hr;
508 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
509}
510
511signed char get_reg(signed char regmap[],int r)
512{
513 int hr;
514 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
515 return -1;
516}
517
518// Find a register that is available for two consecutive cycles
519signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
520{
521 int hr;
522 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
523 return -1;
524}
525
526int count_free_regs(signed char regmap[])
527{
528 int count=0;
529 int hr;
530 for(hr=0;hr<HOST_REGS;hr++)
531 {
532 if(hr!=EXCLUDE_REG) {
533 if(regmap[hr]<0) count++;
534 }
535 }
536 return count;
537}
538
539void dirty_reg(struct regstat *cur,signed char reg)
540{
541 int hr;
542 if(!reg) return;
543 for (hr=0;hr<HOST_REGS;hr++) {
544 if((cur->regmap[hr]&63)==reg) {
545 cur->dirty|=1<<hr;
546 }
547 }
548}
549
550// If we dirty the lower half of a 64 bit register which is now being
551// sign-extended, we need to dump the upper half.
552// Note: Do this only after completion of the instruction, because
553// some instructions may need to read the full 64-bit value even if
554// overwriting it (eg SLTI, DSRA32).
555static void flush_dirty_uppers(struct regstat *cur)
556{
557 int hr,reg;
558 for (hr=0;hr<HOST_REGS;hr++) {
559 if((cur->dirty>>hr)&1) {
560 reg=cur->regmap[hr];
561 if(reg>=64)
562 if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
563 }
564 }
565}
566
567void set_const(struct regstat *cur,signed char reg,uint64_t value)
568{
569 int hr;
570 if(!reg) return;
571 for (hr=0;hr<HOST_REGS;hr++) {
572 if(cur->regmap[hr]==reg) {
573 cur->isconst|=1<<hr;
574 cur->constmap[hr]=value;
575 }
576 else if((cur->regmap[hr]^64)==reg) {
577 cur->isconst|=1<<hr;
578 cur->constmap[hr]=value>>32;
579 }
580 }
581}
582
583void clear_const(struct regstat *cur,signed char reg)
584{
585 int hr;
586 if(!reg) return;
587 for (hr=0;hr<HOST_REGS;hr++) {
588 if((cur->regmap[hr]&63)==reg) {
589 cur->isconst&=~(1<<hr);
590 }
591 }
592}
593
594int is_const(struct regstat *cur,signed char reg)
595{
596 int hr;
597 if(!reg) return 1;
598 for (hr=0;hr<HOST_REGS;hr++) {
599 if((cur->regmap[hr]&63)==reg) {
600 return (cur->isconst>>hr)&1;
601 }
602 }
603 return 0;
604}
605uint64_t get_const(struct regstat *cur,signed char reg)
606{
607 int hr;
608 if(!reg) return 0;
609 for (hr=0;hr<HOST_REGS;hr++) {
610 if(cur->regmap[hr]==reg) {
611 return cur->constmap[hr];
612 }
613 }
614 printf("Unknown constant in r%d\n",reg);
615 exit(1);
616}
617
618// Least soon needed registers
619// Look at the next ten instructions and see which registers
620// will be used. Try not to reallocate these.
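// On return, hsn[r] holds the distance (in instructions) to the next use of register r
// within the lookahead window; the caller pre-fills it with 10 meaning "not needed soon".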
621void lsn(u_char hsn[], int i, int *preferred_reg)
622{
623 int j;
624 int b=-1;
625 for(j=0;j<9;j++)
626 {
627 if(i+j>=slen) {
628 j=slen-i-1;
629 break;
630 }
631 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
632 {
 633 // Don't go past an unconditional jump
634 j++;
635 break;
636 }
637 }
638 for(;j>=0;j--)
639 {
640 if(rs1[i+j]) hsn[rs1[i+j]]=j;
641 if(rs2[i+j]) hsn[rs2[i+j]]=j;
642 if(rt1[i+j]) hsn[rt1[i+j]]=j;
643 if(rt2[i+j]) hsn[rt2[i+j]]=j;
644 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
645 // Stores can allocate zero
646 hsn[rs1[i+j]]=j;
647 hsn[rs2[i+j]]=j;
648 }
649 // On some architectures stores need invc_ptr
650 #if defined(HOST_IMM8)
b9b61529 651 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
57871462 652 hsn[INVCP]=j;
653 }
654 #endif
655 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
656 {
657 hsn[CCREG]=j;
658 b=j;
659 }
660 }
661 if(b>=0)
662 {
663 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
664 {
665 // Follow first branch
666 int t=(ba[i+b]-start)>>2;
667 j=7-b;if(t+j>=slen) j=slen-t-1;
668 for(;j>=0;j--)
669 {
670 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
671 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
672 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
673 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
674 }
675 }
676 // TODO: preferred register based on backward branch
677 }
678 // Delay slot should preferably not overwrite branch conditions or cycle count
679 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
680 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
681 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
682 hsn[CCREG]=1;
683 // ...or hash tables
684 hsn[RHASH]=1;
685 hsn[RHTBL]=1;
686 }
687 // Coprocessor load/store needs FTEMP, even if not declared
b9b61529 688 if(itype[i]==C1LS||itype[i]==C2LS) {
57871462 689 hsn[FTEMP]=0;
690 }
691 // Load L/R also uses FTEMP as a temporary register
692 if(itype[i]==LOADLR) {
693 hsn[FTEMP]=0;
694 }
b7918751 695 // Also SWL/SWR/SDL/SDR
696 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
57871462 697 hsn[FTEMP]=0;
698 }
699 // Don't remove the TLB registers either
b9b61529 700 if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
57871462 701 hsn[TLREG]=0;
702 }
703 // Don't remove the miniht registers
704 if(itype[i]==UJUMP||itype[i]==RJUMP)
705 {
706 hsn[RHASH]=0;
707 hsn[RHTBL]=0;
708 }
709}
710
711// We only want to allocate registers if we're going to use them again soon
712int needed_again(int r, int i)
713{
714 int j;
715 int b=-1;
716 int rn=10;
717 int hr;
718 u_char hsn[MAXREG+1];
719 int preferred_reg;
720
721 memset(hsn,10,sizeof(hsn));
722 lsn(hsn,i,&preferred_reg);
723
724 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
725 {
726 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
727 return 0; // Don't need any registers if exiting the block
728 }
729 for(j=0;j<9;j++)
730 {
731 if(i+j>=slen) {
732 j=slen-i-1;
733 break;
734 }
735 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
736 {
 737 // Don't go past an unconditional jump
738 j++;
739 break;
740 }
1e973cb0 741 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
57871462 742 {
743 break;
744 }
745 }
746 for(;j>=1;j--)
747 {
748 if(rs1[i+j]==r) rn=j;
749 if(rs2[i+j]==r) rn=j;
750 if((unneeded_reg[i+j]>>r)&1) rn=10;
751 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
752 {
753 b=j;
754 }
755 }
756 /*
757 if(b>=0)
758 {
759 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
760 {
761 // Follow first branch
762 int o=rn;
763 int t=(ba[i+b]-start)>>2;
764 j=7-b;if(t+j>=slen) j=slen-t-1;
765 for(;j>=0;j--)
766 {
767 if(!((unneeded_reg[t+j]>>r)&1)) {
768 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
769 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
770 }
771 else rn=o;
772 }
773 }
774 }*/
775 for(hr=0;hr<HOST_REGS;hr++) {
776 if(hr!=EXCLUDE_REG) {
777 if(rn<hsn[hr]) return 1;
778 }
779 }
780 return 0;
781}
782
783// Try to match register allocations at the end of a loop with those
784// at the beginning
785int loop_reg(int i, int r, int hr)
786{
787 int j,k;
788 for(j=0;j<9;j++)
789 {
790 if(i+j>=slen) {
791 j=slen-i-1;
792 break;
793 }
794 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
795 {
 796 // Don't go past an unconditional jump
797 j++;
798 break;
799 }
800 }
801 k=0;
802 if(i>0){
803 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
804 k--;
805 }
806 for(;k<j;k++)
807 {
808 if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
809 if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
810 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
811 {
812 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
813 {
814 int t=(ba[i+k]-start)>>2;
815 int reg=get_reg(regs[t].regmap_entry,r);
816 if(reg>=0) return reg;
817 //reg=get_reg(regs[t+1].regmap_entry,r);
818 //if(reg>=0) return reg;
819 }
820 }
821 }
822 return hr;
823}
824
825
826// Allocate every register, preserving source/target regs
827void alloc_all(struct regstat *cur,int i)
828{
829 int hr;
830
831 for(hr=0;hr<HOST_REGS;hr++) {
832 if(hr!=EXCLUDE_REG) {
833 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
834 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
835 {
836 cur->regmap[hr]=-1;
837 cur->dirty&=~(1<<hr);
838 }
839 // Don't need zeros
840 if((cur->regmap[hr]&63)==0)
841 {
842 cur->regmap[hr]=-1;
843 cur->dirty&=~(1<<hr);
844 }
845 }
846 }
847}
848
849
850void div64(int64_t dividend,int64_t divisor)
851{
852 lo=dividend/divisor;
853 hi=dividend%divisor;
854 //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
855 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
856}
857void divu64(uint64_t dividend,uint64_t divisor)
858{
859 lo=dividend/divisor;
860 hi=dividend%divisor;
861 //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
862 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
863}
864
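// DMULT/DMULTU helpers: compute the full 128-bit product of two 64-bit operands from
// four 32x32-bit partial products and leave the result in the hi:lo register pair.
// mult64 handles the signed case (hence the sign fix-up at the end), multu64 the unsigned one.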
 865void mult64(int64_t m1,int64_t m2)
866{
867 unsigned long long int op1, op2, op3, op4;
868 unsigned long long int result1, result2, result3, result4;
869 unsigned long long int temp1, temp2, temp3, temp4;
870 int sign = 0;
871
872 if (m1 < 0)
873 {
874 op2 = -m1;
875 sign = 1 - sign;
876 }
877 else op2 = m1;
878 if (m2 < 0)
879 {
880 op4 = -m2;
881 sign = 1 - sign;
882 }
883 else op4 = m2;
884
885 op1 = op2 & 0xFFFFFFFF;
886 op2 = (op2 >> 32) & 0xFFFFFFFF;
887 op3 = op4 & 0xFFFFFFFF;
888 op4 = (op4 >> 32) & 0xFFFFFFFF;
889
890 temp1 = op1 * op3;
891 temp2 = (temp1 >> 32) + op1 * op4;
892 temp3 = op2 * op3;
893 temp4 = (temp3 >> 32) + op2 * op4;
894
895 result1 = temp1 & 0xFFFFFFFF;
896 result2 = temp2 + (temp3 & 0xFFFFFFFF);
897 result3 = (result2 >> 32) + temp4;
898 result4 = (result3 >> 32);
899
900 lo = result1 | (result2 << 32);
901 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
902 if (sign)
903 {
904 hi = ~hi;
905 if (!lo) hi++;
906 else lo = ~lo + 1;
907 }
908}
909
910void multu64(uint64_t m1,uint64_t m2)
911{
912 unsigned long long int op1, op2, op3, op4;
913 unsigned long long int result1, result2, result3, result4;
914 unsigned long long int temp1, temp2, temp3, temp4;
915
916 op1 = m1 & 0xFFFFFFFF;
917 op2 = (m1 >> 32) & 0xFFFFFFFF;
918 op3 = m2 & 0xFFFFFFFF;
919 op4 = (m2 >> 32) & 0xFFFFFFFF;
920
921 temp1 = op1 * op3;
922 temp2 = (temp1 >> 32) + op1 * op4;
923 temp3 = op2 * op3;
924 temp4 = (temp3 >> 32) + op2 * op4;
925
926 result1 = temp1 & 0xFFFFFFFF;
927 result2 = temp2 + (temp3 & 0xFFFFFFFF);
928 result3 = (result2 >> 32) + temp4;
929 result4 = (result3 >> 32);
930
931 lo = result1 | (result2 << 32);
932 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
933
934 //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
935 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
936}
937
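// Merge helpers for unaligned 64-bit loads. For LDL, the low 'bits' bits of the old
// register value are kept and the loaded data is shifted in above them, e.g. with
// bits=16: result = (loaded<<16) | (original & 0xffff). ldr_merge is the mirror image
// for LDR, keeping the high bits of the original value.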
938uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
939{
940 if(bits) {
941 original<<=64-bits;
942 original>>=64-bits;
943 loaded<<=bits;
944 original|=loaded;
945 }
946 else original=loaded;
947 return original;
948}
949uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
950{
951 if(bits^56) {
952 original>>=64-(bits^56);
953 original<<=64-(bits^56);
954 loaded>>=bits^56;
955 original|=loaded;
956 }
957 else original=loaded;
958 return original;
959}
960
961#ifdef __i386__
962#include "assem_x86.c"
963#endif
964#ifdef __x86_64__
965#include "assem_x64.c"
966#endif
967#ifdef __arm__
968#include "assem_arm.c"
969#endif
970
971// Add virtual address mapping to linked list
972void ll_add(struct ll_entry **head,int vaddr,void *addr)
973{
974 struct ll_entry *new_entry;
975 new_entry=malloc(sizeof(struct ll_entry));
976 assert(new_entry!=NULL);
977 new_entry->vaddr=vaddr;
978 new_entry->reg32=0;
979 new_entry->addr=addr;
980 new_entry->next=*head;
981 *head=new_entry;
982}
983
984// Add virtual address mapping for 32-bit compiled block
985void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
986{
7139f3c8 987 ll_add(head,vaddr,addr);
988#ifndef FORCE32
989 (*head)->reg32=reg32;
990#endif
57871462 991}
992
993// Check if an address is already compiled
994// but don't return addresses which are about to expire from the cache
995void *check_addr(u_int vaddr)
996{
997 u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
998 if(ht_bin[0]==vaddr) {
999 if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1000 if(isclean(ht_bin[1])) return (void *)ht_bin[1];
1001 }
1002 if(ht_bin[2]==vaddr) {
1003 if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1004 if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1005 }
94d23bb9 1006 u_int page=get_page(vaddr);
57871462 1007 struct ll_entry *head;
1008 head=jump_in[page];
1009 while(head!=NULL) {
1010 if(head->vaddr==vaddr&&head->reg32==0) {
1011 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1012 // Update existing entry with current address
1013 if(ht_bin[0]==vaddr) {
1014 ht_bin[1]=(int)head->addr;
1015 return head->addr;
1016 }
1017 if(ht_bin[2]==vaddr) {
1018 ht_bin[3]=(int)head->addr;
1019 return head->addr;
1020 }
1021 // Insert into hash table with low priority.
1022 // Don't evict existing entries, as they are probably
1023 // addresses that are being accessed frequently.
1024 if(ht_bin[0]==-1) {
1025 ht_bin[1]=(int)head->addr;
1026 ht_bin[0]=vaddr;
1027 }else if(ht_bin[2]==-1) {
1028 ht_bin[3]=(int)head->addr;
1029 ht_bin[2]=vaddr;
1030 }
1031 return head->addr;
1032 }
1033 }
1034 head=head->next;
1035 }
1036 return 0;
1037}
1038
1039void remove_hash(int vaddr)
1040{
1041 //printf("remove hash: %x\n",vaddr);
1042 int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1043 if(ht_bin[2]==vaddr) {
1044 ht_bin[2]=ht_bin[3]=-1;
1045 }
1046 if(ht_bin[0]==vaddr) {
1047 ht_bin[0]=ht_bin[2];
1048 ht_bin[1]=ht_bin[3];
1049 ht_bin[2]=ht_bin[3]=-1;
1050 }
1051}
1052
1053void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1054{
1055 struct ll_entry *next;
1056 while(*head) {
1057 if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1058 ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1059 {
1060 inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1061 remove_hash((*head)->vaddr);
1062 next=(*head)->next;
1063 free(*head);
1064 *head=next;
1065 }
1066 else
1067 {
1068 head=&((*head)->next);
1069 }
1070 }
1071}
1072
1073// Remove all entries from linked list
1074void ll_clear(struct ll_entry **head)
1075{
1076 struct ll_entry *cur;
1077 struct ll_entry *next;
 1078 if((cur=*head)!=NULL) {
1079 *head=0;
1080 while(cur) {
1081 next=cur->next;
1082 free(cur);
1083 cur=next;
1084 }
1085 }
1086}
1087
 1088// Dereference each pointer and kill it if it points into the given address range
1089void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1090{
1091 while(head) {
1092 int ptr=get_pointer(head->addr);
1093 inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1094 if(((ptr>>shift)==(addr>>shift)) ||
1095 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1096 {
5088bb70 1097 inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
f76eeef9 1098 u_int host_addr=(u_int)kill_pointer(head->addr);
dd3a91a1 1099 #ifdef __arm__
1100 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1101 #endif
57871462 1102 }
1103 head=head->next;
1104 }
1105}
1106
1107// This is called when we write to a compiled block (see do_invstub)
f76eeef9 1108void invalidate_page(u_int page)
57871462 1109{
57871462 1110 struct ll_entry *head;
1111 struct ll_entry *next;
1112 head=jump_in[page];
1113 jump_in[page]=0;
1114 while(head!=NULL) {
1115 inv_debug("INVALIDATE: %x\n",head->vaddr);
1116 remove_hash(head->vaddr);
1117 next=head->next;
1118 free(head);
1119 head=next;
1120 }
1121 head=jump_out[page];
1122 jump_out[page]=0;
1123 while(head!=NULL) {
1124 inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
f76eeef9 1125 u_int host_addr=(u_int)kill_pointer(head->addr);
dd3a91a1 1126 #ifdef __arm__
1127 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1128 #endif
57871462 1129 next=head->next;
1130 free(head);
1131 head=next;
1132 }
57871462 1133}
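// Invalidate the block containing the given 4K page. get_bounds() is used to find the
// full source range of each dirty block touching the page, since a block may cross a
// page boundary, and every page it spans is invalidated.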
1134void invalidate_block(u_int block)
1135{
94d23bb9 1136 u_int page=get_page(block<<12);
1137 u_int vpage=get_vpage(block<<12);
57871462 1138 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1139 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1140 u_int first,last;
1141 first=last=page;
1142 struct ll_entry *head;
1143 head=jump_dirty[vpage];
1144 //printf("page=%d vpage=%d\n",page,vpage);
1145 while(head!=NULL) {
1146 u_int start,end;
1147 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1148 get_bounds((int)head->addr,&start,&end);
1149 //printf("start: %x end: %x\n",start,end);
4cb76aa4 1150 if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
57871462 1151 if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1152 if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1153 if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1154 }
1155 }
90ae6d4e 1156#ifndef DISABLE_TLB
57871462 1157 if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1158 if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1159 if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1160 if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1161 }
1162 }
90ae6d4e 1163#endif
57871462 1164 }
1165 head=head->next;
1166 }
1167 //printf("first=%d last=%d\n",first,last);
f76eeef9 1168 invalidate_page(page);
57871462 1169 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1170 assert(last<page+5);
1171 // Invalidate the adjacent pages if a block crosses a 4K boundary
1172 while(first<page) {
1173 invalidate_page(first);
1174 first++;
1175 }
1176 for(first=page+1;first<last;first++) {
1177 invalidate_page(first);
1178 }
dd3a91a1 1179 #ifdef __arm__
1180 do_clear_cache();
1181 #endif
57871462 1182
1183 // Don't trap writes
1184 invalid_code[block]=1;
94d23bb9 1185#ifndef DISABLE_TLB
57871462 1186 // If there is a valid TLB entry for this page, remove write protect
1187 if(tlb_LUT_w[block]) {
1188 assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1189 // CHECK: Is this right?
1190 memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1191 u_int real_block=tlb_LUT_w[block]>>12;
1192 invalid_code[real_block]=1;
1193 if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1194 }
1195 else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
94d23bb9 1196#endif
f76eeef9 1197
57871462 1198 #ifdef USE_MINI_HT
1199 memset(mini_ht,-1,sizeof(mini_ht));
1200 #endif
1201}
1202void invalidate_addr(u_int addr)
1203{
1204 invalidate_block(addr>>12);
1205}
dd3a91a1 1206// This is called when loading a save state.
1207// Anything could have changed, so invalidate everything.
57871462 1208void invalidate_all_pages()
1209{
1210 u_int page,n;
1211 for(page=0;page<4096;page++)
1212 invalidate_page(page);
1213 for(page=0;page<1048576;page++)
1214 if(!invalid_code[page]) {
1215 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1216 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1217 }
1218 #ifdef __arm__
1219 __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1220 #endif
1221 #ifdef USE_MINI_HT
1222 memset(mini_ht,-1,sizeof(mini_ht));
1223 #endif
94d23bb9 1224 #ifndef DISABLE_TLB
57871462 1225 // TLB
1226 for(page=0;page<0x100000;page++) {
1227 if(tlb_LUT_r[page]) {
1228 memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1229 if(!tlb_LUT_w[page]||!invalid_code[page])
1230 memory_map[page]|=0x40000000; // Write protect
1231 }
1232 else memory_map[page]=-1;
1233 if(page==0x80000) page=0xC0000;
1234 }
1235 tlb_hacks();
94d23bb9 1236 #endif
57871462 1237}
1238
1239// Add an entry to jump_out after making a link
1240void add_link(u_int vaddr,void *src)
1241{
94d23bb9 1242 u_int page=get_page(vaddr);
57871462 1243 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1244 ll_add(jump_out+page,vaddr,src);
1245 //int ptr=get_pointer(src);
1246 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1247}
1248
1249// If a code block was found to be unmodified (bit was set in
1250// restore_candidate) and it remains unmodified (bit is clear
1251// in invalid_code) then move the entries for that 4K page from
1252// the dirty list to the clean list.
1253void clean_blocks(u_int page)
1254{
1255 struct ll_entry *head;
1256 inv_debug("INV: clean_blocks page=%d\n",page);
1257 head=jump_dirty[page];
1258 while(head!=NULL) {
1259 if(!invalid_code[head->vaddr>>12]) {
1260 // Don't restore blocks which are about to expire from the cache
1261 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1262 u_int start,end;
1263 if(verify_dirty((int)head->addr)) {
1264 //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1265 u_int i;
1266 u_int inv=0;
1267 get_bounds((int)head->addr,&start,&end);
4cb76aa4 1268 if(start-(u_int)rdram<RAM_SIZE) {
57871462 1269 for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1270 inv|=invalid_code[i];
1271 }
1272 }
1273 if((signed int)head->vaddr>=(signed int)0xC0000000) {
1274 u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1275 //printf("addr=%x start=%x end=%x\n",addr,start,end);
1276 if(addr<start||addr>=end) inv=1;
1277 }
4cb76aa4 1278 else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
57871462 1279 inv=1;
1280 }
1281 if(!inv) {
1282 void * clean_addr=(void *)get_clean_addr((int)head->addr);
1283 if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1284 u_int ppage=page;
94d23bb9 1285#ifndef DISABLE_TLB
57871462 1286 if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
94d23bb9 1287#endif
57871462 1288 inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1289 //printf("page=%x, addr=%x\n",page,head->vaddr);
1290 //assert(head->vaddr>>12==(page|0x80000));
1291 ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1292 int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1293 if(!head->reg32) {
1294 if(ht_bin[0]==head->vaddr) {
1295 ht_bin[1]=(int)clean_addr; // Replace existing entry
1296 }
1297 if(ht_bin[2]==head->vaddr) {
1298 ht_bin[3]=(int)clean_addr; // Replace existing entry
1299 }
1300 }
1301 }
1302 }
1303 }
1304 }
1305 }
1306 head=head->next;
1307 }
1308}
1309
1310
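// Per-instruction-type register allocators. Each *_alloc below reserves the host
// registers that the generated code for instruction i will need, and updates the
// 32/64-bit, dirty and constant tracking in *current accordingly.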
1311void mov_alloc(struct regstat *current,int i)
1312{
1313 // Note: Don't need to actually alloc the source registers
1314 if((~current->is32>>rs1[i])&1) {
1315 //alloc_reg64(current,i,rs1[i]);
1316 alloc_reg64(current,i,rt1[i]);
1317 current->is32&=~(1LL<<rt1[i]);
1318 } else {
1319 //alloc_reg(current,i,rs1[i]);
1320 alloc_reg(current,i,rt1[i]);
1321 current->is32|=(1LL<<rt1[i]);
1322 }
1323 clear_const(current,rs1[i]);
1324 clear_const(current,rt1[i]);
1325 dirty_reg(current,rt1[i]);
1326}
1327
1328void shiftimm_alloc(struct regstat *current,int i)
1329{
1330 clear_const(current,rs1[i]);
1331 clear_const(current,rt1[i]);
1332 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1333 {
1334 if(rt1[i]) {
1335 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1336 else lt1[i]=rs1[i];
1337 alloc_reg(current,i,rt1[i]);
1338 current->is32|=1LL<<rt1[i];
1339 dirty_reg(current,rt1[i]);
1340 }
1341 }
1342 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1343 {
1344 if(rt1[i]) {
1345 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1346 alloc_reg64(current,i,rt1[i]);
1347 current->is32&=~(1LL<<rt1[i]);
1348 dirty_reg(current,rt1[i]);
1349 }
1350 }
1351 if(opcode2[i]==0x3c) // DSLL32
1352 {
1353 if(rt1[i]) {
1354 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1355 alloc_reg64(current,i,rt1[i]);
1356 current->is32&=~(1LL<<rt1[i]);
1357 dirty_reg(current,rt1[i]);
1358 }
1359 }
1360 if(opcode2[i]==0x3e) // DSRL32
1361 {
1362 if(rt1[i]) {
1363 alloc_reg64(current,i,rs1[i]);
1364 if(imm[i]==32) {
1365 alloc_reg64(current,i,rt1[i]);
1366 current->is32&=~(1LL<<rt1[i]);
1367 } else {
1368 alloc_reg(current,i,rt1[i]);
1369 current->is32|=1LL<<rt1[i];
1370 }
1371 dirty_reg(current,rt1[i]);
1372 }
1373 }
1374 if(opcode2[i]==0x3f) // DSRA32
1375 {
1376 if(rt1[i]) {
1377 alloc_reg64(current,i,rs1[i]);
1378 alloc_reg(current,i,rt1[i]);
1379 current->is32|=1LL<<rt1[i];
1380 dirty_reg(current,rt1[i]);
1381 }
1382 }
1383}
1384
1385void shift_alloc(struct regstat *current,int i)
1386{
1387 if(rt1[i]) {
1388 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1389 {
1390 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1391 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1392 alloc_reg(current,i,rt1[i]);
e1190b87 1393 if(rt1[i]==rs2[i]) {
1394 alloc_reg_temp(current,i,-1);
1395 minimum_free_regs[i]=1;
1396 }
57871462 1397 current->is32|=1LL<<rt1[i];
1398 } else { // DSLLV/DSRLV/DSRAV
1399 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1400 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1401 alloc_reg64(current,i,rt1[i]);
1402 current->is32&=~(1LL<<rt1[i]);
1403 if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
e1190b87 1404 {
57871462 1405 alloc_reg_temp(current,i,-1);
e1190b87 1406 minimum_free_regs[i]=1;
1407 }
57871462 1408 }
1409 clear_const(current,rs1[i]);
1410 clear_const(current,rs2[i]);
1411 clear_const(current,rt1[i]);
1412 dirty_reg(current,rt1[i]);
1413 }
1414}
1415
1416void alu_alloc(struct regstat *current,int i)
1417{
1418 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1419 if(rt1[i]) {
1420 if(rs1[i]&&rs2[i]) {
1421 alloc_reg(current,i,rs1[i]);
1422 alloc_reg(current,i,rs2[i]);
1423 }
1424 else {
1425 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1426 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1427 }
1428 alloc_reg(current,i,rt1[i]);
1429 }
1430 current->is32|=1LL<<rt1[i];
1431 }
1432 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1433 if(rt1[i]) {
1434 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1435 {
1436 alloc_reg64(current,i,rs1[i]);
1437 alloc_reg64(current,i,rs2[i]);
1438 alloc_reg(current,i,rt1[i]);
1439 } else {
1440 alloc_reg(current,i,rs1[i]);
1441 alloc_reg(current,i,rs2[i]);
1442 alloc_reg(current,i,rt1[i]);
1443 }
1444 }
1445 current->is32|=1LL<<rt1[i];
1446 }
1447 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1448 if(rt1[i]) {
1449 if(rs1[i]&&rs2[i]) {
1450 alloc_reg(current,i,rs1[i]);
1451 alloc_reg(current,i,rs2[i]);
1452 }
1453 else
1454 {
1455 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1456 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1457 }
1458 alloc_reg(current,i,rt1[i]);
1459 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1460 {
1461 if(!((current->uu>>rt1[i])&1)) {
1462 alloc_reg64(current,i,rt1[i]);
1463 }
1464 if(get_reg(current->regmap,rt1[i]|64)>=0) {
1465 if(rs1[i]&&rs2[i]) {
1466 alloc_reg64(current,i,rs1[i]);
1467 alloc_reg64(current,i,rs2[i]);
1468 }
1469 else
1470 {
 1471 // Is it really worth it to keep 64-bit values in registers?
1472 #ifdef NATIVE_64BIT
1473 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1474 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1475 #endif
1476 }
1477 }
1478 current->is32&=~(1LL<<rt1[i]);
1479 } else {
1480 current->is32|=1LL<<rt1[i];
1481 }
1482 }
1483 }
1484 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1485 if(rt1[i]) {
1486 if(rs1[i]&&rs2[i]) {
1487 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1488 alloc_reg64(current,i,rs1[i]);
1489 alloc_reg64(current,i,rs2[i]);
1490 alloc_reg64(current,i,rt1[i]);
1491 } else {
1492 alloc_reg(current,i,rs1[i]);
1493 alloc_reg(current,i,rs2[i]);
1494 alloc_reg(current,i,rt1[i]);
1495 }
1496 }
1497 else {
1498 alloc_reg(current,i,rt1[i]);
1499 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1500 // DADD used as move, or zeroing
1501 // If we have a 64-bit source, then make the target 64 bits too
1502 if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1503 if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1504 alloc_reg64(current,i,rt1[i]);
1505 } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1506 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1507 alloc_reg64(current,i,rt1[i]);
1508 }
1509 if(opcode2[i]>=0x2e&&rs2[i]) {
1510 // DSUB used as negation - 64-bit result
1511 // If we have a 32-bit register, extend it to 64 bits
1512 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1513 alloc_reg64(current,i,rt1[i]);
1514 }
1515 }
1516 }
1517 if(rs1[i]&&rs2[i]) {
1518 current->is32&=~(1LL<<rt1[i]);
1519 } else if(rs1[i]) {
1520 current->is32&=~(1LL<<rt1[i]);
1521 if((current->is32>>rs1[i])&1)
1522 current->is32|=1LL<<rt1[i];
1523 } else if(rs2[i]) {
1524 current->is32&=~(1LL<<rt1[i]);
1525 if((current->is32>>rs2[i])&1)
1526 current->is32|=1LL<<rt1[i];
1527 } else {
1528 current->is32|=1LL<<rt1[i];
1529 }
1530 }
1531 }
1532 clear_const(current,rs1[i]);
1533 clear_const(current,rs2[i]);
1534 clear_const(current,rt1[i]);
1535 dirty_reg(current,rt1[i]);
1536}
1537
1538void imm16_alloc(struct regstat *current,int i)
1539{
1540 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1541 else lt1[i]=rs1[i];
1542 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1543 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1544 current->is32&=~(1LL<<rt1[i]);
1545 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1546 // TODO: Could preserve the 32-bit flag if the immediate is zero
1547 alloc_reg64(current,i,rt1[i]);
1548 alloc_reg64(current,i,rs1[i]);
1549 }
1550 clear_const(current,rs1[i]);
1551 clear_const(current,rt1[i]);
1552 }
1553 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1554 if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1555 current->is32|=1LL<<rt1[i];
1556 clear_const(current,rs1[i]);
1557 clear_const(current,rt1[i]);
1558 }
1559 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1560 if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1561 if(rs1[i]!=rt1[i]) {
1562 if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1563 alloc_reg64(current,i,rt1[i]);
1564 current->is32&=~(1LL<<rt1[i]);
1565 }
1566 }
1567 else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1568 if(is_const(current,rs1[i])) {
1569 int v=get_const(current,rs1[i]);
1570 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1571 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1572 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1573 }
1574 else clear_const(current,rt1[i]);
1575 }
1576 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1577 if(is_const(current,rs1[i])) {
1578 int v=get_const(current,rs1[i]);
1579 set_const(current,rt1[i],v+imm[i]);
1580 }
1581 else clear_const(current,rt1[i]);
1582 current->is32|=1LL<<rt1[i];
1583 }
1584 else {
1585 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1586 current->is32|=1LL<<rt1[i];
1587 }
1588 dirty_reg(current,rt1[i]);
1589}
1590
1591void load_alloc(struct regstat *current,int i)
1592{
1593 clear_const(current,rt1[i]);
1594 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1595 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1596 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1597 if(rt1[i]) {
1598 alloc_reg(current,i,rt1[i]);
535d208a 1599 if(get_reg(current->regmap,rt1[i])<0) {
1600 // dummy load, but we still need a register to calculate the address
1601 alloc_reg_temp(current,i,-1);
e1190b87 1602 minimum_free_regs[i]=1;
535d208a 1603 }
57871462 1604 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1605 {
1606 current->is32&=~(1LL<<rt1[i]);
1607 alloc_reg64(current,i,rt1[i]);
1608 }
1609 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1610 {
1611 current->is32&=~(1LL<<rt1[i]);
1612 alloc_reg64(current,i,rt1[i]);
1613 alloc_all(current,i);
1614 alloc_reg64(current,i,FTEMP);
e1190b87 1615 minimum_free_regs[i]=HOST_REGS;
57871462 1616 }
1617 else current->is32|=1LL<<rt1[i];
1618 dirty_reg(current,rt1[i]);
1619 // If using TLB, need a register for pointer to the mapping table
1620 if(using_tlb) alloc_reg(current,i,TLREG);
1621 // LWL/LWR need a temporary register for the old value
1622 if(opcode[i]==0x22||opcode[i]==0x26)
1623 {
1624 alloc_reg(current,i,FTEMP);
1625 alloc_reg_temp(current,i,-1);
e1190b87 1626 minimum_free_regs[i]=1;
57871462 1627 }
1628 }
1629 else
1630 {
1631 // Load to r0 (dummy load)
1632 // but we still need a register to calculate the address
535d208a 1633 if(opcode[i]==0x22||opcode[i]==0x26)
1634 {
1635 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1636 }
57871462 1637 alloc_reg_temp(current,i,-1);
e1190b87 1638 minimum_free_regs[i]=1;
535d208a 1639 if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1640 {
1641 alloc_all(current,i);
1642 alloc_reg64(current,i,FTEMP);
e1190b87 1643 minimum_free_regs[i]=HOST_REGS;
535d208a 1644 }
57871462 1645 }
1646}
1647
1648void store_alloc(struct regstat *current,int i)
1649{
1650 clear_const(current,rs2[i]);
1651 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1652 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1653 alloc_reg(current,i,rs2[i]);
1654 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1655 alloc_reg64(current,i,rs2[i]);
1656 if(rs2[i]) alloc_reg(current,i,FTEMP);
1657 }
1658 // If using TLB, need a register for pointer to the mapping table
1659 if(using_tlb) alloc_reg(current,i,TLREG);
1660 #if defined(HOST_IMM8)
1661 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1662 else alloc_reg(current,i,INVCP);
1663 #endif
b7918751 1664 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
57871462 1665 alloc_reg(current,i,FTEMP);
1666 }
1667 // We need a temporary register for address generation
1668 alloc_reg_temp(current,i,-1);
e1190b87 1669 minimum_free_regs[i]=1;
57871462 1670}
1671
1672void c1ls_alloc(struct regstat *current,int i)
1673{
1674 //clear_const(current,rs1[i]); // FIXME
1675 clear_const(current,rt1[i]);
1676 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1677 alloc_reg(current,i,CSREG); // Status
1678 alloc_reg(current,i,FTEMP);
1679 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1680 alloc_reg64(current,i,FTEMP);
1681 }
1682 // If using TLB, need a register for pointer to the mapping table
1683 if(using_tlb) alloc_reg(current,i,TLREG);
1684 #if defined(HOST_IMM8)
1685 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1686 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1687 alloc_reg(current,i,INVCP);
1688 #endif
1689 // We need a temporary register for address generation
1690 alloc_reg_temp(current,i,-1);
1691}
1692
b9b61529 1693void c2ls_alloc(struct regstat *current,int i)
1694{
1695 clear_const(current,rt1[i]);
1696 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1697 alloc_reg(current,i,FTEMP);
1698 // If using TLB, need a register for pointer to the mapping table
1699 if(using_tlb) alloc_reg(current,i,TLREG);
1700 #if defined(HOST_IMM8)
1701 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1702 else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1703 alloc_reg(current,i,INVCP);
1704 #endif
1705 // We need a temporary register for address generation
1706 alloc_reg_temp(current,i,-1);
e1190b87 1707 minimum_free_regs[i]=1;
b9b61529 1708}
1709
57871462 1710#ifndef multdiv_alloc
1711void multdiv_alloc(struct regstat *current,int i)
1712{
1713 // case 0x18: MULT
1714 // case 0x19: MULTU
1715 // case 0x1A: DIV
1716 // case 0x1B: DIVU
1717 // case 0x1C: DMULT
1718 // case 0x1D: DMULTU
1719 // case 0x1E: DDIV
1720 // case 0x1F: DDIVU
1721 clear_const(current,rs1[i]);
1722 clear_const(current,rs2[i]);
1723 if(rs1[i]&&rs2[i])
1724 {
1725 if((opcode2[i]&4)==0) // 32-bit
1726 {
1727 current->u&=~(1LL<<HIREG);
1728 current->u&=~(1LL<<LOREG);
1729 alloc_reg(current,i,HIREG);
1730 alloc_reg(current,i,LOREG);
1731 alloc_reg(current,i,rs1[i]);
1732 alloc_reg(current,i,rs2[i]);
1733 current->is32|=1LL<<HIREG;
1734 current->is32|=1LL<<LOREG;
1735 dirty_reg(current,HIREG);
1736 dirty_reg(current,LOREG);
1737 }
1738 else // 64-bit
1739 {
1740 current->u&=~(1LL<<HIREG);
1741 current->u&=~(1LL<<LOREG);
1742 current->uu&=~(1LL<<HIREG);
1743 current->uu&=~(1LL<<LOREG);
1744 alloc_reg64(current,i,HIREG);
1745 //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1746 alloc_reg64(current,i,rs1[i]);
1747 alloc_reg64(current,i,rs2[i]);
1748 alloc_all(current,i);
1749 current->is32&=~(1LL<<HIREG);
1750 current->is32&=~(1LL<<LOREG);
1751 dirty_reg(current,HIREG);
1752 dirty_reg(current,LOREG);
e1190b87 1753 minimum_free_regs[i]=HOST_REGS;
57871462 1754 }
1755 }
1756 else
1757 {
1758 // Multiply by zero is zero.
1759 // MIPS does not have a divide by zero exception.
1760 // The result is undefined, we return zero.
1761 alloc_reg(current,i,HIREG);
1762 alloc_reg(current,i,LOREG);
1763 current->is32|=1LL<<HIREG;
1764 current->is32|=1LL<<LOREG;
1765 dirty_reg(current,HIREG);
1766 dirty_reg(current,LOREG);
1767 }
1768}
1769#endif
1770
1771void cop0_alloc(struct regstat *current,int i)
1772{
1773 if(opcode2[i]==0) // MFC0
1774 {
1775 if(rt1[i]) {
1776 clear_const(current,rt1[i]);
1777 alloc_all(current,i);
1778 alloc_reg(current,i,rt1[i]);
1779 current->is32|=1LL<<rt1[i];
1780 dirty_reg(current,rt1[i]);
1781 }
1782 }
1783 else if(opcode2[i]==4) // MTC0
1784 {
1785 if(rs1[i]){
1786 clear_const(current,rs1[i]);
1787 alloc_reg(current,i,rs1[i]);
1788 alloc_all(current,i);
1789 }
1790 else {
1791 alloc_all(current,i); // FIXME: Keep r0
1792 current->u&=~1LL;
1793 alloc_reg(current,i,0);
1794 }
1795 }
1796 else
1797 {
1798 // TLBR/TLBWI/TLBWR/TLBP/ERET
1799 assert(opcode2[i]==0x10);
1800 alloc_all(current,i);
1801 }
e1190b87 1802 minimum_free_regs[i]=HOST_REGS;
57871462 1803}
1804
1805void cop1_alloc(struct regstat *current,int i)
1806{
1807 alloc_reg(current,i,CSREG); // Load status
1808 if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1809 {
7de557a6 1810 if(rt1[i]){
1811 clear_const(current,rt1[i]);
1812 if(opcode2[i]==1) {
1813 alloc_reg64(current,i,rt1[i]); // DMFC1
1814 current->is32&=~(1LL<<rt1[i]);
1815 }else{
1816 alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1817 current->is32|=1LL<<rt1[i];
1818 }
1819 dirty_reg(current,rt1[i]);
57871462 1820 }
57871462 1821 alloc_reg_temp(current,i,-1);
1822 }
1823 else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1824 {
1825 if(rs1[i]){
1826 clear_const(current,rs1[i]);
1827 if(opcode2[i]==5)
1828 alloc_reg64(current,i,rs1[i]); // DMTC1
1829 else
1830 alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1831 alloc_reg_temp(current,i,-1);
1832 }
1833 else {
1834 current->u&=~1LL;
1835 alloc_reg(current,i,0);
1836 alloc_reg_temp(current,i,-1);
1837 }
1838 }
e1190b87 1839 minimum_free_regs[i]=1;
57871462 1840}
1841void fconv_alloc(struct regstat *current,int i)
1842{
1843 alloc_reg(current,i,CSREG); // Load status
1844 alloc_reg_temp(current,i,-1);
e1190b87 1845 minimum_free_regs[i]=1;
57871462 1846}
1847void float_alloc(struct regstat *current,int i)
1848{
1849 alloc_reg(current,i,CSREG); // Load status
1850 alloc_reg_temp(current,i,-1);
e1190b87 1851 minimum_free_regs[i]=1;
57871462 1852}
b9b61529 1853void c2op_alloc(struct regstat *current,int i)
1854{
1855 alloc_reg_temp(current,i,-1);
1856}
57871462 1857void fcomp_alloc(struct regstat *current,int i)
1858{
1859 alloc_reg(current,i,CSREG); // Load status
1860 alloc_reg(current,i,FSREG); // Load flags
1861 dirty_reg(current,FSREG); // Flag will be modified
1862 alloc_reg_temp(current,i,-1);
e1190b87 1863 minimum_free_regs[i]=1;
57871462 1864}
1865
1866void syscall_alloc(struct regstat *current,int i)
1867{
1868 alloc_cc(current,i);
1869 dirty_reg(current,CCREG);
1870 alloc_all(current,i);
e1190b87 1871 minimum_free_regs[i]=HOST_REGS;
57871462 1872 current->isconst=0;
1873}
1874
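// Allocate registers for the instruction sitting in a branch delay slot,
// dispatching on its itype. A jump in the delay slot is not supported:
// speculative precompilation is disabled (stop_after_jal) and only a debug
// message is emitted.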
1875void delayslot_alloc(struct regstat *current,int i)
1876{
1877 switch(itype[i]) {
1878 case UJUMP:
1879 case CJUMP:
1880 case SJUMP:
1881 case RJUMP:
1882 case FJUMP:
1883 case SYSCALL:
7139f3c8 1884 case HLECALL:
57871462 1885 case SPAN:
1886 assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
1887 printf("Disabled speculative precompilation\n");
1888 stop_after_jal=1;
1889 break;
1890 case IMM16:
1891 imm16_alloc(current,i);
1892 break;
1893 case LOAD:
1894 case LOADLR:
1895 load_alloc(current,i);
1896 break;
1897 case STORE:
1898 case STORELR:
1899 store_alloc(current,i);
1900 break;
1901 case ALU:
1902 alu_alloc(current,i);
1903 break;
1904 case SHIFT:
1905 shift_alloc(current,i);
1906 break;
1907 case MULTDIV:
1908 multdiv_alloc(current,i);
1909 break;
1910 case SHIFTIMM:
1911 shiftimm_alloc(current,i);
1912 break;
1913 case MOV:
1914 mov_alloc(current,i);
1915 break;
1916 case COP0:
1917 cop0_alloc(current,i);
1918 break;
1919 case COP1:
b9b61529 1920 case COP2:
57871462 1921 cop1_alloc(current,i);
1922 break;
1923 case C1LS:
1924 c1ls_alloc(current,i);
1925 break;
b9b61529 1926 case C2LS:
1927 c2ls_alloc(current,i);
1928 break;
57871462 1929 case FCONV:
1930 fconv_alloc(current,i);
1931 break;
1932 case FLOAT:
1933 float_alloc(current,i);
1934 break;
1935 case FCOMP:
1936 fcomp_alloc(current,i);
1937 break;
b9b61529 1938 case C2OP:
1939 c2op_alloc(current,i);
1940 break;
57871462 1941 }
1942}
1943
1944// Special case where a branch and delay slot span two pages in virtual memory
1945static void pagespan_alloc(struct regstat *current,int i)
1946{
1947 current->isconst=0;
1948 current->wasconst=0;
1949 regs[i].wasconst=0;
e1190b87 1950 minimum_free_regs[i]=HOST_REGS;
57871462 1951 alloc_all(current,i);
1952 alloc_cc(current,i);
1953 dirty_reg(current,CCREG);
1954 if(opcode[i]==3) // JAL
1955 {
1956 alloc_reg(current,i,31);
1957 dirty_reg(current,31);
1958 }
1959 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1960 {
1961 alloc_reg(current,i,rs1[i]);
5067f341 1962 if (rt1[i]!=0) {
1963 alloc_reg(current,i,rt1[i]);
1964 dirty_reg(current,rt1[i]);
57871462 1965 }
1966 }
1967 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1968 {
1969 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1970 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1971 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1972 {
1973 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1974 if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1975 }
1976 }
1977 else
1978 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1979 {
1980 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1981 if(!((current->is32>>rs1[i])&1))
1982 {
1983 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1984 }
1985 }
1986 else
1987 if(opcode[i]==0x11) // BC1
1988 {
1989 alloc_reg(current,i,FSREG);
1990 alloc_reg(current,i,CSREG);
1991 }
1992 //else ...
1993}
1994
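// Queue a stub to be generated after the main block: each stubs[] entry
// records the stub type, the address of the branch to patch, the return
// address, and up to five type-specific arguments (a..e).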
1995void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1996{
1997 stubs[stubcount][0]=type;
1998 stubs[stubcount][1]=addr;
1999 stubs[stubcount][2]=retaddr;
2000 stubs[stubcount][3]=a;
2001 stubs[stubcount][4]=b;
2002 stubs[stubcount][5]=c;
2003 stubs[stubcount][6]=d;
2004 stubs[stubcount][7]=e;
2005 stubcount++;
2006}
2007
2008// Write out a single register
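// The guest register r is written back from whichever host register maps it;
// map entries >=64 denote the upper half of a 64-bit value. If the value is
// known to be 32 bits wide (is32), the upper half is regenerated by sign
// extension before being stored.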
2009void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2010{
2011 int hr;
2012 for(hr=0;hr<HOST_REGS;hr++) {
2013 if(hr!=EXCLUDE_REG) {
2014 if((regmap[hr]&63)==r) {
2015 if((dirty>>hr)&1) {
2016 if(regmap[hr]<64) {
2017 emit_storereg(r,hr);
24385cae 2018#ifndef FORCE32
57871462 2019 if((is32>>regmap[hr])&1) {
2020 emit_sarimm(hr,31,hr);
2021 emit_storereg(r|64,hr);
2022 }
24385cae 2023#endif
57871462 2024 }else{
2025 emit_storereg(r|64,hr);
2026 }
2027 }
2028 }
2029 }
2030 }
2031}
2032
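// Debug/trace helpers: mchecksum() and rchecksum() compute simple checksums
// over RDRAM and the register file, rlist() dumps the GPRs (and FPRs), and
// memdebug() prints a trace line; the commented-out blocks in the assemblers
// below show how it can be called from generated code.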
2033int mchecksum()
2034{
2035 //if(!tracedebug) return 0;
2036 int i;
2037 int sum=0;
2038 for(i=0;i<2097152;i++) {
2039 unsigned int temp=sum;
2040 sum<<=1;
2041 sum|=(~temp)>>31;
2042 sum^=((u_int *)rdram)[i];
2043 }
2044 return sum;
2045}
2046int rchecksum()
2047{
2048 int i;
2049 int sum=0;
2050 for(i=0;i<64;i++)
2051 sum^=((u_int *)reg)[i];
2052 return sum;
2053}
57871462 2054void rlist()
2055{
2056 int i;
2057 printf("TRACE: ");
2058 for(i=0;i<32;i++)
2059 printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2060 printf("\n");
3d624f89 2061#ifndef DISABLE_COP1
57871462 2062 printf("TRACE: ");
2063 for(i=0;i<32;i++)
2064 printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2065 printf("\n");
3d624f89 2066#endif
57871462 2067}
2068
2069void enabletrace()
2070{
2071 tracedebug=1;
2072}
2073
2074void memdebug(int i)
2075{
2076 //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2077 //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2078 //rlist();
2079 //if(tracedebug) {
2080 //if(Count>=-2084597794) {
2081 if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2082 //if(0) {
2083 printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2084 //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2085 //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2086 rlist();
2087 #ifdef __i386__
2088 printf("TRACE: %x\n",(&i)[-1]);
2089 #endif
2090 #ifdef __arm__
2091 int j;
2092 printf("TRACE: %x \n",(&j)[10]);
2093 printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2094 #endif
2095 //fflush(stdout);
2096 }
2097 //printf("TRACE: %x\n",(&i)[-1]);
2098}
2099
2100void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2101{
2102 printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2103}
2104
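// Emit host code for the R-type ALU group: ADD/ADDU/SUB/SUBU (0x20-0x23),
// DADD/DADDU/DSUB/DSUBU (0x2c-0x2f), SLT/SLTU (0x2a-0x2b) and
// AND/OR/XOR/NOR (0x24-0x27). Operands that are r0 collapse to moves,
// negations or zeroing of the destination, and 64-bit operands are handled
// as separate low/high host registers.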
2105void alu_assemble(int i,struct regstat *i_regs)
2106{
2107 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2108 if(rt1[i]) {
2109 signed char s1,s2,t;
2110 t=get_reg(i_regs->regmap,rt1[i]);
2111 if(t>=0) {
2112 s1=get_reg(i_regs->regmap,rs1[i]);
2113 s2=get_reg(i_regs->regmap,rs2[i]);
2114 if(rs1[i]&&rs2[i]) {
2115 assert(s1>=0);
2116 assert(s2>=0);
2117 if(opcode2[i]&2) emit_sub(s1,s2,t);
2118 else emit_add(s1,s2,t);
2119 }
2120 else if(rs1[i]) {
2121 if(s1>=0) emit_mov(s1,t);
2122 else emit_loadreg(rs1[i],t);
2123 }
2124 else if(rs2[i]) {
2125 if(s2>=0) {
2126 if(opcode2[i]&2) emit_neg(s2,t);
2127 else emit_mov(s2,t);
2128 }
2129 else {
2130 emit_loadreg(rs2[i],t);
2131 if(opcode2[i]&2) emit_neg(t,t);
2132 }
2133 }
2134 else emit_zeroreg(t);
2135 }
2136 }
2137 }
2138 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2139 if(rt1[i]) {
2140 signed char s1l,s2l,s1h,s2h,tl,th;
2141 tl=get_reg(i_regs->regmap,rt1[i]);
2142 th=get_reg(i_regs->regmap,rt1[i]|64);
2143 if(tl>=0) {
2144 s1l=get_reg(i_regs->regmap,rs1[i]);
2145 s2l=get_reg(i_regs->regmap,rs2[i]);
2146 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2147 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2148 if(rs1[i]&&rs2[i]) {
2149 assert(s1l>=0);
2150 assert(s2l>=0);
2151 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2152 else emit_adds(s1l,s2l,tl);
2153 if(th>=0) {
2154 #ifdef INVERTED_CARRY
2155 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2156 #else
2157 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2158 #endif
2159 else emit_add(s1h,s2h,th);
2160 }
2161 }
2162 else if(rs1[i]) {
2163 if(s1l>=0) emit_mov(s1l,tl);
2164 else emit_loadreg(rs1[i],tl);
2165 if(th>=0) {
2166 if(s1h>=0) emit_mov(s1h,th);
2167 else emit_loadreg(rs1[i]|64,th);
2168 }
2169 }
2170 else if(rs2[i]) {
2171 if(s2l>=0) {
2172 if(opcode2[i]&2) emit_negs(s2l,tl);
2173 else emit_mov(s2l,tl);
2174 }
2175 else {
2176 emit_loadreg(rs2[i],tl);
2177 if(opcode2[i]&2) emit_negs(tl,tl);
2178 }
2179 if(th>=0) {
2180 #ifdef INVERTED_CARRY
2181 if(s2h>=0) emit_mov(s2h,th);
2182 else emit_loadreg(rs2[i]|64,th);
2183 if(opcode2[i]&2) {
2184 emit_adcimm(-1,th); // x86 has inverted carry flag
2185 emit_not(th,th);
2186 }
2187 #else
2188 if(opcode2[i]&2) {
2189 if(s2h>=0) emit_rscimm(s2h,0,th);
2190 else {
2191 emit_loadreg(rs2[i]|64,th);
2192 emit_rscimm(th,0,th);
2193 }
2194 }else{
2195 if(s2h>=0) emit_mov(s2h,th);
2196 else emit_loadreg(rs2[i]|64,th);
2197 }
2198 #endif
2199 }
2200 }
2201 else {
2202 emit_zeroreg(tl);
2203 if(th>=0) emit_zeroreg(th);
2204 }
2205 }
2206 }
2207 }
2208 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2209 if(rt1[i]) {
2210 signed char s1l,s1h,s2l,s2h,t;
2211 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2212 {
2213 t=get_reg(i_regs->regmap,rt1[i]);
2214 //assert(t>=0);
2215 if(t>=0) {
2216 s1l=get_reg(i_regs->regmap,rs1[i]);
2217 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2218 s2l=get_reg(i_regs->regmap,rs2[i]);
2219 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2220 if(rs2[i]==0) // rx<r0
2221 {
2222 assert(s1h>=0);
2223 if(opcode2[i]==0x2a) // SLT
2224 emit_shrimm(s1h,31,t);
 2225 else // SLTU (unsigned cannot be less than zero)
2226 emit_zeroreg(t);
2227 }
2228 else if(rs1[i]==0) // r0<rx
2229 {
2230 assert(s2h>=0);
2231 if(opcode2[i]==0x2a) // SLT
2232 emit_set_gz64_32(s2h,s2l,t);
2233 else // SLTU (set if not zero)
2234 emit_set_nz64_32(s2h,s2l,t);
2235 }
2236 else {
2237 assert(s1l>=0);assert(s1h>=0);
2238 assert(s2l>=0);assert(s2h>=0);
2239 if(opcode2[i]==0x2a) // SLT
2240 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2241 else // SLTU
2242 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2243 }
2244 }
2245 } else {
2246 t=get_reg(i_regs->regmap,rt1[i]);
2247 //assert(t>=0);
2248 if(t>=0) {
2249 s1l=get_reg(i_regs->regmap,rs1[i]);
2250 s2l=get_reg(i_regs->regmap,rs2[i]);
2251 if(rs2[i]==0) // rx<r0
2252 {
2253 assert(s1l>=0);
2254 if(opcode2[i]==0x2a) // SLT
2255 emit_shrimm(s1l,31,t);
 2256 else // SLTU (unsigned cannot be less than zero)
2257 emit_zeroreg(t);
2258 }
2259 else if(rs1[i]==0) // r0<rx
2260 {
2261 assert(s2l>=0);
2262 if(opcode2[i]==0x2a) // SLT
2263 emit_set_gz32(s2l,t);
2264 else // SLTU (set if not zero)
2265 emit_set_nz32(s2l,t);
2266 }
2267 else{
2268 assert(s1l>=0);assert(s2l>=0);
2269 if(opcode2[i]==0x2a) // SLT
2270 emit_set_if_less32(s1l,s2l,t);
2271 else // SLTU
2272 emit_set_if_carry32(s1l,s2l,t);
2273 }
2274 }
2275 }
2276 }
2277 }
2278 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2279 if(rt1[i]) {
2280 signed char s1l,s1h,s2l,s2h,th,tl;
2281 tl=get_reg(i_regs->regmap,rt1[i]);
2282 th=get_reg(i_regs->regmap,rt1[i]|64);
2283 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2284 {
2285 assert(tl>=0);
2286 if(tl>=0) {
2287 s1l=get_reg(i_regs->regmap,rs1[i]);
2288 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2289 s2l=get_reg(i_regs->regmap,rs2[i]);
2290 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2291 if(rs1[i]&&rs2[i]) {
2292 assert(s1l>=0);assert(s1h>=0);
2293 assert(s2l>=0);assert(s2h>=0);
2294 if(opcode2[i]==0x24) { // AND
2295 emit_and(s1l,s2l,tl);
2296 emit_and(s1h,s2h,th);
2297 } else
2298 if(opcode2[i]==0x25) { // OR
2299 emit_or(s1l,s2l,tl);
2300 emit_or(s1h,s2h,th);
2301 } else
2302 if(opcode2[i]==0x26) { // XOR
2303 emit_xor(s1l,s2l,tl);
2304 emit_xor(s1h,s2h,th);
2305 } else
2306 if(opcode2[i]==0x27) { // NOR
2307 emit_or(s1l,s2l,tl);
2308 emit_or(s1h,s2h,th);
2309 emit_not(tl,tl);
2310 emit_not(th,th);
2311 }
2312 }
2313 else
2314 {
2315 if(opcode2[i]==0x24) { // AND
2316 emit_zeroreg(tl);
2317 emit_zeroreg(th);
2318 } else
2319 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2320 if(rs1[i]){
2321 if(s1l>=0) emit_mov(s1l,tl);
2322 else emit_loadreg(rs1[i],tl);
2323 if(s1h>=0) emit_mov(s1h,th);
2324 else emit_loadreg(rs1[i]|64,th);
2325 }
2326 else
2327 if(rs2[i]){
2328 if(s2l>=0) emit_mov(s2l,tl);
2329 else emit_loadreg(rs2[i],tl);
2330 if(s2h>=0) emit_mov(s2h,th);
2331 else emit_loadreg(rs2[i]|64,th);
2332 }
2333 else{
2334 emit_zeroreg(tl);
2335 emit_zeroreg(th);
2336 }
2337 } else
2338 if(opcode2[i]==0x27) { // NOR
2339 if(rs1[i]){
2340 if(s1l>=0) emit_not(s1l,tl);
2341 else{
2342 emit_loadreg(rs1[i],tl);
2343 emit_not(tl,tl);
2344 }
2345 if(s1h>=0) emit_not(s1h,th);
2346 else{
2347 emit_loadreg(rs1[i]|64,th);
2348 emit_not(th,th);
2349 }
2350 }
2351 else
2352 if(rs2[i]){
2353 if(s2l>=0) emit_not(s2l,tl);
2354 else{
2355 emit_loadreg(rs2[i],tl);
2356 emit_not(tl,tl);
2357 }
2358 if(s2h>=0) emit_not(s2h,th);
2359 else{
2360 emit_loadreg(rs2[i]|64,th);
2361 emit_not(th,th);
2362 }
2363 }
2364 else {
2365 emit_movimm(-1,tl);
2366 emit_movimm(-1,th);
2367 }
2368 }
2369 }
2370 }
2371 }
2372 else
2373 {
2374 // 32 bit
2375 if(tl>=0) {
2376 s1l=get_reg(i_regs->regmap,rs1[i]);
2377 s2l=get_reg(i_regs->regmap,rs2[i]);
2378 if(rs1[i]&&rs2[i]) {
2379 assert(s1l>=0);
2380 assert(s2l>=0);
2381 if(opcode2[i]==0x24) { // AND
2382 emit_and(s1l,s2l,tl);
2383 } else
2384 if(opcode2[i]==0x25) { // OR
2385 emit_or(s1l,s2l,tl);
2386 } else
2387 if(opcode2[i]==0x26) { // XOR
2388 emit_xor(s1l,s2l,tl);
2389 } else
2390 if(opcode2[i]==0x27) { // NOR
2391 emit_or(s1l,s2l,tl);
2392 emit_not(tl,tl);
2393 }
2394 }
2395 else
2396 {
2397 if(opcode2[i]==0x24) { // AND
2398 emit_zeroreg(tl);
2399 } else
2400 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2401 if(rs1[i]){
2402 if(s1l>=0) emit_mov(s1l,tl);
2403 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2404 }
2405 else
2406 if(rs2[i]){
2407 if(s2l>=0) emit_mov(s2l,tl);
2408 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2409 }
2410 else emit_zeroreg(tl);
2411 } else
2412 if(opcode2[i]==0x27) { // NOR
2413 if(rs1[i]){
2414 if(s1l>=0) emit_not(s1l,tl);
2415 else {
2416 emit_loadreg(rs1[i],tl);
2417 emit_not(tl,tl);
2418 }
2419 }
2420 else
2421 if(rs2[i]){
2422 if(s2l>=0) emit_not(s2l,tl);
2423 else {
2424 emit_loadreg(rs2[i],tl);
2425 emit_not(tl,tl);
2426 }
2427 }
2428 else emit_movimm(-1,tl);
2429 }
2430 }
2431 }
2432 }
2433 }
2434 }
2435}
2436
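// Emit host code for the I-type immediate instructions: LUI, ADDI/ADDIU,
// DADDI/DADDIU, SLTI/SLTIU and ANDI/ORI/XORI. Where the source is a known
// constant (wasconst), the result is folded and materialized directly with
// emit_movimm().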
2437void imm16_assemble(int i,struct regstat *i_regs)
2438{
2439 if (opcode[i]==0x0f) { // LUI
2440 if(rt1[i]) {
2441 signed char t;
2442 t=get_reg(i_regs->regmap,rt1[i]);
2443 //assert(t>=0);
2444 if(t>=0) {
2445 if(!((i_regs->isconst>>t)&1))
2446 emit_movimm(imm[i]<<16,t);
2447 }
2448 }
2449 }
2450 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2451 if(rt1[i]) {
2452 signed char s,t;
2453 t=get_reg(i_regs->regmap,rt1[i]);
2454 s=get_reg(i_regs->regmap,rs1[i]);
2455 if(rs1[i]) {
2456 //assert(t>=0);
2457 //assert(s>=0);
2458 if(t>=0) {
2459 if(!((i_regs->isconst>>t)&1)) {
2460 if(s<0) {
2461 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2462 emit_addimm(t,imm[i],t);
2463 }else{
2464 if(!((i_regs->wasconst>>s)&1))
2465 emit_addimm(s,imm[i],t);
2466 else
2467 emit_movimm(constmap[i][s]+imm[i],t);
2468 }
2469 }
2470 }
2471 } else {
2472 if(t>=0) {
2473 if(!((i_regs->isconst>>t)&1))
2474 emit_movimm(imm[i],t);
2475 }
2476 }
2477 }
2478 }
2479 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2480 if(rt1[i]) {
2481 signed char sh,sl,th,tl;
2482 th=get_reg(i_regs->regmap,rt1[i]|64);
2483 tl=get_reg(i_regs->regmap,rt1[i]);
2484 sh=get_reg(i_regs->regmap,rs1[i]|64);
2485 sl=get_reg(i_regs->regmap,rs1[i]);
2486 if(tl>=0) {
2487 if(rs1[i]) {
2488 assert(sh>=0);
2489 assert(sl>=0);
2490 if(th>=0) {
2491 emit_addimm64_32(sh,sl,imm[i],th,tl);
2492 }
2493 else {
2494 emit_addimm(sl,imm[i],tl);
2495 }
2496 } else {
2497 emit_movimm(imm[i],tl);
2498 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2499 }
2500 }
2501 }
2502 }
2503 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2504 if(rt1[i]) {
2505 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2506 signed char sh,sl,t;
2507 t=get_reg(i_regs->regmap,rt1[i]);
2508 sh=get_reg(i_regs->regmap,rs1[i]|64);
2509 sl=get_reg(i_regs->regmap,rs1[i]);
2510 //assert(t>=0);
2511 if(t>=0) {
2512 if(rs1[i]>0) {
2513 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2514 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2515 if(opcode[i]==0x0a) { // SLTI
2516 if(sl<0) {
2517 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2518 emit_slti32(t,imm[i],t);
2519 }else{
2520 emit_slti32(sl,imm[i],t);
2521 }
2522 }
2523 else { // SLTIU
2524 if(sl<0) {
2525 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2526 emit_sltiu32(t,imm[i],t);
2527 }else{
2528 emit_sltiu32(sl,imm[i],t);
2529 }
2530 }
2531 }else{ // 64-bit
2532 assert(sl>=0);
2533 if(opcode[i]==0x0a) // SLTI
2534 emit_slti64_32(sh,sl,imm[i],t);
2535 else // SLTIU
2536 emit_sltiu64_32(sh,sl,imm[i],t);
2537 }
2538 }else{
2539 // SLTI(U) with r0 is just stupid,
2540 // nonetheless examples can be found
 2541 if(opcode[i]==0x0a) { // SLTI
 2542 if(0<imm[i]) emit_movimm(1,t);
 2543 else emit_zeroreg(t);
 2544 } else // SLTIU
 2545 {
 2546 if(imm[i]) emit_movimm(1,t);
 2547 else emit_zeroreg(t);
 2548 }
2549 }
2550 }
2551 }
2552 }
2553 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2554 if(rt1[i]) {
2555 signed char sh,sl,th,tl;
2556 th=get_reg(i_regs->regmap,rt1[i]|64);
2557 tl=get_reg(i_regs->regmap,rt1[i]);
2558 sh=get_reg(i_regs->regmap,rs1[i]|64);
2559 sl=get_reg(i_regs->regmap,rs1[i]);
2560 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2561 if(opcode[i]==0x0c) //ANDI
2562 {
2563 if(rs1[i]) {
2564 if(sl<0) {
2565 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2566 emit_andimm(tl,imm[i],tl);
2567 }else{
2568 if(!((i_regs->wasconst>>sl)&1))
2569 emit_andimm(sl,imm[i],tl);
2570 else
2571 emit_movimm(constmap[i][sl]&imm[i],tl);
2572 }
2573 }
2574 else
2575 emit_zeroreg(tl);
2576 if(th>=0) emit_zeroreg(th);
2577 }
2578 else
2579 {
2580 if(rs1[i]) {
2581 if(sl<0) {
2582 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2583 }
2584 if(th>=0) {
2585 if(sh<0) {
2586 emit_loadreg(rs1[i]|64,th);
2587 }else{
2588 emit_mov(sh,th);
2589 }
2590 }
2591 if(opcode[i]==0x0d) //ORI
2592 if(sl<0) {
2593 emit_orimm(tl,imm[i],tl);
2594 }else{
2595 if(!((i_regs->wasconst>>sl)&1))
2596 emit_orimm(sl,imm[i],tl);
2597 else
2598 emit_movimm(constmap[i][sl]|imm[i],tl);
2599 }
2600 if(opcode[i]==0x0e) //XORI
2601 if(sl<0) {
2602 emit_xorimm(tl,imm[i],tl);
2603 }else{
2604 if(!((i_regs->wasconst>>sl)&1))
2605 emit_xorimm(sl,imm[i],tl);
2606 else
2607 emit_movimm(constmap[i][sl]^imm[i],tl);
2608 }
2609 }
2610 else {
2611 emit_movimm(imm[i],tl);
2612 if(th>=0) emit_zeroreg(th);
2613 }
2614 }
2615 }
2616 }
2617 }
2618}
2619
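// Emit host code for shift-by-immediate instructions: SLL/SRL/SRA, the
// 64-bit DSLL/DSRL/DSRA, and the DSLL32/DSRL32/DSRA32 forms, which start by
// moving the value between the low and high host registers before shifting
// by the remaining amount.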
2620void shiftimm_assemble(int i,struct regstat *i_regs)
2621{
2622 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2623 {
2624 if(rt1[i]) {
2625 signed char s,t;
2626 t=get_reg(i_regs->regmap,rt1[i]);
2627 s=get_reg(i_regs->regmap,rs1[i]);
2628 //assert(t>=0);
2629 if(t>=0){
2630 if(rs1[i]==0)
2631 {
2632 emit_zeroreg(t);
2633 }
2634 else
2635 {
2636 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2637 if(imm[i]) {
2638 if(opcode2[i]==0) // SLL
2639 {
2640 emit_shlimm(s<0?t:s,imm[i],t);
2641 }
2642 if(opcode2[i]==2) // SRL
2643 {
2644 emit_shrimm(s<0?t:s,imm[i],t);
2645 }
2646 if(opcode2[i]==3) // SRA
2647 {
2648 emit_sarimm(s<0?t:s,imm[i],t);
2649 }
2650 }else{
2651 // Shift by zero
2652 if(s>=0 && s!=t) emit_mov(s,t);
2653 }
2654 }
2655 }
2656 //emit_storereg(rt1[i],t); //DEBUG
2657 }
2658 }
2659 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2660 {
2661 if(rt1[i]) {
2662 signed char sh,sl,th,tl;
2663 th=get_reg(i_regs->regmap,rt1[i]|64);
2664 tl=get_reg(i_regs->regmap,rt1[i]);
2665 sh=get_reg(i_regs->regmap,rs1[i]|64);
2666 sl=get_reg(i_regs->regmap,rs1[i]);
2667 if(tl>=0) {
2668 if(rs1[i]==0)
2669 {
2670 emit_zeroreg(tl);
2671 if(th>=0) emit_zeroreg(th);
2672 }
2673 else
2674 {
2675 assert(sl>=0);
2676 assert(sh>=0);
2677 if(imm[i]) {
2678 if(opcode2[i]==0x38) // DSLL
2679 {
2680 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2681 emit_shlimm(sl,imm[i],tl);
2682 }
2683 if(opcode2[i]==0x3a) // DSRL
2684 {
2685 emit_shrdimm(sl,sh,imm[i],tl);
2686 if(th>=0) emit_shrimm(sh,imm[i],th);
2687 }
2688 if(opcode2[i]==0x3b) // DSRA
2689 {
2690 emit_shrdimm(sl,sh,imm[i],tl);
2691 if(th>=0) emit_sarimm(sh,imm[i],th);
2692 }
2693 }else{
2694 // Shift by zero
2695 if(sl!=tl) emit_mov(sl,tl);
2696 if(th>=0&&sh!=th) emit_mov(sh,th);
2697 }
2698 }
2699 }
2700 }
2701 }
2702 if(opcode2[i]==0x3c) // DSLL32
2703 {
2704 if(rt1[i]) {
2705 signed char sl,tl,th;
2706 tl=get_reg(i_regs->regmap,rt1[i]);
2707 th=get_reg(i_regs->regmap,rt1[i]|64);
2708 sl=get_reg(i_regs->regmap,rs1[i]);
2709 if(th>=0||tl>=0){
2710 assert(tl>=0);
2711 assert(th>=0);
2712 assert(sl>=0);
2713 emit_mov(sl,th);
2714 emit_zeroreg(tl);
2715 if(imm[i]>32)
2716 {
2717 emit_shlimm(th,imm[i]&31,th);
2718 }
2719 }
2720 }
2721 }
2722 if(opcode2[i]==0x3e) // DSRL32
2723 {
2724 if(rt1[i]) {
2725 signed char sh,tl,th;
2726 tl=get_reg(i_regs->regmap,rt1[i]);
2727 th=get_reg(i_regs->regmap,rt1[i]|64);
2728 sh=get_reg(i_regs->regmap,rs1[i]|64);
2729 if(tl>=0){
2730 assert(sh>=0);
2731 emit_mov(sh,tl);
2732 if(th>=0) emit_zeroreg(th);
2733 if(imm[i]>32)
2734 {
2735 emit_shrimm(tl,imm[i]&31,tl);
2736 }
2737 }
2738 }
2739 }
2740 if(opcode2[i]==0x3f) // DSRA32
2741 {
2742 if(rt1[i]) {
2743 signed char sh,tl;
2744 tl=get_reg(i_regs->regmap,rt1[i]);
2745 sh=get_reg(i_regs->regmap,rs1[i]|64);
2746 if(tl>=0){
2747 assert(sh>=0);
2748 emit_mov(sh,tl);
2749 if(imm[i]>32)
2750 {
2751 emit_sarimm(tl,imm[i]&31,tl);
2752 }
2753 }
2754 }
2755 }
2756}
2757
2758#ifndef shift_assemble
2759void shift_assemble(int i,struct regstat *i_regs)
2760{
2761 printf("Need shift_assemble for this architecture.\n");
2762 exit(1);
2763}
2764#endif
2765
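// Emit host code for aligned loads (LB/LBU/LH/LHU/LW/LWU/LD). The address is
// either range-checked against RAM_SIZE or translated through the TLB map;
// out-of-range addresses fall back to a stub added with add_stub(), and a
// load from a constant address known to miss RAM becomes an
// inline_readstub() call.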
2766void load_assemble(int i,struct regstat *i_regs)
2767{
2768 int s,th,tl,addr,map=-1;
2769 int offset;
2770 int jaddr=0;
5bf843dc 2771 int memtarget=0,c=0;
57871462 2772 u_int hr,reglist=0;
2773 th=get_reg(i_regs->regmap,rt1[i]|64);
2774 tl=get_reg(i_regs->regmap,rt1[i]);
2775 s=get_reg(i_regs->regmap,rs1[i]);
2776 offset=imm[i];
2777 for(hr=0;hr<HOST_REGS;hr++) {
2778 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2779 }
2780 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2781 if(s>=0) {
2782 c=(i_regs->wasconst>>s)&1;
4cb76aa4 2783 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 2784 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2785 }
57871462 2786 //printf("load_assemble: c=%d\n",c);
2787 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2788 // FIXME: Even if the load is a NOP, we should check for pagefaults...
5bf843dc 2789#ifdef PCSX
f18c0f46 2790 if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2791 ||rt1[i]==0) {
5bf843dc 2792 // could be FIFO, must perform the read
f18c0f46 2793 // ||dummy read
5bf843dc 2794 assem_debug("(forced read)\n");
2795 tl=get_reg(i_regs->regmap,-1);
2796 assert(tl>=0);
5bf843dc 2797 }
f18c0f46 2798#endif
5bf843dc 2799 if(offset||s<0||c) addr=tl;
2800 else addr=s;
535d208a 2801 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2802 if(tl>=0) {
2803 //printf("load_assemble: c=%d\n",c);
2804 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2805 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2806 reglist&=~(1<<tl);
2807 if(th>=0) reglist&=~(1<<th);
2808 if(!using_tlb) {
2809 if(!c) {
2810 #ifdef RAM_OFFSET
2811 map=get_reg(i_regs->regmap,ROREG);
2812 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2813 #endif
57871462 2814//#define R29_HACK 1
535d208a 2815 #ifdef R29_HACK
2816 // Strmnnrmn's speed hack
2817 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2818 #endif
2819 {
2820 emit_cmpimm(addr,RAM_SIZE);
2821 jaddr=(int)out;
2822 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2823 // Hint to branch predictor that the branch is unlikely to be taken
2824 if(rs1[i]>=28)
2825 emit_jno_unlikely(0);
2826 else
57871462 2827 #endif
535d208a 2828 emit_jno(0);
57871462 2829 }
535d208a 2830 }
2831 }else{ // using tlb
2832 int x=0;
2833 if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2834 if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2835 map=get_reg(i_regs->regmap,TLREG);
2836 assert(map>=0);
2837 map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2838 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2839 }
2840 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2841 if (opcode[i]==0x20) { // LB
2842 if(!c||memtarget) {
2843 if(!dummy) {
57871462 2844 #ifdef HOST_IMM_ADDR32
2845 if(c)
2846 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2847 else
2848 #endif
2849 {
2850 //emit_xorimm(addr,3,tl);
2851 //gen_tlb_addr_r(tl,map);
2852 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 2853 int x=0,a=tl;
2002a1db 2854#ifdef BIG_ENDIAN_MIPS
57871462 2855 if(!c) emit_xorimm(addr,3,tl);
2856 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2857#else
535d208a 2858 if(!c) a=addr;
2002a1db 2859#endif
535d208a 2860 emit_movsbl_indexed_tlb(x,a,map,tl);
57871462 2861 }
57871462 2862 }
535d208a 2863 if(jaddr)
2864 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2865 }
535d208a 2866 else
2867 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2868 }
2869 if (opcode[i]==0x21) { // LH
2870 if(!c||memtarget) {
2871 if(!dummy) {
57871462 2872 #ifdef HOST_IMM_ADDR32
2873 if(c)
2874 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2875 else
2876 #endif
2877 {
535d208a 2878 int x=0,a=tl;
2002a1db 2879#ifdef BIG_ENDIAN_MIPS
57871462 2880 if(!c) emit_xorimm(addr,2,tl);
2881 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2882#else
535d208a 2883 if(!c) a=addr;
2002a1db 2884#endif
57871462 2885 //#ifdef
2886 //emit_movswl_indexed_tlb(x,tl,map,tl);
2887 //else
2888 if(map>=0) {
535d208a 2889 gen_tlb_addr_r(a,map);
2890 emit_movswl_indexed(x,a,tl);
2891 }else{
2892 #ifdef RAM_OFFSET
2893 emit_movswl_indexed(x,a,tl);
2894 #else
2895 emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2896 #endif
2897 }
57871462 2898 }
57871462 2899 }
535d208a 2900 if(jaddr)
2901 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2902 }
535d208a 2903 else
2904 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2905 }
2906 if (opcode[i]==0x23) { // LW
2907 if(!c||memtarget) {
2908 if(!dummy) {
57871462 2909 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2910 #ifdef HOST_IMM_ADDR32
2911 if(c)
2912 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2913 else
2914 #endif
2915 emit_readword_indexed_tlb(0,addr,map,tl);
57871462 2916 }
535d208a 2917 if(jaddr)
2918 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2919 }
535d208a 2920 else
2921 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2922 }
2923 if (opcode[i]==0x24) { // LBU
2924 if(!c||memtarget) {
2925 if(!dummy) {
57871462 2926 #ifdef HOST_IMM_ADDR32
2927 if(c)
2928 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2929 else
2930 #endif
2931 {
2932 //emit_xorimm(addr,3,tl);
2933 //gen_tlb_addr_r(tl,map);
2934 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 2935 int x=0,a=tl;
2002a1db 2936#ifdef BIG_ENDIAN_MIPS
57871462 2937 if(!c) emit_xorimm(addr,3,tl);
2938 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2939#else
535d208a 2940 if(!c) a=addr;
2002a1db 2941#endif
535d208a 2942 emit_movzbl_indexed_tlb(x,a,map,tl);
57871462 2943 }
57871462 2944 }
535d208a 2945 if(jaddr)
2946 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2947 }
535d208a 2948 else
2949 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2950 }
2951 if (opcode[i]==0x25) { // LHU
2952 if(!c||memtarget) {
2953 if(!dummy) {
57871462 2954 #ifdef HOST_IMM_ADDR32
2955 if(c)
2956 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2957 else
2958 #endif
2959 {
535d208a 2960 int x=0,a=tl;
2002a1db 2961#ifdef BIG_ENDIAN_MIPS
57871462 2962 if(!c) emit_xorimm(addr,2,tl);
2963 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2964#else
535d208a 2965 if(!c) a=addr;
2002a1db 2966#endif
57871462 2967 //#ifdef
2968 //emit_movzwl_indexed_tlb(x,tl,map,tl);
2969 //#else
2970 if(map>=0) {
535d208a 2971 gen_tlb_addr_r(a,map);
2972 emit_movzwl_indexed(x,a,tl);
2973 }else{
2974 #ifdef RAM_OFFSET
2975 emit_movzwl_indexed(x,a,tl);
2976 #else
2977 emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2978 #endif
2979 }
57871462 2980 }
2981 }
535d208a 2982 if(jaddr)
2983 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2984 }
535d208a 2985 else
2986 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2987 }
2988 if (opcode[i]==0x27) { // LWU
2989 assert(th>=0);
2990 if(!c||memtarget) {
2991 if(!dummy) {
57871462 2992 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2993 #ifdef HOST_IMM_ADDR32
2994 if(c)
2995 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2996 else
2997 #endif
2998 emit_readword_indexed_tlb(0,addr,map,tl);
57871462 2999 }
535d208a 3000 if(jaddr)
3001 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3002 }
3003 else {
3004 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 3005 }
535d208a 3006 emit_zeroreg(th);
3007 }
3008 if (opcode[i]==0x37) { // LD
3009 if(!c||memtarget) {
3010 if(!dummy) {
57871462 3011 //gen_tlb_addr_r(tl,map);
3012 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3013 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3014 #ifdef HOST_IMM_ADDR32
3015 if(c)
3016 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3017 else
3018 #endif
3019 emit_readdword_indexed_tlb(0,addr,map,th,tl);
57871462 3020 }
535d208a 3021 if(jaddr)
3022 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 3023 }
535d208a 3024 else
3025 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 3026 }
535d208a 3027 }
3028 //emit_storereg(rt1[i],tl); // DEBUG
57871462 3029 //if(opcode[i]==0x23)
3030 //if(opcode[i]==0x24)
3031 //if(opcode[i]==0x23||opcode[i]==0x24)
3032 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3033 {
3034 //emit_pusha();
3035 save_regs(0x100f);
3036 emit_readword((int)&last_count,ECX);
3037 #ifdef __i386__
3038 if(get_reg(i_regs->regmap,CCREG)<0)
3039 emit_loadreg(CCREG,HOST_CCREG);
3040 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3041 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3042 emit_writeword(HOST_CCREG,(int)&Count);
3043 #endif
3044 #ifdef __arm__
3045 if(get_reg(i_regs->regmap,CCREG)<0)
3046 emit_loadreg(CCREG,0);
3047 else
3048 emit_mov(HOST_CCREG,0);
3049 emit_add(0,ECX,0);
3050 emit_addimm(0,2*ccadj[i],0);
3051 emit_writeword(0,(int)&Count);
3052 #endif
3053 emit_call((int)memdebug);
3054 //emit_popa();
3055 restore_regs(0x100f);
3056 }/**/
3057}
3058
3059#ifndef loadlr_assemble
3060void loadlr_assemble(int i,struct regstat *i_regs)
3061{
3062 printf("Need loadlr_assemble for this architecture.\n");
3063 exit(1);
3064}
3065#endif
3066
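// Emit host code for aligned stores (SB/SH/SW/SD). Besides the same
// RAM_SIZE/TLB address handling as loads, a store that hits RAM also checks
// invalid_code[] so that any previously compiled block covering the written
// address gets invalidated (INVCODE_STUB / invalidate_addr_reg).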
3067void store_assemble(int i,struct regstat *i_regs)
3068{
3069 int s,th,tl,map=-1;
3070 int addr,temp;
3071 int offset;
3072 int jaddr=0,jaddr2,type;
666a299d 3073 int memtarget=0,c=0;
57871462 3074 int agr=AGEN1+(i&1);
3075 u_int hr,reglist=0;
3076 th=get_reg(i_regs->regmap,rs2[i]|64);
3077 tl=get_reg(i_regs->regmap,rs2[i]);
3078 s=get_reg(i_regs->regmap,rs1[i]);
3079 temp=get_reg(i_regs->regmap,agr);
3080 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3081 offset=imm[i];
3082 if(s>=0) {
3083 c=(i_regs->wasconst>>s)&1;
4cb76aa4 3084 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 3085 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3086 }
3087 assert(tl>=0);
3088 assert(temp>=0);
3089 for(hr=0;hr<HOST_REGS;hr++) {
3090 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3091 }
3092 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3093 if(offset||s<0||c) addr=temp;
3094 else addr=s;
3095 if(!using_tlb) {
3096 if(!c) {
3097 #ifdef R29_HACK
3098 // Strmnnrmn's speed hack
3099 memtarget=1;
4cb76aa4 3100 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3101 #endif
4cb76aa4 3102 emit_cmpimm(addr,RAM_SIZE);
57871462 3103 #ifdef DESTRUCTIVE_SHIFT
3104 if(s==addr) emit_mov(s,temp);
3105 #endif
3106 #ifdef R29_HACK
4cb76aa4 3107 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3108 #endif
3109 {
3110 jaddr=(int)out;
3111 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3112 // Hint to branch predictor that the branch is unlikely to be taken
3113 if(rs1[i]>=28)
3114 emit_jno_unlikely(0);
3115 else
3116 #endif
3117 emit_jno(0);
3118 }
3119 }
3120 }else{ // using tlb
3121 int x=0;
3122 if (opcode[i]==0x28) x=3; // SB
3123 if (opcode[i]==0x29) x=2; // SH
3124 map=get_reg(i_regs->regmap,TLREG);
3125 assert(map>=0);
3126 map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3127 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3128 }
3129
3130 if (opcode[i]==0x28) { // SB
3131 if(!c||memtarget) {
3132 int x=0;
2002a1db 3133#ifdef BIG_ENDIAN_MIPS
57871462 3134 if(!c) emit_xorimm(addr,3,temp);
3135 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 3136#else
3137 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3138 else if (addr!=temp) emit_mov(addr,temp);
3139#endif
57871462 3140 //gen_tlb_addr_w(temp,map);
3141 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3142 emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3143 }
3144 type=STOREB_STUB;
3145 }
3146 if (opcode[i]==0x29) { // SH
3147 if(!c||memtarget) {
3148 int x=0;
2002a1db 3149#ifdef BIG_ENDIAN_MIPS
57871462 3150 if(!c) emit_xorimm(addr,2,temp);
3151 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 3152#else
3153 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3154 else if (addr!=temp) emit_mov(addr,temp);
3155#endif
57871462 3156 //#ifdef
3157 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3158 //#else
3159 if(map>=0) {
3160 gen_tlb_addr_w(temp,map);
3161 emit_writehword_indexed(tl,x,temp);
3162 }else
3163 emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3164 }
3165 type=STOREH_STUB;
3166 }
3167 if (opcode[i]==0x2B) { // SW
3168 if(!c||memtarget)
3169 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3170 emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3171 type=STOREW_STUB;
3172 }
3173 if (opcode[i]==0x3F) { // SD
3174 if(!c||memtarget) {
3175 if(rs2[i]) {
3176 assert(th>=0);
3177 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3178 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3179 emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3180 }else{
3181 // Store zero
3182 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3183 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3184 emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3185 }
3186 }
3187 type=STORED_STUB;
3188 }
57871462 3189 if(!using_tlb) {
3190 if(!c||memtarget) {
3191 #ifdef DESTRUCTIVE_SHIFT
3192 // The x86 shift operation is 'destructive'; it overwrites the
3193 // source register, so we need to make a copy first and use that.
3194 addr=temp;
3195 #endif
3196 #if defined(HOST_IMM8)
3197 int ir=get_reg(i_regs->regmap,INVCP);
3198 assert(ir>=0);
3199 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3200 #else
3201 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3202 #endif
0bbd1454 3203 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3204 emit_callne(invalidate_addr_reg[addr]);
3205 #else
57871462 3206 jaddr2=(int)out;
3207 emit_jne(0);
3208 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
0bbd1454 3209 #endif
57871462 3210 }
3211 }
3eaa7048 3212 if(jaddr) {
3213 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3214 } else if(c&&!memtarget) {
3215 inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3216 }
57871462 3217 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3218 //if(opcode[i]==0x2B || opcode[i]==0x28)
3219 //if(opcode[i]==0x2B || opcode[i]==0x29)
3220 //if(opcode[i]==0x2B)
3221 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3222 {
3223 //emit_pusha();
3224 save_regs(0x100f);
3225 emit_readword((int)&last_count,ECX);
3226 #ifdef __i386__
3227 if(get_reg(i_regs->regmap,CCREG)<0)
3228 emit_loadreg(CCREG,HOST_CCREG);
3229 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3230 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3231 emit_writeword(HOST_CCREG,(int)&Count);
3232 #endif
3233 #ifdef __arm__
3234 if(get_reg(i_regs->regmap,CCREG)<0)
3235 emit_loadreg(CCREG,0);
3236 else
3237 emit_mov(HOST_CCREG,0);
3238 emit_add(0,ECX,0);
3239 emit_addimm(0,2*ccadj[i],0);
3240 emit_writeword(0,(int)&Count);
3241 #endif
3242 emit_call((int)memdebug);
3243 //emit_popa();
3244 restore_regs(0x100f);
3245 }/**/
3246}
3247
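// Emit host code for the unaligned stores SWL/SWR/SDL/SDR. The low address
// bits are tested at run time and one of four cases stores the appropriate
// byte/halfword/word pieces, rotating the source register as needed;
// SDL/SDR then patch the neighbouring word depending on address bit 2.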
3248void storelr_assemble(int i,struct regstat *i_regs)
3249{
3250 int s,th,tl;
3251 int temp;
3252 int temp2;
3253 int offset;
3254 int jaddr=0,jaddr2;
3255 int case1,case2,case3;
3256 int done0,done1,done2;
3257 int memtarget,c=0;
fab5d06d 3258 int agr=AGEN1+(i&1);
57871462 3259 u_int hr,reglist=0;
3260 th=get_reg(i_regs->regmap,rs2[i]|64);
3261 tl=get_reg(i_regs->regmap,rs2[i]);
3262 s=get_reg(i_regs->regmap,rs1[i]);
fab5d06d 3263 temp=get_reg(i_regs->regmap,agr);
3264 if(temp<0) temp=get_reg(i_regs->regmap,-1);
57871462 3265 offset=imm[i];
3266 if(s>=0) {
3267 c=(i_regs->isconst>>s)&1;
4cb76aa4 3268 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
57871462 3269 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3270 }
3271 assert(tl>=0);
3272 for(hr=0;hr<HOST_REGS;hr++) {
3273 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3274 }
535d208a 3275 assert(temp>=0);
3276 if(!using_tlb) {
3277 if(!c) {
3278 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3279 if(!offset&&s!=temp) emit_mov(s,temp);
3280 jaddr=(int)out;
3281 emit_jno(0);
3282 }
3283 else
3284 {
3285 if(!memtarget||!rs1[i]) {
57871462 3286 jaddr=(int)out;
3287 emit_jmp(0);
3288 }
57871462 3289 }
535d208a 3290 #ifdef RAM_OFFSET
3291 int map=get_reg(i_regs->regmap,ROREG);
3292 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3293 gen_tlb_addr_w(temp,map);
3294 #else
3295 if((u_int)rdram!=0x80000000)
3296 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3297 #endif
3298 }else{ // using tlb
3299 int map=get_reg(i_regs->regmap,TLREG);
3300 assert(map>=0);
3301 map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3302 if(!c&&!offset&&s>=0) emit_mov(s,temp);
3303 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3304 if(!jaddr&&!memtarget) {
3305 jaddr=(int)out;
3306 emit_jmp(0);
57871462 3307 }
535d208a 3308 gen_tlb_addr_w(temp,map);
3309 }
3310
3311 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3312 temp2=get_reg(i_regs->regmap,FTEMP);
3313 if(!rs2[i]) temp2=th=tl;
3314 }
57871462 3315
2002a1db 3316#ifndef BIG_ENDIAN_MIPS
3317 emit_xorimm(temp,3,temp);
3318#endif
535d208a 3319 emit_testimm(temp,2);
3320 case2=(int)out;
3321 emit_jne(0);
3322 emit_testimm(temp,1);
3323 case1=(int)out;
3324 emit_jne(0);
3325 // 0
3326 if (opcode[i]==0x2A) { // SWL
3327 emit_writeword_indexed(tl,0,temp);
3328 }
3329 if (opcode[i]==0x2E) { // SWR
3330 emit_writebyte_indexed(tl,3,temp);
3331 }
3332 if (opcode[i]==0x2C) { // SDL
3333 emit_writeword_indexed(th,0,temp);
3334 if(rs2[i]) emit_mov(tl,temp2);
3335 }
3336 if (opcode[i]==0x2D) { // SDR
3337 emit_writebyte_indexed(tl,3,temp);
3338 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3339 }
3340 done0=(int)out;
3341 emit_jmp(0);
3342 // 1
3343 set_jump_target(case1,(int)out);
3344 if (opcode[i]==0x2A) { // SWL
3345 // Write 3 msb into three least significant bytes
3346 if(rs2[i]) emit_rorimm(tl,8,tl);
3347 emit_writehword_indexed(tl,-1,temp);
3348 if(rs2[i]) emit_rorimm(tl,16,tl);
3349 emit_writebyte_indexed(tl,1,temp);
3350 if(rs2[i]) emit_rorimm(tl,8,tl);
3351 }
3352 if (opcode[i]==0x2E) { // SWR
3353 // Write two lsb into two most significant bytes
3354 emit_writehword_indexed(tl,1,temp);
3355 }
3356 if (opcode[i]==0x2C) { // SDL
3357 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3358 // Write 3 msb into three least significant bytes
3359 if(rs2[i]) emit_rorimm(th,8,th);
3360 emit_writehword_indexed(th,-1,temp);
3361 if(rs2[i]) emit_rorimm(th,16,th);
3362 emit_writebyte_indexed(th,1,temp);
3363 if(rs2[i]) emit_rorimm(th,8,th);
3364 }
3365 if (opcode[i]==0x2D) { // SDR
3366 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3367 // Write two lsb into two most significant bytes
3368 emit_writehword_indexed(tl,1,temp);
3369 }
3370 done1=(int)out;
3371 emit_jmp(0);
3372 // 2
3373 set_jump_target(case2,(int)out);
3374 emit_testimm(temp,1);
3375 case3=(int)out;
3376 emit_jne(0);
3377 if (opcode[i]==0x2A) { // SWL
3378 // Write two msb into two least significant bytes
3379 if(rs2[i]) emit_rorimm(tl,16,tl);
3380 emit_writehword_indexed(tl,-2,temp);
3381 if(rs2[i]) emit_rorimm(tl,16,tl);
3382 }
3383 if (opcode[i]==0x2E) { // SWR
3384 // Write 3 lsb into three most significant bytes
3385 emit_writebyte_indexed(tl,-1,temp);
3386 if(rs2[i]) emit_rorimm(tl,8,tl);
3387 emit_writehword_indexed(tl,0,temp);
3388 if(rs2[i]) emit_rorimm(tl,24,tl);
3389 }
3390 if (opcode[i]==0x2C) { // SDL
3391 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3392 // Write two msb into two least significant bytes
3393 if(rs2[i]) emit_rorimm(th,16,th);
3394 emit_writehword_indexed(th,-2,temp);
3395 if(rs2[i]) emit_rorimm(th,16,th);
3396 }
3397 if (opcode[i]==0x2D) { // SDR
3398 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3399 // Write 3 lsb into three most significant bytes
3400 emit_writebyte_indexed(tl,-1,temp);
3401 if(rs2[i]) emit_rorimm(tl,8,tl);
3402 emit_writehword_indexed(tl,0,temp);
3403 if(rs2[i]) emit_rorimm(tl,24,tl);
3404 }
3405 done2=(int)out;
3406 emit_jmp(0);
3407 // 3
3408 set_jump_target(case3,(int)out);
3409 if (opcode[i]==0x2A) { // SWL
3410 // Write msb into least significant byte
3411 if(rs2[i]) emit_rorimm(tl,24,tl);
3412 emit_writebyte_indexed(tl,-3,temp);
3413 if(rs2[i]) emit_rorimm(tl,8,tl);
3414 }
3415 if (opcode[i]==0x2E) { // SWR
3416 // Write entire word
3417 emit_writeword_indexed(tl,-3,temp);
3418 }
3419 if (opcode[i]==0x2C) { // SDL
3420 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3421 // Write msb into least significant byte
3422 if(rs2[i]) emit_rorimm(th,24,th);
3423 emit_writebyte_indexed(th,-3,temp);
3424 if(rs2[i]) emit_rorimm(th,8,th);
3425 }
3426 if (opcode[i]==0x2D) { // SDR
3427 if(rs2[i]) emit_mov(th,temp2);
3428 // Write entire word
3429 emit_writeword_indexed(tl,-3,temp);
3430 }
3431 set_jump_target(done0,(int)out);
3432 set_jump_target(done1,(int)out);
3433 set_jump_target(done2,(int)out);
3434 if (opcode[i]==0x2C) { // SDL
3435 emit_testimm(temp,4);
57871462 3436 done0=(int)out;
57871462 3437 emit_jne(0);
535d208a 3438 emit_andimm(temp,~3,temp);
3439 emit_writeword_indexed(temp2,4,temp);
3440 set_jump_target(done0,(int)out);
3441 }
3442 if (opcode[i]==0x2D) { // SDR
3443 emit_testimm(temp,4);
3444 done0=(int)out;
3445 emit_jeq(0);
3446 emit_andimm(temp,~3,temp);
3447 emit_writeword_indexed(temp2,-4,temp);
57871462 3448 set_jump_target(done0,(int)out);
57871462 3449 }
535d208a 3450 if(!c||!memtarget)
3451 add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
57871462 3452 if(!using_tlb) {
535d208a 3453 #ifdef RAM_OFFSET
3454 int map=get_reg(i_regs->regmap,ROREG);
3455 if(map<0) map=HOST_TEMPREG;
3456 gen_orig_addr_w(temp,map);
3457 #else
57871462 3458 emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
535d208a 3459 #endif
57871462 3460 #if defined(HOST_IMM8)
3461 int ir=get_reg(i_regs->regmap,INVCP);
3462 assert(ir>=0);
3463 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3464 #else
3465 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3466 #endif
535d208a 3467 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3468 emit_callne(invalidate_addr_reg[temp]);
3469 #else
57871462 3470 jaddr2=(int)out;
3471 emit_jne(0);
3472 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
535d208a 3473 #endif
57871462 3474 }
3475 /*
3476 emit_pusha();
3477 //save_regs(0x100f);
3478 emit_readword((int)&last_count,ECX);
3479 if(get_reg(i_regs->regmap,CCREG)<0)
3480 emit_loadreg(CCREG,HOST_CCREG);
3481 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3482 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3483 emit_writeword(HOST_CCREG,(int)&Count);
3484 emit_call((int)memdebug);
3485 emit_popa();
3486 //restore_regs(0x100f);
3487 /**/
3488}
3489
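// Emit host code for COP1 loads/stores (LWC1/LDC1/SWC1/SDC1). The cop1
// usable bit in the status word is tested first (FP_STUB on failure), the
// FPR address is read from reg_cop1_simple/reg_cop1_double, and the memory
// access goes through the same RAM/TLB paths as ordinary loads and stores.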
3490void c1ls_assemble(int i,struct regstat *i_regs)
3491{
3d624f89 3492#ifndef DISABLE_COP1
57871462 3493 int s,th,tl;
3494 int temp,ar;
3495 int map=-1;
3496 int offset;
3497 int c=0;
3498 int jaddr,jaddr2=0,jaddr3,type;
3499 int agr=AGEN1+(i&1);
3500 u_int hr,reglist=0;
3501 th=get_reg(i_regs->regmap,FTEMP|64);
3502 tl=get_reg(i_regs->regmap,FTEMP);
3503 s=get_reg(i_regs->regmap,rs1[i]);
3504 temp=get_reg(i_regs->regmap,agr);
3505 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3506 offset=imm[i];
3507 assert(tl>=0);
3508 assert(rs1[i]>0);
3509 assert(temp>=0);
3510 for(hr=0;hr<HOST_REGS;hr++) {
3511 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3512 }
3513 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3514 if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3515 {
3516 // Loads use a temporary register which we need to save
3517 reglist|=1<<temp;
3518 }
3519 if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3520 ar=temp;
3521 else // LWC1/LDC1
3522 ar=tl;
3523 //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3524 //else c=(i_regs->wasconst>>s)&1;
3525 if(s>=0) c=(i_regs->wasconst>>s)&1;
3526 // Check cop1 unusable
3527 if(!cop1_usable) {
3528 signed char rs=get_reg(i_regs->regmap,CSREG);
3529 assert(rs>=0);
3530 emit_testimm(rs,0x20000000);
3531 jaddr=(int)out;
3532 emit_jeq(0);
3533 add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3534 cop1_usable=1;
3535 }
3536 if (opcode[i]==0x39) { // SWC1 (get float address)
3537 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3538 }
3539 if (opcode[i]==0x3D) { // SDC1 (get double address)
3540 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3541 }
3542 // Generate address + offset
3543 if(!using_tlb) {
3544 if(!c)
4cb76aa4 3545 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
57871462 3546 }
3547 else
3548 {
3549 map=get_reg(i_regs->regmap,TLREG);
3550 assert(map>=0);
3551 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3552 map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3553 }
3554 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3555 map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3556 }
3557 }
3558 if (opcode[i]==0x39) { // SWC1 (read float)
3559 emit_readword_indexed(0,tl,tl);
3560 }
3561 if (opcode[i]==0x3D) { // SDC1 (read double)
3562 emit_readword_indexed(4,tl,th);
3563 emit_readword_indexed(0,tl,tl);
3564 }
3565 if (opcode[i]==0x31) { // LWC1 (get target address)
3566 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3567 }
3568 if (opcode[i]==0x35) { // LDC1 (get target address)
3569 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3570 }
3571 if(!using_tlb) {
3572 if(!c) {
3573 jaddr2=(int)out;
3574 emit_jno(0);
3575 }
4cb76aa4 3576 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
57871462 3577 jaddr2=(int)out;
3578 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3579 }
3580 #ifdef DESTRUCTIVE_SHIFT
3581 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3582 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3583 }
3584 #endif
3585 }else{
3586 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3587 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3588 }
3589 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3590 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3591 }
3592 }
3593 if (opcode[i]==0x31) { // LWC1
3594 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3595 //gen_tlb_addr_r(ar,map);
3596 //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3597 #ifdef HOST_IMM_ADDR32
3598 if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3599 else
3600 #endif
3601 emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3602 type=LOADW_STUB;
3603 }
3604 if (opcode[i]==0x35) { // LDC1
3605 assert(th>=0);
3606 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3607 //gen_tlb_addr_r(ar,map);
3608 //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3609 //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3610 #ifdef HOST_IMM_ADDR32
3611 if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3612 else
3613 #endif
3614 emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3615 type=LOADD_STUB;
3616 }
3617 if (opcode[i]==0x39) { // SWC1
3618 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3619 emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3620 type=STOREW_STUB;
3621 }
3622 if (opcode[i]==0x3D) { // SDC1
3623 assert(th>=0);
3624 //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3625 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3626 emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3627 type=STORED_STUB;
3628 }
3629 if(!using_tlb) {
3630 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3631 #ifndef DESTRUCTIVE_SHIFT
3632 temp=offset||c||s<0?ar:s;
3633 #endif
3634 #if defined(HOST_IMM8)
3635 int ir=get_reg(i_regs->regmap,INVCP);
3636 assert(ir>=0);
3637 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3638 #else
3639 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3640 #endif
0bbd1454 3641 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3642 emit_callne(invalidate_addr_reg[temp]);
3643 #else
57871462 3644 jaddr3=(int)out;
3645 emit_jne(0);
3646 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
0bbd1454 3647 #endif
57871462 3648 }
3649 }
3650 if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3651 if (opcode[i]==0x31) { // LWC1 (write float)
3652 emit_writeword_indexed(tl,0,temp);
3653 }
3654 if (opcode[i]==0x35) { // LDC1 (write double)
3655 emit_writeword_indexed(th,4,temp);
3656 emit_writeword_indexed(tl,0,temp);
3657 }
3658 //if(opcode[i]==0x39)
3659 /*if(opcode[i]==0x39||opcode[i]==0x31)
3660 {
3661 emit_pusha();
3662 emit_readword((int)&last_count,ECX);
3663 if(get_reg(i_regs->regmap,CCREG)<0)
3664 emit_loadreg(CCREG,HOST_CCREG);
3665 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3666 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3667 emit_writeword(HOST_CCREG,(int)&Count);
3668 emit_call((int)memdebug);
3669 emit_popa();
3670 }/**/
3d624f89 3671#else
3672 cop1_unusable(i, i_regs);
3673#endif
57871462 3674}
3675
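// Emit host code for GTE loads/stores (LWC2/SWC2). Data moves between the
// COP2 register file and memory through FTEMP using cop2_get_dreg() and
// cop2_put_dreg(); only the non-TLB path is supported (assert(!using_tlb)).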
b9b61529 3676void c2ls_assemble(int i,struct regstat *i_regs)
3677{
3678 int s,tl;
3679 int ar;
3680 int offset;
1fd1aceb 3681 int memtarget=0,c=0;
b9b61529 3682 int jaddr,jaddr2=0,jaddr3,type;
3683 int agr=AGEN1+(i&1);
3684 u_int hr,reglist=0;
3685 u_int copr=(source[i]>>16)&0x1f;
3686 s=get_reg(i_regs->regmap,rs1[i]);
3687 tl=get_reg(i_regs->regmap,FTEMP);
3688 offset=imm[i];
3689 assert(rs1[i]>0);
3690 assert(tl>=0);
3691 assert(!using_tlb);
3692
3693 for(hr=0;hr<HOST_REGS;hr++) {
3694 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3695 }
3696 if(i_regs->regmap[HOST_CCREG]==CCREG)
3697 reglist&=~(1<<HOST_CCREG);
3698
3699 // get the address
3700 if (opcode[i]==0x3a) { // SWC2
3701 ar=get_reg(i_regs->regmap,agr);
3702 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3703 reglist|=1<<ar;
3704 } else { // LWC2
3705 ar=tl;
3706 }
1fd1aceb 3707 if(s>=0) c=(i_regs->wasconst>>s)&1;
3708 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
b9b61529 3709 if (!offset&&!c&&s>=0) ar=s;
3710 assert(ar>=0);
3711
3712 if (opcode[i]==0x3a) { // SWC2
3713 cop2_get_dreg(copr,tl,HOST_TEMPREG);
1fd1aceb 3714 type=STOREW_STUB;
b9b61529 3715 }
1fd1aceb 3716 else
b9b61529 3717 type=LOADW_STUB;
1fd1aceb 3718
3719 if(c&&!memtarget) {
3720 jaddr2=(int)out;
3721 emit_jmp(0); // inline_readstub/inline_writestub?
b9b61529 3722 }
1fd1aceb 3723 else {
3724 if(!c) {
3725 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3726 jaddr2=(int)out;
3727 emit_jno(0);
3728 }
3729 if (opcode[i]==0x32) { // LWC2
3730 #ifdef HOST_IMM_ADDR32
3731 if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3732 else
3733 #endif
3734 emit_readword_indexed(0,ar,tl);
3735 }
3736 if (opcode[i]==0x3a) { // SWC2
3737 #ifdef DESTRUCTIVE_SHIFT
3738 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3739 #endif
3740 emit_writeword_indexed(tl,0,ar);
3741 }
b9b61529 3742 }
3743 if(jaddr2)
3744 add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3745 if (opcode[i]==0x3a) { // SWC2
3746#if defined(HOST_IMM8)
3747 int ir=get_reg(i_regs->regmap,INVCP);
3748 assert(ir>=0);
3749 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3750#else
3751 emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3752#endif
0bbd1454 3753 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3754 emit_callne(invalidate_addr_reg[ar]);
3755 #else
b9b61529 3756 jaddr3=(int)out;
3757 emit_jne(0);
3758 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
0bbd1454 3759 #endif
b9b61529 3760 }
3761 if (opcode[i]==0x32) { // LWC2
3762 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3763 }
3764}
3765
57871462 3766#ifndef multdiv_assemble
3767void multdiv_assemble(int i,struct regstat *i_regs)
3768{
3769 printf("Need multdiv_assemble for this architecture.\n");
3770 exit(1);
3771}
3772#endif
3773
3774void mov_assemble(int i,struct regstat *i_regs)
3775{
3776 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3777 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
57871462 3778 if(rt1[i]) {
3779 signed char sh,sl,th,tl;
3780 th=get_reg(i_regs->regmap,rt1[i]|64);
3781 tl=get_reg(i_regs->regmap,rt1[i]);
3782 //assert(tl>=0);
3783 if(tl>=0) {
3784 sh=get_reg(i_regs->regmap,rs1[i]|64);
3785 sl=get_reg(i_regs->regmap,rs1[i]);
3786 if(sl>=0) emit_mov(sl,tl);
3787 else emit_loadreg(rs1[i],tl);
3788 if(th>=0) {
3789 if(sh>=0) emit_mov(sh,th);
3790 else emit_loadreg(rs1[i]|64,th);
3791 }
3792 }
3793 }
3794}
3795
3796#ifndef fconv_assemble
3797void fconv_assemble(int i,struct regstat *i_regs)
3798{
3799 printf("Need fconv_assemble for this architecture.\n");
3800 exit(1);
3801}
3802#endif
3803
3804#if 0
3805void float_assemble(int i,struct regstat *i_regs)
3806{
3807 printf("Need float_assemble for this architecture.\n");
3808 exit(1);
3809}
3810#endif
3811
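// SYSCALL, HLE calls and INTCALL all leave the recompiled code: the PC and
// adjusted cycle count are set up and control jumps to the matching handler
// (jump_syscall_hle, jump_hlecall, jump_intcall).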
3812void syscall_assemble(int i,struct regstat *i_regs)
3813{
3814 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3815 assert(ccreg==HOST_CCREG);
3816 assert(!is_delayslot);
3817 emit_movimm(start+i*4,EAX); // Get PC
3818 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
7139f3c8 3819 emit_jmp((int)jump_syscall_hle); // XXX
3820}
3821
3822void hlecall_assemble(int i,struct regstat *i_regs)
3823{
3824 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3825 assert(ccreg==HOST_CCREG);
3826 assert(!is_delayslot);
3827 emit_movimm(start+i*4+4,0); // Get PC
67ba0fb4 3828 emit_movimm((int)psxHLEt[source[i]&7],1);
7139f3c8 3829 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
67ba0fb4 3830 emit_jmp((int)jump_hlecall);
57871462 3831}
3832
1e973cb0 3833void intcall_assemble(int i,struct regstat *i_regs)
3834{
3835 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3836 assert(ccreg==HOST_CCREG);
3837 assert(!is_delayslot);
3838 emit_movimm(start+i*4,0); // Get PC
3839 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3840 emit_jmp((int)jump_intcall);
3841}
3842
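// Assemble the instruction in a branch delay slot. is_delayslot is set for
// the duration so the per-type assemblers can special-case it (e.g. the
// FP_STUB in c1ls_assemble); a jump type here is reported as a probable bug.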
57871462 3843void ds_assemble(int i,struct regstat *i_regs)
3844{
3845 is_delayslot=1;
3846 switch(itype[i]) {
3847 case ALU:
3848 alu_assemble(i,i_regs);break;
3849 case IMM16:
3850 imm16_assemble(i,i_regs);break;
3851 case SHIFT:
3852 shift_assemble(i,i_regs);break;
3853 case SHIFTIMM:
3854 shiftimm_assemble(i,i_regs);break;
3855 case LOAD:
3856 load_assemble(i,i_regs);break;
3857 case LOADLR:
3858 loadlr_assemble(i,i_regs);break;
3859 case STORE:
3860 store_assemble(i,i_regs);break;
3861 case STORELR:
3862 storelr_assemble(i,i_regs);break;
3863 case COP0:
3864 cop0_assemble(i,i_regs);break;
3865 case COP1:
3866 cop1_assemble(i,i_regs);break;
3867 case C1LS:
3868 c1ls_assemble(i,i_regs);break;
b9b61529 3869 case COP2:
3870 cop2_assemble(i,i_regs);break;
3871 case C2LS:
3872 c2ls_assemble(i,i_regs);break;
3873 case C2OP:
3874 c2op_assemble(i,i_regs);break;
57871462 3875 case FCONV:
3876 fconv_assemble(i,i_regs);break;
3877 case FLOAT:
3878 float_assemble(i,i_regs);break;
3879 case FCOMP:
3880 fcomp_assemble(i,i_regs);break;
3881 case MULTDIV:
3882 multdiv_assemble(i,i_regs);break;
3883 case MOV:
3884 mov_assemble(i,i_regs);break;
3885 case SYSCALL:
7139f3c8 3886 case HLECALL:
1e973cb0 3887 case INTCALL:
57871462 3888 case SPAN:
3889 case UJUMP:
3890 case RJUMP:
3891 case CJUMP:
3892 case SJUMP:
3893 case FJUMP:
3894 printf("Jump in the delay slot. This is probably a bug.\n");
3895 }
3896 is_delayslot=0;
3897}
3898
3899// Is the branch target a valid internal jump?
3900int internal_branch(uint64_t i_is32,int addr)
3901{
3902 if(addr&1) return 0; // Indirect (register) jump
3903 if(addr>=start && addr<start+slen*4-4)
3904 {
3905 int t=(addr-start)>>2;
3906 // Delay slots are not valid branch targets
3907 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3908 // 64 -> 32 bit transition requires a recompile
3909 /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3910 {
3911 if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3912 else printf("optimizable: yes\n");
3913 }*/
3914 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
a28c6ce8 3915#ifndef FORCE32
57871462 3916 if(requires_32bit[t]&~i_is32) return 0;
a28c6ce8 3917 else
3918#endif
3919 return 1;
57871462 3920 }
3921 return 0;
3922}
3923
3924#ifndef wb_invalidate
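// Default writeback for a register map transition: store dirty guest
// registers whose host register is being repurposed (unless they are
// unneeded), then move values that remain allocated to their new host
// registers without writing them back.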
3925void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3926 uint64_t u,uint64_t uu)
3927{
3928 int hr;
3929 for(hr=0;hr<HOST_REGS;hr++) {
3930 if(hr!=EXCLUDE_REG) {
3931 if(pre[hr]!=entry[hr]) {
3932 if(pre[hr]>=0) {
3933 if((dirty>>hr)&1) {
3934 if(get_reg(entry,pre[hr])<0) {
3935 if(pre[hr]<64) {
3936 if(!((u>>pre[hr])&1)) {
3937 emit_storereg(pre[hr],hr);
3938 if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3939 emit_sarimm(hr,31,hr);
3940 emit_storereg(pre[hr]|64,hr);
3941 }
3942 }
3943 }else{
3944 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3945 emit_storereg(pre[hr],hr);
3946 }
3947 }
3948 }
3949 }
3950 }
3951 }
3952 }
3953 }
3954 // Move from one register to another (no writeback)
3955 for(hr=0;hr<HOST_REGS;hr++) {
3956 if(hr!=EXCLUDE_REG) {
3957 if(pre[hr]!=entry[hr]) {
3958 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3959 int nr;
3960 if((nr=get_reg(entry,pre[hr]))>=0) {
3961 emit_mov(hr,nr);
3962 }
3963 }
3964 }
3965 }
3966 }
3967}
3968#endif
3969
3970// Load the specified registers
3971// This only loads the registers given as arguments because
3972// we don't want to load things that will be overwritten
3973void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3974{
3975 int hr;
3976 // Load 32-bit regs
3977 for(hr=0;hr<HOST_REGS;hr++) {
3978 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3979 if(entry[hr]!=regmap[hr]) {
3980 if(regmap[hr]==rs1||regmap[hr]==rs2)
3981 {
3982 if(regmap[hr]==0) {
3983 emit_zeroreg(hr);
3984 }
3985 else
3986 {
3987 emit_loadreg(regmap[hr],hr);
3988 }
3989 }
3990 }
3991 }
3992 }
 3993  // Load 64-bit regs
3994 for(hr=0;hr<HOST_REGS;hr++) {
3995 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3996 if(entry[hr]!=regmap[hr]) {
3997 if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3998 {
3999 assert(regmap[hr]!=64);
4000 if((is32>>(regmap[hr]&63))&1) {
4001 int lr=get_reg(regmap,regmap[hr]-64);
4002 if(lr>=0)
4003 emit_sarimm(lr,31,hr);
4004 else
4005 emit_loadreg(regmap[hr],hr);
4006 }
4007 else
4008 {
4009 emit_loadreg(regmap[hr],hr);
4010 }
4011 }
4012 }
4013 }
4014 }
4015}
4016
4017// Load registers prior to the start of a loop
4018// so that they are not loaded within the loop
4019static void loop_preload(signed char pre[],signed char entry[])
4020{
4021 int hr;
4022 for(hr=0;hr<HOST_REGS;hr++) {
4023 if(hr!=EXCLUDE_REG) {
4024 if(pre[hr]!=entry[hr]) {
4025 if(entry[hr]>=0) {
4026 if(get_reg(pre,entry[hr])<0) {
4027 assem_debug("loop preload:\n");
4028 //printf("loop preload: %d\n",hr);
4029 if(entry[hr]==0) {
4030 emit_zeroreg(hr);
4031 }
4032 else if(entry[hr]<TEMPREG)
4033 {
4034 emit_loadreg(entry[hr],hr);
4035 }
4036 else if(entry[hr]-64<TEMPREG)
4037 {
4038 emit_loadreg(entry[hr],hr);
4039 }
4040 }
4041 }
4042 }
4043 }
4044 }
4045}
4046
4047// Generate address for load/store instruction
b9b61529 4048// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
57871462 4049void address_generation(int i,struct regstat *i_regs,signed char entry[])
4050{
b9b61529 4051 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
57871462 4052 int ra;
4053 int agr=AGEN1+(i&1);
4054 int mgr=MGEN1+(i&1);
4055 if(itype[i]==LOAD) {
4056 ra=get_reg(i_regs->regmap,rt1[i]);
535d208a 4057 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4058 assert(ra>=0);
57871462 4059 }
4060 if(itype[i]==LOADLR) {
4061 ra=get_reg(i_regs->regmap,FTEMP);
4062 }
4063 if(itype[i]==STORE||itype[i]==STORELR) {
4064 ra=get_reg(i_regs->regmap,agr);
4065 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4066 }
b9b61529 4067 if(itype[i]==C1LS||itype[i]==C2LS) {
4068 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
57871462 4069 ra=get_reg(i_regs->regmap,FTEMP);
1fd1aceb 4070 else { // SWC1/SDC1/SWC2/SDC2
57871462 4071 ra=get_reg(i_regs->regmap,agr);
4072 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4073 }
4074 }
4075 int rs=get_reg(i_regs->regmap,rs1[i]);
4076 int rm=get_reg(i_regs->regmap,TLREG);
4077 if(ra>=0) {
4078 int offset=imm[i];
4079 int c=(i_regs->wasconst>>rs)&1;
4080 if(rs1[i]==0) {
4081 // Using r0 as a base address
4082 /*if(rm>=0) {
4083 if(!entry||entry[rm]!=mgr) {
4084 generate_map_const(offset,rm);
4085 } // else did it in the previous cycle
4086 }*/
4087 if(!entry||entry[ra]!=agr) {
4088 if (opcode[i]==0x22||opcode[i]==0x26) {
4089 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4090 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4091 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4092 }else{
4093 emit_movimm(offset,ra);
4094 }
4095 } // else did it in the previous cycle
4096 }
4097 else if(rs<0) {
4098 if(!entry||entry[ra]!=rs1[i])
4099 emit_loadreg(rs1[i],ra);
4100 //if(!entry||entry[ra]!=rs1[i])
4101 // printf("poor load scheduling!\n");
4102 }
4103 else if(c) {
4104 if(rm>=0) {
4105 if(!entry||entry[rm]!=mgr) {
b9b61529 4106 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
57871462 4107 // Stores to memory go thru the mapper to detect self-modifying
4108 // code, loads don't.
4109 if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4cb76aa4 4110 (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4111 generate_map_const(constmap[i][rs]+offset,rm);
4112 }else{
4113 if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4114 generate_map_const(constmap[i][rs]+offset,rm);
4115 }
4116 }
4117 }
4118 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4119 if(!entry||entry[ra]!=agr) {
4120 if (opcode[i]==0x22||opcode[i]==0x26) {
4121 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4122 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4123 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4124 }else{
4125 #ifdef HOST_IMM_ADDR32
b9b61529 4126 if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4127 (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4128 #endif
4129 emit_movimm(constmap[i][rs]+offset,ra);
4130 }
4131 } // else did it in the previous cycle
4132 } // else load_consts already did it
4133 }
4134 if(offset&&!c&&rs1[i]) {
4135 if(rs>=0) {
4136 emit_addimm(rs,offset,ra);
4137 }else{
4138 emit_addimm(ra,offset,ra);
4139 }
4140 }
4141 }
4142 }
4143 // Preload constants for next instruction
b9b61529 4144 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 4145 int agr,ra;
4146 #ifndef HOST_IMM_ADDR32
4147 // Mapper entry
4148 agr=MGEN1+((i+1)&1);
4149 ra=get_reg(i_regs->regmap,agr);
4150 if(ra>=0) {
4151 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4152 int offset=imm[i+1];
4153 int c=(regs[i+1].wasconst>>rs)&1;
4154 if(c) {
b9b61529 4155 if(itype[i+1]==STORE||itype[i+1]==STORELR
4156 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
57871462 4157 // Stores to memory go thru the mapper to detect self-modifying
4158 // code, loads don't.
4159 if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4cb76aa4 4160 (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4161 generate_map_const(constmap[i+1][rs]+offset,ra);
4162 }else{
4163 if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4164 generate_map_const(constmap[i+1][rs]+offset,ra);
4165 }
4166 }
4167 /*else if(rs1[i]==0) {
4168 generate_map_const(offset,ra);
4169 }*/
4170 }
4171 #endif
4172 // Actual address
4173 agr=AGEN1+((i+1)&1);
4174 ra=get_reg(i_regs->regmap,agr);
4175 if(ra>=0) {
4176 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4177 int offset=imm[i+1];
4178 int c=(regs[i+1].wasconst>>rs)&1;
4179 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4180 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4181 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4182 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4183 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4184 }else{
4185 #ifdef HOST_IMM_ADDR32
b9b61529 4186 if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4187 (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4188 #endif
4189 emit_movimm(constmap[i+1][rs]+offset,ra);
4190 }
4191 }
4192 else if(rs1[i+1]==0) {
4193 // Using r0 as a base address
4194 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4195 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4196 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4197 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4198 }else{
4199 emit_movimm(offset,ra);
4200 }
4201 }
4202 }
4203 }
4204}
4205
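// Determine the value a constant-holding host register should finally
// contain: scan forward while the same constant stays allocated, fold in the
// offset of a following load when the address can be precomputed, and return
// 0 when the value does not need to be loaded at all.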
4206int get_final_value(int hr, int i, int *value)
4207{
4208 int reg=regs[i].regmap[hr];
4209 while(i<slen-1) {
4210 if(regs[i+1].regmap[hr]!=reg) break;
4211 if(!((regs[i+1].isconst>>hr)&1)) break;
4212 if(bt[i+1]) break;
4213 i++;
4214 }
4215 if(i<slen-1) {
4216 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4217 *value=constmap[i][hr];
4218 return 1;
4219 }
4220 if(!bt[i+1]) {
4221 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4222 // Load in delay slot, out-of-order execution
4223 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4224 {
4225 #ifdef HOST_IMM_ADDR32
4226 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4227 #endif
4228 // Precompute load address
4229 *value=constmap[i][hr]+imm[i+2];
4230 return 1;
4231 }
4232 }
4233 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4234 {
4235 #ifdef HOST_IMM_ADDR32
4236 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4237 #endif
4238 // Precompute load address
4239 *value=constmap[i][hr]+imm[i+1];
4240 //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4241 return 1;
4242 }
4243 }
4244 }
4245 *value=constmap[i][hr];
4246 //printf("c=%x\n",(int)constmap[i][hr]);
4247 if(i==slen-1) return 1;
4248 if(reg<64) {
4249 return !((unneeded_reg[i+1]>>reg)&1);
4250 }else{
4251 return !((unneeded_reg_upper[i+1]>>reg)&1);
4252 }
4253}
4254
4255// Load registers with known constants
4256void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4257{
4258 int hr;
4259 // Load 32-bit regs
4260 for(hr=0;hr<HOST_REGS;hr++) {
4261 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4262 //if(entry[hr]!=regmap[hr]) {
4263 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4264 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4265 int value;
4266 if(get_final_value(hr,i,&value)) {
4267 if(value==0) {
4268 emit_zeroreg(hr);
4269 }
4270 else {
4271 emit_movimm(value,hr);
4272 }
4273 }
4274 }
4275 }
4276 }
4277 }
4278 // Load 64-bit regs
4279 for(hr=0;hr<HOST_REGS;hr++) {
4280 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4281 //if(entry[hr]!=regmap[hr]) {
4282 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4283 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4284 if((is32>>(regmap[hr]&63))&1) {
4285 int lr=get_reg(regmap,regmap[hr]-64);
4286 assert(lr>=0);
4287 emit_sarimm(lr,31,hr);
4288 }
4289 else
4290 {
4291 int value;
4292 if(get_final_value(hr,i,&value)) {
4293 if(value==0) {
4294 emit_zeroreg(hr);
4295 }
4296 else {
4297 emit_movimm(value,hr);
4298 }
4299 }
4300 }
4301 }
4302 }
4303 }
4304 }
4305}
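// Load known constants into every dirty host register that holds one
// (both the 32-bit values and the sign-extended upper halves).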
4306void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4307{
4308 int hr;
4309 // Load 32-bit regs
4310 for(hr=0;hr<HOST_REGS;hr++) {
4311 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4312 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4313 int value=constmap[i][hr];
4314 if(value==0) {
4315 emit_zeroreg(hr);
4316 }
4317 else {
4318 emit_movimm(value,hr);
4319 }
4320 }
4321 }
4322 }
4323 // Load 64-bit regs
4324 for(hr=0;hr<HOST_REGS;hr++) {
4325 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4326 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4327 if((is32>>(regmap[hr]&63))&1) {
4328 int lr=get_reg(regmap,regmap[hr]-64);
4329 assert(lr>=0);
4330 emit_sarimm(lr,31,hr);
4331 }
4332 else
4333 {
4334 int value=constmap[i][hr];
4335 if(value==0) {
4336 emit_zeroreg(hr);
4337 }
4338 else {
4339 emit_movimm(value,hr);
4340 }
4341 }
4342 }
4343 }
4344 }
4345}
4346
4347// Write out all dirty registers (except cycle count)
4348void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4349{
4350 int hr;
4351 for(hr=0;hr<HOST_REGS;hr++) {
4352 if(hr!=EXCLUDE_REG) {
4353 if(i_regmap[hr]>0) {
4354 if(i_regmap[hr]!=CCREG) {
4355 if((i_dirty>>hr)&1) {
4356 if(i_regmap[hr]<64) {
4357 emit_storereg(i_regmap[hr],hr);
24385cae 4358#ifndef FORCE32
57871462 4359 if( ((i_is32>>i_regmap[hr])&1) ) {
4360 #ifdef DESTRUCTIVE_WRITEBACK
4361 emit_sarimm(hr,31,hr);
4362 emit_storereg(i_regmap[hr]|64,hr);
4363 #else
4364 emit_sarimm(hr,31,HOST_TEMPREG);
4365 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4366 #endif
4367 }
24385cae 4368#endif
57871462 4369 }else{
4370 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4371 emit_storereg(i_regmap[hr],hr);
4372 }
4373 }
4374 }
4375 }
4376 }
4377 }
4378 }
4379}
4380// Write out dirty registers that we need to reload (pair with load_needed_regs)
4381// This writes the registers not written by store_regs_bt
4382void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4383{
4384 int hr;
4385 int t=(addr-start)>>2;
4386 for(hr=0;hr<HOST_REGS;hr++) {
4387 if(hr!=EXCLUDE_REG) {
4388 if(i_regmap[hr]>0) {
4389 if(i_regmap[hr]!=CCREG) {
4390 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4391 if((i_dirty>>hr)&1) {
4392 if(i_regmap[hr]<64) {
4393 emit_storereg(i_regmap[hr],hr);
24385cae 4394#ifndef FORCE32
57871462 4395 if( ((i_is32>>i_regmap[hr])&1) ) {
4396 #ifdef DESTRUCTIVE_WRITEBACK
4397 emit_sarimm(hr,31,hr);
4398 emit_storereg(i_regmap[hr]|64,hr);
4399 #else
4400 emit_sarimm(hr,31,HOST_TEMPREG);
4401 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4402 #endif
4403 }
24385cae 4404#endif
57871462 4405 }else{
4406 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4407 emit_storereg(i_regmap[hr],hr);
4408 }
4409 }
4410 }
4411 }
4412 }
4413 }
4414 }
4415 }
4416}
4417
4418// Load all registers (except cycle count)
4419void load_all_regs(signed char i_regmap[])
4420{
4421 int hr;
4422 for(hr=0;hr<HOST_REGS;hr++) {
4423 if(hr!=EXCLUDE_REG) {
4424 if(i_regmap[hr]==0) {
4425 emit_zeroreg(hr);
4426 }
4427 else
4428 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4429 {
4430 emit_loadreg(i_regmap[hr],hr);
4431 }
4432 }
4433 }
4434}
4435
4436// Load all current registers also needed by next instruction
4437void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4438{
4439 int hr;
4440 for(hr=0;hr<HOST_REGS;hr++) {
4441 if(hr!=EXCLUDE_REG) {
4442 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4443 if(i_regmap[hr]==0) {
4444 emit_zeroreg(hr);
4445 }
4446 else
4447 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4448 {
4449 emit_loadreg(i_regmap[hr],hr);
4450 }
4451 }
4452 }
4453 }
4454}
4455
4456// Load all regs, storing cycle count if necessary
4457void load_regs_entry(int t)
4458{
4459 int hr;
4460 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4461 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4462 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4463 emit_storereg(CCREG,HOST_CCREG);
4464 }
4465 // Load 32-bit regs
4466 for(hr=0;hr<HOST_REGS;hr++) {
4467 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4468 if(regs[t].regmap_entry[hr]==0) {
4469 emit_zeroreg(hr);
4470 }
4471 else if(regs[t].regmap_entry[hr]!=CCREG)
4472 {
4473 emit_loadreg(regs[t].regmap_entry[hr],hr);
4474 }
4475 }
4476 }
4477 // Load 64-bit regs
4478 for(hr=0;hr<HOST_REGS;hr++) {
4479 if(regs[t].regmap_entry[hr]>=64) {
4480 assert(regs[t].regmap_entry[hr]!=64);
4481 if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4482 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4483 if(lr<0) {
4484 emit_loadreg(regs[t].regmap_entry[hr],hr);
4485 }
4486 else
4487 {
4488 emit_sarimm(lr,31,hr);
4489 }
4490 }
4491 else
4492 {
4493 emit_loadreg(regs[t].regmap_entry[hr],hr);
4494 }
4495 }
4496 }
4497}
4498
4499// Store dirty registers prior to branch
4500void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4501{
4502 if(internal_branch(i_is32,addr))
4503 {
4504 int t=(addr-start)>>2;
4505 int hr;
4506 for(hr=0;hr<HOST_REGS;hr++) {
4507 if(hr!=EXCLUDE_REG) {
4508 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4509 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4510 if((i_dirty>>hr)&1) {
4511 if(i_regmap[hr]<64) {
4512 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4513 emit_storereg(i_regmap[hr],hr);
4514 if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4515 #ifdef DESTRUCTIVE_WRITEBACK
4516 emit_sarimm(hr,31,hr);
4517 emit_storereg(i_regmap[hr]|64,hr);
4518 #else
4519 emit_sarimm(hr,31,HOST_TEMPREG);
4520 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4521 #endif
4522 }
4523 }
4524 }else{
4525 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4526 emit_storereg(i_regmap[hr],hr);
4527 }
4528 }
4529 }
4530 }
4531 }
4532 }
4533 }
4534 }
4535 else
4536 {
4537 // Branch out of this block, write out all dirty regs
4538 wb_dirtys(i_regmap,i_is32,i_dirty);
4539 }
4540}
4541
4542// Load all needed registers for branch target
4543void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4544{
4545 //if(addr>=start && addr<(start+slen*4))
4546 if(internal_branch(i_is32,addr))
4547 {
4548 int t=(addr-start)>>2;
4549 int hr;
4550 // Store the cycle count before loading something else
4551 if(i_regmap[HOST_CCREG]!=CCREG) {
4552 assert(i_regmap[HOST_CCREG]==-1);
4553 }
4554 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4555 emit_storereg(CCREG,HOST_CCREG);
4556 }
4557 // Load 32-bit regs
4558 for(hr=0;hr<HOST_REGS;hr++) {
4559 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4560 #ifdef DESTRUCTIVE_WRITEBACK
4561 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4562 #else
4563 if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4564 #endif
4565 if(regs[t].regmap_entry[hr]==0) {
4566 emit_zeroreg(hr);
4567 }
4568 else if(regs[t].regmap_entry[hr]!=CCREG)
4569 {
4570 emit_loadreg(regs[t].regmap_entry[hr],hr);
4571 }
4572 }
4573 }
4574 }
 4575  // Load 64-bit regs
4576 for(hr=0;hr<HOST_REGS;hr++) {
4577 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4578 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4579 assert(regs[t].regmap_entry[hr]!=64);
4580 if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4581 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4582 if(lr<0) {
4583 emit_loadreg(regs[t].regmap_entry[hr],hr);
4584 }
4585 else
4586 {
4587 emit_sarimm(lr,31,hr);
4588 }
4589 }
4590 else
4591 {
4592 emit_loadreg(regs[t].regmap_entry[hr],hr);
4593 }
4594 }
4595 else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4596 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4597 assert(lr>=0);
4598 emit_sarimm(lr,31,hr);
4599 }
4600 }
4601 }
4602 }
4603}
4604
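// Can execution fall straight into the block entry at addr with the current
// register state? Checks the register map, dirty bits and 32/64-bit state
// against what the target expects at entry.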
4605int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4606{
4607 if(addr>=start && addr<start+slen*4-4)
4608 {
4609 int t=(addr-start)>>2;
4610 int hr;
4611 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4612 for(hr=0;hr<HOST_REGS;hr++)
4613 {
4614 if(hr!=EXCLUDE_REG)
4615 {
4616 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4617 {
4618 if(regs[t].regmap_entry[hr]!=-1)
4619 {
4620 return 0;
4621 }
4622 else
4623 if((i_dirty>>hr)&1)
4624 {
4625 if(i_regmap[hr]<64)
4626 {
4627 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4628 return 0;
4629 }
4630 else
4631 {
4632 if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4633 return 0;
4634 }
4635 }
4636 }
4637 else // Same register but is it 32-bit or dirty?
4638 if(i_regmap[hr]>=0)
4639 {
4640 if(!((regs[t].dirty>>hr)&1))
4641 {
4642 if((i_dirty>>hr)&1)
4643 {
4644 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4645 {
4646 //printf("%x: dirty no match\n",addr);
4647 return 0;
4648 }
4649 }
4650 }
4651 if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4652 {
4653 //printf("%x: is32 no match\n",addr);
4654 return 0;
4655 }
4656 }
4657 }
4658 }
4659 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
a28c6ce8 4660#ifndef FORCE32
57871462 4661 if(requires_32bit[t]&~i_is32) return 0;
a28c6ce8 4662#endif
57871462 4663 // Delay slots are not valid branch targets
4664 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4665 // Delay slots require additional processing, so do not match
4666 if(is_ds[t]) return 0;
4667 }
4668 else
4669 {
4670 int hr;
4671 for(hr=0;hr<HOST_REGS;hr++)
4672 {
4673 if(hr!=EXCLUDE_REG)
4674 {
4675 if(i_regmap[hr]>=0)
4676 {
4677 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4678 {
4679 if((i_dirty>>hr)&1)
4680 {
4681 return 0;
4682 }
4683 }
4684 }
4685 }
4686 }
4687 }
4688 return 1;
4689}
4690
4691// Used when a branch jumps into the delay slot of another branch
4692void ds_assemble_entry(int i)
4693{
4694 int t=(ba[i]-start)>>2;
4695 if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4696 assem_debug("Assemble delay slot at %x\n",ba[i]);
4697 assem_debug("<->\n");
4698 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4699 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4700 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4701 address_generation(t,&regs[t],regs[t].regmap_entry);
b9b61529 4702 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
57871462 4703 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4704 cop1_usable=0;
4705 is_delayslot=0;
4706 switch(itype[t]) {
4707 case ALU:
4708 alu_assemble(t,&regs[t]);break;
4709 case IMM16:
4710 imm16_assemble(t,&regs[t]);break;
4711 case SHIFT:
4712 shift_assemble(t,&regs[t]);break;
4713 case SHIFTIMM:
4714 shiftimm_assemble(t,&regs[t]);break;
4715 case LOAD:
4716 load_assemble(t,&regs[t]);break;
4717 case LOADLR:
4718 loadlr_assemble(t,&regs[t]);break;
4719 case STORE:
4720 store_assemble(t,&regs[t]);break;
4721 case STORELR:
4722 storelr_assemble(t,&regs[t]);break;
4723 case COP0:
4724 cop0_assemble(t,&regs[t]);break;
4725 case COP1:
4726 cop1_assemble(t,&regs[t]);break;
4727 case C1LS:
4728 c1ls_assemble(t,&regs[t]);break;
b9b61529 4729 case COP2:
4730 cop2_assemble(t,&regs[t]);break;
4731 case C2LS:
4732 c2ls_assemble(t,&regs[t]);break;
4733 case C2OP:
4734 c2op_assemble(t,&regs[t]);break;
57871462 4735 case FCONV:
4736 fconv_assemble(t,&regs[t]);break;
4737 case FLOAT:
4738 float_assemble(t,&regs[t]);break;
4739 case FCOMP:
4740 fcomp_assemble(t,&regs[t]);break;
4741 case MULTDIV:
4742 multdiv_assemble(t,&regs[t]);break;
4743 case MOV:
4744 mov_assemble(t,&regs[t]);break;
4745 case SYSCALL:
7139f3c8 4746 case HLECALL:
1e973cb0 4747 case INTCALL:
57871462 4748 case SPAN:
4749 case UJUMP:
4750 case RJUMP:
4751 case CJUMP:
4752 case SJUMP:
4753 case FJUMP:
4754 printf("Jump in the delay slot. This is probably a bug.\n");
4755 }
4756 store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4757 load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4758 if(internal_branch(regs[t].is32,ba[i]+4))
4759 assem_debug("branch: internal\n");
4760 else
4761 assem_debug("branch: external\n");
4762 assert(internal_branch(regs[t].is32,ba[i]+4));
4763 add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4764 emit_jmp(0);
4765}
4766
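// Emit the cycle counter check for a branch: work out the cycle adjustment
// for the target, special-case idle loops, and register a CC_STUB that is
// taken when the counter runs out.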
4767void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4768{
4769 int count;
4770 int jaddr;
4771 int idle=0;
4772 if(itype[i]==RJUMP)
4773 {
4774 *adj=0;
4775 }
4776 //if(ba[i]>=start && ba[i]<(start+slen*4))
4777 if(internal_branch(branch_regs[i].is32,ba[i]))
4778 {
4779 int t=(ba[i]-start)>>2;
4780 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4781 else *adj=ccadj[t];
4782 }
4783 else
4784 {
4785 *adj=0;
4786 }
4787 count=ccadj[i];
4788 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4789 // Idle loop
4790 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4791 idle=(int)out;
4792 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4793 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4794 jaddr=(int)out;
4795 emit_jmp(0);
4796 }
4797 else if(*adj==0||invert) {
4798 emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4799 jaddr=(int)out;
4800 emit_jns(0);
4801 }
4802 else
4803 {
4804 emit_cmpimm(HOST_CCREG,-2*(count+2));
4805 jaddr=(int)out;
4806 emit_jns(0);
4807 }
4808 add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4809}
4810
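// Generate the out-of-line CC_STUB code: write back dirty registers, store
// the resume PC (re-evaluating the branch condition when the direction was
// not known at stub creation), call cc_interrupt, then reload registers and
// return to the compiled code.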
4811void do_ccstub(int n)
4812{
4813 literal_pool(256);
4814 assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4815 set_jump_target(stubs[n][1],(int)out);
4816 int i=stubs[n][4];
4817 if(stubs[n][6]==NULLDS) {
4818 // Delay slot instruction is nullified ("likely" branch)
4819 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4820 }
4821 else if(stubs[n][6]!=TAKEN) {
4822 wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4823 }
4824 else {
4825 if(internal_branch(branch_regs[i].is32,ba[i]))
4826 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4827 }
4828 if(stubs[n][5]!=-1)
4829 {
4830 // Save PC as return address
4831 emit_movimm(stubs[n][5],EAX);
4832 emit_writeword(EAX,(int)&pcaddr);
4833 }
4834 else
4835 {
4836 // Return address depends on which way the branch goes
4837 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4838 {
4839 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4840 int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4841 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4842 int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4843 if(rs1[i]==0)
4844 {
4845 s1l=s2l;s1h=s2h;
4846 s2l=s2h=-1;
4847 }
4848 else if(rs2[i]==0)
4849 {
4850 s2l=s2h=-1;
4851 }
4852 if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4853 s1h=s2h=-1;
4854 }
4855 assert(s1l>=0);
4856 #ifdef DESTRUCTIVE_WRITEBACK
4857 if(rs1[i]) {
4858 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4859 emit_loadreg(rs1[i],s1l);
4860 }
4861 else {
4862 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4863 emit_loadreg(rs2[i],s1l);
4864 }
4865 if(s2l>=0)
4866 if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4867 emit_loadreg(rs2[i],s2l);
4868 #endif
4869 int hr=0;
4870 int addr,alt,ntaddr;
4871 while(hr<HOST_REGS)
4872 {
4873 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4874 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4875 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4876 {
4877 addr=hr++;break;
4878 }
4879 hr++;
4880 }
4881 while(hr<HOST_REGS)
4882 {
4883 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4884 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4885 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4886 {
4887 alt=hr++;break;
4888 }
4889 hr++;
4890 }
4891 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4892 {
4893 while(hr<HOST_REGS)
4894 {
4895 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4896 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4897 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4898 {
4899 ntaddr=hr;break;
4900 }
4901 hr++;
4902 }
4903 assert(hr<HOST_REGS);
4904 }
4905 if((opcode[i]&0x2f)==4) // BEQ
4906 {
4907 #ifdef HAVE_CMOV_IMM
4908 if(s1h<0) {
4909 if(s2l>=0) emit_cmp(s1l,s2l);
4910 else emit_test(s1l,s1l);
4911 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4912 }
4913 else
4914 #endif
4915 {
4916 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4917 if(s1h>=0) {
4918 if(s2h>=0) emit_cmp(s1h,s2h);
4919 else emit_test(s1h,s1h);
4920 emit_cmovne_reg(alt,addr);
4921 }
4922 if(s2l>=0) emit_cmp(s1l,s2l);
4923 else emit_test(s1l,s1l);
4924 emit_cmovne_reg(alt,addr);
4925 }
4926 }
4927 if((opcode[i]&0x2f)==5) // BNE
4928 {
4929 #ifdef HAVE_CMOV_IMM
4930 if(s1h<0) {
4931 if(s2l>=0) emit_cmp(s1l,s2l);
4932 else emit_test(s1l,s1l);
4933 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4934 }
4935 else
4936 #endif
4937 {
4938 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4939 if(s1h>=0) {
4940 if(s2h>=0) emit_cmp(s1h,s2h);
4941 else emit_test(s1h,s1h);
4942 emit_cmovne_reg(alt,addr);
4943 }
4944 if(s2l>=0) emit_cmp(s1l,s2l);
4945 else emit_test(s1l,s1l);
4946 emit_cmovne_reg(alt,addr);
4947 }
4948 }
4949 if((opcode[i]&0x2f)==6) // BLEZ
4950 {
4951 //emit_movimm(ba[i],alt);
4952 //emit_movimm(start+i*4+8,addr);
4953 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4954 emit_cmpimm(s1l,1);
4955 if(s1h>=0) emit_mov(addr,ntaddr);
4956 emit_cmovl_reg(alt,addr);
4957 if(s1h>=0) {
4958 emit_test(s1h,s1h);
4959 emit_cmovne_reg(ntaddr,addr);
4960 emit_cmovs_reg(alt,addr);
4961 }
4962 }
4963 if((opcode[i]&0x2f)==7) // BGTZ
4964 {
4965 //emit_movimm(ba[i],addr);
4966 //emit_movimm(start+i*4+8,ntaddr);
4967 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4968 emit_cmpimm(s1l,1);
4969 if(s1h>=0) emit_mov(addr,alt);
4970 emit_cmovl_reg(ntaddr,addr);
4971 if(s1h>=0) {
4972 emit_test(s1h,s1h);
4973 emit_cmovne_reg(alt,addr);
4974 emit_cmovs_reg(ntaddr,addr);
4975 }
4976 }
4977 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4978 {
4979 //emit_movimm(ba[i],alt);
4980 //emit_movimm(start+i*4+8,addr);
4981 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4982 if(s1h>=0) emit_test(s1h,s1h);
4983 else emit_test(s1l,s1l);
4984 emit_cmovs_reg(alt,addr);
4985 }
4986 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4987 {
4988 //emit_movimm(ba[i],addr);
4989 //emit_movimm(start+i*4+8,alt);
4990 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4991 if(s1h>=0) emit_test(s1h,s1h);
4992 else emit_test(s1l,s1l);
4993 emit_cmovs_reg(alt,addr);
4994 }
4995 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4996 if(source[i]&0x10000) // BC1T
4997 {
4998 //emit_movimm(ba[i],alt);
4999 //emit_movimm(start+i*4+8,addr);
5000 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5001 emit_testimm(s1l,0x800000);
5002 emit_cmovne_reg(alt,addr);
5003 }
5004 else // BC1F
5005 {
5006 //emit_movimm(ba[i],addr);
5007 //emit_movimm(start+i*4+8,alt);
5008 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5009 emit_testimm(s1l,0x800000);
5010 emit_cmovne_reg(alt,addr);
5011 }
5012 }
5013 emit_writeword(addr,(int)&pcaddr);
5014 }
5015 else
5016 if(itype[i]==RJUMP)
5017 {
5018 int r=get_reg(branch_regs[i].regmap,rs1[i]);
5019 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5020 r=get_reg(branch_regs[i].regmap,RTEMP);
5021 }
5022 emit_writeword(r,(int)&pcaddr);
5023 }
5024 else {printf("Unknown branch type in do_ccstub\n");exit(1);}
5025 }
5026 // Update cycle count
5027 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5028 if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5029 emit_call((int)cc_interrupt);
5030 if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5031 if(stubs[n][6]==TAKEN) {
5032 if(internal_branch(branch_regs[i].is32,ba[i]))
5033 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5034 else if(itype[i]==RJUMP) {
5035 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5036 emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5037 else
5038 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5039 }
5040 }else if(stubs[n][6]==NOTTAKEN) {
5041 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5042 else load_all_regs(branch_regs[i].regmap);
5043 }else if(stubs[n][6]==NULLDS) {
5044 // Delay slot instruction is nullified ("likely" branch)
5045 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5046 else load_all_regs(regs[i].regmap);
5047 }else{
5048 load_all_regs(branch_regs[i].regmap);
5049 }
5050 emit_jmp(stubs[n][2]); // return address
5051
5052 /* This works but uses a lot of memory...
5053 emit_readword((int)&last_count,ECX);
5054 emit_add(HOST_CCREG,ECX,EAX);
5055 emit_writeword(EAX,(int)&Count);
5056 emit_call((int)gen_interupt);
5057 emit_readword((int)&Count,HOST_CCREG);
5058 emit_readword((int)&next_interupt,EAX);
5059 emit_readword((int)&pending_exception,EBX);
5060 emit_writeword(EAX,(int)&last_count);
5061 emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5062 emit_test(EBX,EBX);
5063 int jne_instr=(int)out;
5064 emit_jne(0);
5065 if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5066 load_all_regs(branch_regs[i].regmap);
5067 emit_jmp(stubs[n][2]); // return address
5068 set_jump_target(jne_instr,(int)out);
5069 emit_readword((int)&pcaddr,EAX);
5070 // Call get_addr_ht instead of doing the hash table here.
5071 // This code is executed infrequently and takes up a lot of space
5072 // so smaller is better.
5073 emit_storereg(CCREG,HOST_CCREG);
5074 emit_pushreg(EAX);
5075 emit_call((int)get_addr_ht);
5076 emit_loadreg(CCREG,HOST_CCREG);
5077 emit_addimm(ESP,4,ESP);
5078 emit_jmpreg(EAX);*/
5079}
5080
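// Queue a jump for later patching: record where it was emitted, its target
// address and the internal/external flag.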
5081void add_to_linker(int addr,int target,int ext)
5082{
5083 link_addr[linkcount][0]=addr;
5084 link_addr[linkcount][1]=target;
5085 link_addr[linkcount][2]=ext;
5086 linkcount++;
5087}
5088
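// Assemble J/JAL: the delay slot is assembled first, the return address is
// written when rt1 is $31 (JAL), the cycle count is checked and a jump to
// the target is normally emitted through the linker.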
5089void ujump_assemble(int i,struct regstat *i_regs)
5090{
5091 signed char *i_regmap=i_regs->regmap;
5092 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5093 address_generation(i+1,i_regs,regs[i].regmap_entry);
5094 #ifdef REG_PREFETCH
5095 int temp=get_reg(branch_regs[i].regmap,PTEMP);
5096 if(rt1[i]==31&&temp>=0)
5097 {
5098 int return_address=start+i*4+8;
5099 if(get_reg(branch_regs[i].regmap,31)>0)
5100 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5101 }
5102 #endif
5103 ds_assemble(i+1,i_regs);
5104 uint64_t bc_unneeded=branch_regs[i].u;
5105 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5106 bc_unneeded|=1|(1LL<<rt1[i]);
5107 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5108 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5109 bc_unneeded,bc_unneeded_upper);
5110 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5111 if(rt1[i]==31) {
5112 int rt;
5113 unsigned int return_address;
5114 assert(rt1[i+1]!=31);
5115 assert(rt2[i+1]!=31);
5116 rt=get_reg(branch_regs[i].regmap,31);
5117 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5118 //assert(rt>=0);
5119 return_address=start+i*4+8;
5120 if(rt>=0) {
5121 #ifdef USE_MINI_HT
5122 if(internal_branch(branch_regs[i].is32,return_address)) {
5123 int temp=rt+1;
5124 if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5125 branch_regs[i].regmap[temp]>=0)
5126 {
5127 temp=get_reg(branch_regs[i].regmap,-1);
5128 }
5129 #ifdef HOST_TEMPREG
5130 if(temp<0) temp=HOST_TEMPREG;
5131 #endif
5132 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5133 else emit_movimm(return_address,rt);
5134 }
5135 else
5136 #endif
5137 {
5138 #ifdef REG_PREFETCH
5139 if(temp>=0)
5140 {
5141 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5142 }
5143 #endif
5144 emit_movimm(return_address,rt); // PC into link register
5145 #ifdef IMM_PREFETCH
5146 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5147 #endif
5148 }
5149 }
5150 }
5151 int cc,adj;
5152 cc=get_reg(branch_regs[i].regmap,CCREG);
5153 assert(cc==HOST_CCREG);
5154 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5155 #ifdef REG_PREFETCH
5156 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5157 #endif
5158 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5159 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5160 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5161 if(internal_branch(branch_regs[i].is32,ba[i]))
5162 assem_debug("branch: internal\n");
5163 else
5164 assem_debug("branch: external\n");
5165 if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5166 ds_assemble_entry(i);
5167 }
5168 else {
5169 add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5170 emit_jmp(0);
5171 }
5172}
5173
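// Assemble JR/JALR: the target comes from a register, so after the delay
// slot and the cycle count check the block exits through jump_vaddr (or the
// mini hash table when enabled).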
5174void rjump_assemble(int i,struct regstat *i_regs)
5175{
5176 signed char *i_regmap=i_regs->regmap;
5177 int temp;
5178 int rs,cc,adj;
5179 rs=get_reg(branch_regs[i].regmap,rs1[i]);
5180 assert(rs>=0);
5181 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5182 // Delay slot abuse, make a copy of the branch address register
5183 temp=get_reg(branch_regs[i].regmap,RTEMP);
5184 assert(temp>=0);
5185 assert(regs[i].regmap[temp]==RTEMP);
5186 emit_mov(rs,temp);
5187 rs=temp;
5188 }
5189 address_generation(i+1,i_regs,regs[i].regmap_entry);
5190 #ifdef REG_PREFETCH
5191 if(rt1[i]==31)
5192 {
5193 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5194 int return_address=start+i*4+8;
5195 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5196 }
5197 }
5198 #endif
5199 #ifdef USE_MINI_HT
5200 if(rs1[i]==31) {
5201 int rh=get_reg(regs[i].regmap,RHASH);
5202 if(rh>=0) do_preload_rhash(rh);
5203 }
5204 #endif
5205 ds_assemble(i+1,i_regs);
5206 uint64_t bc_unneeded=branch_regs[i].u;
5207 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5208 bc_unneeded|=1|(1LL<<rt1[i]);
5209 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5210 bc_unneeded&=~(1LL<<rs1[i]);
5211 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5212 bc_unneeded,bc_unneeded_upper);
5213 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5067f341 5214 if(rt1[i]!=0) {
57871462 5215 int rt,return_address;
5067f341 5216 assert(rt1[i+1]!=rt1[i]);
5217 assert(rt2[i+1]!=rt1[i]);
5218 rt=get_reg(branch_regs[i].regmap,rt1[i]);
57871462 5219 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5220 assert(rt>=0);
5221 return_address=start+i*4+8;
5222 #ifdef REG_PREFETCH
5223 if(temp>=0)
5224 {
5225 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5226 }
5227 #endif
5228 emit_movimm(return_address,rt); // PC into link register
5229 #ifdef IMM_PREFETCH
5230 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5231 #endif
5232 }
5233 cc=get_reg(branch_regs[i].regmap,CCREG);
5234 assert(cc==HOST_CCREG);
5235 #ifdef USE_MINI_HT
5236 int rh=get_reg(branch_regs[i].regmap,RHASH);
5237 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5238 if(rs1[i]==31) {
5239 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5240 do_preload_rhtbl(ht);
5241 do_rhash(rs,rh);
5242 }
5243 #endif
5244 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5245 #ifdef DESTRUCTIVE_WRITEBACK
5246 if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5247 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5248 emit_loadreg(rs1[i],rs);
5249 }
5250 }
5251 #endif
5252 #ifdef REG_PREFETCH
5253 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5254 #endif
5255 #ifdef USE_MINI_HT
5256 if(rs1[i]==31) {
5257 do_miniht_load(ht,rh);
5258 }
5259 #endif
5260 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5261 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5262 //assert(adj==0);
5263 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5264 add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5265 emit_jns(0);
5266 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5267 #ifdef USE_MINI_HT
5268 if(rs1[i]==31) {
5269 do_miniht_jump(rs,rh,ht);
5270 }
5271 else
5272 #endif
5273 {
5274 //if(rs!=EAX) emit_mov(rs,EAX);
5275 //emit_jmp((int)jump_vaddr_eax);
5276 emit_jmp(jump_vaddr_reg[rs]);
5277 }
5278 /* Check hash table
5279 temp=!rs;
5280 emit_mov(rs,temp);
5281 emit_shrimm(rs,16,rs);
5282 emit_xor(temp,rs,rs);
5283 emit_movzwl_reg(rs,rs);
5284 emit_shlimm(rs,4,rs);
5285 emit_cmpmem_indexed((int)hash_table,rs,temp);
5286 emit_jne((int)out+14);
5287 emit_readword_indexed((int)hash_table+4,rs,rs);
5288 emit_jmpreg(rs);
5289 emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5290 emit_addimm_no_flags(8,rs);
5291 emit_jeq((int)out-17);
5292 // No hit on hash table, call compiler
5293 emit_pushreg(temp);
5294//DEBUG >
5295#ifdef DEBUG_CYCLE_COUNT
5296 emit_readword((int)&last_count,ECX);
5297 emit_add(HOST_CCREG,ECX,HOST_CCREG);
5298 emit_readword((int)&next_interupt,ECX);
5299 emit_writeword(HOST_CCREG,(int)&Count);
5300 emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5301 emit_writeword(ECX,(int)&last_count);
5302#endif
5303//DEBUG <
5304 emit_storereg(CCREG,HOST_CCREG);
5305 emit_call((int)get_addr);
5306 emit_loadreg(CCREG,HOST_CCREG);
5307 emit_addimm(ESP,4,ESP);
5308 emit_jmpreg(EAX);*/
5309 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5310 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5311 #endif
5312}
5313
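// Assemble BEQ/BNE/BLEZ/BGTZ (and the "likely" forms): handles both
// out-of-order (delay slot first) and in-order scheduling, inverting the
// test when the target cannot be reached with a direct linked jump.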
5314void cjump_assemble(int i,struct regstat *i_regs)
5315{
5316 signed char *i_regmap=i_regs->regmap;
5317 int cc;
5318 int match;
5319 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5320 assem_debug("match=%d\n",match);
5321 int s1h,s1l,s2h,s2l;
5322 int prev_cop1_usable=cop1_usable;
5323 int unconditional=0,nop=0;
5324 int only32=0;
57871462 5325 int invert=0;
5326 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5327 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5328 if(!match) invert=1;
5329 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5330 if(i>(ba[i]-start)>>2) invert=1;
5331 #endif
e1190b87 5332
5333 if(ooo[i]) {
57871462 5334 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5335 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5336 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5337 s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5338 }
5339 else {
5340 s1l=get_reg(i_regmap,rs1[i]);
5341 s1h=get_reg(i_regmap,rs1[i]|64);
5342 s2l=get_reg(i_regmap,rs2[i]);
5343 s2h=get_reg(i_regmap,rs2[i]|64);
5344 }
5345 if(rs1[i]==0&&rs2[i]==0)
5346 {
5347 if(opcode[i]&1) nop=1;
5348 else unconditional=1;
5349 //assert(opcode[i]!=5);
5350 //assert(opcode[i]!=7);
5351 //assert(opcode[i]!=0x15);
5352 //assert(opcode[i]!=0x17);
5353 }
5354 else if(rs1[i]==0)
5355 {
5356 s1l=s2l;s1h=s2h;
5357 s2l=s2h=-1;
5358 only32=(regs[i].was32>>rs2[i])&1;
5359 }
5360 else if(rs2[i]==0)
5361 {
5362 s2l=s2h=-1;
5363 only32=(regs[i].was32>>rs1[i])&1;
5364 }
5365 else {
5366 only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5367 }
5368
e1190b87 5369 if(ooo[i]) {
57871462 5370 // Out of order execution (delay slot first)
5371 //printf("OOOE\n");
5372 address_generation(i+1,i_regs,regs[i].regmap_entry);
5373 ds_assemble(i+1,i_regs);
5374 int adj;
5375 uint64_t bc_unneeded=branch_regs[i].u;
5376 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5377 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5378 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5379 bc_unneeded|=1;
5380 bc_unneeded_upper|=1;
5381 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5382 bc_unneeded,bc_unneeded_upper);
5383 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5384 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5385 cc=get_reg(branch_regs[i].regmap,CCREG);
5386 assert(cc==HOST_CCREG);
5387 if(unconditional)
5388 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5389 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5390 //assem_debug("cycle count (adj)\n");
5391 if(unconditional) {
5392 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5393 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5394 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5395 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5396 if(internal)
5397 assem_debug("branch: internal\n");
5398 else
5399 assem_debug("branch: external\n");
5400 if(internal&&is_ds[(ba[i]-start)>>2]) {
5401 ds_assemble_entry(i);
5402 }
5403 else {
5404 add_to_linker((int)out,ba[i],internal);
5405 emit_jmp(0);
5406 }
5407 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5408 if(((u_int)out)&7) emit_addnop(0);
5409 #endif
5410 }
5411 }
5412 else if(nop) {
5413 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5414 int jaddr=(int)out;
5415 emit_jns(0);
5416 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5417 }
5418 else {
5419 int taken=0,nottaken=0,nottaken1=0;
5420 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5421 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5422 if(!only32)
5423 {
5424 assert(s1h>=0);
5425 if(opcode[i]==4) // BEQ
5426 {
5427 if(s2h>=0) emit_cmp(s1h,s2h);
5428 else emit_test(s1h,s1h);
5429 nottaken1=(int)out;
5430 emit_jne(1);
5431 }
5432 if(opcode[i]==5) // BNE
5433 {
5434 if(s2h>=0) emit_cmp(s1h,s2h);
5435 else emit_test(s1h,s1h);
5436 if(invert) taken=(int)out;
5437 else add_to_linker((int)out,ba[i],internal);
5438 emit_jne(0);
5439 }
5440 if(opcode[i]==6) // BLEZ
5441 {
5442 emit_test(s1h,s1h);
5443 if(invert) taken=(int)out;
5444 else add_to_linker((int)out,ba[i],internal);
5445 emit_js(0);
5446 nottaken1=(int)out;
5447 emit_jne(1);
5448 }
5449 if(opcode[i]==7) // BGTZ
5450 {
5451 emit_test(s1h,s1h);
5452 nottaken1=(int)out;
5453 emit_js(1);
5454 if(invert) taken=(int)out;
5455 else add_to_linker((int)out,ba[i],internal);
5456 emit_jne(0);
5457 }
5458 } // if(!only32)
5459
5460 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5461 assert(s1l>=0);
5462 if(opcode[i]==4) // BEQ
5463 {
5464 if(s2l>=0) emit_cmp(s1l,s2l);
5465 else emit_test(s1l,s1l);
5466 if(invert){
5467 nottaken=(int)out;
5468 emit_jne(1);
5469 }else{
5470 add_to_linker((int)out,ba[i],internal);
5471 emit_jeq(0);
5472 }
5473 }
5474 if(opcode[i]==5) // BNE
5475 {
5476 if(s2l>=0) emit_cmp(s1l,s2l);
5477 else emit_test(s1l,s1l);
5478 if(invert){
5479 nottaken=(int)out;
5480 emit_jeq(1);
5481 }else{
5482 add_to_linker((int)out,ba[i],internal);
5483 emit_jne(0);
5484 }
5485 }
5486 if(opcode[i]==6) // BLEZ
5487 {
5488 emit_cmpimm(s1l,1);
5489 if(invert){
5490 nottaken=(int)out;
5491 emit_jge(1);
5492 }else{
5493 add_to_linker((int)out,ba[i],internal);
5494 emit_jl(0);
5495 }
5496 }
5497 if(opcode[i]==7) // BGTZ
5498 {
5499 emit_cmpimm(s1l,1);
5500 if(invert){
5501 nottaken=(int)out;
5502 emit_jl(1);
5503 }else{
5504 add_to_linker((int)out,ba[i],internal);
5505 emit_jge(0);
5506 }
5507 }
5508 if(invert) {
5509 if(taken) set_jump_target(taken,(int)out);
5510 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5511 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5512 if(adj) {
5513 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5514 add_to_linker((int)out,ba[i],internal);
5515 }else{
5516 emit_addnop(13);
5517 add_to_linker((int)out,ba[i],internal*2);
5518 }
5519 emit_jmp(0);
5520 }else
5521 #endif
5522 {
5523 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5524 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5525 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5526 if(internal)
5527 assem_debug("branch: internal\n");
5528 else
5529 assem_debug("branch: external\n");
5530 if(internal&&is_ds[(ba[i]-start)>>2]) {
5531 ds_assemble_entry(i);
5532 }
5533 else {
5534 add_to_linker((int)out,ba[i],internal);
5535 emit_jmp(0);
5536 }
5537 }
5538 set_jump_target(nottaken,(int)out);
5539 }
5540
5541 if(nottaken1) set_jump_target(nottaken1,(int)out);
5542 if(adj) {
5543 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5544 }
5545 } // (!unconditional)
5546 } // if(ooo)
5547 else
5548 {
5549 // In-order execution (branch first)
5550 //if(likely[i]) printf("IOL\n");
5551 //else
5552 //printf("IOE\n");
5553 int taken=0,nottaken=0,nottaken1=0;
5554 if(!unconditional&&!nop) {
5555 if(!only32)
5556 {
5557 assert(s1h>=0);
5558 if((opcode[i]&0x2f)==4) // BEQ
5559 {
5560 if(s2h>=0) emit_cmp(s1h,s2h);
5561 else emit_test(s1h,s1h);
5562 nottaken1=(int)out;
5563 emit_jne(2);
5564 }
5565 if((opcode[i]&0x2f)==5) // BNE
5566 {
5567 if(s2h>=0) emit_cmp(s1h,s2h);
5568 else emit_test(s1h,s1h);
5569 taken=(int)out;
5570 emit_jne(1);
5571 }
5572 if((opcode[i]&0x2f)==6) // BLEZ
5573 {
5574 emit_test(s1h,s1h);
5575 taken=(int)out;
5576 emit_js(1);
5577 nottaken1=(int)out;
5578 emit_jne(2);
5579 }
5580 if((opcode[i]&0x2f)==7) // BGTZ
5581 {
5582 emit_test(s1h,s1h);
5583 nottaken1=(int)out;
5584 emit_js(2);
5585 taken=(int)out;
5586 emit_jne(1);
5587 }
5588 } // if(!only32)
5589
5590 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5591 assert(s1l>=0);
5592 if((opcode[i]&0x2f)==4) // BEQ
5593 {
5594 if(s2l>=0) emit_cmp(s1l,s2l);
5595 else emit_test(s1l,s1l);
5596 nottaken=(int)out;
5597 emit_jne(2);
5598 }
5599 if((opcode[i]&0x2f)==5) // BNE
5600 {
5601 if(s2l>=0) emit_cmp(s1l,s2l);
5602 else emit_test(s1l,s1l);
5603 nottaken=(int)out;
5604 emit_jeq(2);
5605 }
5606 if((opcode[i]&0x2f)==6) // BLEZ
5607 {
5608 emit_cmpimm(s1l,1);
5609 nottaken=(int)out;
5610 emit_jge(2);
5611 }
5612 if((opcode[i]&0x2f)==7) // BGTZ
5613 {
5614 emit_cmpimm(s1l,1);
5615 nottaken=(int)out;
5616 emit_jl(2);
5617 }
5618 } // if(!unconditional)
5619 int adj;
5620 uint64_t ds_unneeded=branch_regs[i].u;
5621 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5622 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5623 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5624 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5625 ds_unneeded|=1;
5626 ds_unneeded_upper|=1;
5627 // branch taken
5628 if(!nop) {
5629 if(taken) set_jump_target(taken,(int)out);
5630 assem_debug("1:\n");
5631 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5632 ds_unneeded,ds_unneeded_upper);
5633 // load regs
5634 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5635 address_generation(i+1,&branch_regs[i],0);
5636 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5637 ds_assemble(i+1,&branch_regs[i]);
5638 cc=get_reg(branch_regs[i].regmap,CCREG);
5639 if(cc==-1) {
5640 emit_loadreg(CCREG,cc=HOST_CCREG);
5641 // CHECK: Is the following instruction (fall thru) allocated ok?
5642 }
5643 assert(cc==HOST_CCREG);
5644 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5645 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5646 assem_debug("cycle count (adj)\n");
5647 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5648 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5649 if(internal)
5650 assem_debug("branch: internal\n");
5651 else
5652 assem_debug("branch: external\n");
5653 if(internal&&is_ds[(ba[i]-start)>>2]) {
5654 ds_assemble_entry(i);
5655 }
5656 else {
5657 add_to_linker((int)out,ba[i],internal);
5658 emit_jmp(0);
5659 }
5660 }
5661 // branch not taken
5662 cop1_usable=prev_cop1_usable;
5663 if(!unconditional) {
5664 if(nottaken1) set_jump_target(nottaken1,(int)out);
5665 set_jump_target(nottaken,(int)out);
5666 assem_debug("2:\n");
5667 if(!likely[i]) {
5668 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5669 ds_unneeded,ds_unneeded_upper);
5670 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5671 address_generation(i+1,&branch_regs[i],0);
5672 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5673 ds_assemble(i+1,&branch_regs[i]);
5674 }
5675 cc=get_reg(branch_regs[i].regmap,CCREG);
5676 if(cc==-1&&!likely[i]) {
5677 // Cycle count isn't in a register, temporarily load it then write it out
5678 emit_loadreg(CCREG,HOST_CCREG);
5679 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5680 int jaddr=(int)out;
5681 emit_jns(0);
5682 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5683 emit_storereg(CCREG,HOST_CCREG);
5684 }
5685 else{
5686 cc=get_reg(i_regmap,CCREG);
5687 assert(cc==HOST_CCREG);
5688 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5689 int jaddr=(int)out;
5690 emit_jns(0);
5691 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5692 }
5693 }
5694 }
5695}
5696
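// Assemble the REGIMM branches (BLTZ/BGEZ/BLTZAL/BGEZAL): like
// cjump_assemble but testing the sign of a single register, with the link
// register written even when the branch is not taken.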
5697void sjump_assemble(int i,struct regstat *i_regs)
5698{
5699 signed char *i_regmap=i_regs->regmap;
5700 int cc;
5701 int match;
5702 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5703 assem_debug("smatch=%d\n",match);
5704 int s1h,s1l;
5705 int prev_cop1_usable=cop1_usable;
5706 int unconditional=0,nevertaken=0;
5707 int only32=0;
57871462 5708 int invert=0;
5709 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5710 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5711 if(!match) invert=1;
5712 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5713 if(i>(ba[i]-start)>>2) invert=1;
5714 #endif
5715
5716 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
df894a3a 5717 //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
57871462 5718
e1190b87 5719 if(ooo[i]) {
57871462 5720 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5721 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5722 }
5723 else {
5724 s1l=get_reg(i_regmap,rs1[i]);
5725 s1h=get_reg(i_regmap,rs1[i]|64);
5726 }
5727 if(rs1[i]==0)
5728 {
5729 if(opcode2[i]&1) unconditional=1;
5730 else nevertaken=1;
5731 // These are never taken (r0 is never less than zero)
5732 //assert(opcode2[i]!=0);
5733 //assert(opcode2[i]!=2);
5734 //assert(opcode2[i]!=0x10);
5735 //assert(opcode2[i]!=0x12);
5736 }
5737 else {
5738 only32=(regs[i].was32>>rs1[i])&1;
5739 }
5740
e1190b87 5741 if(ooo[i]) {
57871462 5742 // Out of order execution (delay slot first)
5743 //printf("OOOE\n");
5744 address_generation(i+1,i_regs,regs[i].regmap_entry);
5745 ds_assemble(i+1,i_regs);
5746 int adj;
5747 uint64_t bc_unneeded=branch_regs[i].u;
5748 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5749 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5750 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5751 bc_unneeded|=1;
5752 bc_unneeded_upper|=1;
5753 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5754 bc_unneeded,bc_unneeded_upper);
5755 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5756 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5757 if(rt1[i]==31) {
5758 int rt,return_address;
57871462 5759 rt=get_reg(branch_regs[i].regmap,31);
5760 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5761 if(rt>=0) {
5762 // Save the PC even if the branch is not taken
5763 return_address=start+i*4+8;
5764 emit_movimm(return_address,rt); // PC into link register
5765 #ifdef IMM_PREFETCH
5766 if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5767 #endif
5768 }
5769 }
5770 cc=get_reg(branch_regs[i].regmap,CCREG);
5771 assert(cc==HOST_CCREG);
5772 if(unconditional)
5773 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5774 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5775 assem_debug("cycle count (adj)\n");
5776 if(unconditional) {
5777 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5778 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5779 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5780 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5781 if(internal)
5782 assem_debug("branch: internal\n");
5783 else
5784 assem_debug("branch: external\n");
5785 if(internal&&is_ds[(ba[i]-start)>>2]) {
5786 ds_assemble_entry(i);
5787 }
5788 else {
5789 add_to_linker((int)out,ba[i],internal);
5790 emit_jmp(0);
5791 }
5792 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5793 if(((u_int)out)&7) emit_addnop(0);
5794 #endif
5795 }
5796 }
5797 else if(nevertaken) {
5798 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5799 int jaddr=(int)out;
5800 emit_jns(0);
5801 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5802 }
5803 else {
5804 int nottaken=0;
5805 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5806 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5807 if(!only32)
5808 {
5809 assert(s1h>=0);
df894a3a 5810 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5811 {
5812 emit_test(s1h,s1h);
5813 if(invert){
5814 nottaken=(int)out;
5815 emit_jns(1);
5816 }else{
5817 add_to_linker((int)out,ba[i],internal);
5818 emit_js(0);
5819 }
5820 }
df894a3a 5821 if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5822 {
5823 emit_test(s1h,s1h);
5824 if(invert){
5825 nottaken=(int)out;
5826 emit_js(1);
5827 }else{
5828 add_to_linker((int)out,ba[i],internal);
5829 emit_jns(0);
5830 }
5831 }
5832 } // if(!only32)
5833 else
5834 {
5835 assert(s1l>=0);
df894a3a 5836 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5837 {
5838 emit_test(s1l,s1l);
5839 if(invert){
5840 nottaken=(int)out;
5841 emit_jns(1);
5842 }else{
5843 add_to_linker((int)out,ba[i],internal);
5844 emit_js(0);
5845 }
5846 }
df894a3a 5847 if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5848 {
5849 emit_test(s1l,s1l);
5850 if(invert){
5851 nottaken=(int)out;
5852 emit_js(1);
5853 }else{
5854 add_to_linker((int)out,ba[i],internal);
5855 emit_jns(0);
5856 }
5857 }
 5858 } // else (only32)
5859
5860 if(invert) {
5861 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5862 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5863 if(adj) {
5864 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5865 add_to_linker((int)out,ba[i],internal);
5866 }else{
5867 emit_addnop(13);
5868 add_to_linker((int)out,ba[i],internal*2);
5869 }
5870 emit_jmp(0);
5871 }else
5872 #endif
5873 {
5874 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5875 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5876 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5877 if(internal)
5878 assem_debug("branch: internal\n");
5879 else
5880 assem_debug("branch: external\n");
5881 if(internal&&is_ds[(ba[i]-start)>>2]) {
5882 ds_assemble_entry(i);
5883 }
5884 else {
5885 add_to_linker((int)out,ba[i],internal);
5886 emit_jmp(0);
5887 }
5888 }
5889 set_jump_target(nottaken,(int)out);
5890 }
5891
5892 if(adj) {
5893 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5894 }
5895 } // (!unconditional)
5896 } // if(ooo)
5897 else
5898 {
5899 // In-order execution (branch first)
5900 //printf("IOE\n");
5901 int nottaken=0;
a6491170 5902 if(rt1[i]==31) {
5903 int rt,return_address;
a6491170 5904 rt=get_reg(branch_regs[i].regmap,31);
5905 if(rt>=0) {
5906 // Save the PC even if the branch is not taken
5907 return_address=start+i*4+8;
5908 emit_movimm(return_address,rt); // PC into link register
5909 #ifdef IMM_PREFETCH
5910 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5911 #endif
5912 }
5913 }
57871462 5914 if(!unconditional) {
5915 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5916 if(!only32)
5917 {
5918 assert(s1h>=0);
a6491170 5919 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
57871462 5920 {
5921 emit_test(s1h,s1h);
5922 nottaken=(int)out;
5923 emit_jns(1);
5924 }
a6491170 5925 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
57871462 5926 {
5927 emit_test(s1h,s1h);
5928 nottaken=(int)out;
5929 emit_js(1);
5930 }
5931 } // if(!only32)
5932 else
5933 {
5934 assert(s1l>=0);
a6491170 5935 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
57871462 5936 {
5937 emit_test(s1l,s1l);
5938 nottaken=(int)out;
5939 emit_jns(1);
5940 }
a6491170 5941 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
57871462 5942 {
5943 emit_test(s1l,s1l);
5944 nottaken=(int)out;
5945 emit_js(1);
5946 }
5947 }
5948 } // if(!unconditional)
5949 int adj;
5950 uint64_t ds_unneeded=branch_regs[i].u;
5951 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5952 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5953 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5954 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5955 ds_unneeded|=1;
5956 ds_unneeded_upper|=1;
5957 // branch taken
5958 if(!nevertaken) {
5959 //assem_debug("1:\n");
5960 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5961 ds_unneeded,ds_unneeded_upper);
5962 // load regs
5963 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5964 address_generation(i+1,&branch_regs[i],0);
5965 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5966 ds_assemble(i+1,&branch_regs[i]);
5967 cc=get_reg(branch_regs[i].regmap,CCREG);
5968 if(cc==-1) {
5969 emit_loadreg(CCREG,cc=HOST_CCREG);
5970 // CHECK: Is the following instruction (fall thru) allocated ok?
5971 }
5972 assert(cc==HOST_CCREG);
5973 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5974 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5975 assem_debug("cycle count (adj)\n");
5976 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5977 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5978 if(internal)
5979 assem_debug("branch: internal\n");
5980 else
5981 assem_debug("branch: external\n");
5982 if(internal&&is_ds[(ba[i]-start)>>2]) {
5983 ds_assemble_entry(i);
5984 }
5985 else {
5986 add_to_linker((int)out,ba[i],internal);
5987 emit_jmp(0);
5988 }
5989 }
5990 // branch not taken
5991 cop1_usable=prev_cop1_usable;
5992 if(!unconditional) {
5993 set_jump_target(nottaken,(int)out);
5994 assem_debug("1:\n");
5995 if(!likely[i]) {
5996 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5997 ds_unneeded,ds_unneeded_upper);
5998 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5999 address_generation(i+1,&branch_regs[i],0);
6000 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6001 ds_assemble(i+1,&branch_regs[i]);
6002 }
6003 cc=get_reg(branch_regs[i].regmap,CCREG);
6004 if(cc==-1&&!likely[i]) {
6005 // Cycle count isn't in a register, temporarily load it then write it out
6006 emit_loadreg(CCREG,HOST_CCREG);
6007 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6008 int jaddr=(int)out;
6009 emit_jns(0);
6010 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6011 emit_storereg(CCREG,HOST_CCREG);
6012 }
6013 else{
6014 cc=get_reg(i_regmap,CCREG);
6015 assert(cc==HOST_CCREG);
6016 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6017 int jaddr=(int)out;
6018 emit_jns(0);
6019 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6020 }
6021 }
6022 }
6023}
6024
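// The function below assembles the COP1 condition branches (BC1F/BC1T and
// their "likely" forms). If the FPU has not yet been checked as usable in
// this block, a test of the coprocessor-enable bit in CSREG is emitted first
// (FP_STUB); the branch itself tests bit 23 of the FP status word in FSREG.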
6025void fjump_assemble(int i,struct regstat *i_regs)
6026{
6027 signed char *i_regmap=i_regs->regmap;
6028 int cc;
6029 int match;
6030 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6031 assem_debug("fmatch=%d\n",match);
6032 int fs,cs;
6033 int eaddr;
57871462 6034 int invert=0;
6035 int internal=internal_branch(branch_regs[i].is32,ba[i]);
6036 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 6037 if(!match) invert=1;
6038 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6039 if(i>(ba[i]-start)>>2) invert=1;
6040 #endif
6041
e1190b87 6042 if(ooo[i]) {
57871462 6043 fs=get_reg(branch_regs[i].regmap,FSREG);
6044 address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6045 }
6046 else {
6047 fs=get_reg(i_regmap,FSREG);
6048 }
6049
6050 // Check cop1 unusable
6051 if(!cop1_usable) {
6052 cs=get_reg(i_regmap,CSREG);
6053 assert(cs>=0);
6054 emit_testimm(cs,0x20000000);
6055 eaddr=(int)out;
6056 emit_jeq(0);
6057 add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6058 cop1_usable=1;
6059 }
6060
e1190b87 6061 if(ooo[i]) {
57871462 6062 // Out of order execution (delay slot first)
6063 //printf("OOOE\n");
6064 ds_assemble(i+1,i_regs);
6065 int adj;
6066 uint64_t bc_unneeded=branch_regs[i].u;
6067 uint64_t bc_unneeded_upper=branch_regs[i].uu;
6068 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6069 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6070 bc_unneeded|=1;
6071 bc_unneeded_upper|=1;
6072 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6073 bc_unneeded,bc_unneeded_upper);
6074 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6075 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6076 cc=get_reg(branch_regs[i].regmap,CCREG);
6077 assert(cc==HOST_CCREG);
6078 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6079 assem_debug("cycle count (adj)\n");
6080 if(1) {
6081 int nottaken=0;
6082 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6083 if(1) {
6084 assert(fs>=0);
6085 emit_testimm(fs,0x800000);
6086 if(source[i]&0x10000) // BC1T
6087 {
6088 if(invert){
6089 nottaken=(int)out;
6090 emit_jeq(1);
6091 }else{
6092 add_to_linker((int)out,ba[i],internal);
6093 emit_jne(0);
6094 }
6095 }
 6096 else // BC1F
 6097 {
 6098 if(invert){
 6099 nottaken=(int)out;
 6100 emit_jne(1);
 6101 }else{
 6102 add_to_linker((int)out,ba[i],internal);
 6103 emit_jeq(0);
 6104 }
 6105 }
6106 } // if(!only32)
6107
6108 if(invert) {
6109 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6110 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6111 else if(match) emit_addnop(13);
6112 #endif
6113 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6114 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6115 if(internal)
6116 assem_debug("branch: internal\n");
6117 else
6118 assem_debug("branch: external\n");
6119 if(internal&&is_ds[(ba[i]-start)>>2]) {
6120 ds_assemble_entry(i);
6121 }
6122 else {
6123 add_to_linker((int)out,ba[i],internal);
6124 emit_jmp(0);
6125 }
6126 set_jump_target(nottaken,(int)out);
6127 }
6128
6129 if(adj) {
6130 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6131 }
6132 } // (!unconditional)
6133 } // if(ooo)
6134 else
6135 {
6136 // In-order execution (branch first)
6137 //printf("IOE\n");
6138 int nottaken=0;
6139 if(1) {
6140 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6141 if(1) {
6142 assert(fs>=0);
6143 emit_testimm(fs,0x800000);
6144 if(source[i]&0x10000) // BC1T
6145 {
6146 nottaken=(int)out;
6147 emit_jeq(1);
6148 }
6149 else // BC1F
6150 {
6151 nottaken=(int)out;
6152 emit_jne(1);
6153 }
6154 }
6155 } // if(!unconditional)
6156 int adj;
6157 uint64_t ds_unneeded=branch_regs[i].u;
6158 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6159 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6160 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6161 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6162 ds_unneeded|=1;
6163 ds_unneeded_upper|=1;
6164 // branch taken
6165 //assem_debug("1:\n");
6166 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6167 ds_unneeded,ds_unneeded_upper);
6168 // load regs
6169 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6170 address_generation(i+1,&branch_regs[i],0);
6171 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6172 ds_assemble(i+1,&branch_regs[i]);
6173 cc=get_reg(branch_regs[i].regmap,CCREG);
6174 if(cc==-1) {
6175 emit_loadreg(CCREG,cc=HOST_CCREG);
6176 // CHECK: Is the following instruction (fall thru) allocated ok?
6177 }
6178 assert(cc==HOST_CCREG);
6179 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6180 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6181 assem_debug("cycle count (adj)\n");
6182 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6183 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6184 if(internal)
6185 assem_debug("branch: internal\n");
6186 else
6187 assem_debug("branch: external\n");
6188 if(internal&&is_ds[(ba[i]-start)>>2]) {
6189 ds_assemble_entry(i);
6190 }
6191 else {
6192 add_to_linker((int)out,ba[i],internal);
6193 emit_jmp(0);
6194 }
6195
6196 // branch not taken
6197 if(1) { // <- FIXME (don't need this)
6198 set_jump_target(nottaken,(int)out);
6199 assem_debug("1:\n");
6200 if(!likely[i]) {
6201 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6202 ds_unneeded,ds_unneeded_upper);
6203 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6204 address_generation(i+1,&branch_regs[i],0);
6205 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6206 ds_assemble(i+1,&branch_regs[i]);
6207 }
6208 cc=get_reg(branch_regs[i].regmap,CCREG);
6209 if(cc==-1&&!likely[i]) {
6210 // Cycle count isn't in a register, temporarily load it then write it out
6211 emit_loadreg(CCREG,HOST_CCREG);
6212 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6213 int jaddr=(int)out;
6214 emit_jns(0);
6215 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6216 emit_storereg(CCREG,HOST_CCREG);
6217 }
6218 else{
6219 cc=get_reg(i_regmap,CCREG);
6220 assert(cc==HOST_CCREG);
6221 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6222 int jaddr=(int)out;
6223 emit_jns(0);
6224 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6225 }
6226 }
6227 }
6228}
6229
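// The function below assembles a branch whose delay slot lies on the next
// virtual page (itype SPAN). Rather than branching directly, the target
// address is computed into HOST_BTREG (spilled to branch_target if needed)
// and the block is exited; pagespan_ds further below assembles the delay
// slot and then dispatches to that address.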
6230static void pagespan_assemble(int i,struct regstat *i_regs)
6231{
6232 int s1l=get_reg(i_regs->regmap,rs1[i]);
6233 int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6234 int s2l=get_reg(i_regs->regmap,rs2[i]);
6235 int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6236 void *nt_branch=NULL;
6237 int taken=0;
6238 int nottaken=0;
6239 int unconditional=0;
6240 if(rs1[i]==0)
6241 {
6242 s1l=s2l;s1h=s2h;
6243 s2l=s2h=-1;
6244 }
6245 else if(rs2[i]==0)
6246 {
6247 s2l=s2h=-1;
6248 }
6249 if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6250 s1h=s2h=-1;
6251 }
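 // Pick scratch host registers (addr, alt, and ntaddr for BLEZ/BGTZ) that do
 // not clash with rs1/rs2 or the cycle counter; the branch target address is
 // then built in 'addr' with the conditional moves below.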
6252 int hr=0;
6253 int addr,alt,ntaddr;
6254 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6255 else {
6256 while(hr<HOST_REGS)
6257 {
6258 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6259 (i_regs->regmap[hr]&63)!=rs1[i] &&
6260 (i_regs->regmap[hr]&63)!=rs2[i] )
6261 {
6262 addr=hr++;break;
6263 }
6264 hr++;
6265 }
6266 }
6267 while(hr<HOST_REGS)
6268 {
6269 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6270 (i_regs->regmap[hr]&63)!=rs1[i] &&
6271 (i_regs->regmap[hr]&63)!=rs2[i] )
6272 {
6273 alt=hr++;break;
6274 }
6275 hr++;
6276 }
6277 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6278 {
6279 while(hr<HOST_REGS)
6280 {
6281 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6282 (i_regs->regmap[hr]&63)!=rs1[i] &&
6283 (i_regs->regmap[hr]&63)!=rs2[i] )
6284 {
6285 ntaddr=hr;break;
6286 }
6287 hr++;
6288 }
6289 }
6290 assert(hr<HOST_REGS);
6291 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6292 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6293 }
6294 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6295 if(opcode[i]==2) // J
6296 {
6297 unconditional=1;
6298 }
6299 if(opcode[i]==3) // JAL
6300 {
6301 // TODO: mini_ht
6302 int rt=get_reg(i_regs->regmap,31);
6303 emit_movimm(start+i*4+8,rt);
6304 unconditional=1;
6305 }
6306 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6307 {
6308 emit_mov(s1l,addr);
6309 if(opcode2[i]==9) // JALR
6310 {
5067f341 6311 int rt=get_reg(i_regs->regmap,rt1[i]);
57871462 6312 emit_movimm(start+i*4+8,rt);
6313 }
6314 }
6315 if((opcode[i]&0x3f)==4) // BEQ
6316 {
6317 if(rs1[i]==rs2[i])
6318 {
6319 unconditional=1;
6320 }
6321 else
6322 #ifdef HAVE_CMOV_IMM
6323 if(s1h<0) {
6324 if(s2l>=0) emit_cmp(s1l,s2l);
6325 else emit_test(s1l,s1l);
6326 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6327 }
6328 else
6329 #endif
6330 {
6331 assert(s1l>=0);
6332 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6333 if(s1h>=0) {
6334 if(s2h>=0) emit_cmp(s1h,s2h);
6335 else emit_test(s1h,s1h);
6336 emit_cmovne_reg(alt,addr);
6337 }
6338 if(s2l>=0) emit_cmp(s1l,s2l);
6339 else emit_test(s1l,s1l);
6340 emit_cmovne_reg(alt,addr);
6341 }
6342 }
6343 if((opcode[i]&0x3f)==5) // BNE
6344 {
6345 #ifdef HAVE_CMOV_IMM
6346 if(s1h<0) {
6347 if(s2l>=0) emit_cmp(s1l,s2l);
6348 else emit_test(s1l,s1l);
6349 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6350 }
6351 else
6352 #endif
6353 {
6354 assert(s1l>=0);
6355 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6356 if(s1h>=0) {
6357 if(s2h>=0) emit_cmp(s1h,s2h);
6358 else emit_test(s1h,s1h);
6359 emit_cmovne_reg(alt,addr);
6360 }
6361 if(s2l>=0) emit_cmp(s1l,s2l);
6362 else emit_test(s1l,s1l);
6363 emit_cmovne_reg(alt,addr);
6364 }
6365 }
6366 if((opcode[i]&0x3f)==0x14) // BEQL
6367 {
6368 if(s1h>=0) {
6369 if(s2h>=0) emit_cmp(s1h,s2h);
6370 else emit_test(s1h,s1h);
6371 nottaken=(int)out;
6372 emit_jne(0);
6373 }
6374 if(s2l>=0) emit_cmp(s1l,s2l);
6375 else emit_test(s1l,s1l);
6376 if(nottaken) set_jump_target(nottaken,(int)out);
6377 nottaken=(int)out;
6378 emit_jne(0);
6379 }
6380 if((opcode[i]&0x3f)==0x15) // BNEL
6381 {
6382 if(s1h>=0) {
6383 if(s2h>=0) emit_cmp(s1h,s2h);
6384 else emit_test(s1h,s1h);
6385 taken=(int)out;
6386 emit_jne(0);
6387 }
6388 if(s2l>=0) emit_cmp(s1l,s2l);
6389 else emit_test(s1l,s1l);
6390 nottaken=(int)out;
6391 emit_jeq(0);
6392 if(taken) set_jump_target(taken,(int)out);
6393 }
6394 if((opcode[i]&0x3f)==6) // BLEZ
6395 {
6396 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6397 emit_cmpimm(s1l,1);
6398 if(s1h>=0) emit_mov(addr,ntaddr);
6399 emit_cmovl_reg(alt,addr);
6400 if(s1h>=0) {
6401 emit_test(s1h,s1h);
6402 emit_cmovne_reg(ntaddr,addr);
6403 emit_cmovs_reg(alt,addr);
6404 }
6405 }
6406 if((opcode[i]&0x3f)==7) // BGTZ
6407 {
6408 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6409 emit_cmpimm(s1l,1);
6410 if(s1h>=0) emit_mov(addr,alt);
6411 emit_cmovl_reg(ntaddr,addr);
6412 if(s1h>=0) {
6413 emit_test(s1h,s1h);
6414 emit_cmovne_reg(alt,addr);
6415 emit_cmovs_reg(ntaddr,addr);
6416 }
6417 }
6418 if((opcode[i]&0x3f)==0x16) // BLEZL
6419 {
6420 assert((opcode[i]&0x3f)!=0x16);
6421 }
6422 if((opcode[i]&0x3f)==0x17) // BGTZL
6423 {
6424 assert((opcode[i]&0x3f)!=0x17);
6425 }
6426 assert(opcode[i]!=1); // BLTZ/BGEZ
6427
6428 //FIXME: Check CSREG
6429 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6430 if((source[i]&0x30000)==0) // BC1F
6431 {
6432 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6433 emit_testimm(s1l,0x800000);
6434 emit_cmovne_reg(alt,addr);
6435 }
6436 if((source[i]&0x30000)==0x10000) // BC1T
6437 {
6438 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6439 emit_testimm(s1l,0x800000);
6440 emit_cmovne_reg(alt,addr);
6441 }
6442 if((source[i]&0x30000)==0x20000) // BC1FL
6443 {
6444 emit_testimm(s1l,0x800000);
6445 nottaken=(int)out;
6446 emit_jne(0);
6447 }
6448 if((source[i]&0x30000)==0x30000) // BC1TL
6449 {
6450 emit_testimm(s1l,0x800000);
6451 nottaken=(int)out;
6452 emit_jeq(0);
6453 }
6454 }
6455
6456 assert(i_regs->regmap[HOST_CCREG]==CCREG);
6457 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6458 if(likely[i]||unconditional)
6459 {
6460 emit_movimm(ba[i],HOST_BTREG);
6461 }
6462 else if(addr!=HOST_BTREG)
6463 {
6464 emit_mov(addr,HOST_BTREG);
6465 }
6466 void *branch_addr=out;
6467 emit_jmp(0);
6468 int target_addr=start+i*4+5;
6469 void *stub=out;
6470 void *compiled_target_addr=check_addr(target_addr);
6471 emit_extjump_ds((int)branch_addr,target_addr);
6472 if(compiled_target_addr) {
6473 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6474 add_link(target_addr,stub);
6475 }
6476 else set_jump_target((int)branch_addr,(int)stub);
6477 if(likely[i]) {
6478 // Not-taken path
6479 set_jump_target((int)nottaken,(int)out);
6480 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6481 void *branch_addr=out;
6482 emit_jmp(0);
6483 int target_addr=start+i*4+8;
6484 void *stub=out;
6485 void *compiled_target_addr=check_addr(target_addr);
6486 emit_extjump_ds((int)branch_addr,target_addr);
6487 if(compiled_target_addr) {
6488 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6489 add_link(target_addr,stub);
6490 }
6491 else set_jump_target((int)branch_addr,(int)stub);
6492 }
6493}
6494
6495// Assemble the delay slot for the above
6496static void pagespan_ds()
6497{
6498 assem_debug("initial delay slot:\n");
6499 u_int vaddr=start+1;
94d23bb9 6500 u_int page=get_page(vaddr);
6501 u_int vpage=get_vpage(vaddr);
57871462 6502 ll_add(jump_dirty+vpage,vaddr,(void *)out);
6503 do_dirty_stub_ds();
6504 ll_add(jump_in+page,vaddr,(void *)out);
6505 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6506 if(regs[0].regmap[HOST_CCREG]!=CCREG)
6507 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6508 if(regs[0].regmap[HOST_BTREG]!=BTREG)
6509 emit_writeword(HOST_BTREG,(int)&branch_target);
6510 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6511 address_generation(0,&regs[0],regs[0].regmap_entry);
b9b61529 6512 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
57871462 6513 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6514 cop1_usable=0;
6515 is_delayslot=0;
6516 switch(itype[0]) {
6517 case ALU:
6518 alu_assemble(0,&regs[0]);break;
6519 case IMM16:
6520 imm16_assemble(0,&regs[0]);break;
6521 case SHIFT:
6522 shift_assemble(0,&regs[0]);break;
6523 case SHIFTIMM:
6524 shiftimm_assemble(0,&regs[0]);break;
6525 case LOAD:
6526 load_assemble(0,&regs[0]);break;
6527 case LOADLR:
6528 loadlr_assemble(0,&regs[0]);break;
6529 case STORE:
6530 store_assemble(0,&regs[0]);break;
6531 case STORELR:
6532 storelr_assemble(0,&regs[0]);break;
6533 case COP0:
6534 cop0_assemble(0,&regs[0]);break;
6535 case COP1:
6536 cop1_assemble(0,&regs[0]);break;
6537 case C1LS:
6538 c1ls_assemble(0,&regs[0]);break;
b9b61529 6539 case COP2:
6540 cop2_assemble(0,&regs[0]);break;
6541 case C2LS:
6542 c2ls_assemble(0,&regs[0]);break;
6543 case C2OP:
6544 c2op_assemble(0,&regs[0]);break;
57871462 6545 case FCONV:
6546 fconv_assemble(0,&regs[0]);break;
6547 case FLOAT:
6548 float_assemble(0,&regs[0]);break;
6549 case FCOMP:
6550 fcomp_assemble(0,&regs[0]);break;
6551 case MULTDIV:
6552 multdiv_assemble(0,&regs[0]);break;
6553 case MOV:
6554 mov_assemble(0,&regs[0]);break;
6555 case SYSCALL:
7139f3c8 6556 case HLECALL:
1e973cb0 6557 case INTCALL:
57871462 6558 case SPAN:
6559 case UJUMP:
6560 case RJUMP:
6561 case CJUMP:
6562 case SJUMP:
6563 case FJUMP:
6564 printf("Jump in the delay slot. This is probably a bug.\n");
6565 }
6566 int btaddr=get_reg(regs[0].regmap,BTREG);
6567 if(btaddr<0) {
6568 btaddr=get_reg(regs[0].regmap,-1);
6569 emit_readword((int)&branch_target,btaddr);
6570 }
6571 assert(btaddr!=HOST_CCREG);
6572 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6573#ifdef HOST_IMM8
6574 emit_movimm(start+4,HOST_TEMPREG);
6575 emit_cmp(btaddr,HOST_TEMPREG);
6576#else
6577 emit_cmpimm(btaddr,start+4);
6578#endif
6579 int branch=(int)out;
6580 emit_jeq(0);
6581 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6582 emit_jmp(jump_vaddr_reg[btaddr]);
6583 set_jump_target(branch,(int)out);
6584 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6585 load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6586}
6587
6588// Basic liveness analysis for MIPS registers
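// u/uu are bitmasks over the MIPS registers: bit r set means the value in
// register r (lower/upper 32 bits respectively) is not needed beyond this
// point. Bit 0 stays set because r0 is hardwired to zero. The scan runs
// backwards from iend to istart and follows internal branch targets with
// limited recursion (see the r<2 check below).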
6589void unneeded_registers(int istart,int iend,int r)
6590{
6591 int i;
6592 uint64_t u,uu,b,bu;
6593 uint64_t temp_u,temp_uu;
6594 uint64_t tdep;
6595 if(iend==slen-1) {
6596 u=1;uu=1;
6597 }else{
6598 u=unneeded_reg[iend+1];
6599 uu=unneeded_reg_upper[iend+1];
6600 u=1;uu=1;
6601 }
6602 for (i=iend;i>=istart;i--)
6603 {
6604 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6605 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6606 {
6607 // If subroutine call, flag return address as a possible branch target
6608 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6609
6610 if(ba[i]<start || ba[i]>=(start+slen*4))
6611 {
6612 // Branch out of this block, flush all regs
6613 u=1;
6614 uu=1;
6615 /* Hexagon hack
6616 if(itype[i]==UJUMP&&rt1[i]==31)
6617 {
6618 uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6619 }
6620 if(itype[i]==RJUMP&&rs1[i]==31)
6621 {
6622 uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6623 }
4cb76aa4 6624 if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
57871462 6625 if(itype[i]==UJUMP&&rt1[i]==31)
6626 {
6627 //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6628 uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6629 }
6630 if(itype[i]==RJUMP&&rs1[i]==31)
6631 {
6632 //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6633 uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6634 }
6635 }*/
6636 branch_unneeded_reg[i]=u;
6637 branch_unneeded_reg_upper[i]=uu;
6638 // Merge in delay slot
6639 tdep=(~uu>>rt1[i+1])&1;
6640 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6641 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6642 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6643 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6644 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6645 u|=1;uu|=1;
6646 // If branch is "likely" (and conditional)
6647 // then we skip the delay slot on the fall-thru path
6648 if(likely[i]) {
6649 if(i<slen-1) {
6650 u&=unneeded_reg[i+2];
6651 uu&=unneeded_reg_upper[i+2];
6652 }
6653 else
6654 {
6655 u=1;
6656 uu=1;
6657 }
6658 }
6659 }
6660 else
6661 {
6662 // Internal branch, flag target
6663 bt[(ba[i]-start)>>2]=1;
6664 if(ba[i]<=start+i*4) {
6665 // Backward branch
6666 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6667 {
6668 // Unconditional branch
6669 temp_u=1;temp_uu=1;
6670 } else {
6671 // Conditional branch (not taken case)
6672 temp_u=unneeded_reg[i+2];
6673 temp_uu=unneeded_reg_upper[i+2];
6674 }
6675 // Merge in delay slot
6676 tdep=(~temp_uu>>rt1[i+1])&1;
6677 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6678 temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6679 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6680 temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6681 temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6682 temp_u|=1;temp_uu|=1;
6683 // If branch is "likely" (and conditional)
6684 // then we skip the delay slot on the fall-thru path
6685 if(likely[i]) {
6686 if(i<slen-1) {
6687 temp_u&=unneeded_reg[i+2];
6688 temp_uu&=unneeded_reg_upper[i+2];
6689 }
6690 else
6691 {
6692 temp_u=1;
6693 temp_uu=1;
6694 }
6695 }
6696 tdep=(~temp_uu>>rt1[i])&1;
6697 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6698 temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6699 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6700 temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6701 temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6702 temp_u|=1;temp_uu|=1;
6703 unneeded_reg[i]=temp_u;
6704 unneeded_reg_upper[i]=temp_uu;
6705 // Only go three levels deep. This recursion can take an
6706 // excessive amount of time if there are a lot of nested loops.
6707 if(r<2) {
6708 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6709 }else{
6710 unneeded_reg[(ba[i]-start)>>2]=1;
6711 unneeded_reg_upper[(ba[i]-start)>>2]=1;
6712 }
6713 } /*else*/ if(1) {
6714 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6715 {
6716 // Unconditional branch
6717 u=unneeded_reg[(ba[i]-start)>>2];
6718 uu=unneeded_reg_upper[(ba[i]-start)>>2];
6719 branch_unneeded_reg[i]=u;
6720 branch_unneeded_reg_upper[i]=uu;
6721 //u=1;
6722 //uu=1;
6723 //branch_unneeded_reg[i]=u;
6724 //branch_unneeded_reg_upper[i]=uu;
6725 // Merge in delay slot
6726 tdep=(~uu>>rt1[i+1])&1;
6727 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6728 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6729 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6730 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6731 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6732 u|=1;uu|=1;
6733 } else {
6734 // Conditional branch
6735 b=unneeded_reg[(ba[i]-start)>>2];
6736 bu=unneeded_reg_upper[(ba[i]-start)>>2];
6737 branch_unneeded_reg[i]=b;
6738 branch_unneeded_reg_upper[i]=bu;
6739 //b=1;
6740 //bu=1;
6741 //branch_unneeded_reg[i]=b;
6742 //branch_unneeded_reg_upper[i]=bu;
6743 // Branch delay slot
6744 tdep=(~uu>>rt1[i+1])&1;
6745 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6746 bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6747 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6748 bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6749 bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6750 b|=1;bu|=1;
6751 // If branch is "likely" then we skip the
6752 // delay slot on the fall-thru path
6753 if(likely[i]) {
6754 u=b;
6755 uu=bu;
6756 if(i<slen-1) {
6757 u&=unneeded_reg[i+2];
6758 uu&=unneeded_reg_upper[i+2];
6759 //u=1;
6760 //uu=1;
6761 }
6762 } else {
6763 u&=b;
6764 uu&=bu;
6765 //u=1;
6766 //uu=1;
6767 }
6768 if(i<slen-1) {
6769 branch_unneeded_reg[i]&=unneeded_reg[i+2];
6770 branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6771 //branch_unneeded_reg[i]=1;
6772 //branch_unneeded_reg_upper[i]=1;
6773 } else {
6774 branch_unneeded_reg[i]=1;
6775 branch_unneeded_reg_upper[i]=1;
6776 }
6777 }
6778 }
6779 }
6780 }
1e973cb0 6781 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 6782 {
6783 // SYSCALL instruction (software interrupt)
6784 u=1;
6785 uu=1;
6786 }
6787 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6788 {
6789 // ERET instruction (return from interrupt)
6790 u=1;
6791 uu=1;
6792 }
6793 //u=uu=1; // DEBUG
6794 tdep=(~uu>>rt1[i])&1;
6795 // Written registers are unneeded
6796 u|=1LL<<rt1[i];
6797 u|=1LL<<rt2[i];
6798 uu|=1LL<<rt1[i];
6799 uu|=1LL<<rt2[i];
6800 // Accessed registers are needed
6801 u&=~(1LL<<rs1[i]);
6802 u&=~(1LL<<rs2[i]);
6803 uu&=~(1LL<<us1[i]);
6804 uu&=~(1LL<<us2[i]);
6805 // Source-target dependencies
6806 uu&=~(tdep<<dep1[i]);
6807 uu&=~(tdep<<dep2[i]);
6808 // R0 is always unneeded
6809 u|=1;uu|=1;
6810 // Save it
6811 unneeded_reg[i]=u;
6812 unneeded_reg_upper[i]=uu;
6813 /*
6814 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6815 printf("U:");
6816 int r;
6817 for(r=1;r<=CCREG;r++) {
6818 if((unneeded_reg[i]>>r)&1) {
6819 if(r==HIREG) printf(" HI");
6820 else if(r==LOREG) printf(" LO");
6821 else printf(" r%d",r);
6822 }
6823 }
6824 printf(" UU:");
6825 for(r=1;r<=CCREG;r++) {
6826 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6827 if(r==HIREG) printf(" HI");
6828 else if(r==LOREG) printf(" LO");
6829 else printf(" r%d",r);
6830 }
6831 }
6832 printf("\n");*/
6833 }
252c20fc 6834#ifdef FORCE32
6835 for (i=iend;i>=istart;i--)
6836 {
6837 unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6838 }
6839#endif
57871462 6840}
6841
6842// Identify registers which are likely to contain 32-bit values
6843// This is used to predict whether any branches will jump to a
6844// location with 64-bit values in registers.
6845static void provisional_32bit()
6846{
6847 int i,j;
6848 uint64_t is32=1;
6849 uint64_t lastbranch=1;
6850
6851 for(i=0;i<slen;i++)
6852 {
6853 if(i>0) {
6854 if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6855 if(i>1) is32=lastbranch;
6856 else is32=1;
6857 }
6858 }
6859 if(i>1)
6860 {
6861 if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6862 if(likely[i-2]) {
6863 if(i>2) is32=lastbranch;
6864 else is32=1;
6865 }
6866 }
6867 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6868 {
6869 if(rs1[i-2]==0||rs2[i-2]==0)
6870 {
6871 if(rs1[i-2]) {
6872 is32|=1LL<<rs1[i-2];
6873 }
6874 if(rs2[i-2]) {
6875 is32|=1LL<<rs2[i-2];
6876 }
6877 }
6878 }
6879 }
6880 // If something jumps here with 64-bit values
6881 // then promote those registers to 64 bits
6882 if(bt[i])
6883 {
6884 uint64_t temp_is32=is32;
6885 for(j=i-1;j>=0;j--)
6886 {
6887 if(ba[j]==start+i*4)
6888 //temp_is32&=branch_regs[j].is32;
6889 temp_is32&=p32[j];
6890 }
6891 for(j=i;j<slen;j++)
6892 {
6893 if(ba[j]==start+i*4)
6894 temp_is32=1;
6895 }
6896 is32=temp_is32;
6897 }
6898 int type=itype[i];
6899 int op=opcode[i];
6900 int op2=opcode2[i];
6901 int rt=rt1[i];
6902 int s1=rs1[i];
6903 int s2=rs2[i];
6904 if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6905 // Branches don't write registers, consider the delay slot instead.
6906 type=itype[i+1];
6907 op=opcode[i+1];
6908 op2=opcode2[i+1];
6909 rt=rt1[i+1];
6910 s1=rs1[i+1];
6911 s2=rs2[i+1];
6912 lastbranch=is32;
6913 }
6914 switch(type) {
6915 case LOAD:
6916 if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6917 opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6918 is32&=~(1LL<<rt);
6919 else
6920 is32|=1LL<<rt;
6921 break;
6922 case STORE:
6923 case STORELR:
6924 break;
6925 case LOADLR:
6926 if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6927 if(op==0x22) is32|=1LL<<rt; // LWL
6928 break;
6929 case IMM16:
6930 if (op==0x08||op==0x09|| // ADDI/ADDIU
6931 op==0x0a||op==0x0b|| // SLTI/SLTIU
6932 op==0x0c|| // ANDI
6933 op==0x0f) // LUI
6934 {
6935 is32|=1LL<<rt;
6936 }
6937 if(op==0x18||op==0x19) { // DADDI/DADDIU
6938 is32&=~(1LL<<rt);
6939 //if(imm[i]==0)
6940 // is32|=((is32>>s1)&1LL)<<rt;
6941 }
6942 if(op==0x0d||op==0x0e) { // ORI/XORI
6943 uint64_t sr=((is32>>s1)&1LL);
6944 is32&=~(1LL<<rt);
6945 is32|=sr<<rt;
6946 }
6947 break;
6948 case UJUMP:
6949 break;
6950 case RJUMP:
6951 break;
6952 case CJUMP:
6953 break;
6954 case SJUMP:
6955 break;
6956 case FJUMP:
6957 break;
6958 case ALU:
6959 if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6960 is32|=1LL<<rt;
6961 }
6962 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6963 is32|=1LL<<rt;
6964 }
6965 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6966 uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6967 is32&=~(1LL<<rt);
6968 is32|=sr<<rt;
6969 }
6970 else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6971 if(s1==0&&s2==0) {
6972 is32|=1LL<<rt;
6973 }
6974 else if(s2==0) {
6975 uint64_t sr=((is32>>s1)&1LL);
6976 is32&=~(1LL<<rt);
6977 is32|=sr<<rt;
6978 }
6979 else if(s1==0) {
6980 uint64_t sr=((is32>>s2)&1LL);
6981 is32&=~(1LL<<rt);
6982 is32|=sr<<rt;
6983 }
6984 else {
6985 is32&=~(1LL<<rt);
6986 }
6987 }
6988 else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6989 if(s1==0&&s2==0) {
6990 is32|=1LL<<rt;
6991 }
6992 else if(s2==0) {
6993 uint64_t sr=((is32>>s1)&1LL);
6994 is32&=~(1LL<<rt);
6995 is32|=sr<<rt;
6996 }
6997 else {
6998 is32&=~(1LL<<rt);
6999 }
7000 }
7001 break;
7002 case MULTDIV:
7003 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7004 is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7005 }
7006 else {
7007 is32|=(1LL<<HIREG)|(1LL<<LOREG);
7008 }
7009 break;
7010 case MOV:
7011 {
7012 uint64_t sr=((is32>>s1)&1LL);
7013 is32&=~(1LL<<rt);
7014 is32|=sr<<rt;
7015 }
7016 break;
7017 case SHIFT:
7018 if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7019 else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7020 break;
7021 case SHIFTIMM:
7022 is32|=1LL<<rt;
7023 // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7024 if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7025 break;
7026 case COP0:
7027 if(op2==0) is32|=1LL<<rt; // MFC0
7028 break;
7029 case COP1:
b9b61529 7030 case COP2:
57871462 7031 if(op2==0) is32|=1LL<<rt; // MFC1
7032 if(op2==1) is32&=~(1LL<<rt); // DMFC1
7033 if(op2==2) is32|=1LL<<rt; // CFC1
7034 break;
7035 case C1LS:
b9b61529 7036 case C2LS:
57871462 7037 break;
7038 case FLOAT:
7039 case FCONV:
7040 break;
7041 case FCOMP:
7042 break;
b9b61529 7043 case C2OP:
57871462 7044 case SYSCALL:
7139f3c8 7045 case HLECALL:
57871462 7046 break;
7047 default:
7048 break;
7049 }
7050 is32|=1;
7051 p32[i]=is32;
7052
7053 if(i>0)
7054 {
7055 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7056 {
7057 if(rt1[i-1]==31) // JAL/JALR
7058 {
7059 // Subroutine call will return here, don't alloc any registers
7060 is32=1;
7061 }
7062 else if(i+1<slen)
7063 {
7064 // Internal branch will jump here, match registers to caller
7065 is32=0x3FFFFFFFFLL;
7066 }
7067 }
7068 }
7069 }
7070}
7071
7072// Identify registers which may be assumed to contain 32-bit values
7073// and where optimizations will rely on this.
7074// This is used to determine whether backward branches can safely
7075// jump to a location with 64-bit values in registers.
7076static void provisional_r32()
7077{
7078 u_int r32=0;
7079 int i;
7080
7081 for (i=slen-1;i>=0;i--)
7082 {
7083 int hr;
7084 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7085 {
7086 if(ba[i]<start || ba[i]>=(start+slen*4))
7087 {
7088 // Branch out of this block, don't need anything
7089 r32=0;
7090 }
7091 else
7092 {
7093 // Internal branch
7094 // Need whatever matches the target
7095 // (and doesn't get overwritten by the delay slot instruction)
7096 r32=0;
7097 int t=(ba[i]-start)>>2;
7098 if(ba[i]>start+i*4) {
7099 // Forward branch
7100 //if(!(requires_32bit[t]&~regs[i].was32))
7101 // r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7102 if(!(pr32[t]&~regs[i].was32))
7103 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7104 }else{
7105 // Backward branch
7106 if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7107 r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7108 }
7109 }
7110 // Conditional branch may need registers for following instructions
7111 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7112 {
7113 if(i<slen-2) {
7114 //r32|=requires_32bit[i+2];
7115 r32|=pr32[i+2];
7116 r32&=regs[i].was32;
7117 // Mark this address as a branch target since it may be called
7118 // upon return from interrupt
7119 //bt[i+2]=1;
7120 }
7121 }
7122 // Merge in delay slot
7123 if(!likely[i]) {
7124 // These are overwritten unless the branch is "likely"
7125 // and the delay slot is nullified if not taken
7126 r32&=~(1LL<<rt1[i+1]);
7127 r32&=~(1LL<<rt2[i+1]);
7128 }
7129 // Assume these are needed (delay slot)
7130 if(us1[i+1]>0)
7131 {
7132 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7133 }
7134 if(us2[i+1]>0)
7135 {
7136 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7137 }
7138 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7139 {
7140 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7141 }
7142 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7143 {
7144 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7145 }
7146 }
1e973cb0 7147 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 7148 {
7149 // SYSCALL instruction (software interrupt)
7150 r32=0;
7151 }
7152 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7153 {
7154 // ERET instruction (return from interrupt)
7155 r32=0;
7156 }
7157 // Check 32 bits
7158 r32&=~(1LL<<rt1[i]);
7159 r32&=~(1LL<<rt2[i]);
7160 if(us1[i]>0)
7161 {
7162 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7163 }
7164 if(us2[i]>0)
7165 {
7166 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7167 }
7168 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7169 {
7170 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7171 }
7172 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7173 {
7174 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7175 }
7176 //requires_32bit[i]=r32;
7177 pr32[i]=r32;
7178
 7179 // Dirty registers which are 32-bit require 32-bit input,
 7180 // as they will be written back as 32-bit values
7181 for(hr=0;hr<HOST_REGS;hr++)
7182 {
7183 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7184 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7185 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7186 pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7187 //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7188 }
7189 }
7190 }
7191 }
7192}
7193
7194// Write back dirty registers as soon as we will no longer modify them,
7195// so that we don't end up with lots of writes at the branches.
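// will_dirty[i]/wont_dirty[i] are per-instruction bitmaps over host registers,
// computed by walking the block backwards (following internal branch targets
// with limited recursion) and then folded into regs[i].dirty/wasdirty and
// branch_regs[i].dirty so that writebacks land where described above rather
// than piling up at the branches.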
7196void clean_registers(int istart,int iend,int wr)
7197{
7198 int i;
7199 int r;
7200 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7201 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7202 if(iend==slen-1) {
7203 will_dirty_i=will_dirty_next=0;
7204 wont_dirty_i=wont_dirty_next=0;
7205 }else{
7206 will_dirty_i=will_dirty_next=will_dirty[iend+1];
7207 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7208 }
7209 for (i=iend;i>=istart;i--)
7210 {
7211 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7212 {
7213 if(ba[i]<start || ba[i]>=(start+slen*4))
7214 {
7215 // Branch out of this block, flush all regs
7216 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7217 {
7218 // Unconditional branch
7219 will_dirty_i=0;
7220 wont_dirty_i=0;
7221 // Merge in delay slot (will dirty)
7222 for(r=0;r<HOST_REGS;r++) {
7223 if(r!=EXCLUDE_REG) {
7224 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7225 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7226 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7227 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7228 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7229 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7230 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7231 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7232 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7233 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7234 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7235 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7236 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7237 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7238 }
7239 }
7240 }
7241 else
7242 {
7243 // Conditional branch
7244 will_dirty_i=0;
7245 wont_dirty_i=wont_dirty_next;
7246 // Merge in delay slot (will dirty)
7247 for(r=0;r<HOST_REGS;r++) {
7248 if(r!=EXCLUDE_REG) {
7249 if(!likely[i]) {
7250 // Might not dirty if likely branch is not taken
7251 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7252 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7253 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7254 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7255 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7256 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7257 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7258 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7259 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7260 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7261 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7262 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7263 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7264 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7265 }
7266 }
7267 }
7268 }
7269 // Merge in delay slot (wont dirty)
7270 for(r=0;r<HOST_REGS;r++) {
7271 if(r!=EXCLUDE_REG) {
7272 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7273 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7274 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7275 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7276 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7277 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7278 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7279 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7280 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7281 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7282 }
7283 }
7284 if(wr) {
7285 #ifndef DESTRUCTIVE_WRITEBACK
7286 branch_regs[i].dirty&=wont_dirty_i;
7287 #endif
7288 branch_regs[i].dirty|=will_dirty_i;
7289 }
7290 }
7291 else
7292 {
7293 // Internal branch
7294 if(ba[i]<=start+i*4) {
7295 // Backward branch
7296 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7297 {
7298 // Unconditional branch
7299 temp_will_dirty=0;
7300 temp_wont_dirty=0;
7301 // Merge in delay slot (will dirty)
7302 for(r=0;r<HOST_REGS;r++) {
7303 if(r!=EXCLUDE_REG) {
7304 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7305 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7306 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7307 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7308 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7309 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7310 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7311 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7312 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7313 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7314 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7315 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7316 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7317 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7318 }
7319 }
7320 } else {
7321 // Conditional branch (not taken case)
7322 temp_will_dirty=will_dirty_next;
7323 temp_wont_dirty=wont_dirty_next;
7324 // Merge in delay slot (will dirty)
7325 for(r=0;r<HOST_REGS;r++) {
7326 if(r!=EXCLUDE_REG) {
7327 if(!likely[i]) {
7328 // Will not dirty if likely branch is not taken
7329 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7330 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7331 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7332 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7333 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7334 if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7335 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7336 //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7337 //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7338 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7339 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7340 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7341 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7342 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7343 }
7344 }
7345 }
7346 }
7347 // Merge in delay slot (wont dirty)
7348 for(r=0;r<HOST_REGS;r++) {
7349 if(r!=EXCLUDE_REG) {
7350 if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7351 if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7352 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7353 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7354 if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7355 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7356 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7357 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7358 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7359 if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7360 }
7361 }
7362 // Deal with changed mappings
7363 if(i<iend) {
7364 for(r=0;r<HOST_REGS;r++) {
7365 if(r!=EXCLUDE_REG) {
7366 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7367 temp_will_dirty&=~(1<<r);
7368 temp_wont_dirty&=~(1<<r);
7369 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7370 temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7371 temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7372 } else {
7373 temp_will_dirty|=1<<r;
7374 temp_wont_dirty|=1<<r;
7375 }
7376 }
7377 }
7378 }
7379 }
7380 if(wr) {
7381 will_dirty[i]=temp_will_dirty;
7382 wont_dirty[i]=temp_wont_dirty;
7383 clean_registers((ba[i]-start)>>2,i-1,0);
7384 }else{
7385 // Limit recursion. It can take an excessive amount
7386 // of time if there are a lot of nested loops.
7387 will_dirty[(ba[i]-start)>>2]=0;
7388 wont_dirty[(ba[i]-start)>>2]=-1;
7389 }
7390 }
7391 /*else*/ if(1)
7392 {
7393 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7394 {
7395 // Unconditional branch
7396 will_dirty_i=0;
7397 wont_dirty_i=0;
7398 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7399 for(r=0;r<HOST_REGS;r++) {
7400 if(r!=EXCLUDE_REG) {
7401 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7402 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7403 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7404 }
7405 }
7406 }
7407 //}
7408 // Merge in delay slot
7409 for(r=0;r<HOST_REGS;r++) {
7410 if(r!=EXCLUDE_REG) {
7411 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7412 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7413 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7414 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7415 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7416 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7417 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7418 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7419 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7420 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7421 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7422 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7423 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7424 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7425 }
7426 }
7427 } else {
7428 // Conditional branch
7429 will_dirty_i=will_dirty_next;
7430 wont_dirty_i=wont_dirty_next;
7431 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7432 for(r=0;r<HOST_REGS;r++) {
7433 if(r!=EXCLUDE_REG) {
7434 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7435 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7436 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7437 }
7438 else
7439 {
7440 will_dirty_i&=~(1<<r);
7441 }
7442 // Treat delay slot as part of branch too
7443 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7444 will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7445 wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7446 }
7447 else
7448 {
7449 will_dirty[i+1]&=~(1<<r);
7450 }*/
7451 }
7452 }
7453 //}
7454 // Merge in delay slot
7455 for(r=0;r<HOST_REGS;r++) {
7456 if(r!=EXCLUDE_REG) {
7457 if(!likely[i]) {
7458 // Might not dirty if likely branch is not taken
7459 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7460 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7461 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7462 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7463 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7464 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7465 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7466 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7467 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7468 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7469 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7470 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7471 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7472 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7473 }
7474 }
7475 }
7476 }
7477 // Merge in delay slot
7478 for(r=0;r<HOST_REGS;r++) {
7479 if(r!=EXCLUDE_REG) {
7480 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7481 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7482 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7483 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7484 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7485 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7486 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7487 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7488 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7489 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7490 }
7491 }
7492 if(wr) {
7493 #ifndef DESTRUCTIVE_WRITEBACK
7494 branch_regs[i].dirty&=wont_dirty_i;
7495 #endif
7496 branch_regs[i].dirty|=will_dirty_i;
7497 }
7498 }
7499 }
7500 }
1e973cb0 7501 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 7502 {
7503 // SYSCALL instruction (software interrupt)
7504 will_dirty_i=0;
7505 wont_dirty_i=0;
7506 }
7507 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7508 {
7509 // ERET instruction (return from interrupt)
7510 will_dirty_i=0;
7511 wont_dirty_i=0;
7512 }
7513 will_dirty_next=will_dirty_i;
7514 wont_dirty_next=wont_dirty_i;
7515 for(r=0;r<HOST_REGS;r++) {
7516 if(r!=EXCLUDE_REG) {
7517 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7518 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7519 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7520 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7521 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7522 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7523 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7524 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7525 if(i>istart) {
7526 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7527 {
7528 // Don't store a register immediately after writing it,
 7529 // as that may prevent dual-issue.
7530 if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7531 if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7532 }
7533 }
7534 }
7535 }
7536 // Save it
7537 will_dirty[i]=will_dirty_i;
7538 wont_dirty[i]=wont_dirty_i;
7539 // Mark registers that won't be dirtied as not dirty
7540 if(wr) {
7541 /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7542 for(r=0;r<HOST_REGS;r++) {
7543 if((will_dirty_i>>r)&1) {
7544 printf(" r%d",r);
7545 }
7546 }
7547 printf("\n");*/
7548
7549 //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7550 regs[i].dirty|=will_dirty_i;
7551 #ifndef DESTRUCTIVE_WRITEBACK
7552 regs[i].dirty&=wont_dirty_i;
7553 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7554 {
7555 if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7556 for(r=0;r<HOST_REGS;r++) {
7557 if(r!=EXCLUDE_REG) {
7558 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7559 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
 7560 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7561 }
7562 }
7563 }
7564 }
7565 else
7566 {
7567 if(i<iend) {
7568 for(r=0;r<HOST_REGS;r++) {
7569 if(r!=EXCLUDE_REG) {
7570 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7571 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
 7572 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7573 }
7574 }
7575 }
7576 }
7577 #endif
7578 //}
7579 }
7580 // Deal with changed mappings
7581 temp_will_dirty=will_dirty_i;
7582 temp_wont_dirty=wont_dirty_i;
7583 for(r=0;r<HOST_REGS;r++) {
7584 if(r!=EXCLUDE_REG) {
7585 int nr;
7586 if(regs[i].regmap[r]==regmap_pre[i][r]) {
7587 if(wr) {
7588 #ifndef DESTRUCTIVE_WRITEBACK
7589 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7590 #endif
7591 regs[i].wasdirty|=will_dirty_i&(1<<r);
7592 }
7593 }
7594 else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7595 // Register moved to a different register
7596 will_dirty_i&=~(1<<r);
7597 wont_dirty_i&=~(1<<r);
7598 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7599 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7600 if(wr) {
7601 #ifndef DESTRUCTIVE_WRITEBACK
7602 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7603 #endif
7604 regs[i].wasdirty|=will_dirty_i&(1<<r);
7605 }
7606 }
7607 else {
7608 will_dirty_i&=~(1<<r);
7609 wont_dirty_i&=~(1<<r);
7610 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7611 will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7612 wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7613 } else {
7614 wont_dirty_i|=1<<r;
 7615 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7616 }
7617 }
7618 }
7619 }
7620 }
7621}
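/* Summary of the analysis above (informal): will_dirty[] / wont_dirty[] are
   per-host-register bitmasks built by scanning the block backwards. Host
   registers holding a value written by the instruction (rt1/rt2) or the
   cycle count (CCREG) get their bits set, the masks are remapped whenever a
   value moves to a different host register ("Deal with changed mappings"),
   and when 'wr' is nonzero regs[i].dirty / wasdirty are adjusted in place so
   later passes emit only the write-backs that are actually needed. */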
7622
7623 /* disassembly */
7624void disassemble_inst(int i)
7625{
7626 if (bt[i]) printf("*"); else printf(" ");
7627 switch(itype[i]) {
7628 case UJUMP:
7629 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7630 case CJUMP:
7631 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7632 case SJUMP:
7633 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7634 case FJUMP:
7635 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7636 case RJUMP:
74426039 7637 if (opcode2[i]==0x9&&rt1[i]!=31)
5067f341 7638 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7639 else
7640 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7641 break;
57871462 7642 case SPAN:
7643 printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7644 case IMM16:
7645 if(opcode[i]==0xf) //LUI
7646 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7647 else
7648 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7649 break;
7650 case LOAD:
7651 case LOADLR:
7652 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7653 break;
7654 case STORE:
7655 case STORELR:
7656 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7657 break;
7658 case ALU:
7659 case SHIFT:
7660 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7661 break;
7662 case MULTDIV:
7663 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7664 break;
7665 case SHIFTIMM:
7666 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7667 break;
7668 case MOV:
7669 if((opcode2[i]&0x1d)==0x10)
7670 printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7671 else if((opcode2[i]&0x1d)==0x11)
7672 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7673 else
7674 printf (" %x: %s\n",start+i*4,insn[i]);
7675 break;
7676 case COP0:
7677 if(opcode2[i]==0)
7678 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7679 else if(opcode2[i]==4)
7680 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7681 else printf (" %x: %s\n",start+i*4,insn[i]);
7682 break;
7683 case COP1:
7684 if(opcode2[i]<3)
7685 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7686 else if(opcode2[i]>3)
7687 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7688 else printf (" %x: %s\n",start+i*4,insn[i]);
7689 break;
b9b61529 7690 case COP2:
7691 if(opcode2[i]<3)
7692 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7693 else if(opcode2[i]>3)
7694 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7695 else printf (" %x: %s\n",start+i*4,insn[i]);
7696 break;
57871462 7697 case C1LS:
7698 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7699 break;
b9b61529 7700 case C2LS:
7701 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7702 break;
1e973cb0 7703 case INTCALL:
7704 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7705 break;
57871462 7706 default:
7707 //printf (" %s %8x\n",insn[i],source[i]);
7708 printf (" %x: %s\n",start+i*4,insn[i]);
7709 }
7710}
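/* Illustrative example of the output format: for a non-branch-target LW with
   rt=2, rs=29, imm=0x10 at guest address 0x80010008, the LOAD case above
   prints something like
     "  80010008: LW r2,r29+10"
   (a leading '*' in place of the first space marks a branch target). */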
7711
dc990066 7712// clear the state completely, instead of just marking
7713// things invalid like invalidate_all_pages() does
7714void new_dynarec_clear_full()
57871462 7715{
57871462 7716 int n;
7717 for(n=0x80000;n<0x80800;n++)
7718 invalid_code[n]=1;
7719 for(n=0;n<65536;n++)
7720 hash_table[n][0]=hash_table[n][2]=-1;
7721 memset(mini_ht,-1,sizeof(mini_ht));
7722 memset(restore_candidate,0,sizeof(restore_candidate));
dc990066 7723 memset(shadow,0,sizeof(shadow));
57871462 7724 copy=shadow;
7725 expirep=16384; // Expiry pointer, +2 blocks
7726 pending_exception=0;
7727 literalcount=0;
57871462 7728 stop_after_jal=0;
7729 // TLB
7730 using_tlb=0;
7731 for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7732 memory_map[n]=-1;
7733 for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7734 memory_map[n]=((u_int)rdram-0x80000000)>>2;
7735 for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7736 memory_map[n]=-1;
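  /* memory_map[] has one entry per 4KB page (1048576 entries for 4GB).
     -1 means unmapped; otherwise the entry holds (host - guest_base)>>2, so
     the host address backing guest code can be recovered as
       source = (u_int *)(start + (memory_map[start>>12]<<2))
     which is exactly what new_recompile_block() does below. The 2048 entries
     set above map the 8MB RDRAM window 0x80000000..0x807FFFFF onto rdram. */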
dc990066 7737 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7738 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7739 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7740}
7741
7742void new_dynarec_init()
7743{
7744 printf("Init new dynarec\n");
7745 out=(u_char *)BASE_ADDR;
7746 if (mmap (out, 1<<TARGET_SIZE_2,
7747 PROT_READ | PROT_WRITE | PROT_EXEC,
7748 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 7749 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7750#ifdef MUPEN64
7751 rdword=&readmem_dword;
7752 fake_pc.f.r.rs=&readmem_dword;
7753 fake_pc.f.r.rt=&readmem_dword;
7754 fake_pc.f.r.rd=&readmem_dword;
7755#endif
7756 int n;
7757 new_dynarec_clear_full();
7758#ifdef HOST_IMM8
7759 // Copy this into local area so we don't have to put it in every literal pool
7760 invc_ptr=invalid_code;
7761#endif
24385cae 7762#ifdef MUPEN64
57871462 7763 for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7764 writemem[n] = write_nomem_new;
7765 writememb[n] = write_nomemb_new;
7766 writememh[n] = write_nomemh_new;
24385cae 7767#ifndef FORCE32
57871462 7768 writememd[n] = write_nomemd_new;
24385cae 7769#endif
57871462 7770 readmem[n] = read_nomem_new;
7771 readmemb[n] = read_nomemb_new;
7772 readmemh[n] = read_nomemh_new;
24385cae 7773#ifndef FORCE32
57871462 7774 readmemd[n] = read_nomemd_new;
24385cae 7775#endif
57871462 7776 }
7777 for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7778 writemem[n] = write_rdram_new;
7779 writememb[n] = write_rdramb_new;
7780 writememh[n] = write_rdramh_new;
24385cae 7781#ifndef FORCE32
57871462 7782 writememd[n] = write_rdramd_new;
24385cae 7783#endif
57871462 7784 }
7785 for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7786 writemem[n] = write_nomem_new;
7787 writememb[n] = write_nomemb_new;
7788 writememh[n] = write_nomemh_new;
24385cae 7789#ifndef FORCE32
57871462 7790 writememd[n] = write_nomemd_new;
24385cae 7791#endif
57871462 7792 readmem[n] = read_nomem_new;
7793 readmemb[n] = read_nomemb_new;
7794 readmemh[n] = read_nomemh_new;
24385cae 7795#ifndef FORCE32
57871462 7796 readmemd[n] = read_nomemd_new;
24385cae 7797#endif
57871462 7798 }
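  /* Note (hedged): these handler tables appear to be indexed by the top 16
     bits of the address (64KB per entry), so the 0x8000 entries above cover
     0x00000000..0x7FFFFFFF, the 0x80 entries cover the RDRAM window at
     0x80000000..0x807FFFFF, and the 0x4000 entries cover
     0xC0000000..0xFFFFFFFF. */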
24385cae 7799#endif
57871462 7800 tlb_hacks();
7801 arch_init();
7802}
7803
7804void new_dynarec_cleanup()
7805{
7806 int n;
7807 if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7808 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7809 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7810 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7811 #ifdef ROM_COPY
7812 if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7813 #endif
7814}
7815
7816int new_recompile_block(int addr)
7817{
7818/*
7819 if(addr==0x800cd050) {
7820 int block;
7821 for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7822 int n;
7823 for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7824 }
7825*/
7826 //if(Count==365117028) tracedebug=1;
7827 assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7828 //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7829 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7830 //if(debug)
7831 //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7832 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7833 /*if(Count>=312978186) {
7834 rlist();
7835 }*/
7836 //rlist();
7837 start = (u_int)addr&~3;
7838 //assert(((u_int)addr&1)==0);
7139f3c8 7839#ifdef PCSX
9ad4d757 7840 if (Config.HLE && start == 0x80001000) // hlecall
560e4a12 7841 {
7139f3c8 7842 // XXX: is this enough? Maybe check hleSoftCall?
bb5285ef 7843 u_int beginning=(u_int)out;
7139f3c8 7844 u_int page=get_page(start);
7139f3c8 7845 invalid_code[start>>12]=0;
7846 emit_movimm(start,0);
7847 emit_writeword(0,(int)&pcaddr);
bb5285ef 7848 emit_jmp((int)new_dyna_leave);
7849#ifdef __arm__
7850 __clear_cache((void *)beginning,out);
7851#endif
9ad4d757 7852 ll_add(jump_in+page,start,(void *)beginning);
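    // Summary of the stub emitted above: it stores 0x80001000 into pcaddr
    // and jumps to new_dyna_leave, so hitting the HLE entry point drops out
    // of the dynarec and lets the C-level HLE code run; the stub is added to
    // jump_in so later lookups reuse it instead of recompiling.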
7139f3c8 7853 return 0;
7854 }
560e4a12 7855 else if ((u_int)addr < 0x00200000 ||
7856 (0xa0000000 <= addr && addr < 0xa0200000)) {
7139f3c8 7857 // used for BIOS calls mostly?
560e4a12 7858 source = (u_int *)((u_int)rdram+(start&0x1fffff));
7859 pagelimit = (addr&0xa0000000)|0x00200000;
7860 }
7861 else if (!Config.HLE && (
7862/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7863 (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7864 // BIOS
7865 source = (u_int *)((u_int)psxR+(start&0x7ffff));
7866 pagelimit = (addr&0xfff00000)|0x80000;
7139f3c8 7867 }
7868 else
7869#endif
3d624f89 7870#ifdef MUPEN64
57871462 7871 if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7872 source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7873 pagelimit = 0xa4001000;
7874 }
3d624f89 7875 else
7876#endif
4cb76aa4 7877 if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
57871462 7878 source = (u_int *)((u_int)rdram+start-0x80000000);
4cb76aa4 7879 pagelimit = 0x80000000+RAM_SIZE;
57871462 7880 }
90ae6d4e 7881#ifndef DISABLE_TLB
57871462 7882 else if ((signed int)addr >= (signed int)0xC0000000) {
7883 //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7884 //if(tlb_LUT_r[start>>12])
7885 //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7886 if((signed int)memory_map[start>>12]>=0) {
7887 source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7888 pagelimit=(start+4096)&0xFFFFF000;
7889 int map=memory_map[start>>12];
7890 int i;
7891 for(i=0;i<5;i++) {
7892 //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7893 if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7894 }
7895 assem_debug("pagelimit=%x\n",pagelimit);
7896 assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7897 }
7898 else {
7899 assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7900 //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
560e4a12 7901 return -1; // Caller will invoke exception handler
57871462 7902 }
7903 //printf("source= %x\n",(int)source);
7904 }
90ae6d4e 7905#endif
57871462 7906 else {
7907 printf("Compile at bogus memory address: %x \n", (int)addr);
7908 exit(1);
7909 }
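  /* At this point 'source' points at the host memory backing the guest code
     at 'start', and 'pagelimit' is the first guest address past the mapped
     region; pass 1 below uses it to stop disassembling at the boundary. */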
7910
7911 /* Pass 1: disassemble */
7912 /* Pass 2: register dependencies, branch targets */
7913 /* Pass 3: register allocation */
7914 /* Pass 4: branch dependencies */
7915 /* Pass 5: pre-alloc */
7916 /* Pass 6: optimize clean/dirty state */
7917 /* Pass 7: flag 32-bit registers */
7918 /* Pass 8: assembly */
7919 /* Pass 9: linker */
7920 /* Pass 10: garbage collection / free memory */
7921
7922 int i,j;
7923 int done=0;
7924 unsigned int type,op,op2;
7925
7926 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7927
7928 /* Pass 1 disassembly */
7929
7930 for(i=0;!done;i++) {
e1190b87 7931 bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
7932 minimum_free_regs[i]=0;
57871462 7933 opcode[i]=op=source[i]>>26;
7934 switch(op)
7935 {
7936 case 0x00: strcpy(insn[i],"special"); type=NI;
7937 op2=source[i]&0x3f;
7938 switch(op2)
7939 {
7940 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7941 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7942 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7943 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7944 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7945 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7946 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7947 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7948 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7949 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7950 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7951 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7952 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7953 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7954 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7955 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7956 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7957 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7958 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7959 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7960 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7961 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7962 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7963 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7964 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7965 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7966 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7967 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7968 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7969 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7970 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7971 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7972 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7973 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7974 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7975 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7976 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7977 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7978 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7979 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7980 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7981 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7982 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7983 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7984 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7985 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7986 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7987 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7988 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7989 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7990 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7991 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7992 }
7993 break;
7994 case 0x01: strcpy(insn[i],"regimm"); type=NI;
7995 op2=(source[i]>>16)&0x1f;
7996 switch(op2)
7997 {
7998 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7999 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8000 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8001 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8002 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8003 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8004 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8005 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8006 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8007 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8008 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8009 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8010 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8011 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8012 }
8013 break;
8014 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8015 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8016 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8017 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8018 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8019 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8020 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8021 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8022 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8023 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8024 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8025 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8026 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8027 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8028 case 0x10: strcpy(insn[i],"cop0"); type=NI;
8029 op2=(source[i]>>21)&0x1f;
8030 switch(op2)
8031 {
8032 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8033 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8034 case 0x10: strcpy(insn[i],"tlb"); type=NI;
8035 switch(source[i]&0x3f)
8036 {
8037 case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8038 case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8039 case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8040 case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
576bbd8f 8041#ifdef PCSX
8042 case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8043#else
57871462 8044 case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
576bbd8f 8045#endif
57871462 8046 }
8047 }
8048 break;
8049 case 0x11: strcpy(insn[i],"cop1"); type=NI;
8050 op2=(source[i]>>21)&0x1f;
8051 switch(op2)
8052 {
8053 case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8054 case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8055 case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8056 case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8057 case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8058 case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8059 case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8060 switch((source[i]>>16)&0x3)
8061 {
8062 case 0x00: strcpy(insn[i],"BC1F"); break;
8063 case 0x01: strcpy(insn[i],"BC1T"); break;
8064 case 0x02: strcpy(insn[i],"BC1FL"); break;
8065 case 0x03: strcpy(insn[i],"BC1TL"); break;
8066 }
8067 break;
8068 case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8069 switch(source[i]&0x3f)
8070 {
8071 case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8072 case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8073 case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8074 case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8075 case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8076 case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8077 case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8078 case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8079 case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8080 case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8081 case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8082 case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8083 case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8084 case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8085 case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8086 case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8087 case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8088 case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8089 case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8090 case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8091 case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8092 case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8093 case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8094 case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8095 case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8096 case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8097 case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8098 case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8099 case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8100 case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8101 case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8102 case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8103 case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8104 case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8105 case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8106 }
8107 break;
8108 case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8109 switch(source[i]&0x3f)
8110 {
8111 case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8112 case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8113 case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8114 case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8115 case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8116 case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8117 case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8118 case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8119 case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8120 case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8121 case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8122 case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8123 case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8124 case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8125 case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8126 case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8127 case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8128 case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8129 case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8130 case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8131 case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8132 case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8133 case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8134 case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8135 case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8136 case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8137 case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8138 case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8139 case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8140 case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8141 case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8142 case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8143 case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8144 case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8145 case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8146 }
8147 break;
8148 case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8149 switch(source[i]&0x3f)
8150 {
8151 case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8152 case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8153 }
8154 break;
8155 case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8156 switch(source[i]&0x3f)
8157 {
8158 case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8159 case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8160 }
8161 break;
8162 }
8163 break;
909168d6 8164#ifndef FORCE32
57871462 8165 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8166 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8167 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8168 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8169 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8170 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8171 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8172 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
996cc15d 8173#endif
57871462 8174 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8175 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8176 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8177 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8178 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8179 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8180 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8181 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8182 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8183 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8184 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8185 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
996cc15d 8186#ifndef FORCE32
57871462 8187 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8188 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
996cc15d 8189#endif
57871462 8190 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8191 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8192 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8193 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
996cc15d 8194#ifndef FORCE32
57871462 8195 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8196 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8197 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
996cc15d 8198#endif
57871462 8199 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8200 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
996cc15d 8201#ifndef FORCE32
57871462 8202 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8203 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8204 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
996cc15d 8205#endif
b9b61529 8206#ifdef PCSX
8207 case 0x12: strcpy(insn[i],"COP2"); type=NI;
c7abc864 8208 // note: COP MIPS-1 encoding differs from MIPS32
b9b61529 8209 op2=(source[i]>>21)&0x1f;
c7abc864 8210 if (source[i]&0x3f) {
8211 if (gte_handlers[source[i]&0x3f]!=NULL) {
8212 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8213 type=C2OP;
8214 }
8215 }
8216 else switch(op2)
b9b61529 8217 {
8218 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8219 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8220 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8221 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
b9b61529 8222 }
8223 break;
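        /* Hedged note: in this MIPS-I style COP2 encoding a nonzero function
           field selects a GTE operation (dispatched through gte_handlers[]),
           while function 0 uses the rs field to pick MFC2/CFC2/MTC2/CTC2. */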
8224 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8225 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8226 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8227#endif
90ae6d4e 8228 default: strcpy(insn[i],"???"); type=NI;
75dec299 8229 printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
90ae6d4e 8230 break;
57871462 8231 }
1e973cb0 8232#ifdef PCSX
8233 /* detect branch in delay slot early */
8234 if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8235 opcode[i+1]=source[i+1]>>26;
8236 opcode2[i+1]=source[i+1]&0x3f;
8237 if((0<opcode[i+1]&&opcode[i+1]<8)||(opcode[i+1]==0&&(opcode2[i+1]==8||opcode2[i+1]==9))) {
8238 printf("branch in delay slot @%08x (%08x)\n", addr + i*4+4, addr);
 8239 // don't compile the first branch; fall back to the interpreter when it's hit
8240 type=INTCALL;
8241 }
8242 }
8243#endif
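    /* Rationale (hedged): primary opcodes 1..7 cover REGIMM (BLTZ/BGEZ...),
       J, JAL, BEQ, BNE, BLEZ and BGTZ, and SPECIAL functions 8/9 are JR/JALR,
       so the test above fires when the delay slot itself contains a branch;
       such blocks are punted to the interpreter by turning the first branch
       into an INTCALL. */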
57871462 8244 itype[i]=type;
8245 opcode2[i]=op2;
8246 /* Get registers/immediates */
8247 lt1[i]=0;
8248 us1[i]=0;
8249 us2[i]=0;
8250 dep1[i]=0;
8251 dep2[i]=0;
8252 switch(type) {
8253 case LOAD:
8254 rs1[i]=(source[i]>>21)&0x1f;
8255 rs2[i]=0;
8256 rt1[i]=(source[i]>>16)&0x1f;
8257 rt2[i]=0;
8258 imm[i]=(short)source[i];
8259 break;
8260 case STORE:
8261 case STORELR:
8262 rs1[i]=(source[i]>>21)&0x1f;
8263 rs2[i]=(source[i]>>16)&0x1f;
8264 rt1[i]=0;
8265 rt2[i]=0;
8266 imm[i]=(short)source[i];
8267 if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8268 break;
8269 case LOADLR:
8270 // LWL/LWR only load part of the register,
8271 // therefore the target register must be treated as a source too
8272 rs1[i]=(source[i]>>21)&0x1f;
8273 rs2[i]=(source[i]>>16)&0x1f;
8274 rt1[i]=(source[i]>>16)&0x1f;
8275 rt2[i]=0;
8276 imm[i]=(short)source[i];
8277 if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8278 if(op==0x26) dep1[i]=rt1[i]; // LWR
8279 break;
8280 case IMM16:
8281 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8282 else rs1[i]=(source[i]>>21)&0x1f;
8283 rs2[i]=0;
8284 rt1[i]=(source[i]>>16)&0x1f;
8285 rt2[i]=0;
8286 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8287 imm[i]=(unsigned short)source[i];
8288 }else{
8289 imm[i]=(short)source[i];
8290 }
8291 if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8292 if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8293 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8294 break;
8295 case UJUMP:
8296 rs1[i]=0;
8297 rs2[i]=0;
8298 rt1[i]=0;
8299 rt2[i]=0;
8300 // The JAL instruction writes to r31.
8301 if (op&1) {
8302 rt1[i]=31;
8303 }
8304 rs2[i]=CCREG;
8305 break;
8306 case RJUMP:
8307 rs1[i]=(source[i]>>21)&0x1f;
8308 rs2[i]=0;
8309 rt1[i]=0;
8310 rt2[i]=0;
5067f341 8311 // The JALR instruction writes to rd.
57871462 8312 if (op2&1) {
5067f341 8313 rt1[i]=(source[i]>>11)&0x1f;
57871462 8314 }
8315 rs2[i]=CCREG;
8316 break;
8317 case CJUMP:
8318 rs1[i]=(source[i]>>21)&0x1f;
8319 rs2[i]=(source[i]>>16)&0x1f;
8320 rt1[i]=0;
8321 rt2[i]=0;
8322 if(op&2) { // BGTZ/BLEZ
8323 rs2[i]=0;
8324 }
8325 us1[i]=rs1[i];
8326 us2[i]=rs2[i];
8327 likely[i]=op>>4;
8328 break;
8329 case SJUMP:
8330 rs1[i]=(source[i]>>21)&0x1f;
8331 rs2[i]=CCREG;
8332 rt1[i]=0;
8333 rt2[i]=0;
8334 us1[i]=rs1[i];
8335 if(op2&0x10) { // BxxAL
8336 rt1[i]=31;
8337 // NOTE: If the branch is not taken, r31 is still overwritten
8338 }
8339 likely[i]=(op2&2)>>1;
8340 break;
8341 case FJUMP:
8342 rs1[i]=FSREG;
8343 rs2[i]=CSREG;
8344 rt1[i]=0;
8345 rt2[i]=0;
8346 likely[i]=((source[i])>>17)&1;
8347 break;
8348 case ALU:
8349 rs1[i]=(source[i]>>21)&0x1f; // source
8350 rs2[i]=(source[i]>>16)&0x1f; // subtract amount
8351 rt1[i]=(source[i]>>11)&0x1f; // destination
8352 rt2[i]=0;
8353 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8354 us1[i]=rs1[i];us2[i]=rs2[i];
8355 }
8356 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8357 dep1[i]=rs1[i];dep2[i]=rs2[i];
8358 }
8359 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8360 dep1[i]=rs1[i];dep2[i]=rs2[i];
8361 }
8362 break;
8363 case MULTDIV:
8364 rs1[i]=(source[i]>>21)&0x1f; // source
8365 rs2[i]=(source[i]>>16)&0x1f; // divisor
8366 rt1[i]=HIREG;
8367 rt2[i]=LOREG;
8368 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8369 us1[i]=rs1[i];us2[i]=rs2[i];
8370 }
8371 break;
8372 case MOV:
8373 rs1[i]=0;
8374 rs2[i]=0;
8375 rt1[i]=0;
8376 rt2[i]=0;
8377 if(op2==0x10) rs1[i]=HIREG; // MFHI
8378 if(op2==0x11) rt1[i]=HIREG; // MTHI
8379 if(op2==0x12) rs1[i]=LOREG; // MFLO
8380 if(op2==0x13) rt1[i]=LOREG; // MTLO
8381 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8382 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8383 dep1[i]=rs1[i];
8384 break;
8385 case SHIFT:
8386 rs1[i]=(source[i]>>16)&0x1f; // target of shift
8387 rs2[i]=(source[i]>>21)&0x1f; // shift amount
8388 rt1[i]=(source[i]>>11)&0x1f; // destination
8389 rt2[i]=0;
8390 // DSLLV/DSRLV/DSRAV are 64-bit
8391 if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8392 break;
8393 case SHIFTIMM:
8394 rs1[i]=(source[i]>>16)&0x1f;
8395 rs2[i]=0;
8396 rt1[i]=(source[i]>>11)&0x1f;
8397 rt2[i]=0;
8398 imm[i]=(source[i]>>6)&0x1f;
8399 // DSxx32 instructions
8400 if(op2>=0x3c) imm[i]|=0x20;
8401 // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8402 if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8403 break;
8404 case COP0:
8405 rs1[i]=0;
8406 rs2[i]=0;
8407 rt1[i]=0;
8408 rt2[i]=0;
8409 if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8410 if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8411 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8412 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8413 break;
8414 case COP1:
b9b61529 8415 case COP2:
57871462 8416 rs1[i]=0;
8417 rs2[i]=0;
8418 rt1[i]=0;
8419 rt2[i]=0;
8420 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8421 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8422 if(op2==5) us1[i]=rs1[i]; // DMTC1
8423 rs2[i]=CSREG;
8424 break;
8425 case C1LS:
8426 rs1[i]=(source[i]>>21)&0x1F;
8427 rs2[i]=CSREG;
8428 rt1[i]=0;
8429 rt2[i]=0;
8430 imm[i]=(short)source[i];
8431 break;
b9b61529 8432 case C2LS:
8433 rs1[i]=(source[i]>>21)&0x1F;
8434 rs2[i]=0;
8435 rt1[i]=0;
8436 rt2[i]=0;
8437 imm[i]=(short)source[i];
8438 break;
57871462 8439 case FLOAT:
8440 case FCONV:
8441 rs1[i]=0;
8442 rs2[i]=CSREG;
8443 rt1[i]=0;
8444 rt2[i]=0;
8445 break;
8446 case FCOMP:
8447 rs1[i]=FSREG;
8448 rs2[i]=CSREG;
8449 rt1[i]=FSREG;
8450 rt2[i]=0;
8451 break;
8452 case SYSCALL:
7139f3c8 8453 case HLECALL:
1e973cb0 8454 case INTCALL:
57871462 8455 rs1[i]=CCREG;
8456 rs2[i]=0;
8457 rt1[i]=0;
8458 rt2[i]=0;
8459 break;
8460 default:
8461 rs1[i]=0;
8462 rs2[i]=0;
8463 rt1[i]=0;
8464 rt2[i]=0;
8465 }
8466 /* Calculate branch target addresses */
8467 if(type==UJUMP)
8468 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8469 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8470 ba[i]=start+i*4+8; // Ignore never taken branch
8471 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8472 ba[i]=start+i*4+8; // Ignore never taken branch
8473 else if(type==CJUMP||type==SJUMP||type==FJUMP)
8474 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8475 else ba[i]=-1;
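    /* Worked example: for CJUMP/SJUMP/FJUMP the target is
         ba[i] = (branch PC) + 4 + sign_extend(imm16)*4
       since (source<<16)>>14 (arithmetic shift) both sign-extends the low 16
       bits and multiplies by 4; an imm16 of 0xFFFF gives -4, i.e. a branch to
       itself (the classic idle loop). For J/JAL the low 26 bits are shifted
       left 2 and combined with the top 4 bits of PC+4. */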
8476 /* Is this the end of the block? */
8477 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
26869094 8478#ifdef PCSX
8479 // check for link register access in delay slot
8480 int rt1_=rt1[i-1];
8481 if(rt1_!=0&&(rs1[i]==rt1_||rs2[i]==rt1_||rt1[i]==rt1_||rt2[i]==rt1_)) {
8482 printf("link access in delay slot @%08x (%08x)\n", addr + i*4, addr);
8483 ba[i-1]=-1;
8484 itype[i-1]=INTCALL;
8485 done=2;
8486 }
8487 else
8488#endif
5067f341 8489 if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
1e973cb0 8490 done=2;
57871462 8491 }
8492 else {
8493 if(stop_after_jal) done=1;
8494 // Stop on BREAK
8495 if((source[i+1]&0xfc00003f)==0x0d) done=1;
8496 }
8497 // Don't recompile stuff that's already compiled
8498 if(check_addr(start+i*4+4)) done=1;
8499 // Don't get too close to the limit
8500 if(i>MAXBLOCK/2) done=1;
8501 }
75dec299 8502 if(itype[i]==SYSCALL&&stop_after_jal) done=1;
1e973cb0 8503 if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8504 if(done==2) {
8505 // Does the block continue due to a branch?
8506 for(j=i-1;j>=0;j--)
8507 {
8508 if(ba[j]==start+i*4+4) done=j=0;
8509 if(ba[j]==start+i*4+8) done=j=0;
8510 }
8511 }
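    /* Hedged note on 'done': done=1 ends pass 1 outright (block size limit,
       BREAK, already-compiled code, junk), while done=2 is a tentative end
       set after an unconditional jump or HLECALL/INTCALL; the loop just above
       clears it again if an earlier branch in this block targets the code
       right after the would-be end, since that code is still reachable and
       should stay in the same block. */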
75dec299 8512 //assert(i<MAXBLOCK-1);
57871462 8513 if(start+i*4==pagelimit-4) done=1;
8514 assert(start+i*4<pagelimit);
8515 if (i==MAXBLOCK-1) done=1;
8516 // Stop if we're compiling junk
8517 if(itype[i]==NI&&opcode[i]==0x11) {
8518 done=stop_after_jal=1;
8519 printf("Disabled speculative precompilation\n");
8520 }
8521 }
8522 slen=i;
8523 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8524 if(start+i*4==pagelimit) {
8525 itype[i-1]=SPAN;
8526 }
8527 }
8528 assert(slen>0);
8529
8530 /* Pass 2 - Register dependencies and branch targets */
8531
8532 unneeded_registers(0,slen-1,0);
8533
8534 /* Pass 3 - Register allocation */
8535
8536 struct regstat current; // Current register allocations/status
8537 current.is32=1;
8538 current.dirty=0;
8539 current.u=unneeded_reg[0];
8540 current.uu=unneeded_reg_upper[0];
8541 clear_all_regs(current.regmap);
8542 alloc_reg(&current,0,CCREG);
8543 dirty_reg(&current,CCREG);
8544 current.isconst=0;
8545 current.wasconst=0;
8546 int ds=0;
8547 int cc=0;
8548 int hr;
6ebf4adf 8549
8550#ifndef FORCE32
57871462 8551 provisional_32bit();
6ebf4adf 8552#endif
57871462 8553 if((u_int)addr&1) {
8554 // First instruction is delay slot
8555 cc=-1;
8556 bt[1]=1;
8557 ds=1;
8558 unneeded_reg[0]=1;
8559 unneeded_reg_upper[0]=1;
8560 current.regmap[HOST_BTREG]=BTREG;
8561 }
8562
8563 for(i=0;i<slen;i++)
8564 {
8565 if(bt[i])
8566 {
8567 int hr;
8568 for(hr=0;hr<HOST_REGS;hr++)
8569 {
8570 // Is this really necessary?
8571 if(current.regmap[hr]==0) current.regmap[hr]=-1;
8572 }
8573 current.isconst=0;
8574 }
8575 if(i>1)
8576 {
8577 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8578 {
8579 if(rs1[i-2]==0||rs2[i-2]==0)
8580 {
8581 if(rs1[i-2]) {
8582 current.is32|=1LL<<rs1[i-2];
8583 int hr=get_reg(current.regmap,rs1[i-2]|64);
8584 if(hr>=0) current.regmap[hr]=-1;
8585 }
8586 if(rs2[i-2]) {
8587 current.is32|=1LL<<rs2[i-2];
8588 int hr=get_reg(current.regmap,rs2[i-2]|64);
8589 if(hr>=0) current.regmap[hr]=-1;
8590 }
8591 }
8592 }
8593 }
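    /* Hedged: the check above looks two instructions back for a BNE/BNEL
       against r0. Falling through to here means that branch was not taken,
       so the compared register must have held zero and therefore fits in
       32 bits; its is32 bit is set and any host register caching the upper
       half is released. */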
6ebf4adf 8594#ifndef FORCE32
57871462 8595 // If something jumps here with 64-bit values
8596 // then promote those registers to 64 bits
8597 if(bt[i])
8598 {
8599 uint64_t temp_is32=current.is32;
8600 for(j=i-1;j>=0;j--)
8601 {
8602 if(ba[j]==start+i*4)
8603 temp_is32&=branch_regs[j].is32;
8604 }
8605 for(j=i;j<slen;j++)
8606 {
8607 if(ba[j]==start+i*4)
8608 //temp_is32=1;
8609 temp_is32&=p32[j];
8610 }
8611 if(temp_is32!=current.is32) {
8612 //printf("dumping 32-bit regs (%x)\n",start+i*4);
8613 #ifdef DESTRUCTIVE_WRITEBACK
8614 for(hr=0;hr<HOST_REGS;hr++)
8615 {
8616 int r=current.regmap[hr];
8617 if(r>0&&r<64)
8618 {
8619 if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8620 temp_is32|=1LL<<r;
8621 //printf("restore %d\n",r);
8622 }
8623 }
8624 }
8625 #endif
8626 current.is32=temp_is32;
8627 }
8628 }
6ebf4adf 8629#else
24385cae 8630 current.is32=-1LL;
8631#endif
8632
57871462 8633 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8634 regs[i].wasconst=current.isconst;
8635 regs[i].was32=current.is32;
8636 regs[i].wasdirty=current.dirty;
6ebf4adf 8637 #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
57871462 8638 // To change a dirty register from 32 to 64 bits, we must write
8639 // it out during the previous cycle (for branches, 2 cycles)
8640 if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8641 {
8642 uint64_t temp_is32=current.is32;
8643 for(j=i-1;j>=0;j--)
8644 {
8645 if(ba[j]==start+i*4+4)
8646 temp_is32&=branch_regs[j].is32;
8647 }
8648 for(j=i;j<slen;j++)
8649 {
8650 if(ba[j]==start+i*4+4)
8651 //temp_is32=1;
8652 temp_is32&=p32[j];
8653 }
8654 if(temp_is32!=current.is32) {
8655 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8656 for(hr=0;hr<HOST_REGS;hr++)
8657 {
8658 int r=current.regmap[hr];
8659 if(r>0)
8660 {
8661 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8662 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8663 {
8664 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8665 {
8666 //printf("dump %d/r%d\n",hr,r);
8667 current.regmap[hr]=-1;
8668 if(get_reg(current.regmap,r|64)>=0)
8669 current.regmap[get_reg(current.regmap,r|64)]=-1;
8670 }
8671 }
8672 }
8673 }
8674 }
8675 }
8676 }
8677 else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8678 {
8679 uint64_t temp_is32=current.is32;
8680 for(j=i-1;j>=0;j--)
8681 {
8682 if(ba[j]==start+i*4+8)
8683 temp_is32&=branch_regs[j].is32;
8684 }
8685 for(j=i;j<slen;j++)
8686 {
8687 if(ba[j]==start+i*4+8)
8688 //temp_is32=1;
8689 temp_is32&=p32[j];
8690 }
8691 if(temp_is32!=current.is32) {
8692 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8693 for(hr=0;hr<HOST_REGS;hr++)
8694 {
8695 int r=current.regmap[hr];
8696 if(r>0)
8697 {
8698 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8699 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8700 {
8701 //printf("dump %d/r%d\n",hr,r);
8702 current.regmap[hr]=-1;
8703 if(get_reg(current.regmap,r|64)>=0)
8704 current.regmap[get_reg(current.regmap,r|64)]=-1;
8705 }
8706 }
8707 }
8708 }
8709 }
8710 }
8711 #endif
8712 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8713 if(i+1<slen) {
8714 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8715 current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8716 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8717 current.u|=1;
8718 current.uu|=1;
8719 } else {
8720 current.u=1;
8721 current.uu=1;
8722 }
8723 } else {
8724 if(i+1<slen) {
8725 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8726 current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8727 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8728 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8729 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8730 current.u|=1;
8731 current.uu|=1;
8732 } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8733 }
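    /* Hedged: current.u / current.uu are bitmasks of guest registers whose
       lower / upper 32 bits are dead ("unneeded") after this point. Source
       registers of this instruction (and of the delay slot, for branches)
       are cleared from the masks, and bit 0 is forced on because r0 never
       needs to be preserved. */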
8734 is_ds[i]=ds;
8735 if(ds) {
8736 ds=0; // Skip delay slot, already allocated as part of branch
8737 // ...but we need to alloc it in case something jumps here
8738 if(i+1<slen) {
8739 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8740 current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8741 }else{
8742 current.u=branch_unneeded_reg[i-1];
8743 current.uu=branch_unneeded_reg_upper[i-1];
8744 }
8745 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8746 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8747 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8748 current.u|=1;
8749 current.uu|=1;
8750 struct regstat temp;
8751 memcpy(&temp,&current,sizeof(current));
8752 temp.wasdirty=temp.dirty;
8753 temp.was32=temp.is32;
8754 // TODO: Take into account unconditional branches, as below
8755 delayslot_alloc(&temp,i);
8756 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8757 regs[i].wasdirty=temp.wasdirty;
8758 regs[i].was32=temp.was32;
8759 regs[i].dirty=temp.dirty;
8760 regs[i].is32=temp.is32;
8761 regs[i].isconst=0;
8762 regs[i].wasconst=0;
8763 current.isconst=0;
8764 // Create entry (branch target) regmap
8765 for(hr=0;hr<HOST_REGS;hr++)
8766 {
8767 int r=temp.regmap[hr];
8768 if(r>=0) {
8769 if(r!=regmap_pre[i][hr]) {
8770 regs[i].regmap_entry[hr]=-1;
8771 }
8772 else
8773 {
8774 if(r<64){
8775 if((current.u>>r)&1) {
8776 regs[i].regmap_entry[hr]=-1;
8777 regs[i].regmap[hr]=-1;
8778 //Don't clear regs in the delay slot as the branch might need them
8779 //current.regmap[hr]=-1;
8780 }else
8781 regs[i].regmap_entry[hr]=r;
8782 }
8783 else {
8784 if((current.uu>>(r&63))&1) {
8785 regs[i].regmap_entry[hr]=-1;
8786 regs[i].regmap[hr]=-1;
8787 //Don't clear regs in the delay slot as the branch might need them
8788 //current.regmap[hr]=-1;
8789 }else
8790 regs[i].regmap_entry[hr]=r;
8791 }
8792 }
8793 } else {
8794 // First instruction expects CCREG to be allocated
8795 if(i==0&&hr==HOST_CCREG)
8796 regs[i].regmap_entry[hr]=CCREG;
8797 else
8798 regs[i].regmap_entry[hr]=-1;
8799 }
8800 }
8801 }
8802 else { // Not delay slot
8803 switch(itype[i]) {
8804 case UJUMP:
8805 //current.isconst=0; // DEBUG
8806 //current.wasconst=0; // DEBUG
8807 //regs[i].wasconst=0; // DEBUG
8808 clear_const(&current,rt1[i]);
8809 alloc_cc(&current,i);
8810 dirty_reg(&current,CCREG);
8811 if (rt1[i]==31) {
8812 alloc_reg(&current,i,31);
8813 dirty_reg(&current,31);
68b3faee 8814 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
076655d1 8815 assert(rt1[i+1]!=rt1[i]);
57871462 8816 #ifdef REG_PREFETCH
8817 alloc_reg(&current,i,PTEMP);
8818 #endif
8819 //current.is32|=1LL<<rt1[i];
8820 }
e1190b87 8821 ooo[i]=1;
57871462 8822 delayslot_alloc(&current,i+1);
8823 //current.isconst=0; // DEBUG
8824 ds=1;
8825 //printf("i=%d, isconst=%x\n",i,current.isconst);
8826 break;
8827 case RJUMP:
8828 //current.isconst=0;
8829 //current.wasconst=0;
8830 //regs[i].wasconst=0;
8831 clear_const(&current,rs1[i]);
8832 clear_const(&current,rt1[i]);
8833 alloc_cc(&current,i);
8834 dirty_reg(&current,CCREG);
8835 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8836 alloc_reg(&current,i,rs1[i]);
5067f341 8837 if (rt1[i]!=0) {
8838 alloc_reg(&current,i,rt1[i]);
8839 dirty_reg(&current,rt1[i]);
68b3faee 8840 assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
076655d1 8841 assert(rt1[i+1]!=rt1[i]);
57871462 8842 #ifdef REG_PREFETCH
8843 alloc_reg(&current,i,PTEMP);
8844 #endif
8845 }
8846 #ifdef USE_MINI_HT
8847 if(rs1[i]==31) { // JALR
8848 alloc_reg(&current,i,RHASH);
8849 #ifndef HOST_IMM_ADDR32
8850 alloc_reg(&current,i,RHTBL);
8851 #endif
8852 }
8853 #endif
8854 delayslot_alloc(&current,i+1);
8855 } else {
8856 // The delay slot overwrites our source register,
8857 // allocate a temporary register to hold the old value.
8858 current.isconst=0;
8859 current.wasconst=0;
8860 regs[i].wasconst=0;
8861 delayslot_alloc(&current,i+1);
8862 current.isconst=0;
8863 alloc_reg(&current,i,RTEMP);
8864 }
8865 //current.isconst=0; // DEBUG
e1190b87 8866 ooo[i]=1;
57871462 8867 ds=1;
8868 break;
8869 case CJUMP:
8870 //current.isconst=0;
8871 //current.wasconst=0;
8872 //regs[i].wasconst=0;
8873 clear_const(&current,rs1[i]);
8874 clear_const(&current,rs2[i]);
8875 if((opcode[i]&0x3E)==4) // BEQ/BNE
8876 {
8877 alloc_cc(&current,i);
8878 dirty_reg(&current,CCREG);
8879 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8880 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8881 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8882 {
8883 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8884 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8885 }
8886 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8887 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8888 // The delay slot overwrites one of our conditions.
8889 // Allocate the branch condition registers instead.
57871462 8890 current.isconst=0;
8891 current.wasconst=0;
8892 regs[i].wasconst=0;
8893 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8894 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8895 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8896 {
8897 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8898 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8899 }
8900 }
e1190b87 8901 else
8902 {
8903 ooo[i]=1;
8904 delayslot_alloc(&current,i+1);
8905 }
57871462 8906 }
8907 else
8908 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8909 {
8910 alloc_cc(&current,i);
8911 dirty_reg(&current,CCREG);
8912 alloc_reg(&current,i,rs1[i]);
8913 if(!(current.is32>>rs1[i]&1))
8914 {
8915 alloc_reg64(&current,i,rs1[i]);
8916 }
8917 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8918 // The delay slot overwrites one of our conditions.
8919 // Allocate the branch condition registers instead.
57871462 8920 current.isconst=0;
8921 current.wasconst=0;
8922 regs[i].wasconst=0;
8923 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8924 if(!((current.is32>>rs1[i])&1))
8925 {
8926 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8927 }
8928 }
e1190b87 8929 else
8930 {
8931 ooo[i]=1;
8932 delayslot_alloc(&current,i+1);
8933 }
57871462 8934 }
8935 else
8936 // Don't alloc the delay slot yet because we might not execute it
8937 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8938 {
8939 current.isconst=0;
8940 current.wasconst=0;
8941 regs[i].wasconst=0;
8942 alloc_cc(&current,i);
8943 dirty_reg(&current,CCREG);
8944 alloc_reg(&current,i,rs1[i]);
8945 alloc_reg(&current,i,rs2[i]);
8946 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8947 {
8948 alloc_reg64(&current,i,rs1[i]);
8949 alloc_reg64(&current,i,rs2[i]);
8950 }
8951 }
8952 else
8953 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8954 {
8955 current.isconst=0;
8956 current.wasconst=0;
8957 regs[i].wasconst=0;
8958 alloc_cc(&current,i);
8959 dirty_reg(&current,CCREG);
8960 alloc_reg(&current,i,rs1[i]);
8961 if(!(current.is32>>rs1[i]&1))
8962 {
8963 alloc_reg64(&current,i,rs1[i]);
8964 }
8965 }
8966 ds=1;
8967 //current.isconst=0;
8968 break;
8969 case SJUMP:
8970 //current.isconst=0;
8971 //current.wasconst=0;
8972 //regs[i].wasconst=0;
8973 clear_const(&current,rs1[i]);
8974 clear_const(&current,rt1[i]);
8975 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8976 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8977 {
8978 alloc_cc(&current,i);
8979 dirty_reg(&current,CCREG);
8980 alloc_reg(&current,i,rs1[i]);
8981 if(!(current.is32>>rs1[i]&1))
8982 {
8983 alloc_reg64(&current,i,rs1[i]);
8984 }
8985 if (rt1[i]==31) { // BLTZAL/BGEZAL
8986 alloc_reg(&current,i,31);
8987 dirty_reg(&current,31);
57871462 8988 //#ifdef REG_PREFETCH
8989 //alloc_reg(&current,i,PTEMP);
8990 //#endif
8991 //current.is32|=1LL<<rt1[i];
8992 }
e1190b87 8993 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
8994 ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
57871462 8995 // Allocate the branch condition registers instead.
57871462 8996 current.isconst=0;
8997 current.wasconst=0;
8998 regs[i].wasconst=0;
8999 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9000 if(!((current.is32>>rs1[i])&1))
9001 {
9002 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9003 }
9004 }
e1190b87 9005 else
9006 {
9007 ooo[i]=1;
9008 delayslot_alloc(&current,i+1);
9009 }
57871462 9010 }
9011 else
9012 // Don't alloc the delay slot yet because we might not execute it
9013 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9014 {
9015 current.isconst=0;
9016 current.wasconst=0;
9017 regs[i].wasconst=0;
9018 alloc_cc(&current,i);
9019 dirty_reg(&current,CCREG);
9020 alloc_reg(&current,i,rs1[i]);
9021 if(!(current.is32>>rs1[i]&1))
9022 {
9023 alloc_reg64(&current,i,rs1[i]);
9024 }
9025 }
9026 ds=1;
9027 //current.isconst=0;
9028 break;
9029 case FJUMP:
9030 current.isconst=0;
9031 current.wasconst=0;
9032 regs[i].wasconst=0;
9033 if(likely[i]==0) // BC1F/BC1T
9034 {
9035 // TODO: Theoretically we can run out of registers here on x86.
9036 // The delay slot can allocate up to six, and we need to check
9037 // CSREG before executing the delay slot. Possibly we can drop
9038 // the cycle count and then reload it after checking that the
9039 // FPU is in a usable state, or don't do out-of-order execution.
9040 alloc_cc(&current,i);
9041 dirty_reg(&current,CCREG);
9042 alloc_reg(&current,i,FSREG);
9043 alloc_reg(&current,i,CSREG);
9044 if(itype[i+1]==FCOMP) {
9045 // The delay slot overwrites the branch condition.
9046 // Allocate the branch condition registers instead.
57871462 9047 alloc_cc(&current,i);
9048 dirty_reg(&current,CCREG);
9049 alloc_reg(&current,i,CSREG);
9050 alloc_reg(&current,i,FSREG);
9051 }
9052 else {
e1190b87 9053 ooo[i]=1;
57871462 9054 delayslot_alloc(&current,i+1);
9055 alloc_reg(&current,i+1,CSREG);
9056 }
9057 }
9058 else
9059 // Don't alloc the delay slot yet because we might not execute it
9060 if(likely[i]) // BC1FL/BC1TL
9061 {
9062 alloc_cc(&current,i);
9063 dirty_reg(&current,CCREG);
9064 alloc_reg(&current,i,CSREG);
9065 alloc_reg(&current,i,FSREG);
9066 }
9067 ds=1;
9068 current.isconst=0;
9069 break;
9070 case IMM16:
9071 imm16_alloc(&current,i);
9072 break;
9073 case LOAD:
9074 case LOADLR:
9075 load_alloc(&current,i);
9076 break;
9077 case STORE:
9078 case STORELR:
9079 store_alloc(&current,i);
9080 break;
9081 case ALU:
9082 alu_alloc(&current,i);
9083 break;
9084 case SHIFT:
9085 shift_alloc(&current,i);
9086 break;
9087 case MULTDIV:
9088 multdiv_alloc(&current,i);
9089 break;
9090 case SHIFTIMM:
9091 shiftimm_alloc(&current,i);
9092 break;
9093 case MOV:
9094 mov_alloc(&current,i);
9095 break;
9096 case COP0:
9097 cop0_alloc(&current,i);
9098 break;
9099 case COP1:
b9b61529 9100 case COP2:
57871462 9101 cop1_alloc(&current,i);
9102 break;
9103 case C1LS:
9104 c1ls_alloc(&current,i);
9105 break;
b9b61529 9106 case C2LS:
9107 c2ls_alloc(&current,i);
9108 break;
9109 case C2OP:
9110 c2op_alloc(&current,i);
9111 break;
57871462 9112 case FCONV:
9113 fconv_alloc(&current,i);
9114 break;
9115 case FLOAT:
9116 float_alloc(&current,i);
9117 break;
9118 case FCOMP:
9119 fcomp_alloc(&current,i);
9120 break;
9121 case SYSCALL:
7139f3c8 9122 case HLECALL:
1e973cb0 9123 case INTCALL:
57871462 9124 syscall_alloc(&current,i);
9125 break;
9126 case SPAN:
9127 pagespan_alloc(&current,i);
9128 break;
9129 }
9130
9131 // Drop the upper half of registers that have become 32-bit
9132 current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9133 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9134 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9135 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9136 current.uu|=1;
9137 } else {
9138 current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9139 current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9140 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9141 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9142 current.uu|=1;
9143 }
9144
9145 // Create entry (branch target) regmap
9146 for(hr=0;hr<HOST_REGS;hr++)
9147 {
9148 int r,or,er;
9149 r=current.regmap[hr];
9150 if(r>=0) {
9151 if(r!=regmap_pre[i][hr]) {
9152 // TODO: delay slot (?)
9153 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9154 if(or<0||(r&63)>=TEMPREG){
9155 regs[i].regmap_entry[hr]=-1;
9156 }
9157 else
9158 {
9159 // Just move it to a different register
9160 regs[i].regmap_entry[hr]=r;
9161 // If it was dirty before, it's still dirty
9162 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9163 }
9164 }
9165 else
9166 {
9167 // Unneeded
9168 if(r==0){
9169 regs[i].regmap_entry[hr]=0;
9170 }
9171 else
9172 if(r<64){
9173 if((current.u>>r)&1) {
9174 regs[i].regmap_entry[hr]=-1;
9175 //regs[i].regmap[hr]=-1;
9176 current.regmap[hr]=-1;
9177 }else
9178 regs[i].regmap_entry[hr]=r;
9179 }
9180 else {
9181 if((current.uu>>(r&63))&1) {
9182 regs[i].regmap_entry[hr]=-1;
9183 //regs[i].regmap[hr]=-1;
9184 current.regmap[hr]=-1;
9185 }else
9186 regs[i].regmap_entry[hr]=r;
9187 }
9188 }
9189 } else {
9190 // Branches expect CCREG to be allocated at the target
9191 if(regmap_pre[i][hr]==CCREG)
9192 regs[i].regmap_entry[hr]=CCREG;
9193 else
9194 regs[i].regmap_entry[hr]=-1;
9195 }
9196 }
9197 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9198 }
9199 /* Branch post-alloc */
9200 if(i>0)
9201 {
9202 current.was32=current.is32;
9203 current.wasdirty=current.dirty;
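      /* Hedged: branch_regs[i-1] records the register state for the taken
         path of the branch at i-1 (with its delay slot folded in), while
         'current' goes on to describe the fall-through path. */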
9204 switch(itype[i-1]) {
9205 case UJUMP:
9206 memcpy(&branch_regs[i-1],&current,sizeof(current));
9207 branch_regs[i-1].isconst=0;
9208 branch_regs[i-1].wasconst=0;
9209 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9210 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9211 alloc_cc(&branch_regs[i-1],i-1);
9212 dirty_reg(&branch_regs[i-1],CCREG);
9213 if(rt1[i-1]==31) { // JAL
9214 alloc_reg(&branch_regs[i-1],i-1,31);
9215 dirty_reg(&branch_regs[i-1],31);
9216 branch_regs[i-1].is32|=1LL<<31;
9217 }
9218 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9219 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9220 break;
9221 case RJUMP:
9222 memcpy(&branch_regs[i-1],&current,sizeof(current));
9223 branch_regs[i-1].isconst=0;
9224 branch_regs[i-1].wasconst=0;
9225 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9226 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9227 alloc_cc(&branch_regs[i-1],i-1);
9228 dirty_reg(&branch_regs[i-1],CCREG);
9229 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
5067f341 9230 if(rt1[i-1]!=0) { // JALR
9231 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9232 dirty_reg(&branch_regs[i-1],rt1[i-1]);
9233 branch_regs[i-1].is32|=1LL<<rt1[i-1];
57871462 9234 }
9235 #ifdef USE_MINI_HT
9236 if(rs1[i-1]==31) { // JALR
9237 alloc_reg(&branch_regs[i-1],i-1,RHASH);
9238 #ifndef HOST_IMM_ADDR32
9239 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9240 #endif
9241 }
9242 #endif
9243 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9244 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9245 break;
9246 case CJUMP:
9247 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9248 {
9249 alloc_cc(&current,i-1);
9250 dirty_reg(&current,CCREG);
9251 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9252 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9253 // The delay slot overwrote one of our conditions
9254 // Delay slot goes after the test (in order)
9255 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9256 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9257 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9258 current.u|=1;
9259 current.uu|=1;
9260 delayslot_alloc(&current,i);
9261 current.isconst=0;
9262 }
9263 else
9264 {
9265 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9266 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9267 // Alloc the branch condition registers
9268 if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9269 if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9270 if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9271 {
9272 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9273 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9274 }
9275 }
9276 memcpy(&branch_regs[i-1],&current,sizeof(current));
9277 branch_regs[i-1].isconst=0;
9278 branch_regs[i-1].wasconst=0;
9279 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9280 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9281 }
9282 else
9283 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9284 {
9285 alloc_cc(&current,i-1);
9286 dirty_reg(&current,CCREG);
9287 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9288 // The delay slot overwrote the branch condition
9289 // Delay slot goes after the test (in order)
9290 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9291 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9292 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9293 current.u|=1;
9294 current.uu|=1;
9295 delayslot_alloc(&current,i);
9296 current.isconst=0;
9297 }
9298 else
9299 {
9300 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9301 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9302 // Alloc the branch condition register
9303 alloc_reg(&current,i-1,rs1[i-1]);
9304 if(!(current.is32>>rs1[i-1]&1))
9305 {
9306 alloc_reg64(&current,i-1,rs1[i-1]);
9307 }
9308 }
9309 memcpy(&branch_regs[i-1],&current,sizeof(current));
9310 branch_regs[i-1].isconst=0;
9311 branch_regs[i-1].wasconst=0;
9312 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9313 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9314 }
9315 else
9316 // Alloc the delay slot in case the branch is taken
9317 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9318 {
9319 memcpy(&branch_regs[i-1],&current,sizeof(current));
9320 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9321 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9322 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9323 alloc_cc(&branch_regs[i-1],i);
9324 dirty_reg(&branch_regs[i-1],CCREG);
9325 delayslot_alloc(&branch_regs[i-1],i);
9326 branch_regs[i-1].isconst=0;
9327 alloc_reg(&current,i,CCREG); // Not taken path
9328 dirty_reg(&current,CCREG);
9329 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9330 }
9331 else
9332 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9333 {
9334 memcpy(&branch_regs[i-1],&current,sizeof(current));
9335 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9336 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9337 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9338 alloc_cc(&branch_regs[i-1],i);
9339 dirty_reg(&branch_regs[i-1],CCREG);
9340 delayslot_alloc(&branch_regs[i-1],i);
9341 branch_regs[i-1].isconst=0;
9342 alloc_reg(&current,i,CCREG); // Not taken path
9343 dirty_reg(&current,CCREG);
9344 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9345 }
9346 break;
9347 case SJUMP:
9348 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9349 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9350 {
9351 alloc_cc(&current,i-1);
9352 dirty_reg(&current,CCREG);
9353 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9354 // The delay slot overwrote the branch condition
9355 // Delay slot goes after the test (in order)
9356 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9357 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9358 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9359 current.u|=1;
9360 current.uu|=1;
9361 delayslot_alloc(&current,i);
9362 current.isconst=0;
9363 }
9364 else
9365 {
9366 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9367 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9368 // Alloc the branch condition register
9369 alloc_reg(&current,i-1,rs1[i-1]);
9370 if(!(current.is32>>rs1[i-1]&1))
9371 {
9372 alloc_reg64(&current,i-1,rs1[i-1]);
9373 }
9374 }
9375 memcpy(&branch_regs[i-1],&current,sizeof(current));
9376 branch_regs[i-1].isconst=0;
9377 branch_regs[i-1].wasconst=0;
9378 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9379 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9380 }
9381 else
9382 // Alloc the delay slot in case the branch is taken
9383 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9384 {
9385 memcpy(&branch_regs[i-1],&current,sizeof(current));
9386 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9387 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9388 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9389 alloc_cc(&branch_regs[i-1],i);
9390 dirty_reg(&branch_regs[i-1],CCREG);
9391 delayslot_alloc(&branch_regs[i-1],i);
9392 branch_regs[i-1].isconst=0;
9393 alloc_reg(&current,i,CCREG); // Not taken path
9394 dirty_reg(&current,CCREG);
9395 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9396 }
9397 // FIXME: BLTZAL/BGEZAL
9398 if(opcode2[i-1]&0x10) { // BxxZAL
9399 alloc_reg(&branch_regs[i-1],i-1,31);
9400 dirty_reg(&branch_regs[i-1],31);
9401 branch_regs[i-1].is32|=1LL<<31;
9402 }
9403 break;
9404 case FJUMP:
9405 if(likely[i-1]==0) // BC1F/BC1T
9406 {
9407 alloc_cc(&current,i-1);
9408 dirty_reg(&current,CCREG);
9409 if(itype[i]==FCOMP) {
9410 // The delay slot overwrote the branch condition
9411 // Delay slot goes after the test (in order)
9412 delayslot_alloc(&current,i);
9413 current.isconst=0;
9414 }
9415 else
9416 {
9417 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9418 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9419 // Alloc the branch condition register
9420 alloc_reg(&current,i-1,FSREG);
9421 }
9422 memcpy(&branch_regs[i-1],&current,sizeof(current));
9423 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9424 }
9425 else // BC1FL/BC1TL
9426 {
9427 // Alloc the delay slot in case the branch is taken
9428 memcpy(&branch_regs[i-1],&current,sizeof(current));
9429 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9430 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9431 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9432 alloc_cc(&branch_regs[i-1],i);
9433 dirty_reg(&branch_regs[i-1],CCREG);
9434 delayslot_alloc(&branch_regs[i-1],i);
9435 branch_regs[i-1].isconst=0;
9436 alloc_reg(&current,i,CCREG); // Not taken path
9437 dirty_reg(&current,CCREG);
9438 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9439 }
9440 break;
9441 }
9442
9443 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9444 {
9445 if(rt1[i-1]==31) // JAL/JALR
9446 {
9447 // Subroutine call will return here, don't alloc any registers
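        // (Once the callee returns, control presumably re-enters this block
        //  through the dispatcher, so nothing can be assumed about host
        //  register contents here; the map is cleared below and only the
        //  cycle count register is re-allocated.)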
9448 current.is32=1;
9449 current.dirty=0;
9450 clear_all_regs(current.regmap);
9451 alloc_reg(&current,i,CCREG);
9452 dirty_reg(&current,CCREG);
9453 }
9454 else if(i+1<slen)
9455 {
9456 // Internal branch will jump here, match registers to caller
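        // The first branch found below that targets this address seeds the
        // register map; the while loop then intersects it with any other
        // branches to the same address, dropping host registers whose
        // mappings (or 32-bit/dirty state) disagree.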
9457 current.is32=0x3FFFFFFFFLL;
9458 current.dirty=0;
9459 clear_all_regs(current.regmap);
9460 alloc_reg(&current,i,CCREG);
9461 dirty_reg(&current,CCREG);
9462 for(j=i-1;j>=0;j--)
9463 {
9464 if(ba[j]==start+i*4+4) {
9465 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9466 current.is32=branch_regs[j].is32;
9467 current.dirty=branch_regs[j].dirty;
9468 break;
9469 }
9470 }
9471 while(j>=0) {
9472 if(ba[j]==start+i*4+4) {
9473 for(hr=0;hr<HOST_REGS;hr++) {
9474 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9475 current.regmap[hr]=-1;
9476 }
9477 current.is32&=branch_regs[j].is32;
9478 current.dirty&=branch_regs[j].dirty;
9479 }
9480 }
9481 j--;
9482 }
9483 }
9484 }
9485 }
9486
9487 // Count cycles in between branches
9488 ccadj[i]=cc;
7139f3c8 9489 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
57871462 9490 {
9491 cc=0;
9492 }
fb407447 9493#ifdef PCSX
9494 else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
9495 {
9496 cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9497 }
9498 else if(itype[i]==C2LS)
9499 {
9500 cc+=4;
9501 }
9502#endif
57871462 9503 else
9504 {
9505 cc++;
9506 }
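    // ccadj[i] ends up holding the cycles accumulated since the last branch,
    // sampled before this instruction's own cost is added. The branch/exit
    // code later charges CLOCK_DIVIDER*(ccadj+1) to CCREG, so under PCSX a
    // plain instruction effectively costs 1, a store or C1LS 2, and a C2LS 4.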
9507
9508 flush_dirty_uppers(&current);
9509 if(!is_ds[i]) {
9510 regs[i].is32=current.is32;
9511 regs[i].dirty=current.dirty;
9512 regs[i].isconst=current.isconst;
9513 memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9514 }
9515 for(hr=0;hr<HOST_REGS;hr++) {
9516 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9517 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9518 regs[i].wasconst&=~(1<<hr);
9519 }
9520 }
9521 }
9522 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9523 }
9524
9525 /* Pass 4 - Cull unused host registers */
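 // Rough shape of this pass, as read from the loop below: scan the block
 // backwards keeping a per-host-register "needed" bitmask (nr). At branches
 // nr is reseeded from what the target's entry map expects; source registers
 // of the current instruction (and of a branch's delay slot) set bits, while
 // overwritten destinations clear them. The mask is saved in needed_reg[]
 // and anything not needed gets its host register freed for later passes.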
9526
9527 uint64_t nr=0;
9528
9529 for (i=slen-1;i>=0;i--)
9530 {
9531 int hr;
9532 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9533 {
9534 if(ba[i]<start || ba[i]>=(start+slen*4))
9535 {
9536 // Branch out of this block, don't need anything
9537 nr=0;
9538 }
9539 else
9540 {
9541 // Internal branch
9542 // Need whatever matches the target
9543 nr=0;
9544 int t=(ba[i]-start)>>2;
9545 for(hr=0;hr<HOST_REGS;hr++)
9546 {
9547 if(regs[i].regmap_entry[hr]>=0) {
9548 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9549 }
9550 }
9551 }
9552 // Conditional branch may need registers for following instructions
9553 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9554 {
9555 if(i<slen-2) {
9556 nr|=needed_reg[i+2];
9557 for(hr=0;hr<HOST_REGS;hr++)
9558 {
9559 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9560 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9561 }
9562 }
9563 }
9564 // Don't need stuff which is overwritten
9565 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9566 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9567 // Merge in delay slot
9568 for(hr=0;hr<HOST_REGS;hr++)
9569 {
9570 if(!likely[i]) {
9571 // These are overwritten unless the branch is "likely"
9572 // and the delay slot is nullified if not taken
9573 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9574 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9575 }
9576 if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9577 if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9578 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9579 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9580 if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9581 if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9582 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9583 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9584 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9585 if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9586 if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9587 }
9588 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9589 if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9590 if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9591 }
b9b61529 9592 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
57871462 9593 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9594 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9595 }
9596 }
9597 }
1e973cb0 9598 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 9599 {
9600 // SYSCALL instruction (software interrupt)
9601 nr=0;
9602 }
9603 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9604 {
9605 // ERET instruction (return from interrupt)
9606 nr=0;
9607 }
9608 else // Non-branch
9609 {
9610 if(i<slen-1) {
9611 for(hr=0;hr<HOST_REGS;hr++) {
9612 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9613 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9614 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9615 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9616 }
9617 }
9618 }
9619 for(hr=0;hr<HOST_REGS;hr++)
9620 {
9621 // Overwritten registers are not needed
9622 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9623 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9624 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9625 // Source registers are needed
9626 if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9627 if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9628 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9629 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9630 if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9631 if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9632 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9633 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9634 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9635 if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9636 if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9637 }
9638 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9639 if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9640 if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9641 }
b9b61529 9642 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
57871462 9643 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9644 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9645 }
9646 // Don't store a register immediately after writing it,
9647 // as that may prevent dual-issue.
9648 // But do so if this is a branch target, otherwise we
9649 // might have to load the register before the branch.
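      // (e.g. an instruction that just produced the dirty value followed
      //  immediately by its writeback would form a dependent pair that
      //  presumably cannot dual-issue; deferring the store costs nothing
      //  unless this is a branch target, where the value might otherwise
      //  need to be reloaded.)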
9650 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9651 if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9652 (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9653 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9654 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9655 }
9656 if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9657 (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9658 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9659 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9660 }
9661 }
9662 }
9663 // Cycle count is needed at branches. Assume it is needed at the target too.
9664 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9665 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9666 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9667 }
9668 // Save it
9669 needed_reg[i]=nr;
9670
9671 // Deallocate unneeded registers
9672 for(hr=0;hr<HOST_REGS;hr++)
9673 {
9674 if(!((nr>>hr)&1)) {
9675 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9676 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9677 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9678 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9679 {
9680 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9681 {
9682 if(likely[i]) {
9683 regs[i].regmap[hr]=-1;
9684 regs[i].isconst&=~(1<<hr);
9685 if(i<slen-2) regmap_pre[i+2][hr]=-1;
9686 }
9687 }
9688 }
9689 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9690 {
9691 int d1=0,d2=0,map=0,temp=0;
9692 if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9693 {
9694 d1=dep1[i+1];
9695 d2=dep2[i+1];
9696 }
9697 if(using_tlb) {
9698 if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9699 itype[i+1]==STORE || itype[i+1]==STORELR ||
b9b61529 9700 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9701 map=TLREG;
9702 } else
b9b61529 9703 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9704 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 9705 map=INVCP;
9706 }
9707 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
b9b61529 9708 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9709 temp=FTEMP;
9710 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9711 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9712 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9713 (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9714 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9715 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9716 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9717 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9718 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9719 regs[i].regmap[hr]!=map )
9720 {
9721 regs[i].regmap[hr]=-1;
9722 regs[i].isconst&=~(1<<hr);
9723 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9724 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9725 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9726 (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9727 (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9728 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9729 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9730 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9731 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9732 branch_regs[i].regmap[hr]!=map)
9733 {
9734 branch_regs[i].regmap[hr]=-1;
9735 branch_regs[i].regmap_entry[hr]=-1;
9736 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9737 {
9738 if(!likely[i]&&i<slen-2) {
9739 regmap_pre[i+2][hr]=-1;
9740 }
9741 }
9742 }
9743 }
9744 }
9745 else
9746 {
9747 // Non-branch
9748 if(i>0)
9749 {
9750 int d1=0,d2=0,map=-1,temp=-1;
9751 if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9752 {
9753 d1=dep1[i];
9754 d2=dep2[i];
9755 }
9756 if(using_tlb) {
9757 if(itype[i]==LOAD || itype[i]==LOADLR ||
9758 itype[i]==STORE || itype[i]==STORELR ||
b9b61529 9759 itype[i]==C1LS || itype[i]==C2LS)
57871462 9760 map=TLREG;
b9b61529 9761 } else if(itype[i]==STORE || itype[i]==STORELR ||
9762 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 9763 map=INVCP;
9764 }
9765 if(itype[i]==LOADLR || itype[i]==STORELR ||
b9b61529 9766 itype[i]==C1LS || itype[i]==C2LS)
57871462 9767 temp=FTEMP;
9768 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9769 (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9770 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9771 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9772 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9773 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9774 {
9775 if(i<slen-1&&!is_ds[i]) {
9776 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9777 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9778 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9779 {
9780 printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9781 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9782 }
9783 regmap_pre[i+1][hr]=-1;
9784 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9785 }
9786 regs[i].regmap[hr]=-1;
9787 regs[i].isconst&=~(1<<hr);
9788 }
9789 }
9790 }
9791 }
9792 }
9793 }
9794
9795 /* Pass 5 - Pre-allocate registers */
9796
9797 // If a register is allocated during a loop, try to allocate it for the
9798 // entire loop, if possible. This avoids loading/storing registers
9799 // inside of the loop.
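 // Hypothetical example of what this is aiming for: in a loop such as
 //   target: lw r2,0(r4); addiu r2,r2,1; sw r2,0(r4); bne r4,r5,target
 // if r4 sits in some host register at the backward branch, try to keep that
 // same host register assigned to r4 (via f_regmap) at every instruction from
 // the target up to the branch, so the loop body never reloads or spills it.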
9800
9801 signed char f_regmap[HOST_REGS];
9802 clear_all_regs(f_regmap);
9803 for(i=0;i<slen-1;i++)
9804 {
9805 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9806 {
9807 if(ba[i]>=start && ba[i]<(start+i*4))
9808 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9809 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9810 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9811 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
b9b61529 9812 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9813 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
57871462 9814 {
9815 int t=(ba[i]-start)>>2;
9816 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9817 if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9818 for(hr=0;hr<HOST_REGS;hr++)
9819 {
9820 if(regs[i].regmap[hr]>64) {
9821 if(!((regs[i].dirty>>hr)&1))
9822 f_regmap[hr]=regs[i].regmap[hr];
9823 else f_regmap[hr]=-1;
9824 }
b372a952 9825 else if(regs[i].regmap[hr]>=0) {
9826 if(f_regmap[hr]!=regs[i].regmap[hr]) {
9827 // dealloc old register
9828 int n;
9829 for(n=0;n<HOST_REGS;n++)
9830 {
9831 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9832 }
9833 // and alloc new one
9834 f_regmap[hr]=regs[i].regmap[hr];
9835 }
9836 }
57871462 9837 if(branch_regs[i].regmap[hr]>64) {
9838 if(!((branch_regs[i].dirty>>hr)&1))
9839 f_regmap[hr]=branch_regs[i].regmap[hr];
9840 else f_regmap[hr]=-1;
9841 }
b372a952 9842 else if(branch_regs[i].regmap[hr]>=0) {
9843 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9844 // dealloc old register
9845 int n;
9846 for(n=0;n<HOST_REGS;n++)
9847 {
9848 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9849 }
9850 // and alloc new one
9851 f_regmap[hr]=branch_regs[i].regmap[hr];
9852 }
9853 }
e1190b87 9854 if(ooo[i]) {
9855 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
9856 f_regmap[hr]=branch_regs[i].regmap[hr];
9857 }else{
9858 if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
57871462 9859 f_regmap[hr]=branch_regs[i].regmap[hr];
9860 }
9861 // Avoid dirty->clean transition
e1190b87 9862 #ifdef DESTRUCTIVE_WRITEBACK
57871462 9863 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
e1190b87 9864 #endif
9865 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
9866 // case above, however it's always a good idea. We can't hoist the
9867 // load if the register was already allocated, so there's no point
9868 // wasting time analyzing most of these cases. It only "succeeds"
9869 // when the mapping was different and the load can be replaced with
9870 // a mov, which is of negligible benefit. So such cases are
9871 // skipped below.
57871462 9872 if(f_regmap[hr]>0) {
e1190b87 9873 if(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0) {
57871462 9874 int r=f_regmap[hr];
9875 for(j=t;j<=i;j++)
9876 {
9877 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9878 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9879 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9880 if(r>63) {
9881 // NB This can exclude the case where the upper-half
9882 // register is lower numbered than the lower-half
9883 // register. Not sure if it's worth fixing...
9884 if(get_reg(regs[j].regmap,r&63)<0) break;
e1190b87 9885 if(get_reg(regs[j].regmap_entry,r&63)<0) break;
57871462 9886 if(regs[j].is32&(1LL<<(r&63))) break;
9887 }
9888 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9889 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9890 int k;
9891 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9892 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9893 if(r>63) {
9894 if(get_reg(regs[i].regmap,r&63)<0) break;
9895 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9896 }
9897 k=i;
9898 while(k>1&&regs[k-1].regmap[hr]==-1) {
e1190b87 9899 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9900 //printf("no free regs for store %x\n",start+(k-1)*4);
9901 break;
57871462 9902 }
57871462 9903 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9904 //printf("no-match due to different register\n");
9905 break;
9906 }
9907 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9908 //printf("no-match due to branch\n");
9909 break;
9910 }
9911 // call/ret fast path assumes no registers allocated
9912 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9913 break;
9914 }
9915 if(r>63) {
9916 // NB This can exclude the case where the upper-half
9917 // register is lower numbered than the lower-half
9918 // register. Not sure if it's worth fixing...
9919 if(get_reg(regs[k-1].regmap,r&63)<0) break;
9920 if(regs[k-1].is32&(1LL<<(r&63))) break;
9921 }
9922 k--;
9923 }
9924 if(i<slen-1) {
9925 if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9926 (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9927 //printf("bad match after branch\n");
9928 break;
9929 }
9930 }
9931 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9932 //printf("Extend r%d, %x ->\n",hr,start+k*4);
9933 while(k<i) {
9934 regs[k].regmap_entry[hr]=f_regmap[hr];
9935 regs[k].regmap[hr]=f_regmap[hr];
9936 regmap_pre[k+1][hr]=f_regmap[hr];
9937 regs[k].wasdirty&=~(1<<hr);
9938 regs[k].dirty&=~(1<<hr);
9939 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9940 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9941 regs[k].wasconst&=~(1<<hr);
9942 regs[k].isconst&=~(1<<hr);
9943 k++;
9944 }
9945 }
9946 else {
9947 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9948 break;
9949 }
9950 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9951 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9952 //printf("OK fill %x (r%d)\n",start+i*4,hr);
9953 regs[i].regmap_entry[hr]=f_regmap[hr];
9954 regs[i].regmap[hr]=f_regmap[hr];
9955 regs[i].wasdirty&=~(1<<hr);
9956 regs[i].dirty&=~(1<<hr);
9957 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9958 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9959 regs[i].wasconst&=~(1<<hr);
9960 regs[i].isconst&=~(1<<hr);
9961 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9962 branch_regs[i].wasdirty&=~(1<<hr);
9963 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9964 branch_regs[i].regmap[hr]=f_regmap[hr];
9965 branch_regs[i].dirty&=~(1<<hr);
9966 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9967 branch_regs[i].wasconst&=~(1<<hr);
9968 branch_regs[i].isconst&=~(1<<hr);
9969 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9970 regmap_pre[i+2][hr]=f_regmap[hr];
9971 regs[i+2].wasdirty&=~(1<<hr);
9972 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9973 assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9974 (regs[i+2].was32&(1LL<<f_regmap[hr])));
9975 }
9976 }
9977 }
9978 for(k=t;k<j;k++) {
e1190b87 9979 // Alloc register clean at beginning of loop,
9980 // but may dirty it in pass 6
57871462 9981 regs[k].regmap_entry[hr]=f_regmap[hr];
9982 regs[k].regmap[hr]=f_regmap[hr];
57871462 9983 regs[k].dirty&=~(1<<hr);
9984 regs[k].wasconst&=~(1<<hr);
9985 regs[k].isconst&=~(1<<hr);
e1190b87 9986 if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
9987 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
9988 branch_regs[k].regmap[hr]=f_regmap[hr];
9989 branch_regs[k].dirty&=~(1<<hr);
9990 branch_regs[k].wasconst&=~(1<<hr);
9991 branch_regs[k].isconst&=~(1<<hr);
9992 if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
9993 regmap_pre[k+2][hr]=f_regmap[hr];
9994 regs[k+2].wasdirty&=~(1<<hr);
9995 assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
9996 (regs[k+2].was32&(1LL<<f_regmap[hr])));
9997 }
9998 }
9999 else
10000 {
10001 regmap_pre[k+1][hr]=f_regmap[hr];
10002 regs[k+1].wasdirty&=~(1<<hr);
10003 }
57871462 10004 }
10005 if(regs[j].regmap[hr]==f_regmap[hr])
10006 regs[j].regmap_entry[hr]=f_regmap[hr];
10007 break;
10008 }
10009 if(j==i) break;
10010 if(regs[j].regmap[hr]>=0)
10011 break;
10012 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10013 //printf("no-match due to different register\n");
10014 break;
10015 }
10016 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10017 //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10018 break;
10019 }
e1190b87 10020 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10021 {
10022 // Stop on unconditional branch
10023 break;
10024 }
10025 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10026 {
10027 if(ooo[j]) {
10028 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
10029 break;
10030 }else{
10031 if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
10032 break;
10033 }
10034 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10035 //printf("no-match due to different register (branch)\n");
57871462 10036 break;
10037 }
10038 }
e1190b87 10039 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10040 //printf("No free regs for store %x\n",start+j*4);
10041 break;
10042 }
57871462 10043 if(f_regmap[hr]>=64) {
10044 if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10045 break;
10046 }
10047 else
10048 {
10049 if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10050 break;
10051 }
10052 }
10053 }
10054 }
10055 }
10056 }
10057 }
10058 }
10059 }else{
10060 int count=0;
10061 for(hr=0;hr<HOST_REGS;hr++)
10062 {
10063 if(hr!=EXCLUDE_REG) {
10064 if(regs[i].regmap[hr]>64) {
10065 if(!((regs[i].dirty>>hr)&1))
10066 f_regmap[hr]=regs[i].regmap[hr];
10067 }
b372a952 10068 else if(regs[i].regmap[hr]>=0) {
10069 if(f_regmap[hr]!=regs[i].regmap[hr]) {
10070 // dealloc old register
10071 int n;
10072 for(n=0;n<HOST_REGS;n++)
10073 {
10074 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10075 }
10076 // and alloc new one
10077 f_regmap[hr]=regs[i].regmap[hr];
10078 }
10079 }
57871462 10080 else if(regs[i].regmap[hr]<0) count++;
10081 }
10082 }
10083 // Try to restore cycle count at branch targets
10084 if(bt[i]) {
10085 for(j=i;j<slen-1;j++) {
10086 if(regs[j].regmap[HOST_CCREG]!=-1) break;
e1190b87 10087 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10088 //printf("no free regs for store %x\n",start+j*4);
10089 break;
57871462 10090 }
57871462 10091 }
10092 if(regs[j].regmap[HOST_CCREG]==CCREG) {
10093 int k=i;
10094 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10095 while(k<j) {
10096 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10097 regs[k].regmap[HOST_CCREG]=CCREG;
10098 regmap_pre[k+1][HOST_CCREG]=CCREG;
10099 regs[k+1].wasdirty|=1<<HOST_CCREG;
10100 regs[k].dirty|=1<<HOST_CCREG;
10101 regs[k].wasconst&=~(1<<HOST_CCREG);
10102 regs[k].isconst&=~(1<<HOST_CCREG);
10103 k++;
10104 }
10105 regs[j].regmap_entry[HOST_CCREG]=CCREG;
10106 }
10107 // Work backwards from the branch target
10108 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10109 {
10110 //printf("Extend backwards\n");
10111 int k;
10112 k=i;
10113 while(regs[k-1].regmap[HOST_CCREG]==-1) {
e1190b87 10114 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10115 //printf("no free regs for store %x\n",start+(k-1)*4);
10116 break;
57871462 10117 }
57871462 10118 k--;
10119 }
10120 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10121 //printf("Extend CC, %x ->\n",start+k*4);
10122 while(k<=i) {
10123 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10124 regs[k].regmap[HOST_CCREG]=CCREG;
10125 regmap_pre[k+1][HOST_CCREG]=CCREG;
10126 regs[k+1].wasdirty|=1<<HOST_CCREG;
10127 regs[k].dirty|=1<<HOST_CCREG;
10128 regs[k].wasconst&=~(1<<HOST_CCREG);
10129 regs[k].isconst&=~(1<<HOST_CCREG);
10130 k++;
10131 }
10132 }
10133 else {
10134 //printf("Fail Extend CC, %x ->\n",start+k*4);
10135 }
10136 }
10137 }
10138 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10139 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10140 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
e1190b87 10141 itype[i]!=FCONV&&itype[i]!=FCOMP)
57871462 10142 {
10143 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10144 }
10145 }
10146 }
10147
10148 // This allocates registers (if possible) one instruction prior
10149 // to use, which can avoid a load-use penalty on certain CPUs.
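 // Hypothetical example: if instruction i+1 is "addu r4,r2,r3" and r2 is not
 // yet in a host register, grabbing a free host register for r2 already at
 // instruction i lets its load be emitted one slot earlier, so the value is
 // more likely to be ready when the addu needs it.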
10150 for(i=0;i<slen-1;i++)
10151 {
10152 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10153 {
10154 if(!bt[i+1])
10155 {
b9b61529 10156 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10157 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
57871462 10158 {
10159 if(rs1[i+1]) {
10160 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10161 {
10162 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10163 {
10164 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10165 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10166 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10167 regs[i].isconst&=~(1<<hr);
10168 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10169 constmap[i][hr]=constmap[i+1][hr];
10170 regs[i+1].wasdirty&=~(1<<hr);
10171 regs[i].dirty&=~(1<<hr);
10172 }
10173 }
10174 }
10175 if(rs2[i+1]) {
10176 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10177 {
10178 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10179 {
10180 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10181 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10182 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10183 regs[i].isconst&=~(1<<hr);
10184 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10185 constmap[i][hr]=constmap[i+1][hr];
10186 regs[i+1].wasdirty&=~(1<<hr);
10187 regs[i].dirty&=~(1<<hr);
10188 }
10189 }
10190 }
10191 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10192 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10193 {
10194 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10195 {
10196 regs[i].regmap[hr]=rs1[i+1];
10197 regmap_pre[i+1][hr]=rs1[i+1];
10198 regs[i+1].regmap_entry[hr]=rs1[i+1];
10199 regs[i].isconst&=~(1<<hr);
10200 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10201 constmap[i][hr]=constmap[i+1][hr];
10202 regs[i+1].wasdirty&=~(1<<hr);
10203 regs[i].dirty&=~(1<<hr);
10204 }
10205 }
10206 }
10207 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10208 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10209 {
10210 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10211 {
10212 regs[i].regmap[hr]=rs1[i+1];
10213 regmap_pre[i+1][hr]=rs1[i+1];
10214 regs[i+1].regmap_entry[hr]=rs1[i+1];
10215 regs[i].isconst&=~(1<<hr);
10216 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10217 constmap[i][hr]=constmap[i+1][hr];
10218 regs[i+1].wasdirty&=~(1<<hr);
10219 regs[i].dirty&=~(1<<hr);
10220 }
10221 }
10222 }
10223 #ifndef HOST_IMM_ADDR32
b9b61529 10224 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 10225 hr=get_reg(regs[i+1].regmap,TLREG);
10226 if(hr>=0) {
10227 int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10228 if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10229 int nr;
10230 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10231 {
10232 regs[i].regmap[hr]=MGEN1+((i+1)&1);
10233 regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10234 regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10235 regs[i].isconst&=~(1<<hr);
10236 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10237 constmap[i][hr]=constmap[i+1][hr];
10238 regs[i+1].wasdirty&=~(1<<hr);
10239 regs[i].dirty&=~(1<<hr);
10240 }
10241 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10242 {
10243 // move it to another register
10244 regs[i+1].regmap[hr]=-1;
10245 regmap_pre[i+2][hr]=-1;
10246 regs[i+1].regmap[nr]=TLREG;
10247 regmap_pre[i+2][nr]=TLREG;
10248 regs[i].regmap[nr]=MGEN1+((i+1)&1);
10249 regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10250 regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10251 regs[i].isconst&=~(1<<nr);
10252 regs[i+1].isconst&=~(1<<nr);
10253 regs[i].dirty&=~(1<<nr);
10254 regs[i+1].wasdirty&=~(1<<nr);
10255 regs[i+1].dirty&=~(1<<nr);
10256 regs[i+2].wasdirty&=~(1<<nr);
10257 }
10258 }
10259 }
10260 }
10261 #endif
b9b61529 10262 if(itype[i+1]==STORE||itype[i+1]==STORELR
10263 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
57871462 10264 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10265 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10266 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10267 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10268 assert(hr>=0);
10269 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10270 {
10271 regs[i].regmap[hr]=rs1[i+1];
10272 regmap_pre[i+1][hr]=rs1[i+1];
10273 regs[i+1].regmap_entry[hr]=rs1[i+1];
10274 regs[i].isconst&=~(1<<hr);
10275 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10276 constmap[i][hr]=constmap[i+1][hr];
10277 regs[i+1].wasdirty&=~(1<<hr);
10278 regs[i].dirty&=~(1<<hr);
10279 }
10280 }
10281 }
b9b61529 10282 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
57871462 10283 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10284 int nr;
10285 hr=get_reg(regs[i+1].regmap,FTEMP);
10286 assert(hr>=0);
10287 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10288 {
10289 regs[i].regmap[hr]=rs1[i+1];
10290 regmap_pre[i+1][hr]=rs1[i+1];
10291 regs[i+1].regmap_entry[hr]=rs1[i+1];
10292 regs[i].isconst&=~(1<<hr);
10293 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10294 constmap[i][hr]=constmap[i+1][hr];
10295 regs[i+1].wasdirty&=~(1<<hr);
10296 regs[i].dirty&=~(1<<hr);
10297 }
10298 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10299 {
10300 // move it to another register
10301 regs[i+1].regmap[hr]=-1;
10302 regmap_pre[i+2][hr]=-1;
10303 regs[i+1].regmap[nr]=FTEMP;
10304 regmap_pre[i+2][nr]=FTEMP;
10305 regs[i].regmap[nr]=rs1[i+1];
10306 regmap_pre[i+1][nr]=rs1[i+1];
10307 regs[i+1].regmap_entry[nr]=rs1[i+1];
10308 regs[i].isconst&=~(1<<nr);
10309 regs[i+1].isconst&=~(1<<nr);
10310 regs[i].dirty&=~(1<<nr);
10311 regs[i+1].wasdirty&=~(1<<nr);
10312 regs[i+1].dirty&=~(1<<nr);
10313 regs[i+2].wasdirty&=~(1<<nr);
10314 }
10315 }
10316 }
b9b61529 10317 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
57871462 10318 if(itype[i+1]==LOAD)
10319 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
b9b61529 10320 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
57871462 10321 hr=get_reg(regs[i+1].regmap,FTEMP);
b9b61529 10322 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
57871462 10323 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10324 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10325 }
10326 if(hr>=0&&regs[i].regmap[hr]<0) {
10327 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10328 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10329 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10330 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10331 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10332 regs[i].isconst&=~(1<<hr);
10333 regs[i+1].wasdirty&=~(1<<hr);
10334 regs[i].dirty&=~(1<<hr);
10335 }
10336 }
10337 }
10338 }
10339 }
10340 }
10341 }
10342
10343 /* Pass 6 - Optimize clean/dirty state */
10344 clean_registers(0,slen-1,1);
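 // ("Dirty" meaning the host register holds a newer value than the guest
 //  register file; clean_registers() appears to propagate clean/dirty state
 //  across the block so unnecessary writebacks can be avoided, see the wb_*
 //  calls in pass 8 below.)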
10345
10346 /* Pass 7 - Identify 32-bit registers */
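 // Only relevant to the 64-bit MIPS core: requires_32bit[] appears to record
 // which guest registers the generated code relies on being proper 32-bit
 // (sign-extended) values. The whole scan is compiled out under FORCE32
 // (as used for the 32-bit PSX build).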
a28c6ce8 10347#ifndef FORCE32
57871462 10348 provisional_r32();
10349
10350 u_int r32=0;
10351
10352 for (i=slen-1;i>=0;i--)
10353 {
10354 int hr;
10355 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10356 {
10357 if(ba[i]<start || ba[i]>=(start+slen*4))
10358 {
10359 // Branch out of this block, don't need anything
10360 r32=0;
10361 }
10362 else
10363 {
10364 // Internal branch
10365 // Need whatever matches the target
10366 // (and doesn't get overwritten by the delay slot instruction)
10367 r32=0;
10368 int t=(ba[i]-start)>>2;
10369 if(ba[i]>start+i*4) {
10370 // Forward branch
10371 if(!(requires_32bit[t]&~regs[i].was32))
10372 r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10373 }else{
10374 // Backward branch
10375 //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10376 // r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10377 if(!(pr32[t]&~regs[i].was32))
10378 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10379 }
10380 }
10381 // Conditional branch may need registers for following instructions
10382 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10383 {
10384 if(i<slen-2) {
10385 r32|=requires_32bit[i+2];
10386 r32&=regs[i].was32;
10387 // Mark this address as a branch target since it may be called
10388 // upon return from interrupt
10389 bt[i+2]=1;
10390 }
10391 }
10392 // Merge in delay slot
10393 if(!likely[i]) {
10394 // These are overwritten unless the branch is "likely"
10395 // and the delay slot is nullified if not taken
10396 r32&=~(1LL<<rt1[i+1]);
10397 r32&=~(1LL<<rt2[i+1]);
10398 }
10399 // Assume these are needed (delay slot)
10400 if(us1[i+1]>0)
10401 {
10402 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10403 }
10404 if(us2[i+1]>0)
10405 {
10406 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10407 }
10408 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10409 {
10410 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10411 }
10412 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10413 {
10414 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10415 }
10416 }
1e973cb0 10417 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 10418 {
10419 // SYSCALL instruction (software interrupt)
10420 r32=0;
10421 }
10422 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10423 {
10424 // ERET instruction (return from interrupt)
10425 r32=0;
10426 }
10427 // Check 32 bits
10428 r32&=~(1LL<<rt1[i]);
10429 r32&=~(1LL<<rt2[i]);
10430 if(us1[i]>0)
10431 {
10432 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10433 }
10434 if(us2[i]>0)
10435 {
10436 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10437 }
10438 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10439 {
10440 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10441 }
10442 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10443 {
10444 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10445 }
10446 requires_32bit[i]=r32;
10447
10448 // Dirty registers which are 32-bit require 32-bit input
10449 // as they will be written as 32-bit values
10450 for(hr=0;hr<HOST_REGS;hr++)
10451 {
10452 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10453 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10454 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10455 requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10456 }
10457 }
10458 }
10459 //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10460 }
a28c6ce8 10461#endif
57871462 10462
10463 if(itype[slen-1]==SPAN) {
10464 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10465 }
10466
10467 /* Debug/disassembly */
10468 if((void*)assem_debug==(void*)printf)
10469 for(i=0;i<slen;i++)
10470 {
10471 printf("U:");
10472 int r;
10473 for(r=1;r<=CCREG;r++) {
10474 if((unneeded_reg[i]>>r)&1) {
10475 if(r==HIREG) printf(" HI");
10476 else if(r==LOREG) printf(" LO");
10477 else printf(" r%d",r);
10478 }
10479 }
90ae6d4e 10480#ifndef FORCE32
57871462 10481 printf(" UU:");
10482 for(r=1;r<=CCREG;r++) {
10483 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10484 if(r==HIREG) printf(" HI");
10485 else if(r==LOREG) printf(" LO");
10486 else printf(" r%d",r);
10487 }
10488 }
10489 printf(" 32:");
10490 for(r=0;r<=CCREG;r++) {
10491 //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10492 if((regs[i].was32>>r)&1) {
10493 if(r==CCREG) printf(" CC");
10494 else if(r==HIREG) printf(" HI");
10495 else if(r==LOREG) printf(" LO");
10496 else printf(" r%d",r);
10497 }
10498 }
90ae6d4e 10499#endif
57871462 10500 printf("\n");
10501 #if defined(__i386__) || defined(__x86_64__)
10502 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10503 #endif
10504 #ifdef __arm__
10505 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10506 #endif
10507 printf("needs: ");
10508 if(needed_reg[i]&1) printf("eax ");
10509 if((needed_reg[i]>>1)&1) printf("ecx ");
10510 if((needed_reg[i]>>2)&1) printf("edx ");
10511 if((needed_reg[i]>>3)&1) printf("ebx ");
10512 if((needed_reg[i]>>5)&1) printf("ebp ");
10513 if((needed_reg[i]>>6)&1) printf("esi ");
10514 if((needed_reg[i]>>7)&1) printf("edi ");
10515 printf("r:");
10516 for(r=0;r<=CCREG;r++) {
10517 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10518 if((requires_32bit[i]>>r)&1) {
10519 if(r==CCREG) printf(" CC");
10520 else if(r==HIREG) printf(" HI");
10521 else if(r==LOREG) printf(" LO");
10522 else printf(" r%d",r);
10523 }
10524 }
10525 printf("\n");
10526 /*printf("pr:");
10527 for(r=0;r<=CCREG;r++) {
10528 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10529 if((pr32[i]>>r)&1) {
10530 if(r==CCREG) printf(" CC");
10531 else if(r==HIREG) printf(" HI");
10532 else if(r==LOREG) printf(" LO");
10533 else printf(" r%d",r);
10534 }
10535 }
10536 if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10537 printf("\n");*/
10538 #if defined(__i386__) || defined(__x86_64__)
10539 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10540 printf("dirty: ");
10541 if(regs[i].wasdirty&1) printf("eax ");
10542 if((regs[i].wasdirty>>1)&1) printf("ecx ");
10543 if((regs[i].wasdirty>>2)&1) printf("edx ");
10544 if((regs[i].wasdirty>>3)&1) printf("ebx ");
10545 if((regs[i].wasdirty>>5)&1) printf("ebp ");
10546 if((regs[i].wasdirty>>6)&1) printf("esi ");
10547 if((regs[i].wasdirty>>7)&1) printf("edi ");
10548 #endif
10549 #ifdef __arm__
10550 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10551 printf("dirty: ");
10552 if(regs[i].wasdirty&1) printf("r0 ");
10553 if((regs[i].wasdirty>>1)&1) printf("r1 ");
10554 if((regs[i].wasdirty>>2)&1) printf("r2 ");
10555 if((regs[i].wasdirty>>3)&1) printf("r3 ");
10556 if((regs[i].wasdirty>>4)&1) printf("r4 ");
10557 if((regs[i].wasdirty>>5)&1) printf("r5 ");
10558 if((regs[i].wasdirty>>6)&1) printf("r6 ");
10559 if((regs[i].wasdirty>>7)&1) printf("r7 ");
10560 if((regs[i].wasdirty>>8)&1) printf("r8 ");
10561 if((regs[i].wasdirty>>9)&1) printf("r9 ");
10562 if((regs[i].wasdirty>>10)&1) printf("r10 ");
10563 if((regs[i].wasdirty>>12)&1) printf("r12 ");
10564 #endif
10565 printf("\n");
10566 disassemble_inst(i);
10567 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10568 #if defined(__i386__) || defined(__x86_64__)
10569 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10570 if(regs[i].dirty&1) printf("eax ");
10571 if((regs[i].dirty>>1)&1) printf("ecx ");
10572 if((regs[i].dirty>>2)&1) printf("edx ");
10573 if((regs[i].dirty>>3)&1) printf("ebx ");
10574 if((regs[i].dirty>>5)&1) printf("ebp ");
10575 if((regs[i].dirty>>6)&1) printf("esi ");
10576 if((regs[i].dirty>>7)&1) printf("edi ");
10577 #endif
10578 #ifdef __arm__
10579 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10580 if(regs[i].dirty&1) printf("r0 ");
10581 if((regs[i].dirty>>1)&1) printf("r1 ");
10582 if((regs[i].dirty>>2)&1) printf("r2 ");
10583 if((regs[i].dirty>>3)&1) printf("r3 ");
10584 if((regs[i].dirty>>4)&1) printf("r4 ");
10585 if((regs[i].dirty>>5)&1) printf("r5 ");
10586 if((regs[i].dirty>>6)&1) printf("r6 ");
10587 if((regs[i].dirty>>7)&1) printf("r7 ");
10588 if((regs[i].dirty>>8)&1) printf("r8 ");
10589 if((regs[i].dirty>>9)&1) printf("r9 ");
10590 if((regs[i].dirty>>10)&1) printf("r10 ");
10591 if((regs[i].dirty>>12)&1) printf("r12 ");
10592 #endif
10593 printf("\n");
10594 if(regs[i].isconst) {
10595 printf("constants: ");
10596 #if defined(__i386__) || defined(__x86_64__)
10597 if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10598 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10599 if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10600 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10601 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10602 if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10603 if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10604 #endif
10605 #ifdef __arm__
10606 if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10607 if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10608 if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10609 if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10610 if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10611 if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10612 if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10613 if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10614 if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10615 if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10616 if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10617 if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10618 #endif
10619 printf("\n");
10620 }
90ae6d4e 10621#ifndef FORCE32
57871462 10622 printf(" 32:");
10623 for(r=0;r<=CCREG;r++) {
10624 if((regs[i].is32>>r)&1) {
10625 if(r==CCREG) printf(" CC");
10626 else if(r==HIREG) printf(" HI");
10627 else if(r==LOREG) printf(" LO");
10628 else printf(" r%d",r);
10629 }
10630 }
10631 printf("\n");
90ae6d4e 10632#endif
57871462 10633 /*printf(" p32:");
10634 for(r=0;r<=CCREG;r++) {
10635 if((p32[i]>>r)&1) {
10636 if(r==CCREG) printf(" CC");
10637 else if(r==HIREG) printf(" HI");
10638 else if(r==LOREG) printf(" LO");
10639 else printf(" r%d",r);
10640 }
10641 }
10642 if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10643 else printf("\n");*/
10644 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10645 #if defined(__i386__) || defined(__x86_64__)
10646 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10647 if(branch_regs[i].dirty&1) printf("eax ");
10648 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10649 if((branch_regs[i].dirty>>2)&1) printf("edx ");
10650 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10651 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10652 if((branch_regs[i].dirty>>6)&1) printf("esi ");
10653 if((branch_regs[i].dirty>>7)&1) printf("edi ");
10654 #endif
10655 #ifdef __arm__
10656 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10657 if(branch_regs[i].dirty&1) printf("r0 ");
10658 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10659 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10660 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10661 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10662 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10663 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10664 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10665 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10666 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10667 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10668 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10669 #endif
90ae6d4e 10670#ifndef FORCE32
57871462 10671 printf(" 32:");
10672 for(r=0;r<=CCREG;r++) {
10673 if((branch_regs[i].is32>>r)&1) {
10674 if(r==CCREG) printf(" CC");
10675 else if(r==HIREG) printf(" HI");
10676 else if(r==LOREG) printf(" LO");
10677 else printf(" r%d",r);
10678 }
10679 }
10680 printf("\n");
90ae6d4e 10681#endif
57871462 10682 }
10683 }
10684
10685 /* Pass 8 - Assembly */
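 // Outline of the emit loop below: for each instruction that is not a delay
 // slot, write back / reload whatever the entry register map requires
 // (wb_*, load_regs, load_consts), record instr_addr[i] as the native entry
 // point for branch targets, then dispatch on itype[] to the per-type
 // assembler. Branch types set ds=1 so their delay slot is assembled as part
 // of the branch rather than on its own.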
10686 linkcount=0;stubcount=0;
10687 ds=0;is_delayslot=0;
10688 cop1_usable=0;
10689 uint64_t is32_pre=0;
10690 u_int dirty_pre=0;
10691 u_int beginning=(u_int)out;
10692 if((u_int)addr&1) {
10693 ds=1;
10694 pagespan_ds();
10695 }
9ad4d757 10696 u_int instr_addr0_override=0;
10697
10698#ifdef PCSX
10699 if (start == 0x80030000) {
10700 // nasty hack for fastbios thing
10701 instr_addr0_override=(u_int)out;
10702 emit_movimm(start,0);
10703 emit_readword((int)&pcaddr,1);
10704 emit_writeword(0,(int)&pcaddr);
10705 emit_cmp(0,1);
10706 emit_jne((int)new_dyna_leave);
10707 }
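 // (The emitted check above compares the saved pcaddr with this block's
 //  start and bails out to new_dyna_leave on mismatch, after resetting
 //  pcaddr to start; apparently a hook that lets the frontend skip the BIOS
 //  intro when execution first reaches 0x80030000.)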
10708#endif
57871462 10709 for(i=0;i<slen;i++)
10710 {
10711 //if(ds) printf("ds: ");
10712 if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10713 if(ds) {
10714 ds=0; // Skip delay slot
10715 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10716 instr_addr[i]=0;
10717 } else {
10718 #ifndef DESTRUCTIVE_WRITEBACK
10719 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10720 {
10721 wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10722 unneeded_reg[i],unneeded_reg_upper[i]);
10723 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10724 unneeded_reg[i],unneeded_reg_upper[i]);
10725 }
10726 is32_pre=regs[i].is32;
10727 dirty_pre=regs[i].dirty;
10728 #endif
10729 // write back
10730 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10731 {
10732 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10733 unneeded_reg[i],unneeded_reg_upper[i]);
10734 loop_preload(regmap_pre[i],regs[i].regmap_entry);
10735 }
10736 // branch target entry point
10737 instr_addr[i]=(u_int)out;
10738 assem_debug("<->\n");
10739 // load regs
10740 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10741 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10742 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10743 address_generation(i,&regs[i],regs[i].regmap_entry);
10744 load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10745 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10746 {
10747 // Load the delay slot registers if necessary
10748 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10749 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10750 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10751 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
b9b61529 10752 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
57871462 10753 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10754 }
10755 else if(i+1<slen)
10756 {
10757 // Preload registers for the following instruction
10758 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10759 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10760 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10761 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10762 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10763 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10764 }
10765 // TODO: if(is_ooo(i)) address_generation(i+1);
10766 if(itype[i]==CJUMP||itype[i]==FJUMP)
10767 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
b9b61529 10768 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
57871462 10769 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10770 if(bt[i]) cop1_usable=0;
10771 // assemble
10772 switch(itype[i]) {
10773 case ALU:
10774 alu_assemble(i,&regs[i]);break;
10775 case IMM16:
10776 imm16_assemble(i,&regs[i]);break;
10777 case SHIFT:
10778 shift_assemble(i,&regs[i]);break;
10779 case SHIFTIMM:
10780 shiftimm_assemble(i,&regs[i]);break;
10781 case LOAD:
10782 load_assemble(i,&regs[i]);break;
10783 case LOADLR:
10784 loadlr_assemble(i,&regs[i]);break;
10785 case STORE:
10786 store_assemble(i,&regs[i]);break;
10787 case STORELR:
10788 storelr_assemble(i,&regs[i]);break;
10789 case COP0:
10790 cop0_assemble(i,&regs[i]);break;
10791 case COP1:
10792 cop1_assemble(i,&regs[i]);break;
10793 case C1LS:
10794 c1ls_assemble(i,&regs[i]);break;
b9b61529 10795 case COP2:
10796 cop2_assemble(i,&regs[i]);break;
10797 case C2LS:
10798 c2ls_assemble(i,&regs[i]);break;
10799 case C2OP:
10800 c2op_assemble(i,&regs[i]);break;
57871462 10801 case FCONV:
10802 fconv_assemble(i,&regs[i]);break;
10803 case FLOAT:
10804 float_assemble(i,&regs[i]);break;
10805 case FCOMP:
10806 fcomp_assemble(i,&regs[i]);break;
10807 case MULTDIV:
10808 multdiv_assemble(i,&regs[i]);break;
10809 case MOV:
10810 mov_assemble(i,&regs[i]);break;
10811 case SYSCALL:
10812 syscall_assemble(i,&regs[i]);break;
7139f3c8 10813 case HLECALL:
10814 hlecall_assemble(i,&regs[i]);break;
1e973cb0 10815 case INTCALL:
10816 intcall_assemble(i,&regs[i]);break;
57871462 10817 case UJUMP:
10818 ujump_assemble(i,&regs[i]);ds=1;break;
10819 case RJUMP:
10820 rjump_assemble(i,&regs[i]);ds=1;break;
10821 case CJUMP:
10822 cjump_assemble(i,&regs[i]);ds=1;break;
10823 case SJUMP:
10824 sjump_assemble(i,&regs[i]);ds=1;break;
10825 case FJUMP:
10826 fjump_assemble(i,&regs[i]);ds=1;break;
10827 case SPAN:
10828 pagespan_assemble(i,&regs[i]);break;
10829 }
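      // After an unconditional jump nothing falls through, so the literal
      // pool can be dumped in full; otherwise only flush it when it is
      // getting close to the limit, branching over the pooled data
      // (literal_pool_jumpover).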
10830 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10831 literal_pool(1024);
10832 else
10833 literal_pool_jumpover(256);
10834 }
10835 }
10836 //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10837 // If the block did not end with an unconditional branch,
10838 // add a jump to the next instruction.
10839 if(i>1) {
10840 if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10841 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10842 assert(i==slen);
10843 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10844 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10845 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10846 emit_loadreg(CCREG,HOST_CCREG);
10847 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10848 }
10849 else if(!likely[i-2])
10850 {
10851 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10852 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10853 }
10854 else
10855 {
10856 store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10857 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10858 }
10859 add_to_linker((int)out,start+i*4,0);
10860 emit_jmp(0);
10861 }
10862 }
10863 else
10864 {
10865 assert(i>0);
10866 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10867 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10868 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10869 emit_loadreg(CCREG,HOST_CCREG);
10870 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10871 add_to_linker((int)out,start+i*4,0);
10872 emit_jmp(0);
10873 }
10874
10875 // TODO: delay slot stubs?
10876 // Stubs
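  // Emit the out-of-line code queued in stubs[] during assembly: slow-path
  // memory accesses, cycle-count (CC) checks, code-invalidation checks,
  // coprocessor-unusable traps and unaligned-store handlers.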
10877 for(i=0;i<stubcount;i++)
10878 {
10879 switch(stubs[i][0])
10880 {
10881 case LOADB_STUB:
10882 case LOADH_STUB:
10883 case LOADW_STUB:
10884 case LOADD_STUB:
10885 case LOADBU_STUB:
10886 case LOADHU_STUB:
10887 do_readstub(i);break;
10888 case STOREB_STUB:
10889 case STOREH_STUB:
10890 case STOREW_STUB:
10891 case STORED_STUB:
10892 do_writestub(i);break;
10893 case CC_STUB:
10894 do_ccstub(i);break;
10895 case INVCODE_STUB:
10896 do_invstub(i);break;
10897 case FP_STUB:
10898 do_cop1stub(i);break;
10899 case STORELR_STUB:
10900 do_unalignedwritestub(i);break;
10901 }
10902 }
10903
9ad4d757 10904 if (instr_addr0_override)
10905 instr_addr[0] = instr_addr0_override;
10906
57871462 10907 /* Pass 9 - Linker */
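  // Resolve the branches recorded by add_to_linker(). External targets get an
  // extjump stub; if check_addr() already knows the target block, branch
  // straight to it and record the stub with add_link() (presumably so the
  // link can be reverted if that block goes away). Internal branches are
  // patched directly to the target instruction's address.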
10908 for(i=0;i<linkcount;i++)
10909 {
10910 assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10911 literal_pool(64);
10912 if(!link_addr[i][2])
10913 {
10914 void *stub=out;
10915 void *addr=check_addr(link_addr[i][1]);
10916 emit_extjump(link_addr[i][0],link_addr[i][1]);
10917 if(addr) {
10918 set_jump_target(link_addr[i][0],(int)addr);
10919 add_link(link_addr[i][1],stub);
10920 }
10921 else set_jump_target(link_addr[i][0],(int)stub);
10922 }
10923 else
10924 {
10925 // Internal branch
10926 int target=(link_addr[i][1]-start)>>2;
10927 assert(target>=0&&target<slen);
10928 assert(instr_addr[target]);
10929 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10930 //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10931 //#else
10932 set_jump_target(link_addr[i][0],instr_addr[target]);
10933 //#endif
10934 }
10935 }
10936 // External Branch Targets (jump_in)
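  // Register an entry point for the block start and for every branch target,
  // first wrapping the shadow (source-copy) buffer if this block's copy would
  // overflow it.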
10937 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10938 for(i=0;i<slen;i++)
10939 {
10940 if(bt[i]||i==0)
10941 {
10942 if(instr_addr[i]) // TODO - delay slots (=null)
10943 {
10944 u_int vaddr=start+i*4;
94d23bb9 10945 u_int page=get_page(vaddr);
10946 u_int vpage=get_vpage(vaddr);
57871462 10947 literal_pool(256);
10948 //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
a28c6ce8 10949#ifndef FORCE32
57871462 10950 if(!requires_32bit[i])
a28c6ce8 10951#else
10952 if(1)
10953#endif
57871462 10954 {
10955 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10956 assem_debug("jump_in: %x\n",start+i*4);
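          // The entry point produced by do_dirty_stub presumably re-validates
          // the block (e.g. against the shadow copy of the source) before
          // jumping into the generated code.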
10957 ll_add(jump_dirty+vpage,vaddr,(void *)out);
10958 int entry_point=do_dirty_stub(i);
10959 ll_add(jump_in+page,vaddr,(void *)entry_point);
10960 // If there was an existing entry in the hash table,
10961 // replace it with the new address.
10962 // Don't add new entries. We'll insert the
10963 // ones that actually get used in check_addr().
10964 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10965 if(ht_bin[0]==vaddr) {
10966 ht_bin[1]=entry_point;
10967 }
10968 if(ht_bin[2]==vaddr) {
10969 ht_bin[3]=entry_point;
10970 }
10971 }
10972 else
10973 {
10974 u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10975 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10976 assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10977 //int entry_point=(int)out;
10978 ////assem_debug("entry_point: %x\n",entry_point);
10979 //load_regs_entry(i);
10980 //if(entry_point==(int)out)
10981 // entry_point=instr_addr[i];
10982 //else
10983 // emit_jmp(instr_addr[i]);
10984 //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10985 ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10986 int entry_point=do_dirty_stub(i);
10987 ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10988 }
10989 }
10990 }
10991 }
10992 // Write out the literal pool if necessary
10993 literal_pool(0);
10994 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10995 // Align code
10996 if(((u_int)out)&7) emit_addnop(13);
10997 #endif
10998 assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10999 //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
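  // Keep a copy of the source words in the shadow buffer so the block can
  // later be checked against it (e.g. by the dirty stubs) to detect modified
  // code.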
11000 memcpy(copy,source,slen*4);
11001 copy+=slen*4;
11002
11003 #ifdef __arm__
11004 __clear_cache((void *)beginning,out);
11005 #endif
11006
11007 // If we're within 256K of the end of the buffer,
11008 // start over from the beginning. (Is 256K enough?)
11009 if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11010
11011 // Trap writes to any of the pages we compiled
11012 for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11013 invalid_code[i]=0;
90ae6d4e 11014#ifndef DISABLE_TLB
57871462 11015 memory_map[i]|=0x40000000;
11016 if((signed int)start>=(signed int)0xC0000000) {
11017 assert(using_tlb);
11018 j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11019 invalid_code[j]=0;
11020 memory_map[j]|=0x40000000;
11021 //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11022 }
90ae6d4e 11023#endif
57871462 11024 }
11025
11026 /* Pass 10 - Free memory by expiring oldest blocks */
11027
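  // The output buffer acts as a ring: advance expirep toward the position
  // corresponding to the current output pointer, discarding jump_in /
  // jump_dirty entries, patched jump_out pointers and hash-table entries that
  // fall in the region about to be overwritten.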
11028 int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
11029 while(expirep!=end)
11030 {
11031 int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11032 int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11033 inv_debug("EXP: Phase %d\n",expirep);
11034 switch((expirep>>11)&3)
11035 {
11036 case 0:
11037 // Clear jump_in and jump_dirty
11038 ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11039 ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11040 ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11041 ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11042 break;
11043 case 1:
 11044 // Clear pointers into the expiring region (jump_out lists)
11045 ll_kill_pointers(jump_out[expirep&2047],base,shift);
11046 ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11047 break;
11048 case 2:
 11049 // Clear hash table entries that point into the expiring region
11050 for(i=0;i<32;i++) {
11051 int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11052 if((ht_bin[3]>>shift)==(base>>shift) ||
11053 ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11054 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11055 ht_bin[2]=ht_bin[3]=-1;
11056 }
11057 if((ht_bin[1]>>shift)==(base>>shift) ||
11058 ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11059 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11060 ht_bin[0]=ht_bin[2];
11061 ht_bin[1]=ht_bin[3];
11062 ht_bin[2]=ht_bin[3]=-1;
11063 }
11064 }
11065 break;
11066 case 3:
11067 // Clear jump_out
dd3a91a1 11068 #ifdef __arm__
11069 if((expirep&2047)==0)
11070 do_clear_cache();
11071 #endif
57871462 11072 ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11073 ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11074 break;
11075 }
11076 expirep=(expirep+1)&65535;
11077 }
11078 return 0;
11079}
b9b61529 11080
11081// vim:shiftwidth=2:expandtab