drc: implement shiftimm constant propagation
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
20d507ba 3 * Copyright (C) 2009-2011 Ari64 *
57871462 4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
4600ba03 24#include <sys/mman.h>
57871462 25
3d624f89 26#include "emu_if.h" //emulator interface
57871462 27
4600ba03 28//#define DISASM
29//#define assem_debug printf
30//#define inv_debug printf
31#define assem_debug(...)
32#define inv_debug(...)
57871462 33
34#ifdef __i386__
35#include "assem_x86.h"
36#endif
37#ifdef __x86_64__
38#include "assem_x64.h"
39#endif
40#ifdef __arm__
41#include "assem_arm.h"
42#endif
43
44#define MAXBLOCK 4096
45#define MAX_OUTPUT_BLOCK_SIZE 262144
46#define CLOCK_DIVIDER 2
47
48struct regstat
49{
50 signed char regmap_entry[HOST_REGS];
51 signed char regmap[HOST_REGS];
52 uint64_t was32;
53 uint64_t is32;
54 uint64_t wasdirty;
55 uint64_t dirty;
56 uint64_t u;
57 uint64_t uu;
58 u_int wasconst;
59 u_int isconst;
60 uint64_t constmap[HOST_REGS];
61};
62
63struct ll_entry
64{
65 u_int vaddr;
66 u_int reg32;
67 void *addr;
68 struct ll_entry *next;
69};
70
71 u_int start;
72 u_int *source;
73 u_int pagelimit;
74 char insn[MAXBLOCK][10];
75 u_char itype[MAXBLOCK];
76 u_char opcode[MAXBLOCK];
77 u_char opcode2[MAXBLOCK];
78 u_char bt[MAXBLOCK];
79 u_char rs1[MAXBLOCK];
80 u_char rs2[MAXBLOCK];
81 u_char rt1[MAXBLOCK];
82 u_char rt2[MAXBLOCK];
83 u_char us1[MAXBLOCK];
84 u_char us2[MAXBLOCK];
85 u_char dep1[MAXBLOCK];
86 u_char dep2[MAXBLOCK];
87 u_char lt1[MAXBLOCK];
bedfea38 88 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
89 static uint64_t gte_rt[MAXBLOCK];
90 static uint64_t gte_unneeded[MAXBLOCK];
91 static int gte_reads_flags; // gte flag read encountered
57871462 92 int imm[MAXBLOCK];
93 u_int ba[MAXBLOCK];
94 char likely[MAXBLOCK];
95 char is_ds[MAXBLOCK];
e1190b87 96 char ooo[MAXBLOCK];
57871462 97 uint64_t unneeded_reg[MAXBLOCK];
98 uint64_t unneeded_reg_upper[MAXBLOCK];
99 uint64_t branch_unneeded_reg[MAXBLOCK];
100 uint64_t branch_unneeded_reg_upper[MAXBLOCK];
101 uint64_t p32[MAXBLOCK];
102 uint64_t pr32[MAXBLOCK];
103 signed char regmap_pre[MAXBLOCK][HOST_REGS];
104 signed char regmap[MAXBLOCK][HOST_REGS];
105 signed char regmap_entry[MAXBLOCK][HOST_REGS];
106 uint64_t constmap[MAXBLOCK][HOST_REGS];
57871462 107 struct regstat regs[MAXBLOCK];
108 struct regstat branch_regs[MAXBLOCK];
e1190b87 109 signed char minimum_free_regs[MAXBLOCK];
57871462 110 u_int needed_reg[MAXBLOCK];
111 uint64_t requires_32bit[MAXBLOCK];
112 u_int wont_dirty[MAXBLOCK];
113 u_int will_dirty[MAXBLOCK];
114 int ccadj[MAXBLOCK];
115 int slen;
116 u_int instr_addr[MAXBLOCK];
117 u_int link_addr[MAXBLOCK][3];
118 int linkcount;
119 u_int stubs[MAXBLOCK*3][8];
120 int stubcount;
121 u_int literals[1024][2];
122 int literalcount;
123 int is_delayslot;
124 int cop1_usable;
125 u_char *out;
126 struct ll_entry *jump_in[4096];
127 struct ll_entry *jump_out[4096];
128 struct ll_entry *jump_dirty[4096];
129 u_int hash_table[65536][4] __attribute__((aligned(16)));
130 char shadow[1048576] __attribute__((aligned(16)));
131 void *copy;
132 int expirep;
af4ee1fe 133#ifndef PCSX
57871462 134 u_int using_tlb;
af4ee1fe 135#else
136 static const u_int using_tlb=0;
137#endif
dadf55f2 138 static u_int sp_in_mirror;
2f546f9a 139 int new_dynarec_did_compile;
57871462 140 u_int stop_after_jal;
141 extern u_char restore_candidate[512];
142 extern int cycle_count;
143
144 /* registers that may be allocated */
145 /* 1-31 gpr */
146#define HIREG 32 // hi
147#define LOREG 33 // lo
148#define FSREG 34 // FPU status (FCSR)
149#define CSREG 35 // Coprocessor status
150#define CCREG 36 // Cycle count
151#define INVCP 37 // Pointer to invalid_code
619e5ded 152#define MMREG 38 // Pointer to memory_map
153#define ROREG 39 // ram offset (if rdram!=0x80000000)
154#define TEMPREG 40
155#define FTEMP 40 // FPU temporary register
156#define PTEMP 41 // Prefetch temporary register
157#define TLREG 42 // TLB mapping offset
158#define RHASH 43 // Return address hash
159#define RHTBL 44 // Return address hash table address
160#define RTEMP 45 // JR/JALR address register
161#define MAXREG 45
162#define AGEN1 46 // Address generation temporary register
163#define AGEN2 47 // Address generation temporary register
164#define MGEN1 48 // Maptable address generation temporary register
165#define MGEN2 49 // Maptable address generation temporary register
166#define BTREG 50 // Branch target temporary register
57871462 167
168 /* instruction types */
169#define NOP 0 // No operation
170#define LOAD 1 // Load
171#define STORE 2 // Store
172#define LOADLR 3 // Unaligned load
173#define STORELR 4 // Unaligned store
174#define MOV 5 // Move
175#define ALU 6 // Arithmetic/logic
176#define MULTDIV 7 // Multiply/divide
177#define SHIFT 8 // Shift by register
178#define SHIFTIMM 9// Shift by immediate
179#define IMM16 10 // 16-bit immediate
180#define RJUMP 11 // Unconditional jump to register
181#define UJUMP 12 // Unconditional jump
182#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
183#define SJUMP 14 // Conditional branch (regimm format)
184#define COP0 15 // Coprocessor 0
185#define COP1 16 // Coprocessor 1
186#define C1LS 17 // Coprocessor 1 load/store
187#define FJUMP 18 // Conditional branch (floating point)
188#define FLOAT 19 // Floating point unit
189#define FCONV 20 // Convert integer to float
190#define FCOMP 21 // Floating point compare (sets FSREG)
191#define SYSCALL 22// SYSCALL
192#define OTHER 23 // Other
193#define SPAN 24 // Branch/delay slot spans 2 pages
194#define NI 25 // Not implemented
7139f3c8 195#define HLECALL 26// PCSX fake opcodes for HLE
b9b61529 196#define COP2 27 // Coprocessor 2 move
197#define C2LS 28 // Coprocessor 2 load/store
198#define C2OP 29 // Coprocessor 2 operation
1e973cb0 199#define INTCALL 30// Call interpreter to handle rare corner cases
57871462 200
201 /* stubs */
202#define CC_STUB 1
203#define FP_STUB 2
204#define LOADB_STUB 3
205#define LOADH_STUB 4
206#define LOADW_STUB 5
207#define LOADD_STUB 6
208#define LOADBU_STUB 7
209#define LOADHU_STUB 8
210#define STOREB_STUB 9
211#define STOREH_STUB 10
212#define STOREW_STUB 11
213#define STORED_STUB 12
214#define STORELR_STUB 13
215#define INVCODE_STUB 14
216
217 /* branch codes */
218#define TAKEN 1
219#define NOTTAKEN 2
220#define NULLDS 3
221
222// asm linkage
223int new_recompile_block(int addr);
224void *get_addr_ht(u_int vaddr);
225void invalidate_block(u_int block);
226void invalidate_addr(u_int addr);
227void remove_hash(int vaddr);
228void jump_vaddr();
229void dyna_linker();
230void dyna_linker_ds();
231void verify_code();
232void verify_code_vm();
233void verify_code_ds();
234void cc_interrupt();
235void fp_exception();
236void fp_exception_ds();
237void jump_syscall();
7139f3c8 238void jump_syscall_hle();
57871462 239void jump_eret();
7139f3c8 240void jump_hlecall();
1e973cb0 241void jump_intcall();
7139f3c8 242void new_dyna_leave();
57871462 243
244// TLB
245void TLBWI_new();
246void TLBWR_new();
247void read_nomem_new();
248void read_nomemb_new();
249void read_nomemh_new();
250void read_nomemd_new();
251void write_nomem_new();
252void write_nomemb_new();
253void write_nomemh_new();
254void write_nomemd_new();
255void write_rdram_new();
256void write_rdramb_new();
257void write_rdramh_new();
258void write_rdramd_new();
259extern u_int memory_map[1048576];
260
261// Needed by assembler
262void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
263void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
264void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
265void load_all_regs(signed char i_regmap[]);
266void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
267void load_regs_entry(int t);
268void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
269
270int tracedebug=0;
271
272//#define DEBUG_CYCLE_COUNT 1
273
94d23bb9 274static void tlb_hacks()
57871462 275{
94d23bb9 276#ifndef DISABLE_TLB
57871462 277 // Goldeneye hack
278 if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
279 {
280 u_int addr;
281 int n;
282 switch (ROM_HEADER->Country_code&0xFF)
283 {
284 case 0x45: // U
285 addr=0x34b30;
286 break;
287 case 0x4A: // J
288 addr=0x34b70;
289 break;
290 case 0x50: // E
291 addr=0x329f0;
292 break;
293 default:
294 // Unknown country code
295 addr=0;
296 break;
297 }
298 u_int rom_addr=(u_int)rom;
299 #ifdef ROM_COPY
300 // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
301 // in the lower 4G of memory to use this hack. Copy it if necessary.
302 if((void *)rom>(void *)0xffffffff) {
303 munmap(ROM_COPY, 67108864);
304 if(mmap(ROM_COPY, 12582912,
305 PROT_READ | PROT_WRITE,
306 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
307 -1, 0) <= 0) {printf("mmap() failed\n");}
308 memcpy(ROM_COPY,rom,12582912);
309 rom_addr=(u_int)ROM_COPY;
310 }
311 #endif
312 if(addr) {
313 for(n=0x7F000;n<0x80000;n++) {
314 memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
315 }
316 }
317 }
94d23bb9 318#endif
57871462 319}
320
94d23bb9 321static u_int get_page(u_int vaddr)
57871462 322{
0ce47d46 323#ifndef PCSX
57871462 324 u_int page=(vaddr^0x80000000)>>12;
0ce47d46 325#else
326 u_int page=vaddr&~0xe0000000;
327 if (page < 0x1000000)
328 page &= ~0x0e00000; // RAM mirrors
329 page>>=12;
330#endif
94d23bb9 331#ifndef DISABLE_TLB
57871462 332 if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
94d23bb9 333#endif
57871462 334 if(page>2048) page=2048+(page&2047);
94d23bb9 335 return page;
336}
337
338static u_int get_vpage(u_int vaddr)
339{
340 u_int vpage=(vaddr^0x80000000)>>12;
341#ifndef DISABLE_TLB
57871462 342 if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
94d23bb9 343#endif
57871462 344 if(vpage>2048) vpage=2048+(vpage&2047);
94d23bb9 345 return vpage;
346}
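
A quick illustration of the PCSX branch above: the 2 MB of PSX RAM is mirrored through the first 16 MB of the unmapped address space, so the mirror bits are masked off before the 4 KB page index is taken. The helper below is a self-contained sketch of that path only (no TLB), with made-up example addresses.

// Sketch of the #ifdef PCSX path of get_page() above (no TLB case).
static u_int get_page_sketch(u_int vaddr)
{
  u_int page = vaddr & ~0xe0000000;   // strip KSEG0/KSEG1 segment bits
  if (page < 0x1000000)
    page &= ~0x0e00000;               // collapse the 2MB RAM mirrors
  page >>= 12;                        // 4KB pages
  if (page > 2048) page = 2048 + (page & 2047);
  return page;
}
// e.g. 0x80010000 and 0xa0210000 (a mirror) both land on page 0x10.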
347
348// Get address from virtual address
349// This is called from the recompiled JR/JALR instructions
350void *get_addr(u_int vaddr)
351{
352 u_int page=get_page(vaddr);
353 u_int vpage=get_vpage(vaddr);
57871462 354 struct ll_entry *head;
355 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
356 head=jump_in[page];
357 while(head!=NULL) {
358 if(head->vaddr==vaddr&&head->reg32==0) {
359 //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
360 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
361 ht_bin[3]=ht_bin[1];
362 ht_bin[2]=ht_bin[0];
363 ht_bin[1]=(int)head->addr;
364 ht_bin[0]=vaddr;
365 return head->addr;
366 }
367 head=head->next;
368 }
369 head=jump_dirty[vpage];
370 while(head!=NULL) {
371 if(head->vaddr==vaddr&&head->reg32==0) {
372 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
373 // Don't restore blocks which are about to expire from the cache
374 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
375 if(verify_dirty(head->addr)) {
376 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
377 invalid_code[vaddr>>12]=0;
9be4ba64 378 inv_code_start=inv_code_end=~0;
57871462 379 memory_map[vaddr>>12]|=0x40000000;
380 if(vpage<2048) {
94d23bb9 381#ifndef DISABLE_TLB
57871462 382 if(tlb_LUT_r[vaddr>>12]) {
383 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
384 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
385 }
94d23bb9 386#endif
57871462 387 restore_candidate[vpage>>3]|=1<<(vpage&7);
388 }
389 else restore_candidate[page>>3]|=1<<(page&7);
390 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
391 if(ht_bin[0]==vaddr) {
392 ht_bin[1]=(int)head->addr; // Replace existing entry
393 }
394 else
395 {
396 ht_bin[3]=ht_bin[1];
397 ht_bin[2]=ht_bin[0];
398 ht_bin[1]=(int)head->addr;
399 ht_bin[0]=vaddr;
400 }
401 return head->addr;
402 }
403 }
404 head=head->next;
405 }
406 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
407 int r=new_recompile_block(vaddr);
408 if(r==0) return get_addr(vaddr);
409 // Execute in unmapped page, generate pagefault exception
410 Status|=2;
411 Cause=(vaddr<<31)|0x8;
412 EPC=(vaddr&1)?vaddr-5:vaddr;
413 BadVAddr=(vaddr&~1);
414 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
415 EntryHi=BadVAddr&0xFFFFE000;
416 return get_addr_ht(0x80000000);
417}
418// Look up address in hash table first
419void *get_addr_ht(u_int vaddr)
420{
421 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
422 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
423 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
424 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
425 return get_addr(vaddr);
426}
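
get_addr_ht() above consults a small direct-mapped cache: each bucket of hash_table holds two (vaddr, compiled address) pairs, and get_addr() rotates a fresh translation into the first pair while demoting the old one. A minimal lookup sketch against a hypothetical local table (the real code indexes the global hash_table[65536][4]):

#include <stdint.h>
// Bucket layout: {vaddr0, addr0, vaddr1, addr1}; two entries per bucket.
static void *ht_lookup_sketch(uint32_t ht[][4], uint32_t vaddr)
{
  uint32_t *bin = ht[((vaddr >> 16) ^ vaddr) & 0xFFFF]; // assumes 65536 buckets
  if (bin[0] == vaddr) return (void *)(uintptr_t)bin[1]; // most recent pair
  if (bin[2] == vaddr) return (void *)(uintptr_t)bin[3]; // older pair
  return 0; // miss: caller falls back to the jump_in linked lists
}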
427
428void *get_addr_32(u_int vaddr,u_int flags)
429{
7139f3c8 430#ifdef FORCE32
431 return get_addr(vaddr);
560e4a12 432#else
57871462 433 //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
434 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
435 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
436 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
94d23bb9 437 u_int page=get_page(vaddr);
438 u_int vpage=get_vpage(vaddr);
57871462 439 struct ll_entry *head;
440 head=jump_in[page];
441 while(head!=NULL) {
442 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
443 //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
444 if(head->reg32==0) {
445 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
446 if(ht_bin[0]==-1) {
447 ht_bin[1]=(int)head->addr;
448 ht_bin[0]=vaddr;
449 }else if(ht_bin[2]==-1) {
450 ht_bin[3]=(int)head->addr;
451 ht_bin[2]=vaddr;
452 }
453 //ht_bin[3]=ht_bin[1];
454 //ht_bin[2]=ht_bin[0];
455 //ht_bin[1]=(int)head->addr;
456 //ht_bin[0]=vaddr;
457 }
458 return head->addr;
459 }
460 head=head->next;
461 }
462 head=jump_dirty[vpage];
463 while(head!=NULL) {
464 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
465 //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
466 // Don't restore blocks which are about to expire from the cache
467 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
468 if(verify_dirty(head->addr)) {
469 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
470 invalid_code[vaddr>>12]=0;
9be4ba64 471 inv_code_start=inv_code_end=~0;
57871462 472 memory_map[vaddr>>12]|=0x40000000;
473 if(vpage<2048) {
94d23bb9 474#ifndef DISABLE_TLB
57871462 475 if(tlb_LUT_r[vaddr>>12]) {
476 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
477 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
478 }
94d23bb9 479#endif
57871462 480 restore_candidate[vpage>>3]|=1<<(vpage&7);
481 }
482 else restore_candidate[page>>3]|=1<<(page&7);
483 if(head->reg32==0) {
484 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
485 if(ht_bin[0]==-1) {
486 ht_bin[1]=(int)head->addr;
487 ht_bin[0]=vaddr;
488 }else if(ht_bin[2]==-1) {
489 ht_bin[3]=(int)head->addr;
490 ht_bin[2]=vaddr;
491 }
492 //ht_bin[3]=ht_bin[1];
493 //ht_bin[2]=ht_bin[0];
494 //ht_bin[1]=(int)head->addr;
495 //ht_bin[0]=vaddr;
496 }
497 return head->addr;
498 }
499 }
500 head=head->next;
501 }
502 //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
503 int r=new_recompile_block(vaddr);
504 if(r==0) return get_addr(vaddr);
505 // Execute in unmapped page, generate pagefault exception
506 Status|=2;
507 Cause=(vaddr<<31)|0x8;
508 EPC=(vaddr&1)?vaddr-5:vaddr;
509 BadVAddr=(vaddr&~1);
510 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
511 EntryHi=BadVAddr&0xFFFFE000;
512 return get_addr_ht(0x80000000);
560e4a12 513#endif
57871462 514}
515
516void clear_all_regs(signed char regmap[])
517{
518 int hr;
519 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
520}
521
522signed char get_reg(signed char regmap[],int r)
523{
524 int hr;
525 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
526 return -1;
527}
528
529// Find a register that is available for two consecutive cycles
530signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
531{
532 int hr;
533 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
534 return -1;
535}
536
537int count_free_regs(signed char regmap[])
538{
539 int count=0;
540 int hr;
541 for(hr=0;hr<HOST_REGS;hr++)
542 {
543 if(hr!=EXCLUDE_REG) {
544 if(regmap[hr]<0) count++;
545 }
546 }
547 return count;
548}
549
550void dirty_reg(struct regstat *cur,signed char reg)
551{
552 int hr;
553 if(!reg) return;
554 for (hr=0;hr<HOST_REGS;hr++) {
555 if((cur->regmap[hr]&63)==reg) {
556 cur->dirty|=1<<hr;
557 }
558 }
559}
560
561// If we dirty the lower half of a 64 bit register which is now being
562// sign-extended, we need to dump the upper half.
563// Note: Do this only after completion of the instruction, because
564// some instructions may need to read the full 64-bit value even if
565// overwriting it (eg SLTI, DSRA32).
566static void flush_dirty_uppers(struct regstat *cur)
567{
568 int hr,reg;
569 for (hr=0;hr<HOST_REGS;hr++) {
570 if((cur->dirty>>hr)&1) {
571 reg=cur->regmap[hr];
572 if(reg>=64)
573 if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
574 }
575 }
576}
577
578void set_const(struct regstat *cur,signed char reg,uint64_t value)
579{
580 int hr;
581 if(!reg) return;
582 for (hr=0;hr<HOST_REGS;hr++) {
583 if(cur->regmap[hr]==reg) {
584 cur->isconst|=1<<hr;
585 cur->constmap[hr]=value;
586 }
587 else if((cur->regmap[hr]^64)==reg) {
588 cur->isconst|=1<<hr;
589 cur->constmap[hr]=value>>32;
590 }
591 }
592}
593
594void clear_const(struct regstat *cur,signed char reg)
595{
596 int hr;
597 if(!reg) return;
598 for (hr=0;hr<HOST_REGS;hr++) {
599 if((cur->regmap[hr]&63)==reg) {
600 cur->isconst&=~(1<<hr);
601 }
602 }
603}
604
605int is_const(struct regstat *cur,signed char reg)
606{
607 int hr;
79c75f1b 608 if(reg<0) return 0;
57871462 609 if(!reg) return 1;
610 for (hr=0;hr<HOST_REGS;hr++) {
611 if((cur->regmap[hr]&63)==reg) {
612 return (cur->isconst>>hr)&1;
613 }
614 }
615 return 0;
616}
617uint64_t get_const(struct regstat *cur,signed char reg)
618{
619 int hr;
620 if(!reg) return 0;
621 for (hr=0;hr<HOST_REGS;hr++) {
622 if(cur->regmap[hr]==reg) {
623 return cur->constmap[hr];
624 }
625 }
626 printf("Unknown constant in r%d\n",reg);
627 exit(1);
628}
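
set_const(), is_const() and get_const() above are the bookkeeping behind this commit's constant propagation: isconst is a per-host-register bitmask and constmap[hr] holds the known value of whatever guest register is currently mapped to host register hr. A hedged usage sketch (guest register numbers are hypothetical and assumed to be already mapped in 'cur'):

// Sketch: fold "addiu r3, r2, 4" at recompile time once r2 is known.
static void const_prop_sketch(struct regstat *cur)
{
  set_const(cur, 2, 0x80010000);   // r2 now has a known value
  if (is_const(cur, 2)) {          // isconst bit is set for r2's host register
    uint64_t v = get_const(cur, 2);
    set_const(cur, 3, v + 4);      // r3 becomes a known constant as well
  }
}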
629
630// Least soon needed registers
631// Look at the next ten instructions and see which registers
632// will be used. Try not to reallocate these.
633void lsn(u_char hsn[], int i, int *preferred_reg)
634{
635 int j;
636 int b=-1;
637 for(j=0;j<9;j++)
638 {
639 if(i+j>=slen) {
640 j=slen-i-1;
641 break;
642 }
643 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
644 {
645 // Don't go past an unconditional jump
646 j++;
647 break;
648 }
649 }
650 for(;j>=0;j--)
651 {
652 if(rs1[i+j]) hsn[rs1[i+j]]=j;
653 if(rs2[i+j]) hsn[rs2[i+j]]=j;
654 if(rt1[i+j]) hsn[rt1[i+j]]=j;
655 if(rt2[i+j]) hsn[rt2[i+j]]=j;
656 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
657 // Stores can allocate zero
658 hsn[rs1[i+j]]=j;
659 hsn[rs2[i+j]]=j;
660 }
661 // On some architectures stores need invc_ptr
662 #if defined(HOST_IMM8)
b9b61529 663 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
57871462 664 hsn[INVCP]=j;
665 }
666 #endif
667 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
668 {
669 hsn[CCREG]=j;
670 b=j;
671 }
672 }
673 if(b>=0)
674 {
675 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
676 {
677 // Follow first branch
678 int t=(ba[i+b]-start)>>2;
679 j=7-b;if(t+j>=slen) j=slen-t-1;
680 for(;j>=0;j--)
681 {
682 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
683 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
684 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
685 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
686 }
687 }
688 // TODO: preferred register based on backward branch
689 }
690 // Delay slot should preferably not overwrite branch conditions or cycle count
691 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
692 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
693 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
694 hsn[CCREG]=1;
695 // ...or hash tables
696 hsn[RHASH]=1;
697 hsn[RHTBL]=1;
698 }
699 // Coprocessor load/store needs FTEMP, even if not declared
b9b61529 700 if(itype[i]==C1LS||itype[i]==C2LS) {
57871462 701 hsn[FTEMP]=0;
702 }
703 // Load L/R also uses FTEMP as a temporary register
704 if(itype[i]==LOADLR) {
705 hsn[FTEMP]=0;
706 }
b7918751 707 // Also SWL/SWR/SDL/SDR
708 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
57871462 709 hsn[FTEMP]=0;
710 }
711 // Don't remove the TLB registers either
b9b61529 712 if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
57871462 713 hsn[TLREG]=0;
714 }
715 // Don't remove the miniht registers
716 if(itype[i]==UJUMP||itype[i]==RJUMP)
717 {
718 hsn[RHASH]=0;
719 hsn[RHTBL]=0;
720 }
721}
722
723// We only want to allocate registers if we're going to use them again soon
724int needed_again(int r, int i)
725{
726 int j;
727 int b=-1;
728 int rn=10;
57871462 729
730 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
731 {
732 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
733 return 0; // Don't need any registers if exiting the block
734 }
735 for(j=0;j<9;j++)
736 {
737 if(i+j>=slen) {
738 j=slen-i-1;
739 break;
740 }
741 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
742 {
743 // Don't go past an unconditional jump
744 j++;
745 break;
746 }
1e973cb0 747 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
57871462 748 {
749 break;
750 }
751 }
752 for(;j>=1;j--)
753 {
754 if(rs1[i+j]==r) rn=j;
755 if(rs2[i+j]==r) rn=j;
756 if((unneeded_reg[i+j]>>r)&1) rn=10;
757 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
758 {
759 b=j;
760 }
761 }
762 /*
763 if(b>=0)
764 {
765 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
766 {
767 // Follow first branch
768 int o=rn;
769 int t=(ba[i+b]-start)>>2;
770 j=7-b;if(t+j>=slen) j=slen-t-1;
771 for(;j>=0;j--)
772 {
773 if(!((unneeded_reg[t+j]>>r)&1)) {
774 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
775 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
776 }
777 else rn=o;
778 }
779 }
780 }*/
b7217e13 781 if(rn<10) return 1;
57871462 782 return 0;
783}
784
785// Try to match register allocations at the end of a loop with those
786// at the beginning
787int loop_reg(int i, int r, int hr)
788{
789 int j,k;
790 for(j=0;j<9;j++)
791 {
792 if(i+j>=slen) {
793 j=slen-i-1;
794 break;
795 }
796 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
797 {
798 // Don't go past an unconditional jump
799 j++;
800 break;
801 }
802 }
803 k=0;
804 if(i>0){
805 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
806 k--;
807 }
808 for(;k<j;k++)
809 {
810 if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
811 if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
812 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
813 {
814 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
815 {
816 int t=(ba[i+k]-start)>>2;
817 int reg=get_reg(regs[t].regmap_entry,r);
818 if(reg>=0) return reg;
819 //reg=get_reg(regs[t+1].regmap_entry,r);
820 //if(reg>=0) return reg;
821 }
822 }
823 }
824 return hr;
825}
826
827
828// Allocate every register, preserving source/target regs
829void alloc_all(struct regstat *cur,int i)
830{
831 int hr;
832
833 for(hr=0;hr<HOST_REGS;hr++) {
834 if(hr!=EXCLUDE_REG) {
835 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
836 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
837 {
838 cur->regmap[hr]=-1;
839 cur->dirty&=~(1<<hr);
840 }
841 // Don't need zeros
842 if((cur->regmap[hr]&63)==0)
843 {
844 cur->regmap[hr]=-1;
845 cur->dirty&=~(1<<hr);
846 }
847 }
848 }
849}
850
4600ba03 851#ifndef FORCE32
57871462 852void div64(int64_t dividend,int64_t divisor)
853{
854 lo=dividend/divisor;
855 hi=dividend%divisor;
856 //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
857 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
858}
859void divu64(uint64_t dividend,uint64_t divisor)
860{
861 lo=dividend/divisor;
862 hi=dividend%divisor;
863 //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
864 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
865}
866
867void mult64(uint64_t m1,uint64_t m2)
868{
869 unsigned long long int op1, op2, op3, op4;
870 unsigned long long int result1, result2, result3, result4;
871 unsigned long long int temp1, temp2, temp3, temp4;
872 int sign = 0;
873
874 if (m1 < 0)
875 {
876 op2 = -m1;
877 sign = 1 - sign;
878 }
879 else op2 = m1;
880 if (m2 < 0)
881 {
882 op4 = -m2;
883 sign = 1 - sign;
884 }
885 else op4 = m2;
886
887 op1 = op2 & 0xFFFFFFFF;
888 op2 = (op2 >> 32) & 0xFFFFFFFF;
889 op3 = op4 & 0xFFFFFFFF;
890 op4 = (op4 >> 32) & 0xFFFFFFFF;
891
892 temp1 = op1 * op3;
893 temp2 = (temp1 >> 32) + op1 * op4;
894 temp3 = op2 * op3;
895 temp4 = (temp3 >> 32) + op2 * op4;
896
897 result1 = temp1 & 0xFFFFFFFF;
898 result2 = temp2 + (temp3 & 0xFFFFFFFF);
899 result3 = (result2 >> 32) + temp4;
900 result4 = (result3 >> 32);
901
902 lo = result1 | (result2 << 32);
903 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
904 if (sign)
905 {
906 hi = ~hi;
907 if (!lo) hi++;
908 else lo = ~lo + 1;
909 }
910}
911
912void multu64(uint64_t m1,uint64_t m2)
913{
914 unsigned long long int op1, op2, op3, op4;
915 unsigned long long int result1, result2, result3, result4;
916 unsigned long long int temp1, temp2, temp3, temp4;
917
918 op1 = m1 & 0xFFFFFFFF;
919 op2 = (m1 >> 32) & 0xFFFFFFFF;
920 op3 = m2 & 0xFFFFFFFF;
921 op4 = (m2 >> 32) & 0xFFFFFFFF;
922
923 temp1 = op1 * op3;
924 temp2 = (temp1 >> 32) + op1 * op4;
925 temp3 = op2 * op3;
926 temp4 = (temp3 >> 32) + op2 * op4;
927
928 result1 = temp1 & 0xFFFFFFFF;
929 result2 = temp2 + (temp3 & 0xFFFFFFFF);
930 result3 = (result2 >> 32) + temp4;
931 result4 = (result3 >> 32);
932
933 lo = result1 | (result2 << 32);
934 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
935
936 //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
937 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
938}
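
mult64()/multu64() assemble the 128-bit product for hi/lo out of four 32x32-bit partial products, since the 32-bit hosts targeted here have no 64x64-to-128 multiply. Below is a hedged self-check of the same decomposition against a compiler-provided wide multiply (assumes unsigned __int128, e.g. GCC/Clang; not part of the original source):

#include <stdint.h>
#include <assert.h>
static void multu64_decomposition_check(uint64_t m1, uint64_t m2)
{
  uint64_t op1 = m1 & 0xFFFFFFFF, op2 = m1 >> 32;          // 32-bit halves
  uint64_t op3 = m2 & 0xFFFFFFFF, op4 = m2 >> 32;
  uint64_t temp1 = op1*op3;
  uint64_t temp2 = (temp1 >> 32) + op1*op4;
  uint64_t temp3 = op2*op3;
  uint64_t temp4 = (temp3 >> 32) + op2*op4;
  uint64_t result2 = temp2 + (temp3 & 0xFFFFFFFF);
  uint64_t lo = (temp1 & 0xFFFFFFFF) | (result2 << 32);
  uint64_t hi = (result2 >> 32) + temp4;
  unsigned __int128 ref = (unsigned __int128)m1 * m2;      // reference result
  assert(lo == (uint64_t)ref && hi == (uint64_t)(ref >> 64));
}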
939
940uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
941{
942 if(bits) {
943 original<<=64-bits;
944 original>>=64-bits;
945 loaded<<=bits;
946 original|=loaded;
947 }
948 else original=loaded;
949 return original;
950}
951uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
952{
953 if(bits^56) {
954 original>>=64-(bits^56);
955 original<<=64-(bits^56);
956 loaded>>=bits^56;
957 original|=loaded;
958 }
959 else original=loaded;
960 return original;
961}
4600ba03 962#endif
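
ldl_merge()/ldr_merge() above combine the bytes an unaligned LDL/LDR must preserve from the old register value with the bytes fetched from memory. A worked example for LDL with bits=16 (the low 16 bits of the old value survive; values are made up):

#include <stdint.h>
#include <assert.h>
static void ldl_merge_example(void)
{
  uint64_t original = 0x1122334455667788ULL;  // old register contents
  uint64_t loaded   = 0xAABBCCDDEEFF0011ULL;  // doubleword from memory
  // Equivalent to ldl_merge(original, loaded, 16):
  uint64_t merged = (original & 0xFFFFULL) | (loaded << 16);
  assert(merged == 0xCCDDEEFF00117788ULL);
}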
57871462 963
964#ifdef __i386__
965#include "assem_x86.c"
966#endif
967#ifdef __x86_64__
968#include "assem_x64.c"
969#endif
970#ifdef __arm__
971#include "assem_arm.c"
972#endif
973
974// Add virtual address mapping to linked list
975void ll_add(struct ll_entry **head,int vaddr,void *addr)
976{
977 struct ll_entry *new_entry;
978 new_entry=malloc(sizeof(struct ll_entry));
979 assert(new_entry!=NULL);
980 new_entry->vaddr=vaddr;
981 new_entry->reg32=0;
982 new_entry->addr=addr;
983 new_entry->next=*head;
984 *head=new_entry;
985}
986
987// Add virtual address mapping for 32-bit compiled block
988void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
989{
7139f3c8 990 ll_add(head,vaddr,addr);
991#ifndef FORCE32
992 (*head)->reg32=reg32;
993#endif
57871462 994}
995
996// Check if an address is already compiled
997// but don't return addresses which are about to expire from the cache
998void *check_addr(u_int vaddr)
999{
1000 u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
1001 if(ht_bin[0]==vaddr) {
1002 if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1003 if(isclean(ht_bin[1])) return (void *)ht_bin[1];
1004 }
1005 if(ht_bin[2]==vaddr) {
1006 if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1007 if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1008 }
94d23bb9 1009 u_int page=get_page(vaddr);
57871462 1010 struct ll_entry *head;
1011 head=jump_in[page];
1012 while(head!=NULL) {
1013 if(head->vaddr==vaddr&&head->reg32==0) {
1014 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1015 // Update existing entry with current address
1016 if(ht_bin[0]==vaddr) {
1017 ht_bin[1]=(int)head->addr;
1018 return head->addr;
1019 }
1020 if(ht_bin[2]==vaddr) {
1021 ht_bin[3]=(int)head->addr;
1022 return head->addr;
1023 }
1024 // Insert into hash table with low priority.
1025 // Don't evict existing entries, as they are probably
1026 // addresses that are being accessed frequently.
1027 if(ht_bin[0]==-1) {
1028 ht_bin[1]=(int)head->addr;
1029 ht_bin[0]=vaddr;
1030 }else if(ht_bin[2]==-1) {
1031 ht_bin[3]=(int)head->addr;
1032 ht_bin[2]=vaddr;
1033 }
1034 return head->addr;
1035 }
1036 }
1037 head=head->next;
1038 }
1039 return 0;
1040}
1041
1042void remove_hash(int vaddr)
1043{
1044 //printf("remove hash: %x\n",vaddr);
1045 int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1046 if(ht_bin[2]==vaddr) {
1047 ht_bin[2]=ht_bin[3]=-1;
1048 }
1049 if(ht_bin[0]==vaddr) {
1050 ht_bin[0]=ht_bin[2];
1051 ht_bin[1]=ht_bin[3];
1052 ht_bin[2]=ht_bin[3]=-1;
1053 }
1054}
1055
1056void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1057{
1058 struct ll_entry *next;
1059 while(*head) {
1060 if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1061 ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1062 {
1063 inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1064 remove_hash((*head)->vaddr);
1065 next=(*head)->next;
1066 free(*head);
1067 *head=next;
1068 }
1069 else
1070 {
1071 head=&((*head)->next);
1072 }
1073 }
1074}
1075
1076// Remove all entries from linked list
1077void ll_clear(struct ll_entry **head)
1078{
1079 struct ll_entry *cur;
1080 struct ll_entry *next;
1081 if(cur=*head) {
1082 *head=0;
1083 while(cur) {
1084 next=cur->next;
1085 free(cur);
1086 cur=next;
1087 }
1088 }
1089}
1090
1091 // Dereference the pointers and remove the entry if it matches
1092void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1093{
1094 while(head) {
1095 int ptr=get_pointer(head->addr);
1096 inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1097 if(((ptr>>shift)==(addr>>shift)) ||
1098 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1099 {
5088bb70 1100 inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
f76eeef9 1101 u_int host_addr=(u_int)kill_pointer(head->addr);
dd3a91a1 1102 #ifdef __arm__
1103 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1104 #endif
57871462 1105 }
1106 head=head->next;
1107 }
1108}
1109
1110// This is called when we write to a compiled block (see do_invstub)
f76eeef9 1111void invalidate_page(u_int page)
57871462 1112{
57871462 1113 struct ll_entry *head;
1114 struct ll_entry *next;
1115 head=jump_in[page];
1116 jump_in[page]=0;
1117 while(head!=NULL) {
1118 inv_debug("INVALIDATE: %x\n",head->vaddr);
1119 remove_hash(head->vaddr);
1120 next=head->next;
1121 free(head);
1122 head=next;
1123 }
1124 head=jump_out[page];
1125 jump_out[page]=0;
1126 while(head!=NULL) {
1127 inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
f76eeef9 1128 u_int host_addr=(u_int)kill_pointer(head->addr);
dd3a91a1 1129 #ifdef __arm__
1130 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1131 #endif
57871462 1132 next=head->next;
1133 free(head);
1134 head=next;
1135 }
57871462 1136}
9be4ba64 1137
1138static void invalidate_block_range(u_int block, u_int first, u_int last)
57871462 1139{
94d23bb9 1140 u_int page=get_page(block<<12);
57871462 1141 //printf("first=%d last=%d\n",first,last);
f76eeef9 1142 invalidate_page(page);
57871462 1143 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1144 assert(last<page+5);
1145 // Invalidate the adjacent pages if a block crosses a 4K boundary
1146 while(first<page) {
1147 invalidate_page(first);
1148 first++;
1149 }
1150 for(first=page+1;first<last;first++) {
1151 invalidate_page(first);
1152 }
dd3a91a1 1153 #ifdef __arm__
1154 do_clear_cache();
1155 #endif
57871462 1156
1157 // Don't trap writes
1158 invalid_code[block]=1;
94d23bb9 1159#ifndef DISABLE_TLB
57871462 1160 // If there is a valid TLB entry for this page, remove write protect
1161 if(tlb_LUT_w[block]) {
1162 assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1163 // CHECK: Is this right?
1164 memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1165 u_int real_block=tlb_LUT_w[block]>>12;
1166 invalid_code[real_block]=1;
1167 if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1168 }
1169 else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
94d23bb9 1170#endif
f76eeef9 1171
57871462 1172 #ifdef USE_MINI_HT
1173 memset(mini_ht,-1,sizeof(mini_ht));
1174 #endif
1175}
9be4ba64 1176
1177void invalidate_block(u_int block)
1178{
1179 u_int page=get_page(block<<12);
1180 u_int vpage=get_vpage(block<<12);
1181 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1182 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1183 u_int first,last;
1184 first=last=page;
1185 struct ll_entry *head;
1186 head=jump_dirty[vpage];
1187 //printf("page=%d vpage=%d\n",page,vpage);
1188 while(head!=NULL) {
1189 u_int start,end;
1190 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1191 get_bounds((int)head->addr,&start,&end);
1192 //printf("start: %x end: %x\n",start,end);
1193 if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1194 if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1195 if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1196 if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1197 }
1198 }
1199#ifndef DISABLE_TLB
1200 if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1201 if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1202 if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1203 if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1204 }
1205 }
1206#endif
1207 }
1208 head=head->next;
1209 }
1210 invalidate_block_range(block,first,last);
1211}
1212
57871462 1213void invalidate_addr(u_int addr)
1214{
9be4ba64 1215#ifdef PCSX
1216 //static int rhits;
1217 // this check is done by the caller
1218 //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1219 u_int page=get_page(addr);
1220 if(page<2048) { // RAM
1221 struct ll_entry *head;
1222 u_int addr_min=~0, addr_max=0;
1223 int mask=RAM_SIZE-1;
1224 int pg1;
1225 inv_code_start=addr&~0xfff;
1226 inv_code_end=addr|0xfff;
1227 pg1=page;
1228 if (pg1>0) {
1229 // must check previous page too because of spans..
1230 pg1--;
1231 inv_code_start-=0x1000;
1232 }
1233 for(;pg1<=page;pg1++) {
1234 for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1235 u_int start,end;
1236 get_bounds((int)head->addr,&start,&end);
1237 if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
1238 if(start<addr_min) addr_min=start;
1239 if(end>addr_max) addr_max=end;
1240 }
1241 else if(addr<start) {
1242 if(start<inv_code_end)
1243 inv_code_end=start-1;
1244 }
1245 else {
1246 if(end>inv_code_start)
1247 inv_code_start=end;
1248 }
1249 }
1250 }
1251 if (addr_min!=~0) {
1252 inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1253 inv_code_start=inv_code_end=~0;
1254 invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1255 return;
1256 }
1257 else {
1258 inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);//rhits);
1259 }
1260 //rhits=0;
1261 if(page!=0) // FIXME: don't know what's up with page 0 (Klonoa)
1262 return;
1263 }
1264#endif
57871462 1265 invalidate_block(addr>>12);
1266}
9be4ba64 1267
dd3a91a1 1268// This is called when loading a save state.
1269// Anything could have changed, so invalidate everything.
57871462 1270void invalidate_all_pages()
1271{
1272 u_int page,n;
1273 for(page=0;page<4096;page++)
1274 invalidate_page(page);
1275 for(page=0;page<1048576;page++)
1276 if(!invalid_code[page]) {
1277 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1278 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1279 }
1280 #ifdef __arm__
1281 __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1282 #endif
1283 #ifdef USE_MINI_HT
1284 memset(mini_ht,-1,sizeof(mini_ht));
1285 #endif
94d23bb9 1286 #ifndef DISABLE_TLB
57871462 1287 // TLB
1288 for(page=0;page<0x100000;page++) {
1289 if(tlb_LUT_r[page]) {
1290 memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1291 if(!tlb_LUT_w[page]||!invalid_code[page])
1292 memory_map[page]|=0x40000000; // Write protect
1293 }
1294 else memory_map[page]=-1;
1295 if(page==0x80000) page=0xC0000;
1296 }
1297 tlb_hacks();
94d23bb9 1298 #endif
57871462 1299}
1300
1301// Add an entry to jump_out after making a link
1302void add_link(u_int vaddr,void *src)
1303{
94d23bb9 1304 u_int page=get_page(vaddr);
57871462 1305 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
76f71c27 1306 int *ptr=(int *)(src+4);
1307 assert((*ptr&0x0fff0000)==0x059f0000);
57871462 1308 ll_add(jump_out+page,vaddr,src);
1309 //int ptr=get_pointer(src);
1310 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1311}
1312
1313// If a code block was found to be unmodified (bit was set in
1314// restore_candidate) and it remains unmodified (bit is clear
1315// in invalid_code) then move the entries for that 4K page from
1316// the dirty list to the clean list.
1317void clean_blocks(u_int page)
1318{
1319 struct ll_entry *head;
1320 inv_debug("INV: clean_blocks page=%d\n",page);
1321 head=jump_dirty[page];
1322 while(head!=NULL) {
1323 if(!invalid_code[head->vaddr>>12]) {
1324 // Don't restore blocks which are about to expire from the cache
1325 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1326 u_int start,end;
1327 if(verify_dirty((int)head->addr)) {
1328 //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1329 u_int i;
1330 u_int inv=0;
1331 get_bounds((int)head->addr,&start,&end);
4cb76aa4 1332 if(start-(u_int)rdram<RAM_SIZE) {
57871462 1333 for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1334 inv|=invalid_code[i];
1335 }
1336 }
1337 if((signed int)head->vaddr>=(signed int)0xC0000000) {
1338 u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1339 //printf("addr=%x start=%x end=%x\n",addr,start,end);
1340 if(addr<start||addr>=end) inv=1;
1341 }
4cb76aa4 1342 else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
57871462 1343 inv=1;
1344 }
1345 if(!inv) {
1346 void * clean_addr=(void *)get_clean_addr((int)head->addr);
1347 if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1348 u_int ppage=page;
94d23bb9 1349#ifndef DISABLE_TLB
57871462 1350 if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
94d23bb9 1351#endif
57871462 1352 inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1353 //printf("page=%x, addr=%x\n",page,head->vaddr);
1354 //assert(head->vaddr>>12==(page|0x80000));
1355 ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1356 int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1357 if(!head->reg32) {
1358 if(ht_bin[0]==head->vaddr) {
1359 ht_bin[1]=(int)clean_addr; // Replace existing entry
1360 }
1361 if(ht_bin[2]==head->vaddr) {
1362 ht_bin[3]=(int)clean_addr; // Replace existing entry
1363 }
1364 }
1365 }
1366 }
1367 }
1368 }
1369 }
1370 head=head->next;
1371 }
1372}
1373
1374
1375void mov_alloc(struct regstat *current,int i)
1376{
1377 // Note: Don't need to actually alloc the source registers
1378 if((~current->is32>>rs1[i])&1) {
1379 //alloc_reg64(current,i,rs1[i]);
1380 alloc_reg64(current,i,rt1[i]);
1381 current->is32&=~(1LL<<rt1[i]);
1382 } else {
1383 //alloc_reg(current,i,rs1[i]);
1384 alloc_reg(current,i,rt1[i]);
1385 current->is32|=(1LL<<rt1[i]);
1386 }
1387 clear_const(current,rs1[i]);
1388 clear_const(current,rt1[i]);
1389 dirty_reg(current,rt1[i]);
1390}
1391
1392void shiftimm_alloc(struct regstat *current,int i)
1393{
57871462 1394 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1395 {
1396 if(rt1[i]) {
1397 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1398 else lt1[i]=rs1[i];
1399 alloc_reg(current,i,rt1[i]);
1400 current->is32|=1LL<<rt1[i];
1401 dirty_reg(current,rt1[i]);
dc49e339 1402 if(is_const(current,rs1[i])) {
1403 int v=get_const(current,rs1[i]);
1404 if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1405 if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1406 if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1407 }
1408 else clear_const(current,rt1[i]);
57871462 1409 }
1410 }
dc49e339 1411 else
1412 {
1413 clear_const(current,rs1[i]);
1414 clear_const(current,rt1[i]);
1415 }
1416
57871462 1417 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1418 {
1419 if(rt1[i]) {
1420 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1421 alloc_reg64(current,i,rt1[i]);
1422 current->is32&=~(1LL<<rt1[i]);
1423 dirty_reg(current,rt1[i]);
1424 }
1425 }
1426 if(opcode2[i]==0x3c) // DSLL32
1427 {
1428 if(rt1[i]) {
1429 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1430 alloc_reg64(current,i,rt1[i]);
1431 current->is32&=~(1LL<<rt1[i]);
1432 dirty_reg(current,rt1[i]);
1433 }
1434 }
1435 if(opcode2[i]==0x3e) // DSRL32
1436 {
1437 if(rt1[i]) {
1438 alloc_reg64(current,i,rs1[i]);
1439 if(imm[i]==32) {
1440 alloc_reg64(current,i,rt1[i]);
1441 current->is32&=~(1LL<<rt1[i]);
1442 } else {
1443 alloc_reg(current,i,rt1[i]);
1444 current->is32|=1LL<<rt1[i];
1445 }
1446 dirty_reg(current,rt1[i]);
1447 }
1448 }
1449 if(opcode2[i]==0x3f) // DSRA32
1450 {
1451 if(rt1[i]) {
1452 alloc_reg64(current,i,rs1[i]);
1453 alloc_reg(current,i,rt1[i]);
1454 current->is32|=1LL<<rt1[i];
1455 dirty_reg(current,rt1[i]);
1456 }
1457 }
1458}
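
The block added by this commit is the is_const() test in the SLL/SRL/SRA case of shiftimm_alloc() above: when the shifted source holds a known value, the result is computed at recompile time and recorded with set_const() instead of being cleared. A minimal sketch of the three folding rules (shift amounts and values are hypothetical; the real code feeds the result straight into set_const()):

#include <stdint.h>
// opcode2: 0x00 = SLL, 0x02 = SRL, 0x03 = SRA; v is the known source value.
static int32_t fold_shiftimm(int opcode2, int32_t v, unsigned sa)
{
  switch (opcode2) {
    case 0x00: return (int32_t)((uint32_t)v << sa);  // SLL: logical left
    case 0x02: return (int32_t)((uint32_t)v >> sa);  // SRL: logical right
    case 0x03: return v >> sa;                       // SRA: arithmetic right
    default:   return 0;  // 64-bit shift-immediates are not folded here
  }
}
// e.g. fold_shiftimm(0x02, 0x1f800000, 16) == 0x1f80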
1459
1460void shift_alloc(struct regstat *current,int i)
1461{
1462 if(rt1[i]) {
1463 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1464 {
1465 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1466 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1467 alloc_reg(current,i,rt1[i]);
e1190b87 1468 if(rt1[i]==rs2[i]) {
1469 alloc_reg_temp(current,i,-1);
1470 minimum_free_regs[i]=1;
1471 }
57871462 1472 current->is32|=1LL<<rt1[i];
1473 } else { // DSLLV/DSRLV/DSRAV
1474 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1475 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1476 alloc_reg64(current,i,rt1[i]);
1477 current->is32&=~(1LL<<rt1[i]);
1478 if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
e1190b87 1479 {
57871462 1480 alloc_reg_temp(current,i,-1);
e1190b87 1481 minimum_free_regs[i]=1;
1482 }
57871462 1483 }
1484 clear_const(current,rs1[i]);
1485 clear_const(current,rs2[i]);
1486 clear_const(current,rt1[i]);
1487 dirty_reg(current,rt1[i]);
1488 }
1489}
1490
1491void alu_alloc(struct regstat *current,int i)
1492{
1493 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1494 if(rt1[i]) {
1495 if(rs1[i]&&rs2[i]) {
1496 alloc_reg(current,i,rs1[i]);
1497 alloc_reg(current,i,rs2[i]);
1498 }
1499 else {
1500 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1501 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1502 }
1503 alloc_reg(current,i,rt1[i]);
1504 }
1505 current->is32|=1LL<<rt1[i];
1506 }
1507 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1508 if(rt1[i]) {
1509 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1510 {
1511 alloc_reg64(current,i,rs1[i]);
1512 alloc_reg64(current,i,rs2[i]);
1513 alloc_reg(current,i,rt1[i]);
1514 } else {
1515 alloc_reg(current,i,rs1[i]);
1516 alloc_reg(current,i,rs2[i]);
1517 alloc_reg(current,i,rt1[i]);
1518 }
1519 }
1520 current->is32|=1LL<<rt1[i];
1521 }
1522 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1523 if(rt1[i]) {
1524 if(rs1[i]&&rs2[i]) {
1525 alloc_reg(current,i,rs1[i]);
1526 alloc_reg(current,i,rs2[i]);
1527 }
1528 else
1529 {
1530 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1531 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1532 }
1533 alloc_reg(current,i,rt1[i]);
1534 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1535 {
1536 if(!((current->uu>>rt1[i])&1)) {
1537 alloc_reg64(current,i,rt1[i]);
1538 }
1539 if(get_reg(current->regmap,rt1[i]|64)>=0) {
1540 if(rs1[i]&&rs2[i]) {
1541 alloc_reg64(current,i,rs1[i]);
1542 alloc_reg64(current,i,rs2[i]);
1543 }
1544 else
1545 {
1546 // Is it really worth it to keep 64-bit values in registers?
1547 #ifdef NATIVE_64BIT
1548 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1549 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1550 #endif
1551 }
1552 }
1553 current->is32&=~(1LL<<rt1[i]);
1554 } else {
1555 current->is32|=1LL<<rt1[i];
1556 }
1557 }
1558 }
1559 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1560 if(rt1[i]) {
1561 if(rs1[i]&&rs2[i]) {
1562 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1563 alloc_reg64(current,i,rs1[i]);
1564 alloc_reg64(current,i,rs2[i]);
1565 alloc_reg64(current,i,rt1[i]);
1566 } else {
1567 alloc_reg(current,i,rs1[i]);
1568 alloc_reg(current,i,rs2[i]);
1569 alloc_reg(current,i,rt1[i]);
1570 }
1571 }
1572 else {
1573 alloc_reg(current,i,rt1[i]);
1574 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1575 // DADD used as move, or zeroing
1576 // If we have a 64-bit source, then make the target 64 bits too
1577 if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1578 if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1579 alloc_reg64(current,i,rt1[i]);
1580 } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1581 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1582 alloc_reg64(current,i,rt1[i]);
1583 }
1584 if(opcode2[i]>=0x2e&&rs2[i]) {
1585 // DSUB used as negation - 64-bit result
1586 // If we have a 32-bit register, extend it to 64 bits
1587 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1588 alloc_reg64(current,i,rt1[i]);
1589 }
1590 }
1591 }
1592 if(rs1[i]&&rs2[i]) {
1593 current->is32&=~(1LL<<rt1[i]);
1594 } else if(rs1[i]) {
1595 current->is32&=~(1LL<<rt1[i]);
1596 if((current->is32>>rs1[i])&1)
1597 current->is32|=1LL<<rt1[i];
1598 } else if(rs2[i]) {
1599 current->is32&=~(1LL<<rt1[i]);
1600 if((current->is32>>rs2[i])&1)
1601 current->is32|=1LL<<rt1[i];
1602 } else {
1603 current->is32|=1LL<<rt1[i];
1604 }
1605 }
1606 }
1607 clear_const(current,rs1[i]);
1608 clear_const(current,rs2[i]);
1609 clear_const(current,rt1[i]);
1610 dirty_reg(current,rt1[i]);
1611}
1612
1613void imm16_alloc(struct regstat *current,int i)
1614{
1615 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1616 else lt1[i]=rs1[i];
1617 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1618 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1619 current->is32&=~(1LL<<rt1[i]);
1620 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1621 // TODO: Could preserve the 32-bit flag if the immediate is zero
1622 alloc_reg64(current,i,rt1[i]);
1623 alloc_reg64(current,i,rs1[i]);
1624 }
1625 clear_const(current,rs1[i]);
1626 clear_const(current,rt1[i]);
1627 }
1628 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1629 if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1630 current->is32|=1LL<<rt1[i];
1631 clear_const(current,rs1[i]);
1632 clear_const(current,rt1[i]);
1633 }
1634 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1635 if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1636 if(rs1[i]!=rt1[i]) {
1637 if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1638 alloc_reg64(current,i,rt1[i]);
1639 current->is32&=~(1LL<<rt1[i]);
1640 }
1641 }
1642 else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1643 if(is_const(current,rs1[i])) {
1644 int v=get_const(current,rs1[i]);
1645 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1646 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1647 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1648 }
1649 else clear_const(current,rt1[i]);
1650 }
1651 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1652 if(is_const(current,rs1[i])) {
1653 int v=get_const(current,rs1[i]);
1654 set_const(current,rt1[i],v+imm[i]);
1655 }
1656 else clear_const(current,rt1[i]);
1657 current->is32|=1LL<<rt1[i];
1658 }
1659 else {
1660 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1661 current->is32|=1LL<<rt1[i];
1662 }
1663 dirty_reg(current,rt1[i]);
1664}
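
imm16_alloc() above is the pre-existing propagation that seeds these constants: LUI establishes a known value and ANDI/ORI/XORI/ADDIU fold into it, which the new shift-immediate case can then extend. A small check of the typical lui/ori/sll chain (guest registers and values are made up):

#include <stdint.h>
#include <assert.h>
static void lui_ori_sll_chain_check(void)
{
  uint32_t t0 = (uint32_t)0x8001 << 16;  // lui  $t0, 0x8001      -> 0x80010000
  t0 |= 0x1234;                          // ori  $t0, $t0, 0x1234 -> 0x80011234
  uint32_t t1 = t0 << 2;                 // sll  $t1, $t0, 2  (now folded too)
  assert(t1 == 0x000448D0u);
}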
1665
1666void load_alloc(struct regstat *current,int i)
1667{
1668 clear_const(current,rt1[i]);
1669 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1670 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1671 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
373d1d07 1672 if(rt1[i]&&!((current->u>>rt1[i])&1)) {
57871462 1673 alloc_reg(current,i,rt1[i]);
373d1d07 1674 assert(get_reg(current->regmap,rt1[i])>=0);
57871462 1675 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1676 {
1677 current->is32&=~(1LL<<rt1[i]);
1678 alloc_reg64(current,i,rt1[i]);
1679 }
1680 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1681 {
1682 current->is32&=~(1LL<<rt1[i]);
1683 alloc_reg64(current,i,rt1[i]);
1684 alloc_all(current,i);
1685 alloc_reg64(current,i,FTEMP);
e1190b87 1686 minimum_free_regs[i]=HOST_REGS;
57871462 1687 }
1688 else current->is32|=1LL<<rt1[i];
1689 dirty_reg(current,rt1[i]);
1690 // If using TLB, need a register for pointer to the mapping table
1691 if(using_tlb) alloc_reg(current,i,TLREG);
1692 // LWL/LWR need a temporary register for the old value
1693 if(opcode[i]==0x22||opcode[i]==0x26)
1694 {
1695 alloc_reg(current,i,FTEMP);
1696 alloc_reg_temp(current,i,-1);
e1190b87 1697 minimum_free_regs[i]=1;
57871462 1698 }
1699 }
1700 else
1701 {
373d1d07 1702 // Load to r0 or unneeded register (dummy load)
57871462 1703 // but we still need a register to calculate the address
535d208a 1704 if(opcode[i]==0x22||opcode[i]==0x26)
1705 {
1706 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1707 }
373d1d07 1708 // If using TLB, need a register for pointer to the mapping table
1709 if(using_tlb) alloc_reg(current,i,TLREG);
57871462 1710 alloc_reg_temp(current,i,-1);
e1190b87 1711 minimum_free_regs[i]=1;
535d208a 1712 if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1713 {
1714 alloc_all(current,i);
1715 alloc_reg64(current,i,FTEMP);
e1190b87 1716 minimum_free_regs[i]=HOST_REGS;
535d208a 1717 }
57871462 1718 }
1719}
1720
1721void store_alloc(struct regstat *current,int i)
1722{
1723 clear_const(current,rs2[i]);
1724 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1725 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1726 alloc_reg(current,i,rs2[i]);
1727 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1728 alloc_reg64(current,i,rs2[i]);
1729 if(rs2[i]) alloc_reg(current,i,FTEMP);
1730 }
1731 // If using TLB, need a register for pointer to the mapping table
1732 if(using_tlb) alloc_reg(current,i,TLREG);
1733 #if defined(HOST_IMM8)
1734 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1735 else alloc_reg(current,i,INVCP);
1736 #endif
b7918751 1737 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
57871462 1738 alloc_reg(current,i,FTEMP);
1739 }
1740 // We need a temporary register for address generation
1741 alloc_reg_temp(current,i,-1);
e1190b87 1742 minimum_free_regs[i]=1;
57871462 1743}
1744
1745void c1ls_alloc(struct regstat *current,int i)
1746{
1747 //clear_const(current,rs1[i]); // FIXME
1748 clear_const(current,rt1[i]);
1749 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1750 alloc_reg(current,i,CSREG); // Status
1751 alloc_reg(current,i,FTEMP);
1752 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1753 alloc_reg64(current,i,FTEMP);
1754 }
1755 // If using TLB, need a register for pointer to the mapping table
1756 if(using_tlb) alloc_reg(current,i,TLREG);
1757 #if defined(HOST_IMM8)
1758 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1759 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1760 alloc_reg(current,i,INVCP);
1761 #endif
1762 // We need a temporary register for address generation
1763 alloc_reg_temp(current,i,-1);
1764}
1765
b9b61529 1766void c2ls_alloc(struct regstat *current,int i)
1767{
1768 clear_const(current,rt1[i]);
1769 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1770 alloc_reg(current,i,FTEMP);
1771 // If using TLB, need a register for pointer to the mapping table
1772 if(using_tlb) alloc_reg(current,i,TLREG);
1773 #if defined(HOST_IMM8)
1774 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1775 else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1776 alloc_reg(current,i,INVCP);
1777 #endif
1778 // We need a temporary register for address generation
1779 alloc_reg_temp(current,i,-1);
e1190b87 1780 minimum_free_regs[i]=1;
b9b61529 1781}
1782
57871462 1783#ifndef multdiv_alloc
1784void multdiv_alloc(struct regstat *current,int i)
1785{
1786 // case 0x18: MULT
1787 // case 0x19: MULTU
1788 // case 0x1A: DIV
1789 // case 0x1B: DIVU
1790 // case 0x1C: DMULT
1791 // case 0x1D: DMULTU
1792 // case 0x1E: DDIV
1793 // case 0x1F: DDIVU
1794 clear_const(current,rs1[i]);
1795 clear_const(current,rs2[i]);
1796 if(rs1[i]&&rs2[i])
1797 {
1798 if((opcode2[i]&4)==0) // 32-bit
1799 {
1800 current->u&=~(1LL<<HIREG);
1801 current->u&=~(1LL<<LOREG);
1802 alloc_reg(current,i,HIREG);
1803 alloc_reg(current,i,LOREG);
1804 alloc_reg(current,i,rs1[i]);
1805 alloc_reg(current,i,rs2[i]);
1806 current->is32|=1LL<<HIREG;
1807 current->is32|=1LL<<LOREG;
1808 dirty_reg(current,HIREG);
1809 dirty_reg(current,LOREG);
1810 }
1811 else // 64-bit
1812 {
1813 current->u&=~(1LL<<HIREG);
1814 current->u&=~(1LL<<LOREG);
1815 current->uu&=~(1LL<<HIREG);
1816 current->uu&=~(1LL<<LOREG);
1817 alloc_reg64(current,i,HIREG);
1818 //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1819 alloc_reg64(current,i,rs1[i]);
1820 alloc_reg64(current,i,rs2[i]);
1821 alloc_all(current,i);
1822 current->is32&=~(1LL<<HIREG);
1823 current->is32&=~(1LL<<LOREG);
1824 dirty_reg(current,HIREG);
1825 dirty_reg(current,LOREG);
e1190b87 1826 minimum_free_regs[i]=HOST_REGS;
57871462 1827 }
1828 }
1829 else
1830 {
1831 // Multiply by zero is zero.
1832 // MIPS does not have a divide by zero exception.
1833 // The result is undefined, so we return zero.
1834 alloc_reg(current,i,HIREG);
1835 alloc_reg(current,i,LOREG);
1836 current->is32|=1LL<<HIREG;
1837 current->is32|=1LL<<LOREG;
1838 dirty_reg(current,HIREG);
1839 dirty_reg(current,LOREG);
1840 }
1841}
1842#endif
1843
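// COP0 moves (MFC0/MTC0) and the TLB/ERET group allocate all host registers
// (alloc_all) and set minimum_free_regs to HOST_REGS.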
1844void cop0_alloc(struct regstat *current,int i)
1845{
1846 if(opcode2[i]==0) // MFC0
1847 {
1848 if(rt1[i]) {
1849 clear_const(current,rt1[i]);
1850 alloc_all(current,i);
1851 alloc_reg(current,i,rt1[i]);
1852 current->is32|=1LL<<rt1[i];
1853 dirty_reg(current,rt1[i]);
1854 }
1855 }
1856 else if(opcode2[i]==4) // MTC0
1857 {
1858 if(rs1[i]){
1859 clear_const(current,rs1[i]);
1860 alloc_reg(current,i,rs1[i]);
1861 alloc_all(current,i);
1862 }
1863 else {
1864 alloc_all(current,i); // FIXME: Keep r0
1865 current->u&=~1LL;
1866 alloc_reg(current,i,0);
1867 }
1868 }
1869 else
1870 {
1871 // TLBR/TLBWI/TLBWR/TLBP/ERET
1872 assert(opcode2[i]==0x10);
1873 alloc_all(current,i);
1874 }
e1190b87 1875 minimum_free_regs[i]=HOST_REGS;
57871462 1876}
1877
1878void cop1_alloc(struct regstat *current,int i)
1879{
1880 alloc_reg(current,i,CSREG); // Load status
1881 if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1882 {
7de557a6 1883 if(rt1[i]){
1884 clear_const(current,rt1[i]);
1885 if(opcode2[i]==1) {
1886 alloc_reg64(current,i,rt1[i]); // DMFC1
1887 current->is32&=~(1LL<<rt1[i]);
1888 }else{
1889 alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1890 current->is32|=1LL<<rt1[i];
1891 }
1892 dirty_reg(current,rt1[i]);
57871462 1893 }
57871462 1894 alloc_reg_temp(current,i,-1);
1895 }
1896 else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1897 {
1898 if(rs1[i]){
1899 clear_const(current,rs1[i]);
1900 if(opcode2[i]==5)
1901 alloc_reg64(current,i,rs1[i]); // DMTC1
1902 else
1903 alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1904 alloc_reg_temp(current,i,-1);
1905 }
1906 else {
1907 current->u&=~1LL;
1908 alloc_reg(current,i,0);
1909 alloc_reg_temp(current,i,-1);
1910 }
1911 }
e1190b87 1912 minimum_free_regs[i]=1;
57871462 1913}
1914void fconv_alloc(struct regstat *current,int i)
1915{
1916 alloc_reg(current,i,CSREG); // Load status
1917 alloc_reg_temp(current,i,-1);
e1190b87 1918 minimum_free_regs[i]=1;
57871462 1919}
1920void float_alloc(struct regstat *current,int i)
1921{
1922 alloc_reg(current,i,CSREG); // Load status
1923 alloc_reg_temp(current,i,-1);
e1190b87 1924 minimum_free_regs[i]=1;
57871462 1925}
b9b61529 1926void c2op_alloc(struct regstat *current,int i)
1927{
1928 alloc_reg_temp(current,i,-1);
1929}
57871462 1930void fcomp_alloc(struct regstat *current,int i)
1931{
1932 alloc_reg(current,i,CSREG); // Load status
1933 alloc_reg(current,i,FSREG); // Load flags
1934 dirty_reg(current,FSREG); // Flag will be modified
1935 alloc_reg_temp(current,i,-1);
e1190b87 1936 minimum_free_regs[i]=1;
57871462 1937}
1938
1939void syscall_alloc(struct regstat *current,int i)
1940{
1941 alloc_cc(current,i);
1942 dirty_reg(current,CCREG);
1943 alloc_all(current,i);
e1190b87 1944 minimum_free_regs[i]=HOST_REGS;
57871462 1945 current->isconst=0;
1946}
1947
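// Dispatch register allocation for the instruction sitting in a branch delay
// slot, based on its itype. A jump-type instruction in the delay slot is not
// supported: it disables speculative precompilation (stop_after_jal) instead.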
1948void delayslot_alloc(struct regstat *current,int i)
1949{
1950 switch(itype[i]) {
1951 case UJUMP:
1952 case CJUMP:
1953 case SJUMP:
1954 case RJUMP:
1955 case FJUMP:
1956 case SYSCALL:
7139f3c8 1957 case HLECALL:
57871462 1958 case SPAN:
1959 assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
1960 printf("Disabled speculative precompilation\n");
1961 stop_after_jal=1;
1962 break;
1963 case IMM16:
1964 imm16_alloc(current,i);
1965 break;
1966 case LOAD:
1967 case LOADLR:
1968 load_alloc(current,i);
1969 break;
1970 case STORE:
1971 case STORELR:
1972 store_alloc(current,i);
1973 break;
1974 case ALU:
1975 alu_alloc(current,i);
1976 break;
1977 case SHIFT:
1978 shift_alloc(current,i);
1979 break;
1980 case MULTDIV:
1981 multdiv_alloc(current,i);
1982 break;
1983 case SHIFTIMM:
1984 shiftimm_alloc(current,i);
1985 break;
1986 case MOV:
1987 mov_alloc(current,i);
1988 break;
1989 case COP0:
1990 cop0_alloc(current,i);
1991 break;
1992 case COP1:
b9b61529 1993 case COP2:
57871462 1994 cop1_alloc(current,i);
1995 break;
1996 case C1LS:
1997 c1ls_alloc(current,i);
1998 break;
b9b61529 1999 case C2LS:
2000 c2ls_alloc(current,i);
2001 break;
57871462 2002 case FCONV:
2003 fconv_alloc(current,i);
2004 break;
2005 case FLOAT:
2006 float_alloc(current,i);
2007 break;
2008 case FCOMP:
2009 fcomp_alloc(current,i);
2010 break;
b9b61529 2011 case C2OP:
2012 c2op_alloc(current,i);
2013 break;
57871462 2014 }
2015}
2016
2017// Special case where a branch and delay slot span two pages in virtual memory
2018static void pagespan_alloc(struct regstat *current,int i)
2019{
2020 current->isconst=0;
2021 current->wasconst=0;
2022 regs[i].wasconst=0;
e1190b87 2023 minimum_free_regs[i]=HOST_REGS;
57871462 2024 alloc_all(current,i);
2025 alloc_cc(current,i);
2026 dirty_reg(current,CCREG);
2027 if(opcode[i]==3) // JAL
2028 {
2029 alloc_reg(current,i,31);
2030 dirty_reg(current,31);
2031 }
2032 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
2033 {
2034 alloc_reg(current,i,rs1[i]);
5067f341 2035 if (rt1[i]!=0) {
2036 alloc_reg(current,i,rt1[i]);
2037 dirty_reg(current,rt1[i]);
57871462 2038 }
2039 }
2040 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
2041 {
2042 if(rs1[i]) alloc_reg(current,i,rs1[i]);
2043 if(rs2[i]) alloc_reg(current,i,rs2[i]);
2044 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
2045 {
2046 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2047 if(rs2[i]) alloc_reg64(current,i,rs2[i]);
2048 }
2049 }
2050 else
2051 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
2052 {
2053 if(rs1[i]) alloc_reg(current,i,rs1[i]);
2054 if(!((current->is32>>rs1[i])&1))
2055 {
2056 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2057 }
2058 }
2059 else
2060 if(opcode[i]==0x11) // BC1
2061 {
2062 alloc_reg(current,i,FSREG);
2063 alloc_reg(current,i,CSREG);
2064 }
2065 //else ...
2066}
2067
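// Queue an out-of-line stub to be generated later: records the stub type, the
// branch location to patch (addr), the return address, and up to five extra
// parameters in the stubs[] table.
// Typical use (as in load_assemble below):
//   add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);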
2068void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
2069{
2070 stubs[stubcount][0]=type;
2071 stubs[stubcount][1]=addr;
2072 stubs[stubcount][2]=retaddr;
2073 stubs[stubcount][3]=a;
2074 stubs[stubcount][4]=b;
2075 stubs[stubcount][5]=c;
2076 stubs[stubcount][6]=d;
2077 stubs[stubcount][7]=e;
2078 stubcount++;
2079}
2080
2081// Write out a single register
2082void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2083{
2084 int hr;
2085 for(hr=0;hr<HOST_REGS;hr++) {
2086 if(hr!=EXCLUDE_REG) {
2087 if((regmap[hr]&63)==r) {
2088 if((dirty>>hr)&1) {
2089 if(regmap[hr]<64) {
2090 emit_storereg(r,hr);
24385cae 2091#ifndef FORCE32
57871462 2092 if((is32>>regmap[hr])&1) {
2093 emit_sarimm(hr,31,hr);
2094 emit_storereg(r|64,hr);
2095 }
24385cae 2096#endif
57871462 2097 }else{
2098 emit_storereg(r|64,hr);
2099 }
2100 }
2101 }
2102 }
2103 }
2104}
2105
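// Debug/trace helpers: mchecksum() hashes RDRAM, rchecksum() xors the
// register file, and rlist()/memdebug() print register state for tracing.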
2106int mchecksum()
2107{
2108 //if(!tracedebug) return 0;
2109 int i;
2110 int sum=0;
2111 for(i=0;i<2097152;i++) {
2112 unsigned int temp=sum;
2113 sum<<=1;
2114 sum|=(~temp)>>31;
2115 sum^=((u_int *)rdram)[i];
2116 }
2117 return sum;
2118}
2119int rchecksum()
2120{
2121 int i;
2122 int sum=0;
2123 for(i=0;i<64;i++)
2124 sum^=((u_int *)reg)[i];
2125 return sum;
2126}
57871462 2127void rlist()
2128{
2129 int i;
2130 printf("TRACE: ");
2131 for(i=0;i<32;i++)
2132 printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2133 printf("\n");
3d624f89 2134#ifndef DISABLE_COP1
57871462 2135 printf("TRACE: ");
2136 for(i=0;i<32;i++)
2137 printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2138 printf("\n");
3d624f89 2139#endif
57871462 2140}
2141
2142void enabletrace()
2143{
2144 tracedebug=1;
2145}
2146
2147void memdebug(int i)
2148{
2149 //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2150 //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2151 //rlist();
2152 //if(tracedebug) {
2153 //if(Count>=-2084597794) {
2154 if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2155 //if(0) {
2156 printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2157 //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2158 //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2159 rlist();
2160 #ifdef __i386__
2161 printf("TRACE: %x\n",(&i)[-1]);
2162 #endif
2163 #ifdef __arm__
2164 int j;
2165 printf("TRACE: %x \n",(&j)[10]);
2166 printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2167 #endif
2168 //fflush(stdout);
2169 }
2170 //printf("TRACE: %x\n",(&i)[-1]);
2171}
2172
2173void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2174{
2175 printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2176}
2177
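// Emit code for register-register ALU ops, dispatched on the function field
// (opcode2): ADD/ADDU/SUB/SUBU, the 64-bit DADD/DSUB group, SLT/SLTU and
// AND/OR/XOR/NOR. Reads of r0 and unallocated targets are special-cased, and
// the 64-bit paths are taken when a source is not known to be 32-bit (was32).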
2178void alu_assemble(int i,struct regstat *i_regs)
2179{
2180 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2181 if(rt1[i]) {
2182 signed char s1,s2,t;
2183 t=get_reg(i_regs->regmap,rt1[i]);
2184 if(t>=0) {
2185 s1=get_reg(i_regs->regmap,rs1[i]);
2186 s2=get_reg(i_regs->regmap,rs2[i]);
2187 if(rs1[i]&&rs2[i]) {
2188 assert(s1>=0);
2189 assert(s2>=0);
2190 if(opcode2[i]&2) emit_sub(s1,s2,t);
2191 else emit_add(s1,s2,t);
2192 }
2193 else if(rs1[i]) {
2194 if(s1>=0) emit_mov(s1,t);
2195 else emit_loadreg(rs1[i],t);
2196 }
2197 else if(rs2[i]) {
2198 if(s2>=0) {
2199 if(opcode2[i]&2) emit_neg(s2,t);
2200 else emit_mov(s2,t);
2201 }
2202 else {
2203 emit_loadreg(rs2[i],t);
2204 if(opcode2[i]&2) emit_neg(t,t);
2205 }
2206 }
2207 else emit_zeroreg(t);
2208 }
2209 }
2210 }
2211 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2212 if(rt1[i]) {
2213 signed char s1l,s2l,s1h,s2h,tl,th;
2214 tl=get_reg(i_regs->regmap,rt1[i]);
2215 th=get_reg(i_regs->regmap,rt1[i]|64);
2216 if(tl>=0) {
2217 s1l=get_reg(i_regs->regmap,rs1[i]);
2218 s2l=get_reg(i_regs->regmap,rs2[i]);
2219 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2220 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2221 if(rs1[i]&&rs2[i]) {
2222 assert(s1l>=0);
2223 assert(s2l>=0);
2224 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2225 else emit_adds(s1l,s2l,tl);
2226 if(th>=0) {
2227 #ifdef INVERTED_CARRY
2228 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2229 #else
2230 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2231 #endif
2232 else emit_add(s1h,s2h,th);
2233 }
2234 }
2235 else if(rs1[i]) {
2236 if(s1l>=0) emit_mov(s1l,tl);
2237 else emit_loadreg(rs1[i],tl);
2238 if(th>=0) {
2239 if(s1h>=0) emit_mov(s1h,th);
2240 else emit_loadreg(rs1[i]|64,th);
2241 }
2242 }
2243 else if(rs2[i]) {
2244 if(s2l>=0) {
2245 if(opcode2[i]&2) emit_negs(s2l,tl);
2246 else emit_mov(s2l,tl);
2247 }
2248 else {
2249 emit_loadreg(rs2[i],tl);
2250 if(opcode2[i]&2) emit_negs(tl,tl);
2251 }
2252 if(th>=0) {
2253 #ifdef INVERTED_CARRY
2254 if(s2h>=0) emit_mov(s2h,th);
2255 else emit_loadreg(rs2[i]|64,th);
2256 if(opcode2[i]&2) {
2257 emit_adcimm(-1,th); // x86 has inverted carry flag
2258 emit_not(th,th);
2259 }
2260 #else
2261 if(opcode2[i]&2) {
2262 if(s2h>=0) emit_rscimm(s2h,0,th);
2263 else {
2264 emit_loadreg(rs2[i]|64,th);
2265 emit_rscimm(th,0,th);
2266 }
2267 }else{
2268 if(s2h>=0) emit_mov(s2h,th);
2269 else emit_loadreg(rs2[i]|64,th);
2270 }
2271 #endif
2272 }
2273 }
2274 else {
2275 emit_zeroreg(tl);
2276 if(th>=0) emit_zeroreg(th);
2277 }
2278 }
2279 }
2280 }
2281 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2282 if(rt1[i]) {
2283 signed char s1l,s1h,s2l,s2h,t;
2284 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2285 {
2286 t=get_reg(i_regs->regmap,rt1[i]);
2287 //assert(t>=0);
2288 if(t>=0) {
2289 s1l=get_reg(i_regs->regmap,rs1[i]);
2290 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2291 s2l=get_reg(i_regs->regmap,rs2[i]);
2292 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2293 if(rs2[i]==0) // rx<r0
2294 {
2295 assert(s1h>=0);
2296 if(opcode2[i]==0x2a) // SLT
2297 emit_shrimm(s1h,31,t);
2298 else // SLTU (unsigned can not be less than zero)
2299 emit_zeroreg(t);
2300 }
2301 else if(rs1[i]==0) // r0<rx
2302 {
2303 assert(s2h>=0);
2304 if(opcode2[i]==0x2a) // SLT
2305 emit_set_gz64_32(s2h,s2l,t);
2306 else // SLTU (set if not zero)
2307 emit_set_nz64_32(s2h,s2l,t);
2308 }
2309 else {
2310 assert(s1l>=0);assert(s1h>=0);
2311 assert(s2l>=0);assert(s2h>=0);
2312 if(opcode2[i]==0x2a) // SLT
2313 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2314 else // SLTU
2315 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2316 }
2317 }
2318 } else {
2319 t=get_reg(i_regs->regmap,rt1[i]);
2320 //assert(t>=0);
2321 if(t>=0) {
2322 s1l=get_reg(i_regs->regmap,rs1[i]);
2323 s2l=get_reg(i_regs->regmap,rs2[i]);
2324 if(rs2[i]==0) // rx<r0
2325 {
2326 assert(s1l>=0);
2327 if(opcode2[i]==0x2a) // SLT
2328 emit_shrimm(s1l,31,t);
2329 else // SLTU (unsigned can not be less than zero)
2330 emit_zeroreg(t);
2331 }
2332 else if(rs1[i]==0) // r0<rx
2333 {
2334 assert(s2l>=0);
2335 if(opcode2[i]==0x2a) // SLT
2336 emit_set_gz32(s2l,t);
2337 else // SLTU (set if not zero)
2338 emit_set_nz32(s2l,t);
2339 }
2340 else{
2341 assert(s1l>=0);assert(s2l>=0);
2342 if(opcode2[i]==0x2a) // SLT
2343 emit_set_if_less32(s1l,s2l,t);
2344 else // SLTU
2345 emit_set_if_carry32(s1l,s2l,t);
2346 }
2347 }
2348 }
2349 }
2350 }
2351 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2352 if(rt1[i]) {
2353 signed char s1l,s1h,s2l,s2h,th,tl;
2354 tl=get_reg(i_regs->regmap,rt1[i]);
2355 th=get_reg(i_regs->regmap,rt1[i]|64);
2356 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2357 {
2358 assert(tl>=0);
2359 if(tl>=0) {
2360 s1l=get_reg(i_regs->regmap,rs1[i]);
2361 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2362 s2l=get_reg(i_regs->regmap,rs2[i]);
2363 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2364 if(rs1[i]&&rs2[i]) {
2365 assert(s1l>=0);assert(s1h>=0);
2366 assert(s2l>=0);assert(s2h>=0);
2367 if(opcode2[i]==0x24) { // AND
2368 emit_and(s1l,s2l,tl);
2369 emit_and(s1h,s2h,th);
2370 } else
2371 if(opcode2[i]==0x25) { // OR
2372 emit_or(s1l,s2l,tl);
2373 emit_or(s1h,s2h,th);
2374 } else
2375 if(opcode2[i]==0x26) { // XOR
2376 emit_xor(s1l,s2l,tl);
2377 emit_xor(s1h,s2h,th);
2378 } else
2379 if(opcode2[i]==0x27) { // NOR
2380 emit_or(s1l,s2l,tl);
2381 emit_or(s1h,s2h,th);
2382 emit_not(tl,tl);
2383 emit_not(th,th);
2384 }
2385 }
2386 else
2387 {
2388 if(opcode2[i]==0x24) { // AND
2389 emit_zeroreg(tl);
2390 emit_zeroreg(th);
2391 } else
2392 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2393 if(rs1[i]){
2394 if(s1l>=0) emit_mov(s1l,tl);
2395 else emit_loadreg(rs1[i],tl);
2396 if(s1h>=0) emit_mov(s1h,th);
2397 else emit_loadreg(rs1[i]|64,th);
2398 }
2399 else
2400 if(rs2[i]){
2401 if(s2l>=0) emit_mov(s2l,tl);
2402 else emit_loadreg(rs2[i],tl);
2403 if(s2h>=0) emit_mov(s2h,th);
2404 else emit_loadreg(rs2[i]|64,th);
2405 }
2406 else{
2407 emit_zeroreg(tl);
2408 emit_zeroreg(th);
2409 }
2410 } else
2411 if(opcode2[i]==0x27) { // NOR
2412 if(rs1[i]){
2413 if(s1l>=0) emit_not(s1l,tl);
2414 else{
2415 emit_loadreg(rs1[i],tl);
2416 emit_not(tl,tl);
2417 }
2418 if(s1h>=0) emit_not(s1h,th);
2419 else{
2420 emit_loadreg(rs1[i]|64,th);
2421 emit_not(th,th);
2422 }
2423 }
2424 else
2425 if(rs2[i]){
2426 if(s2l>=0) emit_not(s2l,tl);
2427 else{
2428 emit_loadreg(rs2[i],tl);
2429 emit_not(tl,tl);
2430 }
2431 if(s2h>=0) emit_not(s2h,th);
2432 else{
2433 emit_loadreg(rs2[i]|64,th);
2434 emit_not(th,th);
2435 }
2436 }
2437 else {
2438 emit_movimm(-1,tl);
2439 emit_movimm(-1,th);
2440 }
2441 }
2442 }
2443 }
2444 }
2445 else
2446 {
2447 // 32 bit
2448 if(tl>=0) {
2449 s1l=get_reg(i_regs->regmap,rs1[i]);
2450 s2l=get_reg(i_regs->regmap,rs2[i]);
2451 if(rs1[i]&&rs2[i]) {
2452 assert(s1l>=0);
2453 assert(s2l>=0);
2454 if(opcode2[i]==0x24) { // AND
2455 emit_and(s1l,s2l,tl);
2456 } else
2457 if(opcode2[i]==0x25) { // OR
2458 emit_or(s1l,s2l,tl);
2459 } else
2460 if(opcode2[i]==0x26) { // XOR
2461 emit_xor(s1l,s2l,tl);
2462 } else
2463 if(opcode2[i]==0x27) { // NOR
2464 emit_or(s1l,s2l,tl);
2465 emit_not(tl,tl);
2466 }
2467 }
2468 else
2469 {
2470 if(opcode2[i]==0x24) { // AND
2471 emit_zeroreg(tl);
2472 } else
2473 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2474 if(rs1[i]){
2475 if(s1l>=0) emit_mov(s1l,tl);
2476 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2477 }
2478 else
2479 if(rs2[i]){
2480 if(s2l>=0) emit_mov(s2l,tl);
2481 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2482 }
2483 else emit_zeroreg(tl);
2484 } else
2485 if(opcode2[i]==0x27) { // NOR
2486 if(rs1[i]){
2487 if(s1l>=0) emit_not(s1l,tl);
2488 else {
2489 emit_loadreg(rs1[i],tl);
2490 emit_not(tl,tl);
2491 }
2492 }
2493 else
2494 if(rs2[i]){
2495 if(s2l>=0) emit_not(s2l,tl);
2496 else {
2497 emit_loadreg(rs2[i],tl);
2498 emit_not(tl,tl);
2499 }
2500 }
2501 else emit_movimm(-1,tl);
2502 }
2503 }
2504 }
2505 }
2506 }
2507 }
2508}
2509
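// Emit code for immediate ops (LUI, ADDI/ADDIU, DADDI/DADDIU, SLTI/SLTIU,
// ANDI/ORI/XORI). When the source register's value was known at compile time
// (wasconst), the result is folded into a single emit_movimm instead of an
// arithmetic instruction, e.g. ORI becomes emit_movimm(constmap[i][sl]|imm[i],tl).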
2510void imm16_assemble(int i,struct regstat *i_regs)
2511{
2512 if (opcode[i]==0x0f) { // LUI
2513 if(rt1[i]) {
2514 signed char t;
2515 t=get_reg(i_regs->regmap,rt1[i]);
2516 //assert(t>=0);
2517 if(t>=0) {
2518 if(!((i_regs->isconst>>t)&1))
2519 emit_movimm(imm[i]<<16,t);
2520 }
2521 }
2522 }
2523 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2524 if(rt1[i]) {
2525 signed char s,t;
2526 t=get_reg(i_regs->regmap,rt1[i]);
2527 s=get_reg(i_regs->regmap,rs1[i]);
2528 if(rs1[i]) {
2529 //assert(t>=0);
2530 //assert(s>=0);
2531 if(t>=0) {
2532 if(!((i_regs->isconst>>t)&1)) {
2533 if(s<0) {
2534 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2535 emit_addimm(t,imm[i],t);
2536 }else{
2537 if(!((i_regs->wasconst>>s)&1))
2538 emit_addimm(s,imm[i],t);
2539 else
2540 emit_movimm(constmap[i][s]+imm[i],t);
2541 }
2542 }
2543 }
2544 } else {
2545 if(t>=0) {
2546 if(!((i_regs->isconst>>t)&1))
2547 emit_movimm(imm[i],t);
2548 }
2549 }
2550 }
2551 }
2552 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2553 if(rt1[i]) {
2554 signed char sh,sl,th,tl;
2555 th=get_reg(i_regs->regmap,rt1[i]|64);
2556 tl=get_reg(i_regs->regmap,rt1[i]);
2557 sh=get_reg(i_regs->regmap,rs1[i]|64);
2558 sl=get_reg(i_regs->regmap,rs1[i]);
2559 if(tl>=0) {
2560 if(rs1[i]) {
2561 assert(sh>=0);
2562 assert(sl>=0);
2563 if(th>=0) {
2564 emit_addimm64_32(sh,sl,imm[i],th,tl);
2565 }
2566 else {
2567 emit_addimm(sl,imm[i],tl);
2568 }
2569 } else {
2570 emit_movimm(imm[i],tl);
2571 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2572 }
2573 }
2574 }
2575 }
2576 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2577 if(rt1[i]) {
2578 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2579 signed char sh,sl,t;
2580 t=get_reg(i_regs->regmap,rt1[i]);
2581 sh=get_reg(i_regs->regmap,rs1[i]|64);
2582 sl=get_reg(i_regs->regmap,rs1[i]);
2583 //assert(t>=0);
2584 if(t>=0) {
2585 if(rs1[i]>0) {
2586 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2587 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2588 if(opcode[i]==0x0a) { // SLTI
2589 if(sl<0) {
2590 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2591 emit_slti32(t,imm[i],t);
2592 }else{
2593 emit_slti32(sl,imm[i],t);
2594 }
2595 }
2596 else { // SLTIU
2597 if(sl<0) {
2598 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2599 emit_sltiu32(t,imm[i],t);
2600 }else{
2601 emit_sltiu32(sl,imm[i],t);
2602 }
2603 }
2604 }else{ // 64-bit
2605 assert(sl>=0);
2606 if(opcode[i]==0x0a) // SLTI
2607 emit_slti64_32(sh,sl,imm[i],t);
2608 else // SLTIU
2609 emit_sltiu64_32(sh,sl,imm[i],t);
2610 }
2611 }else{
2612 // SLTI(U) with r0 is just stupid,
2613 // nonetheless examples can be found
 2614 if(opcode[i]==0x0a) { // SLTI
 2615 if(0<imm[i]) emit_movimm(1,t);
 2616 else emit_zeroreg(t);
 2617 } else // SLTIU
2618 {
2619 if(imm[i]) emit_movimm(1,t);
2620 else emit_zeroreg(t);
2621 }
2622 }
2623 }
2624 }
2625 }
2626 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2627 if(rt1[i]) {
2628 signed char sh,sl,th,tl;
2629 th=get_reg(i_regs->regmap,rt1[i]|64);
2630 tl=get_reg(i_regs->regmap,rt1[i]);
2631 sh=get_reg(i_regs->regmap,rs1[i]|64);
2632 sl=get_reg(i_regs->regmap,rs1[i]);
2633 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2634 if(opcode[i]==0x0c) //ANDI
2635 {
2636 if(rs1[i]) {
2637 if(sl<0) {
2638 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2639 emit_andimm(tl,imm[i],tl);
2640 }else{
2641 if(!((i_regs->wasconst>>sl)&1))
2642 emit_andimm(sl,imm[i],tl);
2643 else
2644 emit_movimm(constmap[i][sl]&imm[i],tl);
2645 }
2646 }
2647 else
2648 emit_zeroreg(tl);
2649 if(th>=0) emit_zeroreg(th);
2650 }
2651 else
2652 {
2653 if(rs1[i]) {
2654 if(sl<0) {
2655 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2656 }
2657 if(th>=0) {
2658 if(sh<0) {
2659 emit_loadreg(rs1[i]|64,th);
2660 }else{
2661 emit_mov(sh,th);
2662 }
2663 }
2664 if(opcode[i]==0x0d) //ORI
2665 if(sl<0) {
2666 emit_orimm(tl,imm[i],tl);
2667 }else{
2668 if(!((i_regs->wasconst>>sl)&1))
2669 emit_orimm(sl,imm[i],tl);
2670 else
2671 emit_movimm(constmap[i][sl]|imm[i],tl);
2672 }
2673 if(opcode[i]==0x0e) //XORI
2674 if(sl<0) {
2675 emit_xorimm(tl,imm[i],tl);
2676 }else{
2677 if(!((i_regs->wasconst>>sl)&1))
2678 emit_xorimm(sl,imm[i],tl);
2679 else
2680 emit_movimm(constmap[i][sl]^imm[i],tl);
2681 }
2682 }
2683 else {
2684 emit_movimm(imm[i],tl);
2685 if(th>=0) emit_zeroreg(th);
2686 }
2687 }
2688 }
2689 }
2690 }
2691}
2692
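// Emit code for shift-by-immediate ops (SLL/SRL/SRA and the doubleword
// DSLL/DSRL/DSRA variants, including the +32 forms). If the target is already
// marked constant (isconst), the value was propagated at analysis time and no
// shift needs to be emitted here.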
2693void shiftimm_assemble(int i,struct regstat *i_regs)
2694{
2695 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2696 {
2697 if(rt1[i]) {
2698 signed char s,t;
2699 t=get_reg(i_regs->regmap,rt1[i]);
2700 s=get_reg(i_regs->regmap,rs1[i]);
2701 //assert(t>=0);
dc49e339 2702 if(t>=0&&!((i_regs->isconst>>t)&1)){
57871462 2703 if(rs1[i]==0)
2704 {
2705 emit_zeroreg(t);
2706 }
2707 else
2708 {
2709 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2710 if(imm[i]) {
2711 if(opcode2[i]==0) // SLL
2712 {
2713 emit_shlimm(s<0?t:s,imm[i],t);
2714 }
2715 if(opcode2[i]==2) // SRL
2716 {
2717 emit_shrimm(s<0?t:s,imm[i],t);
2718 }
2719 if(opcode2[i]==3) // SRA
2720 {
2721 emit_sarimm(s<0?t:s,imm[i],t);
2722 }
2723 }else{
2724 // Shift by zero
2725 if(s>=0 && s!=t) emit_mov(s,t);
2726 }
2727 }
2728 }
2729 //emit_storereg(rt1[i],t); //DEBUG
2730 }
2731 }
2732 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2733 {
2734 if(rt1[i]) {
2735 signed char sh,sl,th,tl;
2736 th=get_reg(i_regs->regmap,rt1[i]|64);
2737 tl=get_reg(i_regs->regmap,rt1[i]);
2738 sh=get_reg(i_regs->regmap,rs1[i]|64);
2739 sl=get_reg(i_regs->regmap,rs1[i]);
2740 if(tl>=0) {
2741 if(rs1[i]==0)
2742 {
2743 emit_zeroreg(tl);
2744 if(th>=0) emit_zeroreg(th);
2745 }
2746 else
2747 {
2748 assert(sl>=0);
2749 assert(sh>=0);
2750 if(imm[i]) {
2751 if(opcode2[i]==0x38) // DSLL
2752 {
2753 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2754 emit_shlimm(sl,imm[i],tl);
2755 }
2756 if(opcode2[i]==0x3a) // DSRL
2757 {
2758 emit_shrdimm(sl,sh,imm[i],tl);
2759 if(th>=0) emit_shrimm(sh,imm[i],th);
2760 }
2761 if(opcode2[i]==0x3b) // DSRA
2762 {
2763 emit_shrdimm(sl,sh,imm[i],tl);
2764 if(th>=0) emit_sarimm(sh,imm[i],th);
2765 }
2766 }else{
2767 // Shift by zero
2768 if(sl!=tl) emit_mov(sl,tl);
2769 if(th>=0&&sh!=th) emit_mov(sh,th);
2770 }
2771 }
2772 }
2773 }
2774 }
2775 if(opcode2[i]==0x3c) // DSLL32
2776 {
2777 if(rt1[i]) {
2778 signed char sl,tl,th;
2779 tl=get_reg(i_regs->regmap,rt1[i]);
2780 th=get_reg(i_regs->regmap,rt1[i]|64);
2781 sl=get_reg(i_regs->regmap,rs1[i]);
2782 if(th>=0||tl>=0){
2783 assert(tl>=0);
2784 assert(th>=0);
2785 assert(sl>=0);
2786 emit_mov(sl,th);
2787 emit_zeroreg(tl);
2788 if(imm[i]>32)
2789 {
2790 emit_shlimm(th,imm[i]&31,th);
2791 }
2792 }
2793 }
2794 }
2795 if(opcode2[i]==0x3e) // DSRL32
2796 {
2797 if(rt1[i]) {
2798 signed char sh,tl,th;
2799 tl=get_reg(i_regs->regmap,rt1[i]);
2800 th=get_reg(i_regs->regmap,rt1[i]|64);
2801 sh=get_reg(i_regs->regmap,rs1[i]|64);
2802 if(tl>=0){
2803 assert(sh>=0);
2804 emit_mov(sh,tl);
2805 if(th>=0) emit_zeroreg(th);
2806 if(imm[i]>32)
2807 {
2808 emit_shrimm(tl,imm[i]&31,tl);
2809 }
2810 }
2811 }
2812 }
2813 if(opcode2[i]==0x3f) // DSRA32
2814 {
2815 if(rt1[i]) {
2816 signed char sh,tl;
2817 tl=get_reg(i_regs->regmap,rt1[i]);
2818 sh=get_reg(i_regs->regmap,rs1[i]|64);
2819 if(tl>=0){
2820 assert(sh>=0);
2821 emit_mov(sh,tl);
2822 if(imm[i]>32)
2823 {
2824 emit_sarimm(tl,imm[i]&31,tl);
2825 }
2826 }
2827 }
2828 }
2829}
2830
2831#ifndef shift_assemble
2832void shift_assemble(int i,struct regstat *i_regs)
2833{
2834 printf("Need shift_assemble for this architecture.\n");
2835 exit(1);
2836}
2837#endif
2838
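// Emit code for aligned loads (LB/LH/LW/LBU/LHU/LWU/LD). The fast path
// compares the address against RAM_SIZE and falls through to the inline
// memory access; the out-of-range case branches to a LOAD*_STUB added via
// add_stub(). Loads from a known constant address outside RAM are turned
// into inline_readstub() calls instead.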
2839void load_assemble(int i,struct regstat *i_regs)
2840{
2841 int s,th,tl,addr,map=-1;
2842 int offset;
2843 int jaddr=0;
5bf843dc 2844 int memtarget=0,c=0;
b1570849 2845 int fastload_reg_override=0;
57871462 2846 u_int hr,reglist=0;
2847 th=get_reg(i_regs->regmap,rt1[i]|64);
2848 tl=get_reg(i_regs->regmap,rt1[i]);
2849 s=get_reg(i_regs->regmap,rs1[i]);
2850 offset=imm[i];
2851 for(hr=0;hr<HOST_REGS;hr++) {
2852 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2853 }
2854 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2855 if(s>=0) {
2856 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2857 if (c) {
2858 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2859 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2860 }
57871462 2861 }
57871462 2862 //printf("load_assemble: c=%d\n",c);
2863 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2864 // FIXME: Even if the load is a NOP, we should check for pagefaults...
5bf843dc 2865#ifdef PCSX
f18c0f46 2866 if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
 2867 ||rt1[i]==0) {
5bf843dc 2868 // could be FIFO, must perform the read
f18c0f46 2869 // ||dummy read
5bf843dc 2870 assem_debug("(forced read)\n");
2871 tl=get_reg(i_regs->regmap,-1);
2872 assert(tl>=0);
5bf843dc 2873 }
f18c0f46 2874#endif
5bf843dc 2875 if(offset||s<0||c) addr=tl;
2876 else addr=s;
535d208a 2877 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2878 if(tl>=0) {
2879 //printf("load_assemble: c=%d\n",c);
2880 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2881 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2882 reglist&=~(1<<tl);
2883 if(th>=0) reglist&=~(1<<th);
2884 if(!using_tlb) {
2885 if(!c) {
2886 #ifdef RAM_OFFSET
2887 map=get_reg(i_regs->regmap,ROREG);
2888 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2889 #endif
57871462 2890//#define R29_HACK 1
535d208a 2891 #ifdef R29_HACK
2892 // Strmnnrmn's speed hack
2893 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2894 #endif
2895 {
dadf55f2 2896 #ifdef PCSX
2897 if(sp_in_mirror&&rs1[i]==29) {
2898 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2899 emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
b1570849 2900 fastload_reg_override=HOST_TEMPREG;
dadf55f2 2901 }
2902 else
2903 #endif
535d208a 2904 emit_cmpimm(addr,RAM_SIZE);
2905 jaddr=(int)out;
2906 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2907 // Hint to branch predictor that the branch is unlikely to be taken
2908 if(rs1[i]>=28)
2909 emit_jno_unlikely(0);
2910 else
57871462 2911 #endif
535d208a 2912 emit_jno(0);
57871462 2913 }
535d208a 2914 }
2915 }else{ // using tlb
2916 int x=0;
2917 if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2918 if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2919 map=get_reg(i_regs->regmap,TLREG);
2920 assert(map>=0);
ea3d2e6e 2921 reglist&=~(1<<map);
535d208a 2922 map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2923 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2924 }
2925 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2926 if (opcode[i]==0x20) { // LB
2927 if(!c||memtarget) {
2928 if(!dummy) {
57871462 2929 #ifdef HOST_IMM_ADDR32
2930 if(c)
2931 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2932 else
2933 #endif
2934 {
2935 //emit_xorimm(addr,3,tl);
2936 //gen_tlb_addr_r(tl,map);
2937 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 2938 int x=0,a=tl;
2002a1db 2939#ifdef BIG_ENDIAN_MIPS
57871462 2940 if(!c) emit_xorimm(addr,3,tl);
2941 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2942#else
535d208a 2943 if(!c) a=addr;
dadf55f2 2944#endif
b1570849 2945 if(fastload_reg_override) a=fastload_reg_override;
2946
535d208a 2947 emit_movsbl_indexed_tlb(x,a,map,tl);
57871462 2948 }
57871462 2949 }
535d208a 2950 if(jaddr)
2951 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2952 }
535d208a 2953 else
2954 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2955 }
2956 if (opcode[i]==0x21) { // LH
2957 if(!c||memtarget) {
2958 if(!dummy) {
57871462 2959 #ifdef HOST_IMM_ADDR32
2960 if(c)
2961 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2962 else
2963 #endif
2964 {
535d208a 2965 int x=0,a=tl;
2002a1db 2966#ifdef BIG_ENDIAN_MIPS
57871462 2967 if(!c) emit_xorimm(addr,2,tl);
2968 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2969#else
535d208a 2970 if(!c) a=addr;
dadf55f2 2971#endif
b1570849 2972 if(fastload_reg_override) a=fastload_reg_override;
57871462 2973 //#ifdef
2974 //emit_movswl_indexed_tlb(x,tl,map,tl);
2975 //else
2976 if(map>=0) {
535d208a 2977 gen_tlb_addr_r(a,map);
2978 emit_movswl_indexed(x,a,tl);
2979 }else{
2980 #ifdef RAM_OFFSET
2981 emit_movswl_indexed(x,a,tl);
2982 #else
2983 emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2984 #endif
2985 }
57871462 2986 }
57871462 2987 }
535d208a 2988 if(jaddr)
2989 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2990 }
535d208a 2991 else
2992 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2993 }
2994 if (opcode[i]==0x23) { // LW
2995 if(!c||memtarget) {
2996 if(!dummy) {
dadf55f2 2997 int a=addr;
b1570849 2998 if(fastload_reg_override) a=fastload_reg_override;
57871462 2999 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3000 #ifdef HOST_IMM_ADDR32
3001 if(c)
3002 emit_readword_tlb(constmap[i][s]+offset,map,tl);
3003 else
3004 #endif
dadf55f2 3005 emit_readword_indexed_tlb(0,a,map,tl);
57871462 3006 }
535d208a 3007 if(jaddr)
3008 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 3009 }
535d208a 3010 else
3011 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3012 }
3013 if (opcode[i]==0x24) { // LBU
3014 if(!c||memtarget) {
3015 if(!dummy) {
57871462 3016 #ifdef HOST_IMM_ADDR32
3017 if(c)
3018 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
3019 else
3020 #endif
3021 {
3022 //emit_xorimm(addr,3,tl);
3023 //gen_tlb_addr_r(tl,map);
3024 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 3025 int x=0,a=tl;
2002a1db 3026#ifdef BIG_ENDIAN_MIPS
57871462 3027 if(!c) emit_xorimm(addr,3,tl);
3028 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 3029#else
535d208a 3030 if(!c) a=addr;
dadf55f2 3031#endif
b1570849 3032 if(fastload_reg_override) a=fastload_reg_override;
3033
535d208a 3034 emit_movzbl_indexed_tlb(x,a,map,tl);
57871462 3035 }
57871462 3036 }
535d208a 3037 if(jaddr)
3038 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 3039 }
535d208a 3040 else
3041 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3042 }
3043 if (opcode[i]==0x25) { // LHU
3044 if(!c||memtarget) {
3045 if(!dummy) {
57871462 3046 #ifdef HOST_IMM_ADDR32
3047 if(c)
3048 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
3049 else
3050 #endif
3051 {
535d208a 3052 int x=0,a=tl;
2002a1db 3053#ifdef BIG_ENDIAN_MIPS
57871462 3054 if(!c) emit_xorimm(addr,2,tl);
3055 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 3056#else
535d208a 3057 if(!c) a=addr;
dadf55f2 3058#endif
b1570849 3059 if(fastload_reg_override) a=fastload_reg_override;
57871462 3060 //#ifdef
3061 //emit_movzwl_indexed_tlb(x,tl,map,tl);
3062 //#else
3063 if(map>=0) {
535d208a 3064 gen_tlb_addr_r(a,map);
3065 emit_movzwl_indexed(x,a,tl);
3066 }else{
3067 #ifdef RAM_OFFSET
3068 emit_movzwl_indexed(x,a,tl);
3069 #else
3070 emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
3071 #endif
3072 }
57871462 3073 }
3074 }
535d208a 3075 if(jaddr)
3076 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 3077 }
535d208a 3078 else
3079 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3080 }
3081 if (opcode[i]==0x27) { // LWU
3082 assert(th>=0);
3083 if(!c||memtarget) {
3084 if(!dummy) {
dadf55f2 3085 int a=addr;
b1570849 3086 if(fastload_reg_override) a=fastload_reg_override;
57871462 3087 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3088 #ifdef HOST_IMM_ADDR32
3089 if(c)
3090 emit_readword_tlb(constmap[i][s]+offset,map,tl);
3091 else
3092 #endif
dadf55f2 3093 emit_readword_indexed_tlb(0,a,map,tl);
57871462 3094 }
535d208a 3095 if(jaddr)
3096 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3097 }
3098 else {
3099 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 3100 }
535d208a 3101 emit_zeroreg(th);
3102 }
3103 if (opcode[i]==0x37) { // LD
3104 if(!c||memtarget) {
3105 if(!dummy) {
dadf55f2 3106 int a=addr;
b1570849 3107 if(fastload_reg_override) a=fastload_reg_override;
57871462 3108 //gen_tlb_addr_r(tl,map);
3109 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3110 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3111 #ifdef HOST_IMM_ADDR32
3112 if(c)
3113 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3114 else
3115 #endif
dadf55f2 3116 emit_readdword_indexed_tlb(0,a,map,th,tl);
57871462 3117 }
535d208a 3118 if(jaddr)
3119 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 3120 }
535d208a 3121 else
3122 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 3123 }
535d208a 3124 }
3125 //emit_storereg(rt1[i],tl); // DEBUG
57871462 3126 //if(opcode[i]==0x23)
3127 //if(opcode[i]==0x24)
3128 //if(opcode[i]==0x23||opcode[i]==0x24)
3129 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3130 {
3131 //emit_pusha();
3132 save_regs(0x100f);
3133 emit_readword((int)&last_count,ECX);
3134 #ifdef __i386__
3135 if(get_reg(i_regs->regmap,CCREG)<0)
3136 emit_loadreg(CCREG,HOST_CCREG);
3137 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3138 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3139 emit_writeword(HOST_CCREG,(int)&Count);
3140 #endif
3141 #ifdef __arm__
3142 if(get_reg(i_regs->regmap,CCREG)<0)
3143 emit_loadreg(CCREG,0);
3144 else
3145 emit_mov(HOST_CCREG,0);
3146 emit_add(0,ECX,0);
3147 emit_addimm(0,2*ccadj[i],0);
3148 emit_writeword(0,(int)&Count);
3149 #endif
3150 emit_call((int)memdebug);
3151 //emit_popa();
3152 restore_regs(0x100f);
3153 }/**/
3154}
3155
3156#ifndef loadlr_assemble
3157void loadlr_assemble(int i,struct regstat *i_regs)
3158{
3159 printf("Need loadlr_assemble for this architecture.\n");
3160 exit(1);
3161}
3162#endif
3163
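// Emit code for aligned stores (SB/SH/SW/SD). Besides the RAM_SIZE range
// check and STORE*_STUB fallback, the fast path also checks invalid_code so
// that stores into pages containing translated code invalidate it
// (INVCODE_STUB / invalidate_addr_reg).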
3164void store_assemble(int i,struct regstat *i_regs)
3165{
3166 int s,th,tl,map=-1;
3167 int addr,temp;
3168 int offset;
3169 int jaddr=0,jaddr2,type;
666a299d 3170 int memtarget=0,c=0;
57871462 3171 int agr=AGEN1+(i&1);
b1570849 3172 int faststore_reg_override=0;
57871462 3173 u_int hr,reglist=0;
3174 th=get_reg(i_regs->regmap,rs2[i]|64);
3175 tl=get_reg(i_regs->regmap,rs2[i]);
3176 s=get_reg(i_regs->regmap,rs1[i]);
3177 temp=get_reg(i_regs->regmap,agr);
3178 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3179 offset=imm[i];
3180 if(s>=0) {
3181 c=(i_regs->wasconst>>s)&1;
af4ee1fe 3182 if(c) {
3183 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3184 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3185 }
57871462 3186 }
3187 assert(tl>=0);
3188 assert(temp>=0);
3189 for(hr=0;hr<HOST_REGS;hr++) {
3190 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3191 }
3192 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3193 if(offset||s<0||c) addr=temp;
3194 else addr=s;
3195 if(!using_tlb) {
3196 if(!c) {
dadf55f2 3197 #ifdef PCSX
3198 if(sp_in_mirror&&rs1[i]==29) {
3199 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
3200 emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
b1570849 3201 faststore_reg_override=HOST_TEMPREG;
dadf55f2 3202 }
3203 else
3204 #endif
57871462 3205 #ifdef R29_HACK
3206 // Strmnnrmn's speed hack
4cb76aa4 3207 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3208 #endif
4cb76aa4 3209 emit_cmpimm(addr,RAM_SIZE);
57871462 3210 #ifdef DESTRUCTIVE_SHIFT
3211 if(s==addr) emit_mov(s,temp);
3212 #endif
3213 #ifdef R29_HACK
dadf55f2 3214 memtarget=1;
4cb76aa4 3215 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
57871462 3216 #endif
3217 {
3218 jaddr=(int)out;
3219 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3220 // Hint to branch predictor that the branch is unlikely to be taken
3221 if(rs1[i]>=28)
3222 emit_jno_unlikely(0);
3223 else
3224 #endif
3225 emit_jno(0);
3226 }
3227 }
3228 }else{ // using tlb
3229 int x=0;
3230 if (opcode[i]==0x28) x=3; // SB
3231 if (opcode[i]==0x29) x=2; // SH
3232 map=get_reg(i_regs->regmap,TLREG);
3233 assert(map>=0);
ea3d2e6e 3234 reglist&=~(1<<map);
57871462 3235 map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3236 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3237 }
3238
3239 if (opcode[i]==0x28) { // SB
3240 if(!c||memtarget) {
97a238a6 3241 int x=0,a=temp;
2002a1db 3242#ifdef BIG_ENDIAN_MIPS
57871462 3243 if(!c) emit_xorimm(addr,3,temp);
3244 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 3245#else
97a238a6 3246 if(!c) a=addr;
dadf55f2 3247#endif
b1570849 3248 if(faststore_reg_override) a=faststore_reg_override;
57871462 3249 //gen_tlb_addr_w(temp,map);
3250 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
97a238a6 3251 emit_writebyte_indexed_tlb(tl,x,a,map,a);
57871462 3252 }
3253 type=STOREB_STUB;
3254 }
3255 if (opcode[i]==0x29) { // SH
3256 if(!c||memtarget) {
97a238a6 3257 int x=0,a=temp;
2002a1db 3258#ifdef BIG_ENDIAN_MIPS
57871462 3259 if(!c) emit_xorimm(addr,2,temp);
3260 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 3261#else
97a238a6 3262 if(!c) a=addr;
dadf55f2 3263#endif
b1570849 3264 if(faststore_reg_override) a=faststore_reg_override;
57871462 3265 //#ifdef
3266 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3267 //#else
3268 if(map>=0) {
97a238a6 3269 gen_tlb_addr_w(a,map);
3270 emit_writehword_indexed(tl,x,a);
57871462 3271 }else
97a238a6 3272 emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
57871462 3273 }
3274 type=STOREH_STUB;
3275 }
3276 if (opcode[i]==0x2B) { // SW
dadf55f2 3277 if(!c||memtarget) {
3278 int a=addr;
b1570849 3279 if(faststore_reg_override) a=faststore_reg_override;
57871462 3280 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
dadf55f2 3281 emit_writeword_indexed_tlb(tl,0,a,map,temp);
3282 }
57871462 3283 type=STOREW_STUB;
3284 }
3285 if (opcode[i]==0x3F) { // SD
3286 if(!c||memtarget) {
dadf55f2 3287 int a=addr;
b1570849 3288 if(faststore_reg_override) a=faststore_reg_override;
57871462 3289 if(rs2[i]) {
3290 assert(th>=0);
3291 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3292 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
dadf55f2 3293 emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
57871462 3294 }else{
3295 // Store zero
3296 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3297 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
dadf55f2 3298 emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
57871462 3299 }
3300 }
3301 type=STORED_STUB;
3302 }
b96d3df7 3303#ifdef PCSX
3304 if(jaddr) {
3305 // PCSX store handlers don't check invcode again
3306 reglist|=1<<addr;
3307 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3308 jaddr=0;
3309 }
3310#endif
57871462 3311 if(!using_tlb) {
3312 if(!c||memtarget) {
3313 #ifdef DESTRUCTIVE_SHIFT
3314 // The x86 shift operation is 'destructive'; it overwrites the
3315 // source register, so we need to make a copy first and use that.
3316 addr=temp;
3317 #endif
3318 #if defined(HOST_IMM8)
3319 int ir=get_reg(i_regs->regmap,INVCP);
3320 assert(ir>=0);
3321 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3322 #else
3323 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3324 #endif
0bbd1454 3325 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3326 emit_callne(invalidate_addr_reg[addr]);
3327 #else
57871462 3328 jaddr2=(int)out;
3329 emit_jne(0);
3330 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
0bbd1454 3331 #endif
57871462 3332 }
3333 }
3eaa7048 3334 if(jaddr) {
3335 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3336 } else if(c&&!memtarget) {
3337 inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3338 }
57871462 3339 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3340 //if(opcode[i]==0x2B || opcode[i]==0x28)
3341 //if(opcode[i]==0x2B || opcode[i]==0x29)
3342 //if(opcode[i]==0x2B)
3343 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3344 {
28d74ee8 3345 #ifdef __i386__
3346 emit_pusha();
3347 #endif
3348 #ifdef __arm__
57871462 3349 save_regs(0x100f);
28d74ee8 3350 #endif
57871462 3351 emit_readword((int)&last_count,ECX);
3352 #ifdef __i386__
3353 if(get_reg(i_regs->regmap,CCREG)<0)
3354 emit_loadreg(CCREG,HOST_CCREG);
3355 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3356 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3357 emit_writeword(HOST_CCREG,(int)&Count);
3358 #endif
3359 #ifdef __arm__
3360 if(get_reg(i_regs->regmap,CCREG)<0)
3361 emit_loadreg(CCREG,0);
3362 else
3363 emit_mov(HOST_CCREG,0);
3364 emit_add(0,ECX,0);
3365 emit_addimm(0,2*ccadj[i],0);
3366 emit_writeword(0,(int)&Count);
3367 #endif
3368 emit_call((int)memdebug);
28d74ee8 3369 #ifdef __i386__
3370 emit_popa();
3371 #endif
3372 #ifdef __arm__
57871462 3373 restore_regs(0x100f);
28d74ee8 3374 #endif
57871462 3375 }/**/
3376}
3377
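// Emit code for unaligned stores (SWL/SWR/SDL/SDR). The low two address bits
// are tested at run time and one of four byte-alignment cases is executed;
// SDL/SDR additionally write the second word depending on address bit 2.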
3378void storelr_assemble(int i,struct regstat *i_regs)
3379{
3380 int s,th,tl;
3381 int temp;
3382 int temp2;
3383 int offset;
3384 int jaddr=0,jaddr2;
3385 int case1,case2,case3;
3386 int done0,done1,done2;
af4ee1fe 3387 int memtarget=0,c=0;
fab5d06d 3388 int agr=AGEN1+(i&1);
57871462 3389 u_int hr,reglist=0;
3390 th=get_reg(i_regs->regmap,rs2[i]|64);
3391 tl=get_reg(i_regs->regmap,rs2[i]);
3392 s=get_reg(i_regs->regmap,rs1[i]);
fab5d06d 3393 temp=get_reg(i_regs->regmap,agr);
3394 if(temp<0) temp=get_reg(i_regs->regmap,-1);
57871462 3395 offset=imm[i];
3396 if(s>=0) {
3397 c=(i_regs->isconst>>s)&1;
af4ee1fe 3398 if(c) {
3399 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3400 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3401 }
57871462 3402 }
3403 assert(tl>=0);
3404 for(hr=0;hr<HOST_REGS;hr++) {
3405 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3406 }
535d208a 3407 assert(temp>=0);
3408 if(!using_tlb) {
3409 if(!c) {
3410 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3411 if(!offset&&s!=temp) emit_mov(s,temp);
3412 jaddr=(int)out;
3413 emit_jno(0);
3414 }
3415 else
3416 {
3417 if(!memtarget||!rs1[i]) {
57871462 3418 jaddr=(int)out;
3419 emit_jmp(0);
3420 }
57871462 3421 }
535d208a 3422 #ifdef RAM_OFFSET
3423 int map=get_reg(i_regs->regmap,ROREG);
3424 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3425 gen_tlb_addr_w(temp,map);
3426 #else
3427 if((u_int)rdram!=0x80000000)
3428 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3429 #endif
3430 }else{ // using tlb
3431 int map=get_reg(i_regs->regmap,TLREG);
3432 assert(map>=0);
ea3d2e6e 3433 reglist&=~(1<<map);
535d208a 3434 map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3435 if(!c&&!offset&&s>=0) emit_mov(s,temp);
3436 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3437 if(!jaddr&&!memtarget) {
3438 jaddr=(int)out;
3439 emit_jmp(0);
57871462 3440 }
535d208a 3441 gen_tlb_addr_w(temp,map);
3442 }
3443
3444 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3445 temp2=get_reg(i_regs->regmap,FTEMP);
3446 if(!rs2[i]) temp2=th=tl;
3447 }
57871462 3448
2002a1db 3449#ifndef BIG_ENDIAN_MIPS
3450 emit_xorimm(temp,3,temp);
3451#endif
535d208a 3452 emit_testimm(temp,2);
3453 case2=(int)out;
3454 emit_jne(0);
3455 emit_testimm(temp,1);
3456 case1=(int)out;
3457 emit_jne(0);
3458 // 0
3459 if (opcode[i]==0x2A) { // SWL
3460 emit_writeword_indexed(tl,0,temp);
3461 }
3462 if (opcode[i]==0x2E) { // SWR
3463 emit_writebyte_indexed(tl,3,temp);
3464 }
3465 if (opcode[i]==0x2C) { // SDL
3466 emit_writeword_indexed(th,0,temp);
3467 if(rs2[i]) emit_mov(tl,temp2);
3468 }
3469 if (opcode[i]==0x2D) { // SDR
3470 emit_writebyte_indexed(tl,3,temp);
3471 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3472 }
3473 done0=(int)out;
3474 emit_jmp(0);
3475 // 1
3476 set_jump_target(case1,(int)out);
3477 if (opcode[i]==0x2A) { // SWL
3478 // Write 3 msb into three least significant bytes
3479 if(rs2[i]) emit_rorimm(tl,8,tl);
3480 emit_writehword_indexed(tl,-1,temp);
3481 if(rs2[i]) emit_rorimm(tl,16,tl);
3482 emit_writebyte_indexed(tl,1,temp);
3483 if(rs2[i]) emit_rorimm(tl,8,tl);
3484 }
3485 if (opcode[i]==0x2E) { // SWR
3486 // Write two lsb into two most significant bytes
3487 emit_writehword_indexed(tl,1,temp);
3488 }
3489 if (opcode[i]==0x2C) { // SDL
3490 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3491 // Write 3 msb into three least significant bytes
3492 if(rs2[i]) emit_rorimm(th,8,th);
3493 emit_writehword_indexed(th,-1,temp);
3494 if(rs2[i]) emit_rorimm(th,16,th);
3495 emit_writebyte_indexed(th,1,temp);
3496 if(rs2[i]) emit_rorimm(th,8,th);
3497 }
3498 if (opcode[i]==0x2D) { // SDR
3499 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3500 // Write two lsb into two most significant bytes
3501 emit_writehword_indexed(tl,1,temp);
3502 }
3503 done1=(int)out;
3504 emit_jmp(0);
3505 // 2
3506 set_jump_target(case2,(int)out);
3507 emit_testimm(temp,1);
3508 case3=(int)out;
3509 emit_jne(0);
3510 if (opcode[i]==0x2A) { // SWL
3511 // Write two msb into two least significant bytes
3512 if(rs2[i]) emit_rorimm(tl,16,tl);
3513 emit_writehword_indexed(tl,-2,temp);
3514 if(rs2[i]) emit_rorimm(tl,16,tl);
3515 }
3516 if (opcode[i]==0x2E) { // SWR
3517 // Write 3 lsb into three most significant bytes
3518 emit_writebyte_indexed(tl,-1,temp);
3519 if(rs2[i]) emit_rorimm(tl,8,tl);
3520 emit_writehword_indexed(tl,0,temp);
3521 if(rs2[i]) emit_rorimm(tl,24,tl);
3522 }
3523 if (opcode[i]==0x2C) { // SDL
3524 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3525 // Write two msb into two least significant bytes
3526 if(rs2[i]) emit_rorimm(th,16,th);
3527 emit_writehword_indexed(th,-2,temp);
3528 if(rs2[i]) emit_rorimm(th,16,th);
3529 }
3530 if (opcode[i]==0x2D) { // SDR
3531 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3532 // Write 3 lsb into three most significant bytes
3533 emit_writebyte_indexed(tl,-1,temp);
3534 if(rs2[i]) emit_rorimm(tl,8,tl);
3535 emit_writehword_indexed(tl,0,temp);
3536 if(rs2[i]) emit_rorimm(tl,24,tl);
3537 }
3538 done2=(int)out;
3539 emit_jmp(0);
3540 // 3
3541 set_jump_target(case3,(int)out);
3542 if (opcode[i]==0x2A) { // SWL
3543 // Write msb into least significant byte
3544 if(rs2[i]) emit_rorimm(tl,24,tl);
3545 emit_writebyte_indexed(tl,-3,temp);
3546 if(rs2[i]) emit_rorimm(tl,8,tl);
3547 }
3548 if (opcode[i]==0x2E) { // SWR
3549 // Write entire word
3550 emit_writeword_indexed(tl,-3,temp);
3551 }
3552 if (opcode[i]==0x2C) { // SDL
3553 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3554 // Write msb into least significant byte
3555 if(rs2[i]) emit_rorimm(th,24,th);
3556 emit_writebyte_indexed(th,-3,temp);
3557 if(rs2[i]) emit_rorimm(th,8,th);
3558 }
3559 if (opcode[i]==0x2D) { // SDR
3560 if(rs2[i]) emit_mov(th,temp2);
3561 // Write entire word
3562 emit_writeword_indexed(tl,-3,temp);
3563 }
3564 set_jump_target(done0,(int)out);
3565 set_jump_target(done1,(int)out);
3566 set_jump_target(done2,(int)out);
3567 if (opcode[i]==0x2C) { // SDL
3568 emit_testimm(temp,4);
57871462 3569 done0=(int)out;
57871462 3570 emit_jne(0);
535d208a 3571 emit_andimm(temp,~3,temp);
3572 emit_writeword_indexed(temp2,4,temp);
3573 set_jump_target(done0,(int)out);
3574 }
3575 if (opcode[i]==0x2D) { // SDR
3576 emit_testimm(temp,4);
3577 done0=(int)out;
3578 emit_jeq(0);
3579 emit_andimm(temp,~3,temp);
3580 emit_writeword_indexed(temp2,-4,temp);
57871462 3581 set_jump_target(done0,(int)out);
57871462 3582 }
535d208a 3583 if(!c||!memtarget)
3584 add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
57871462 3585 if(!using_tlb) {
535d208a 3586 #ifdef RAM_OFFSET
3587 int map=get_reg(i_regs->regmap,ROREG);
3588 if(map<0) map=HOST_TEMPREG;
3589 gen_orig_addr_w(temp,map);
3590 #else
57871462 3591 emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
535d208a 3592 #endif
57871462 3593 #if defined(HOST_IMM8)
3594 int ir=get_reg(i_regs->regmap,INVCP);
3595 assert(ir>=0);
3596 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3597 #else
3598 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3599 #endif
535d208a 3600 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3601 emit_callne(invalidate_addr_reg[temp]);
3602 #else
57871462 3603 jaddr2=(int)out;
3604 emit_jne(0);
3605 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
535d208a 3606 #endif
57871462 3607 }
3608 /*
3609 emit_pusha();
3610 //save_regs(0x100f);
3611 emit_readword((int)&last_count,ECX);
3612 if(get_reg(i_regs->regmap,CCREG)<0)
3613 emit_loadreg(CCREG,HOST_CCREG);
3614 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3615 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3616 emit_writeword(HOST_CCREG,(int)&Count);
3617 emit_call((int)memdebug);
3618 emit_popa();
3619 //restore_regs(0x100f);
3620 /**/
3621}
3622
3623void c1ls_assemble(int i,struct regstat *i_regs)
3624{
3d624f89 3625#ifndef DISABLE_COP1
57871462 3626 int s,th,tl;
3627 int temp,ar;
3628 int map=-1;
3629 int offset;
3630 int c=0;
3631 int jaddr,jaddr2=0,jaddr3,type;
3632 int agr=AGEN1+(i&1);
3633 u_int hr,reglist=0;
3634 th=get_reg(i_regs->regmap,FTEMP|64);
3635 tl=get_reg(i_regs->regmap,FTEMP);
3636 s=get_reg(i_regs->regmap,rs1[i]);
3637 temp=get_reg(i_regs->regmap,agr);
3638 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3639 offset=imm[i];
3640 assert(tl>=0);
3641 assert(rs1[i]>0);
3642 assert(temp>=0);
3643 for(hr=0;hr<HOST_REGS;hr++) {
3644 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3645 }
3646 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3647 if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3648 {
3649 // Loads use a temporary register which we need to save
3650 reglist|=1<<temp;
3651 }
3652 if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3653 ar=temp;
3654 else // LWC1/LDC1
3655 ar=tl;
3656 //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3657 //else c=(i_regs->wasconst>>s)&1;
3658 if(s>=0) c=(i_regs->wasconst>>s)&1;
3659 // Check cop1 unusable
3660 if(!cop1_usable) {
3661 signed char rs=get_reg(i_regs->regmap,CSREG);
3662 assert(rs>=0);
3663 emit_testimm(rs,0x20000000);
3664 jaddr=(int)out;
3665 emit_jeq(0);
3666 add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3667 cop1_usable=1;
3668 }
3669 if (opcode[i]==0x39) { // SWC1 (get float address)
3670 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3671 }
3672 if (opcode[i]==0x3D) { // SDC1 (get double address)
3673 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3674 }
3675 // Generate address + offset
3676 if(!using_tlb) {
3677 if(!c)
4cb76aa4 3678 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
57871462 3679 }
3680 else
3681 {
3682 map=get_reg(i_regs->regmap,TLREG);
3683 assert(map>=0);
ea3d2e6e 3684 reglist&=~(1<<map);
57871462 3685 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3686 map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3687 }
3688 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3689 map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3690 }
3691 }
3692 if (opcode[i]==0x39) { // SWC1 (read float)
3693 emit_readword_indexed(0,tl,tl);
3694 }
3695 if (opcode[i]==0x3D) { // SDC1 (read double)
3696 emit_readword_indexed(4,tl,th);
3697 emit_readword_indexed(0,tl,tl);
3698 }
3699 if (opcode[i]==0x31) { // LWC1 (get target address)
3700 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3701 }
3702 if (opcode[i]==0x35) { // LDC1 (get target address)
3703 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3704 }
3705 if(!using_tlb) {
3706 if(!c) {
3707 jaddr2=(int)out;
3708 emit_jno(0);
3709 }
4cb76aa4 3710 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
57871462 3711 jaddr2=(int)out;
3712 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3713 }
3714 #ifdef DESTRUCTIVE_SHIFT
3715 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3716 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3717 }
3718 #endif
3719 }else{
3720 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3721 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3722 }
3723 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3724 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3725 }
3726 }
3727 if (opcode[i]==0x31) { // LWC1
3728 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3729 //gen_tlb_addr_r(ar,map);
3730 //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3731 #ifdef HOST_IMM_ADDR32
3732 if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3733 else
3734 #endif
3735 emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3736 type=LOADW_STUB;
3737 }
3738 if (opcode[i]==0x35) { // LDC1
3739 assert(th>=0);
3740 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3741 //gen_tlb_addr_r(ar,map);
3742 //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3743 //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3744 #ifdef HOST_IMM_ADDR32
3745 if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3746 else
3747 #endif
3748 emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3749 type=LOADD_STUB;
3750 }
3751 if (opcode[i]==0x39) { // SWC1
3752 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3753 emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3754 type=STOREW_STUB;
3755 }
3756 if (opcode[i]==0x3D) { // SDC1
3757 assert(th>=0);
3758 //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3759 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3760 emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3761 type=STORED_STUB;
3762 }
3763 if(!using_tlb) {
3764 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3765 #ifndef DESTRUCTIVE_SHIFT
3766 temp=offset||c||s<0?ar:s;
3767 #endif
3768 #if defined(HOST_IMM8)
3769 int ir=get_reg(i_regs->regmap,INVCP);
3770 assert(ir>=0);
3771 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3772 #else
3773 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3774 #endif
0bbd1454 3775 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3776 emit_callne(invalidate_addr_reg[temp]);
3777 #else
57871462 3778 jaddr3=(int)out;
3779 emit_jne(0);
3780 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
0bbd1454 3781 #endif
57871462 3782 }
3783 }
3784 if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3785 if (opcode[i]==0x31) { // LWC1 (write float)
3786 emit_writeword_indexed(tl,0,temp);
3787 }
3788 if (opcode[i]==0x35) { // LDC1 (write double)
3789 emit_writeword_indexed(th,4,temp);
3790 emit_writeword_indexed(tl,0,temp);
3791 }
3792 //if(opcode[i]==0x39)
3793 /*if(opcode[i]==0x39||opcode[i]==0x31)
3794 {
3795 emit_pusha();
3796 emit_readword((int)&last_count,ECX);
3797 if(get_reg(i_regs->regmap,CCREG)<0)
3798 emit_loadreg(CCREG,HOST_CCREG);
3799 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3800 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3801 emit_writeword(HOST_CCREG,(int)&Count);
3802 emit_call((int)memdebug);
3803 emit_popa();
3804 }/**/
3d624f89 3805#else
3806 cop1_unusable(i, i_regs);
3807#endif
57871462 3808}
3809
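// Emit code for GTE loads/stores (LWC2/SWC2): data moves between a GTE data
// register (via cop2_get_dreg/cop2_put_dreg) and memory through FTEMP. The
// TLB path is not supported here (assert(!using_tlb)).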
b9b61529 3810void c2ls_assemble(int i,struct regstat *i_regs)
3811{
3812 int s,tl;
3813 int ar;
3814 int offset;
1fd1aceb 3815 int memtarget=0,c=0;
c2e3bd42 3816 int jaddr2=0,jaddr3,type;
b9b61529 3817 int agr=AGEN1+(i&1);
3818 u_int hr,reglist=0;
3819 u_int copr=(source[i]>>16)&0x1f;
3820 s=get_reg(i_regs->regmap,rs1[i]);
3821 tl=get_reg(i_regs->regmap,FTEMP);
3822 offset=imm[i];
3823 assert(rs1[i]>0);
3824 assert(tl>=0);
3825 assert(!using_tlb);
3826
3827 for(hr=0;hr<HOST_REGS;hr++) {
3828 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3829 }
3830 if(i_regs->regmap[HOST_CCREG]==CCREG)
3831 reglist&=~(1<<HOST_CCREG);
3832
3833 // get the address
3834 if (opcode[i]==0x3a) { // SWC2
3835 ar=get_reg(i_regs->regmap,agr);
3836 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3837 reglist|=1<<ar;
3838 } else { // LWC2
3839 ar=tl;
3840 }
1fd1aceb 3841 if(s>=0) c=(i_regs->wasconst>>s)&1;
3842 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
b9b61529 3843 if (!offset&&!c&&s>=0) ar=s;
3844 assert(ar>=0);
3845
3846 if (opcode[i]==0x3a) { // SWC2
3847 cop2_get_dreg(copr,tl,HOST_TEMPREG);
1fd1aceb 3848 type=STOREW_STUB;
b9b61529 3849 }
1fd1aceb 3850 else
b9b61529 3851 type=LOADW_STUB;
1fd1aceb 3852
3853 if(c&&!memtarget) {
3854 jaddr2=(int)out;
3855 emit_jmp(0); // inline_readstub/inline_writestub?
b9b61529 3856 }
1fd1aceb 3857 else {
3858 if(!c) {
3859 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3860 jaddr2=(int)out;
3861 emit_jno(0);
3862 }
3863 if (opcode[i]==0x32) { // LWC2
3864 #ifdef HOST_IMM_ADDR32
3865 if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3866 else
3867 #endif
3868 emit_readword_indexed(0,ar,tl);
3869 }
3870 if (opcode[i]==0x3a) { // SWC2
3871 #ifdef DESTRUCTIVE_SHIFT
3872 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3873 #endif
3874 emit_writeword_indexed(tl,0,ar);
3875 }
b9b61529 3876 }
3877 if(jaddr2)
3878 add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3879 if (opcode[i]==0x3a) { // SWC2
3880#if defined(HOST_IMM8)
3881 int ir=get_reg(i_regs->regmap,INVCP);
3882 assert(ir>=0);
3883 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3884#else
3885 emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3886#endif
0bbd1454 3887 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3888 emit_callne(invalidate_addr_reg[ar]);
3889 #else
b9b61529 3890 jaddr3=(int)out;
3891 emit_jne(0);
3892 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
0bbd1454 3893 #endif
b9b61529 3894 }
3895 if (opcode[i]==0x32) { // LWC2
3896 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3897 }
3898}
3899
57871462 3900#ifndef multdiv_assemble
3901void multdiv_assemble(int i,struct regstat *i_regs)
3902{
3903 printf("Need multdiv_assemble for this architecture.\n");
3904 exit(1);
3905}
3906#endif
3907
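// MFHI/MFLO/MTHI/MTLO: copy between HI/LO and a general-purpose register,
// handled here as a plain (possibly 64-bit) register move.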
3908void mov_assemble(int i,struct regstat *i_regs)
3909{
3910 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3911 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
57871462 3912 if(rt1[i]) {
3913 signed char sh,sl,th,tl;
3914 th=get_reg(i_regs->regmap,rt1[i]|64);
3915 tl=get_reg(i_regs->regmap,rt1[i]);
3916 //assert(tl>=0);
3917 if(tl>=0) {
3918 sh=get_reg(i_regs->regmap,rs1[i]|64);
3919 sl=get_reg(i_regs->regmap,rs1[i]);
3920 if(sl>=0) emit_mov(sl,tl);
3921 else emit_loadreg(rs1[i],tl);
3922 if(th>=0) {
3923 if(sh>=0) emit_mov(sh,th);
3924 else emit_loadreg(rs1[i]|64,th);
3925 }
3926 }
3927 }
3928}
3929
3930#ifndef fconv_assemble
3931void fconv_assemble(int i,struct regstat *i_regs)
3932{
3933 printf("Need fconv_assemble for this architecture.\n");
3934 exit(1);
3935}
3936#endif
3937
3938#if 0
3939void float_assemble(int i,struct regstat *i_regs)
3940{
3941 printf("Need float_assemble for this architecture.\n");
3942 exit(1);
3943}
3944#endif
3945
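// SYSCALL: load the exception PC into the first host register, add the
// accumulated cycle count, and jump to the HLE syscall handler.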
3946void syscall_assemble(int i,struct regstat *i_regs)
3947{
3948 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3949 assert(ccreg==HOST_CCREG);
3950 assert(!is_delayslot);
3951 emit_movimm(start+i*4,EAX); // Get PC
3952 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
7139f3c8 3953 emit_jmp((int)jump_syscall_hle); // XXX
3954}
3955
3956void hlecall_assemble(int i,struct regstat *i_regs)
3957{
3958 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3959 assert(ccreg==HOST_CCREG);
3960 assert(!is_delayslot);
3961 emit_movimm(start+i*4+4,0); // Get PC
67ba0fb4 3962 emit_movimm((int)psxHLEt[source[i]&7],1);
7139f3c8 3963 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
67ba0fb4 3964 emit_jmp((int)jump_hlecall);
57871462 3965}
3966
1e973cb0 3967void intcall_assemble(int i,struct regstat *i_regs)
3968{
3969 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3970 assert(ccreg==HOST_CCREG);
3971 assert(!is_delayslot);
3972 emit_movimm(start+i*4,0); // Get PC
3973 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3974 emit_jmp((int)jump_intcall);
3975}
3976
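// Assemble the instruction in a branch delay slot: set is_delayslot and
// dispatch on the instruction type.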
57871462 3977void ds_assemble(int i,struct regstat *i_regs)
3978{
3979 is_delayslot=1;
3980 switch(itype[i]) {
3981 case ALU:
3982 alu_assemble(i,i_regs);break;
3983 case IMM16:
3984 imm16_assemble(i,i_regs);break;
3985 case SHIFT:
3986 shift_assemble(i,i_regs);break;
3987 case SHIFTIMM:
3988 shiftimm_assemble(i,i_regs);break;
3989 case LOAD:
3990 load_assemble(i,i_regs);break;
3991 case LOADLR:
3992 loadlr_assemble(i,i_regs);break;
3993 case STORE:
3994 store_assemble(i,i_regs);break;
3995 case STORELR:
3996 storelr_assemble(i,i_regs);break;
3997 case COP0:
3998 cop0_assemble(i,i_regs);break;
3999 case COP1:
4000 cop1_assemble(i,i_regs);break;
4001 case C1LS:
4002 c1ls_assemble(i,i_regs);break;
b9b61529 4003 case COP2:
4004 cop2_assemble(i,i_regs);break;
4005 case C2LS:
4006 c2ls_assemble(i,i_regs);break;
4007 case C2OP:
4008 c2op_assemble(i,i_regs);break;
57871462 4009 case FCONV:
4010 fconv_assemble(i,i_regs);break;
4011 case FLOAT:
4012 float_assemble(i,i_regs);break;
4013 case FCOMP:
4014 fcomp_assemble(i,i_regs);break;
4015 case MULTDIV:
4016 multdiv_assemble(i,i_regs);break;
4017 case MOV:
4018 mov_assemble(i,i_regs);break;
4019 case SYSCALL:
7139f3c8 4020 case HLECALL:
1e973cb0 4021 case INTCALL:
57871462 4022 case SPAN:
4023 case UJUMP:
4024 case RJUMP:
4025 case CJUMP:
4026 case SJUMP:
4027 case FJUMP:
4028 printf("Jump in the delay slot. This is probably a bug.\n");
4029 }
4030 is_delayslot=0;
4031}
4032
4033// Is the branch target a valid internal jump?
4034int internal_branch(uint64_t i_is32,int addr)
4035{
4036 if(addr&1) return 0; // Indirect (register) jump
4037 if(addr>=start && addr<start+slen*4-4)
4038 {
4039 int t=(addr-start)>>2;
4040 // Delay slots are not valid branch targets
4041 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4042 // 64 -> 32 bit transition requires a recompile
4043 /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
4044 {
4045 if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
4046 else printf("optimizable: yes\n");
4047 }*/
4048 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
a28c6ce8 4049#ifndef FORCE32
57871462 4050 if(requires_32bit[t]&~i_is32) return 0;
a28c6ce8 4051 else
4052#endif
4053 return 1;
57871462 4054 }
4055 return 0;
4056}
4057
4058#ifndef wb_invalidate
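// Write back dirty values whose host-register mapping changes between the old
// (pre) and new (entry) register maps, skipping values marked unneeded, then
// satisfy remaining remaps with register-to-register moves where possible.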
4059void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
4060 uint64_t u,uint64_t uu)
4061{
4062 int hr;
4063 for(hr=0;hr<HOST_REGS;hr++) {
4064 if(hr!=EXCLUDE_REG) {
4065 if(pre[hr]!=entry[hr]) {
4066 if(pre[hr]>=0) {
4067 if((dirty>>hr)&1) {
4068 if(get_reg(entry,pre[hr])<0) {
4069 if(pre[hr]<64) {
4070 if(!((u>>pre[hr])&1)) {
4071 emit_storereg(pre[hr],hr);
4072 if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
4073 emit_sarimm(hr,31,hr);
4074 emit_storereg(pre[hr]|64,hr);
4075 }
4076 }
4077 }else{
4078 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
4079 emit_storereg(pre[hr],hr);
4080 }
4081 }
4082 }
4083 }
4084 }
4085 }
4086 }
4087 }
4088 // Move from one register to another (no writeback)
4089 for(hr=0;hr<HOST_REGS;hr++) {
4090 if(hr!=EXCLUDE_REG) {
4091 if(pre[hr]!=entry[hr]) {
4092 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
4093 int nr;
4094 if((nr=get_reg(entry,pre[hr]))>=0) {
4095 emit_mov(hr,nr);
4096 }
4097 }
4098 }
4099 }
4100 }
4101}
4102#endif
4103
4104// Load the specified registers
4105// This only loads the registers given as arguments because
4106// we don't want to load things that will be overwritten
4107void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
4108{
4109 int hr;
4110 // Load 32-bit regs
4111 for(hr=0;hr<HOST_REGS;hr++) {
4112 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4113 if(entry[hr]!=regmap[hr]) {
4114 if(regmap[hr]==rs1||regmap[hr]==rs2)
4115 {
4116 if(regmap[hr]==0) {
4117 emit_zeroreg(hr);
4118 }
4119 else
4120 {
4121 emit_loadreg(regmap[hr],hr);
4122 }
4123 }
4124 }
4125 }
4126 }
4127 //Load 64-bit regs
4128 for(hr=0;hr<HOST_REGS;hr++) {
4129 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4130 if(entry[hr]!=regmap[hr]) {
4131 if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
4132 {
4133 assert(regmap[hr]!=64);
4134 if((is32>>(regmap[hr]&63))&1) {
4135 int lr=get_reg(regmap,regmap[hr]-64);
4136 if(lr>=0)
4137 emit_sarimm(lr,31,hr);
4138 else
4139 emit_loadreg(regmap[hr],hr);
4140 }
4141 else
4142 {
4143 emit_loadreg(regmap[hr],hr);
4144 }
4145 }
4146 }
4147 }
4148 }
4149}
4150
4151// Load registers prior to the start of a loop
4152// so that they are not loaded within the loop
4153static void loop_preload(signed char pre[],signed char entry[])
4154{
4155 int hr;
4156 for(hr=0;hr<HOST_REGS;hr++) {
4157 if(hr!=EXCLUDE_REG) {
4158 if(pre[hr]!=entry[hr]) {
4159 if(entry[hr]>=0) {
4160 if(get_reg(pre,entry[hr])<0) {
4161 assem_debug("loop preload:\n");
4162 //printf("loop preload: %d\n",hr);
4163 if(entry[hr]==0) {
4164 emit_zeroreg(hr);
4165 }
4166 else if(entry[hr]<TEMPREG)
4167 {
4168 emit_loadreg(entry[hr],hr);
4169 }
4170 else if(entry[hr]-64<TEMPREG)
4171 {
4172 emit_loadreg(entry[hr],hr);
4173 }
4174 }
4175 }
4176 }
4177 }
4178 }
4179}
4180
4181// Generate address for load/store instruction
b9b61529 4182// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
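// Also preloads the TLB map entry and address constants for the next
// instruction (i+1), one cycle ahead of their use.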
57871462 4183void address_generation(int i,struct regstat *i_regs,signed char entry[])
4184{
b9b61529 4185 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
5194fb95 4186 int ra=-1;
57871462 4187 int agr=AGEN1+(i&1);
4188 int mgr=MGEN1+(i&1);
4189 if(itype[i]==LOAD) {
4190 ra=get_reg(i_regs->regmap,rt1[i]);
535d208a 4191 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4192 assert(ra>=0);
57871462 4193 }
4194 if(itype[i]==LOADLR) {
4195 ra=get_reg(i_regs->regmap,FTEMP);
4196 }
4197 if(itype[i]==STORE||itype[i]==STORELR) {
4198 ra=get_reg(i_regs->regmap,agr);
4199 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4200 }
b9b61529 4201 if(itype[i]==C1LS||itype[i]==C2LS) {
4202 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
57871462 4203 ra=get_reg(i_regs->regmap,FTEMP);
1fd1aceb 4204 else { // SWC1/SDC1/SWC2/SDC2
57871462 4205 ra=get_reg(i_regs->regmap,agr);
4206 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4207 }
4208 }
4209 int rs=get_reg(i_regs->regmap,rs1[i]);
4210 int rm=get_reg(i_regs->regmap,TLREG);
4211 if(ra>=0) {
4212 int offset=imm[i];
4213 int c=(i_regs->wasconst>>rs)&1;
4214 if(rs1[i]==0) {
4215 // Using r0 as a base address
4216 /*if(rm>=0) {
4217 if(!entry||entry[rm]!=mgr) {
4218 generate_map_const(offset,rm);
4219 } // else did it in the previous cycle
4220 }*/
4221 if(!entry||entry[ra]!=agr) {
4222 if (opcode[i]==0x22||opcode[i]==0x26) {
4223 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4224 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4225 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4226 }else{
4227 emit_movimm(offset,ra);
4228 }
4229 } // else did it in the previous cycle
4230 }
4231 else if(rs<0) {
4232 if(!entry||entry[ra]!=rs1[i])
4233 emit_loadreg(rs1[i],ra);
4234 //if(!entry||entry[ra]!=rs1[i])
4235 // printf("poor load scheduling!\n");
4236 }
4237 else if(c) {
4238 if(rm>=0) {
4239 if(!entry||entry[rm]!=mgr) {
b9b61529 4240 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
57871462 4241 // Stores to memory go thru the mapper to detect self-modifying
4242 // code, loads don't.
4243 if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4cb76aa4 4244 (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4245 generate_map_const(constmap[i][rs]+offset,rm);
4246 }else{
4247 if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4248 generate_map_const(constmap[i][rs]+offset,rm);
4249 }
4250 }
4251 }
4252 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4253 if(!entry||entry[ra]!=agr) {
4254 if (opcode[i]==0x22||opcode[i]==0x26) {
4255 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4256 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4257 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4258 }else{
4259 #ifdef HOST_IMM_ADDR32
b9b61529 4260 if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4261 (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4262 #endif
4263 emit_movimm(constmap[i][rs]+offset,ra);
4264 }
4265 } // else did it in the previous cycle
4266 } // else load_consts already did it
4267 }
4268 if(offset&&!c&&rs1[i]) {
4269 if(rs>=0) {
4270 emit_addimm(rs,offset,ra);
4271 }else{
4272 emit_addimm(ra,offset,ra);
4273 }
4274 }
4275 }
4276 }
4277 // Preload constants for next instruction
b9b61529 4278 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 4279 int agr,ra;
4280 #ifndef HOST_IMM_ADDR32
4281 // Mapper entry
4282 agr=MGEN1+((i+1)&1);
4283 ra=get_reg(i_regs->regmap,agr);
4284 if(ra>=0) {
4285 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4286 int offset=imm[i+1];
4287 int c=(regs[i+1].wasconst>>rs)&1;
4288 if(c) {
b9b61529 4289 if(itype[i+1]==STORE||itype[i+1]==STORELR
4290 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
57871462 4291 // Stores to memory go thru the mapper to detect self-modifying
4292 // code, loads don't.
4293 if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4cb76aa4 4294 (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
57871462 4295 generate_map_const(constmap[i+1][rs]+offset,ra);
4296 }else{
4297 if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4298 generate_map_const(constmap[i+1][rs]+offset,ra);
4299 }
4300 }
4301 /*else if(rs1[i]==0) {
4302 generate_map_const(offset,ra);
4303 }*/
4304 }
4305 #endif
4306 // Actual address
4307 agr=AGEN1+((i+1)&1);
4308 ra=get_reg(i_regs->regmap,agr);
4309 if(ra>=0) {
4310 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4311 int offset=imm[i+1];
4312 int c=(regs[i+1].wasconst>>rs)&1;
4313 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4314 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4315 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4316 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4317 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4318 }else{
4319 #ifdef HOST_IMM_ADDR32
b9b61529 4320 if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
57871462 4321 (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4322 #endif
4323 emit_movimm(constmap[i+1][rs]+offset,ra);
4324 }
4325 }
4326 else if(rs1[i+1]==0) {
4327 // Using r0 as a base address
4328 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4329 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4330 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4331 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4332 }else{
4333 emit_movimm(offset,ra);
4334 }
4335 }
4336 }
4337 }
4338}
4339
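// Determine the final constant value host register hr will hold in this block
// (scanning forward while it stays constant and no branch target intervenes),
// so load_consts can emit a single immediate load. Returns 0 when no load is
// needed here (the value is unneeded, or the address will be embedded directly
// with HOST_IMM_ADDR32).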
4340int get_final_value(int hr, int i, int *value)
4341{
4342 int reg=regs[i].regmap[hr];
4343 while(i<slen-1) {
4344 if(regs[i+1].regmap[hr]!=reg) break;
4345 if(!((regs[i+1].isconst>>hr)&1)) break;
4346 if(bt[i+1]) break;
4347 i++;
4348 }
4349 if(i<slen-1) {
4350 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4351 *value=constmap[i][hr];
4352 return 1;
4353 }
4354 if(!bt[i+1]) {
4355 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4356 // Load in delay slot, out-of-order execution
4357 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4358 {
4359 #ifdef HOST_IMM_ADDR32
4360 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4361 #endif
4362 // Precompute load address
4363 *value=constmap[i][hr]+imm[i+2];
4364 return 1;
4365 }
4366 }
4367 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4368 {
4369 #ifdef HOST_IMM_ADDR32
4370 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4371 #endif
4372 // Precompute load address
4373 *value=constmap[i][hr]+imm[i+1];
4374 //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4375 return 1;
4376 }
4377 }
4378 }
4379 *value=constmap[i][hr];
4380 //printf("c=%x\n",(int)constmap[i][hr]);
4381 if(i==slen-1) return 1;
4382 if(reg<64) {
4383 return !((unneeded_reg[i+1]>>reg)&1);
4384 }else{
4385 return !((unneeded_reg_upper[i+1]>>reg)&1);
4386 }
4387}
4388
4389// Load registers with known constants
4390void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4391{
4392 int hr;
4393 // Load 32-bit regs
4394 for(hr=0;hr<HOST_REGS;hr++) {
4395 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4396 //if(entry[hr]!=regmap[hr]) {
4397 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4398 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4399 int value;
4400 if(get_final_value(hr,i,&value)) {
4401 if(value==0) {
4402 emit_zeroreg(hr);
4403 }
4404 else {
4405 emit_movimm(value,hr);
4406 }
4407 }
4408 }
4409 }
4410 }
4411 }
4412 // Load 64-bit regs
4413 for(hr=0;hr<HOST_REGS;hr++) {
4414 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4415 //if(entry[hr]!=regmap[hr]) {
4416 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4417 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4418 if((is32>>(regmap[hr]&63))&1) {
4419 int lr=get_reg(regmap,regmap[hr]-64);
4420 assert(lr>=0);
4421 emit_sarimm(lr,31,hr);
4422 }
4423 else
4424 {
4425 int value;
4426 if(get_final_value(hr,i,&value)) {
4427 if(value==0) {
4428 emit_zeroreg(hr);
4429 }
4430 else {
4431 emit_movimm(value,hr);
4432 }
4433 }
4434 }
4435 }
4436 }
4437 }
4438 }
4439}
4440void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4441{
4442 int hr;
4443 // Load 32-bit regs
4444 for(hr=0;hr<HOST_REGS;hr++) {
4445 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4446 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4447 int value=constmap[i][hr];
4448 if(value==0) {
4449 emit_zeroreg(hr);
4450 }
4451 else {
4452 emit_movimm(value,hr);
4453 }
4454 }
4455 }
4456 }
4457 // Load 64-bit regs
4458 for(hr=0;hr<HOST_REGS;hr++) {
4459 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4460 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4461 if((is32>>(regmap[hr]&63))&1) {
4462 int lr=get_reg(regmap,regmap[hr]-64);
4463 assert(lr>=0);
4464 emit_sarimm(lr,31,hr);
4465 }
4466 else
4467 {
4468 int value=constmap[i][hr];
4469 if(value==0) {
4470 emit_zeroreg(hr);
4471 }
4472 else {
4473 emit_movimm(value,hr);
4474 }
4475 }
4476 }
4477 }
4478 }
4479}
4480
4481// Write out all dirty registers (except cycle count)
4482void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4483{
4484 int hr;
4485 for(hr=0;hr<HOST_REGS;hr++) {
4486 if(hr!=EXCLUDE_REG) {
4487 if(i_regmap[hr]>0) {
4488 if(i_regmap[hr]!=CCREG) {
4489 if((i_dirty>>hr)&1) {
4490 if(i_regmap[hr]<64) {
4491 emit_storereg(i_regmap[hr],hr);
24385cae 4492#ifndef FORCE32
57871462 4493 if( ((i_is32>>i_regmap[hr])&1) ) {
4494 #ifdef DESTRUCTIVE_WRITEBACK
4495 emit_sarimm(hr,31,hr);
4496 emit_storereg(i_regmap[hr]|64,hr);
4497 #else
4498 emit_sarimm(hr,31,HOST_TEMPREG);
4499 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4500 #endif
4501 }
24385cae 4502#endif
57871462 4503 }else{
4504 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4505 emit_storereg(i_regmap[hr],hr);
4506 }
4507 }
4508 }
4509 }
4510 }
4511 }
4512 }
4513}
4514// Write out dirty registers that we need to reload (pairs with load_needed_regs)
4515// This writes the registers not written by store_regs_bt
4516void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4517{
4518 int hr;
4519 int t=(addr-start)>>2;
4520 for(hr=0;hr<HOST_REGS;hr++) {
4521 if(hr!=EXCLUDE_REG) {
4522 if(i_regmap[hr]>0) {
4523 if(i_regmap[hr]!=CCREG) {
4524 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4525 if((i_dirty>>hr)&1) {
4526 if(i_regmap[hr]<64) {
4527 emit_storereg(i_regmap[hr],hr);
24385cae 4528#ifndef FORCE32
57871462 4529 if( ((i_is32>>i_regmap[hr])&1) ) {
4530 #ifdef DESTRUCTIVE_WRITEBACK
4531 emit_sarimm(hr,31,hr);
4532 emit_storereg(i_regmap[hr]|64,hr);
4533 #else
4534 emit_sarimm(hr,31,HOST_TEMPREG);
4535 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4536 #endif
4537 }
24385cae 4538#endif
57871462 4539 }else{
4540 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4541 emit_storereg(i_regmap[hr],hr);
4542 }
4543 }
4544 }
4545 }
4546 }
4547 }
4548 }
4549 }
4550}
4551
4552// Load all registers (except cycle count)
4553void load_all_regs(signed char i_regmap[])
4554{
4555 int hr;
4556 for(hr=0;hr<HOST_REGS;hr++) {
4557 if(hr!=EXCLUDE_REG) {
4558 if(i_regmap[hr]==0) {
4559 emit_zeroreg(hr);
4560 }
4561 else
ea3d2e6e 4562 if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
57871462 4563 {
4564 emit_loadreg(i_regmap[hr],hr);
4565 }
4566 }
4567 }
4568}
4569
4570// Load all registers that are also needed by the next instruction
4571void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4572{
4573 int hr;
4574 for(hr=0;hr<HOST_REGS;hr++) {
4575 if(hr!=EXCLUDE_REG) {
4576 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4577 if(i_regmap[hr]==0) {
4578 emit_zeroreg(hr);
4579 }
4580 else
ea3d2e6e 4581 if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
57871462 4582 {
4583 emit_loadreg(i_regmap[hr],hr);
4584 }
4585 }
4586 }
4587 }
4588}
4589
4590// Load all regs, storing cycle count if necessary
4591void load_regs_entry(int t)
4592{
4593 int hr;
4594 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4595 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4596 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4597 emit_storereg(CCREG,HOST_CCREG);
4598 }
4599 // Load 32-bit regs
4600 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4601 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
57871462 4602 if(regs[t].regmap_entry[hr]==0) {
4603 emit_zeroreg(hr);
4604 }
4605 else if(regs[t].regmap_entry[hr]!=CCREG)
4606 {
4607 emit_loadreg(regs[t].regmap_entry[hr],hr);
4608 }
4609 }
4610 }
4611 // Load 64-bit regs
4612 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4613 if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
57871462 4614 assert(regs[t].regmap_entry[hr]!=64);
4615 if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4616 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4617 if(lr<0) {
4618 emit_loadreg(regs[t].regmap_entry[hr],hr);
4619 }
4620 else
4621 {
4622 emit_sarimm(lr,31,hr);
4623 }
4624 }
4625 else
4626 {
4627 emit_loadreg(regs[t].regmap_entry[hr],hr);
4628 }
4629 }
4630 }
4631}
4632
4633// Store dirty registers prior to branch
4634void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4635{
4636 if(internal_branch(i_is32,addr))
4637 {
4638 int t=(addr-start)>>2;
4639 int hr;
4640 for(hr=0;hr<HOST_REGS;hr++) {
4641 if(hr!=EXCLUDE_REG) {
4642 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4643 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4644 if((i_dirty>>hr)&1) {
4645 if(i_regmap[hr]<64) {
4646 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4647 emit_storereg(i_regmap[hr],hr);
4648 if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4649 #ifdef DESTRUCTIVE_WRITEBACK
4650 emit_sarimm(hr,31,hr);
4651 emit_storereg(i_regmap[hr]|64,hr);
4652 #else
4653 emit_sarimm(hr,31,HOST_TEMPREG);
4654 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4655 #endif
4656 }
4657 }
4658 }else{
4659 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4660 emit_storereg(i_regmap[hr],hr);
4661 }
4662 }
4663 }
4664 }
4665 }
4666 }
4667 }
4668 }
4669 else
4670 {
4671 // Branch out of this block, write out all dirty regs
4672 wb_dirtys(i_regmap,i_is32,i_dirty);
4673 }
4674}
4675
4676// Load all needed registers for branch target
4677void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4678{
4679 //if(addr>=start && addr<(start+slen*4))
4680 if(internal_branch(i_is32,addr))
4681 {
4682 int t=(addr-start)>>2;
4683 int hr;
4684 // Store the cycle count before loading something else
4685 if(i_regmap[HOST_CCREG]!=CCREG) {
4686 assert(i_regmap[HOST_CCREG]==-1);
4687 }
4688 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4689 emit_storereg(CCREG,HOST_CCREG);
4690 }
4691 // Load 32-bit regs
4692 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4693 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
57871462 4694 #ifdef DESTRUCTIVE_WRITEBACK
4695 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4696 #else
4697 if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4698 #endif
4699 if(regs[t].regmap_entry[hr]==0) {
4700 emit_zeroreg(hr);
4701 }
4702 else if(regs[t].regmap_entry[hr]!=CCREG)
4703 {
4704 emit_loadreg(regs[t].regmap_entry[hr],hr);
4705 }
4706 }
4707 }
4708 }
4709 //Load 64-bit regs
4710 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4711 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
57871462 4712 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4713 assert(regs[t].regmap_entry[hr]!=64);
4714 if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4715 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4716 if(lr<0) {
4717 emit_loadreg(regs[t].regmap_entry[hr],hr);
4718 }
4719 else
4720 {
4721 emit_sarimm(lr,31,hr);
4722 }
4723 }
4724 else
4725 {
4726 emit_loadreg(regs[t].regmap_entry[hr],hr);
4727 }
4728 }
4729 else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4730 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4731 assert(lr>=0);
4732 emit_sarimm(lr,31,hr);
4733 }
4734 }
4735 }
4736 }
4737}
4738
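// Check whether the current register mapping, dirty and 32/64-bit state is
// compatible with the state expected at the branch target, so the branch can
// be linked directly without extra writeback or reload code.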
4739int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4740{
4741 if(addr>=start && addr<start+slen*4-4)
4742 {
4743 int t=(addr-start)>>2;
4744 int hr;
4745 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4746 for(hr=0;hr<HOST_REGS;hr++)
4747 {
4748 if(hr!=EXCLUDE_REG)
4749 {
4750 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4751 {
ea3d2e6e 4752 if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
57871462 4753 {
4754 return 0;
4755 }
4756 else
4757 if((i_dirty>>hr)&1)
4758 {
ea3d2e6e 4759 if(i_regmap[hr]<TEMPREG)
57871462 4760 {
4761 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4762 return 0;
4763 }
ea3d2e6e 4764 else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
57871462 4765 {
4766 if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4767 return 0;
4768 }
4769 }
4770 }
4771 else // Same register but is it 32-bit or dirty?
4772 if(i_regmap[hr]>=0)
4773 {
4774 if(!((regs[t].dirty>>hr)&1))
4775 {
4776 if((i_dirty>>hr)&1)
4777 {
4778 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4779 {
4780 //printf("%x: dirty no match\n",addr);
4781 return 0;
4782 }
4783 }
4784 }
4785 if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4786 {
4787 //printf("%x: is32 no match\n",addr);
4788 return 0;
4789 }
4790 }
4791 }
4792 }
4793 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
a28c6ce8 4794#ifndef FORCE32
57871462 4795 if(requires_32bit[t]&~i_is32) return 0;
a28c6ce8 4796#endif
57871462 4797 // Delay slots are not valid branch targets
4798 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4799 // Delay slots require additional processing, so do not match
4800 if(is_ds[t]) return 0;
4801 }
4802 else
4803 {
4804 int hr;
4805 for(hr=0;hr<HOST_REGS;hr++)
4806 {
4807 if(hr!=EXCLUDE_REG)
4808 {
4809 if(i_regmap[hr]>=0)
4810 {
4811 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4812 {
4813 if((i_dirty>>hr)&1)
4814 {
4815 return 0;
4816 }
4817 }
4818 }
4819 }
4820 }
4821 }
4822 return 1;
4823}
4824
4825// Used when a branch jumps into the delay slot of another branch
4826void ds_assemble_entry(int i)
4827{
4828 int t=(ba[i]-start)>>2;
4829 if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4830 assem_debug("Assemble delay slot at %x\n",ba[i]);
4831 assem_debug("<->\n");
4832 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4833 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4834 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4835 address_generation(t,&regs[t],regs[t].regmap_entry);
b9b61529 4836 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
57871462 4837 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4838 cop1_usable=0;
4839 is_delayslot=0;
4840 switch(itype[t]) {
4841 case ALU:
4842 alu_assemble(t,&regs[t]);break;
4843 case IMM16:
4844 imm16_assemble(t,&regs[t]);break;
4845 case SHIFT:
4846 shift_assemble(t,&regs[t]);break;
4847 case SHIFTIMM:
4848 shiftimm_assemble(t,&regs[t]);break;
4849 case LOAD:
4850 load_assemble(t,&regs[t]);break;
4851 case LOADLR:
4852 loadlr_assemble(t,&regs[t]);break;
4853 case STORE:
4854 store_assemble(t,&regs[t]);break;
4855 case STORELR:
4856 storelr_assemble(t,&regs[t]);break;
4857 case COP0:
4858 cop0_assemble(t,&regs[t]);break;
4859 case COP1:
4860 cop1_assemble(t,&regs[t]);break;
4861 case C1LS:
4862 c1ls_assemble(t,&regs[t]);break;
b9b61529 4863 case COP2:
4864 cop2_assemble(t,&regs[t]);break;
4865 case C2LS:
4866 c2ls_assemble(t,&regs[t]);break;
4867 case C2OP:
4868 c2op_assemble(t,&regs[t]);break;
57871462 4869 case FCONV:
4870 fconv_assemble(t,&regs[t]);break;
4871 case FLOAT:
4872 float_assemble(t,&regs[t]);break;
4873 case FCOMP:
4874 fcomp_assemble(t,&regs[t]);break;
4875 case MULTDIV:
4876 multdiv_assemble(t,&regs[t]);break;
4877 case MOV:
4878 mov_assemble(t,&regs[t]);break;
4879 case SYSCALL:
7139f3c8 4880 case HLECALL:
1e973cb0 4881 case INTCALL:
57871462 4882 case SPAN:
4883 case UJUMP:
4884 case RJUMP:
4885 case CJUMP:
4886 case SJUMP:
4887 case FJUMP:
4888 printf("Jump in the delay slot. This is probably a bug.\n");
4889 }
4890 store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4891 load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4892 if(internal_branch(regs[t].is32,ba[i]+4))
4893 assem_debug("branch: internal\n");
4894 else
4895 assem_debug("branch: external\n");
4896 assert(internal_branch(regs[t].is32,ba[i]+4));
4897 add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4898 emit_jmp(0);
4899}
4900
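// Emit the cycle-counter update and expiry test before a branch. Detects
// single-instruction idle loops and registers a CC_STUB that invokes the
// interrupt handler when the count runs out.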
4901void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4902{
4903 int count;
4904 int jaddr;
4905 int idle=0;
4906 if(itype[i]==RJUMP)
4907 {
4908 *adj=0;
4909 }
4910 //if(ba[i]>=start && ba[i]<(start+slen*4))
4911 if(internal_branch(branch_regs[i].is32,ba[i]))
4912 {
4913 int t=(ba[i]-start)>>2;
4914 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4915 else *adj=ccadj[t];
4916 }
4917 else
4918 {
4919 *adj=0;
4920 }
4921 count=ccadj[i];
4922 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4923 // Idle loop
4924 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4925 idle=(int)out;
4926 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4927 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4928 jaddr=(int)out;
4929 emit_jmp(0);
4930 }
4931 else if(*adj==0||invert) {
4932 emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4933 jaddr=(int)out;
4934 emit_jns(0);
4935 }
4936 else
4937 {
eeb1feeb 4938 emit_cmpimm(HOST_CCREG,-CLOCK_DIVIDER*(count+2));
57871462 4939 jaddr=(int)out;
4940 emit_jns(0);
4941 }
4942 add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4943}
4944
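// Out-of-line path taken when the cycle count expires at a branch: write back
// dirty registers, record the return PC (recomputing it for conditional
// branches), call cc_interrupt, then reload registers and jump back.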
4945void do_ccstub(int n)
4946{
4947 literal_pool(256);
4948 assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4949 set_jump_target(stubs[n][1],(int)out);
4950 int i=stubs[n][4];
4951 if(stubs[n][6]==NULLDS) {
4952 // Delay slot instruction is nullified ("likely" branch)
4953 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4954 }
4955 else if(stubs[n][6]!=TAKEN) {
4956 wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4957 }
4958 else {
4959 if(internal_branch(branch_regs[i].is32,ba[i]))
4960 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4961 }
4962 if(stubs[n][5]!=-1)
4963 {
4964 // Save PC as return address
4965 emit_movimm(stubs[n][5],EAX);
4966 emit_writeword(EAX,(int)&pcaddr);
4967 }
4968 else
4969 {
4970 // Return address depends on which way the branch goes
4971 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4972 {
4973 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4974 int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4975 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4976 int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4977 if(rs1[i]==0)
4978 {
4979 s1l=s2l;s1h=s2h;
4980 s2l=s2h=-1;
4981 }
4982 else if(rs2[i]==0)
4983 {
4984 s2l=s2h=-1;
4985 }
4986 if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4987 s1h=s2h=-1;
4988 }
4989 assert(s1l>=0);
4990 #ifdef DESTRUCTIVE_WRITEBACK
4991 if(rs1[i]) {
4992 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4993 emit_loadreg(rs1[i],s1l);
4994 }
4995 else {
4996 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4997 emit_loadreg(rs2[i],s1l);
4998 }
4999 if(s2l>=0)
5000 if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
5001 emit_loadreg(rs2[i],s2l);
5002 #endif
5003 int hr=0;
5194fb95 5004 int addr=-1,alt=-1,ntaddr=-1;
57871462 5005 while(hr<HOST_REGS)
5006 {
5007 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5008 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5009 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5010 {
5011 addr=hr++;break;
5012 }
5013 hr++;
5014 }
5015 while(hr<HOST_REGS)
5016 {
5017 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5018 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5019 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5020 {
5021 alt=hr++;break;
5022 }
5023 hr++;
5024 }
5025 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5026 {
5027 while(hr<HOST_REGS)
5028 {
5029 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5030 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5031 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5032 {
5033 ntaddr=hr;break;
5034 }
5035 hr++;
5036 }
5037 assert(hr<HOST_REGS);
5038 }
5039 if((opcode[i]&0x2f)==4) // BEQ
5040 {
5041 #ifdef HAVE_CMOV_IMM
5042 if(s1h<0) {
5043 if(s2l>=0) emit_cmp(s1l,s2l);
5044 else emit_test(s1l,s1l);
5045 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5046 }
5047 else
5048 #endif
5049 {
5050 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5051 if(s1h>=0) {
5052 if(s2h>=0) emit_cmp(s1h,s2h);
5053 else emit_test(s1h,s1h);
5054 emit_cmovne_reg(alt,addr);
5055 }
5056 if(s2l>=0) emit_cmp(s1l,s2l);
5057 else emit_test(s1l,s1l);
5058 emit_cmovne_reg(alt,addr);
5059 }
5060 }
5061 if((opcode[i]&0x2f)==5) // BNE
5062 {
5063 #ifdef HAVE_CMOV_IMM
5064 if(s1h<0) {
5065 if(s2l>=0) emit_cmp(s1l,s2l);
5066 else emit_test(s1l,s1l);
5067 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5068 }
5069 else
5070 #endif
5071 {
5072 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5073 if(s1h>=0) {
5074 if(s2h>=0) emit_cmp(s1h,s2h);
5075 else emit_test(s1h,s1h);
5076 emit_cmovne_reg(alt,addr);
5077 }
5078 if(s2l>=0) emit_cmp(s1l,s2l);
5079 else emit_test(s1l,s1l);
5080 emit_cmovne_reg(alt,addr);
5081 }
5082 }
5083 if((opcode[i]&0x2f)==6) // BLEZ
5084 {
5085 //emit_movimm(ba[i],alt);
5086 //emit_movimm(start+i*4+8,addr);
5087 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5088 emit_cmpimm(s1l,1);
5089 if(s1h>=0) emit_mov(addr,ntaddr);
5090 emit_cmovl_reg(alt,addr);
5091 if(s1h>=0) {
5092 emit_test(s1h,s1h);
5093 emit_cmovne_reg(ntaddr,addr);
5094 emit_cmovs_reg(alt,addr);
5095 }
5096 }
5097 if((opcode[i]&0x2f)==7) // BGTZ
5098 {
5099 //emit_movimm(ba[i],addr);
5100 //emit_movimm(start+i*4+8,ntaddr);
5101 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5102 emit_cmpimm(s1l,1);
5103 if(s1h>=0) emit_mov(addr,alt);
5104 emit_cmovl_reg(ntaddr,addr);
5105 if(s1h>=0) {
5106 emit_test(s1h,s1h);
5107 emit_cmovne_reg(alt,addr);
5108 emit_cmovs_reg(ntaddr,addr);
5109 }
5110 }
5111 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
5112 {
5113 //emit_movimm(ba[i],alt);
5114 //emit_movimm(start+i*4+8,addr);
5115 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5116 if(s1h>=0) emit_test(s1h,s1h);
5117 else emit_test(s1l,s1l);
5118 emit_cmovs_reg(alt,addr);
5119 }
5120 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
5121 {
5122 //emit_movimm(ba[i],addr);
5123 //emit_movimm(start+i*4+8,alt);
5124 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5125 if(s1h>=0) emit_test(s1h,s1h);
5126 else emit_test(s1l,s1l);
5127 emit_cmovs_reg(alt,addr);
5128 }
5129 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5130 if(source[i]&0x10000) // BC1T
5131 {
5132 //emit_movimm(ba[i],alt);
5133 //emit_movimm(start+i*4+8,addr);
5134 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5135 emit_testimm(s1l,0x800000);
5136 emit_cmovne_reg(alt,addr);
5137 }
5138 else // BC1F
5139 {
5140 //emit_movimm(ba[i],addr);
5141 //emit_movimm(start+i*4+8,alt);
5142 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5143 emit_testimm(s1l,0x800000);
5144 emit_cmovne_reg(alt,addr);
5145 }
5146 }
5147 emit_writeword(addr,(int)&pcaddr);
5148 }
5149 else
5150 if(itype[i]==RJUMP)
5151 {
5152 int r=get_reg(branch_regs[i].regmap,rs1[i]);
5153 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5154 r=get_reg(branch_regs[i].regmap,RTEMP);
5155 }
5156 emit_writeword(r,(int)&pcaddr);
5157 }
5158 else {printf("Unknown branch type in do_ccstub\n");exit(1);}
5159 }
5160 // Update cycle count
5161 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5162 if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5163 emit_call((int)cc_interrupt);
5164 if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5165 if(stubs[n][6]==TAKEN) {
5166 if(internal_branch(branch_regs[i].is32,ba[i]))
5167 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5168 else if(itype[i]==RJUMP) {
5169 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5170 emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5171 else
5172 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5173 }
5174 }else if(stubs[n][6]==NOTTAKEN) {
5175 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5176 else load_all_regs(branch_regs[i].regmap);
5177 }else if(stubs[n][6]==NULLDS) {
5178 // Delay slot instruction is nullified ("likely" branch)
5179 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5180 else load_all_regs(regs[i].regmap);
5181 }else{
5182 load_all_regs(branch_regs[i].regmap);
5183 }
5184 emit_jmp(stubs[n][2]); // return address
5185
5186 /* This works but uses a lot of memory...
5187 emit_readword((int)&last_count,ECX);
5188 emit_add(HOST_CCREG,ECX,EAX);
5189 emit_writeword(EAX,(int)&Count);
5190 emit_call((int)gen_interupt);
5191 emit_readword((int)&Count,HOST_CCREG);
5192 emit_readword((int)&next_interupt,EAX);
5193 emit_readword((int)&pending_exception,EBX);
5194 emit_writeword(EAX,(int)&last_count);
5195 emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5196 emit_test(EBX,EBX);
5197 int jne_instr=(int)out;
5198 emit_jne(0);
5199 if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5200 load_all_regs(branch_regs[i].regmap);
5201 emit_jmp(stubs[n][2]); // return address
5202 set_jump_target(jne_instr,(int)out);
5203 emit_readword((int)&pcaddr,EAX);
5204 // Call get_addr_ht instead of doing the hash table here.
5205 // This code is executed infrequently and takes up a lot of space
5206 // so smaller is better.
5207 emit_storereg(CCREG,HOST_CCREG);
5208 emit_pushreg(EAX);
5209 emit_call((int)get_addr_ht);
5210 emit_loadreg(CCREG,HOST_CCREG);
5211 emit_addimm(ESP,4,ESP);
5212 emit_jmpreg(EAX);*/
5213}
5214
5215add_to_linker(int addr,int target,int ext)
5216{
5217 link_addr[linkcount][0]=addr;
5218 link_addr[linkcount][1]=target;
5219 link_addr[linkcount][2]=ext;
5220 linkcount++;
5221}
5222
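// Write the JAL return address (PC+8) into the link register ($31), optionally
// going through the mini hash table insert when USE_MINI_HT is enabled.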
eba830cd 5223static void ujump_assemble_write_ra(int i)
5224{
5225 int rt;
5226 unsigned int return_address;
5227 rt=get_reg(branch_regs[i].regmap,31);
5228 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5229 //assert(rt>=0);
5230 return_address=start+i*4+8;
5231 if(rt>=0) {
5232 #ifdef USE_MINI_HT
5233 if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
5234 int temp=-1; // note: must be ds-safe
5235 #ifdef HOST_TEMPREG
5236 temp=HOST_TEMPREG;
5237 #endif
5238 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5239 else emit_movimm(return_address,rt);
5240 }
5241 else
5242 #endif
5243 {
5244 #ifdef REG_PREFETCH
5245 if(temp>=0)
5246 {
5247 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5248 }
5249 #endif
5250 emit_movimm(return_address,rt); // PC into link register
5251 #ifdef IMM_PREFETCH
5252 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5253 #endif
5254 }
5255 }
5256}
5257
57871462 5258void ujump_assemble(int i,struct regstat *i_regs)
5259{
5260 signed char *i_regmap=i_regs->regmap;
eba830cd 5261 int ra_done=0;
57871462 5262 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5263 address_generation(i+1,i_regs,regs[i].regmap_entry);
5264 #ifdef REG_PREFETCH
5265 int temp=get_reg(branch_regs[i].regmap,PTEMP);
5266 if(rt1[i]==31&&temp>=0)
5267 {
5268 int return_address=start+i*4+8;
5269 if(get_reg(branch_regs[i].regmap,31)>0)
5270 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5271 }
5272 #endif
eba830cd 5273 if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5274 ujump_assemble_write_ra(i); // writeback ra for DS
5275 ra_done=1;
57871462 5276 }
4ef8f67d 5277 ds_assemble(i+1,i_regs);
5278 uint64_t bc_unneeded=branch_regs[i].u;
5279 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5280 bc_unneeded|=1|(1LL<<rt1[i]);
5281 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5282 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5283 bc_unneeded,bc_unneeded_upper);
5284 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
eba830cd 5285 if(!ra_done&&rt1[i]==31)
5286 ujump_assemble_write_ra(i);
57871462 5287 int cc,adj;
5288 cc=get_reg(branch_regs[i].regmap,CCREG);
5289 assert(cc==HOST_CCREG);
5290 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5291 #ifdef REG_PREFETCH
5292 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5293 #endif
5294 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5295 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5296 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5297 if(internal_branch(branch_regs[i].is32,ba[i]))
5298 assem_debug("branch: internal\n");
5299 else
5300 assem_debug("branch: external\n");
5301 if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5302 ds_assemble_entry(i);
5303 }
5304 else {
5305 add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5306 emit_jmp(0);
5307 }
5308}
5309
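// JALR counterpart of ujump_assemble_write_ra: write the return address into
// the destination (link) register.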
eba830cd 5310static void rjump_assemble_write_ra(int i)
5311{
5312 int rt,return_address;
5313 assert(rt1[i+1]!=rt1[i]);
5314 assert(rt2[i+1]!=rt1[i]);
5315 rt=get_reg(branch_regs[i].regmap,rt1[i]);
5316 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5317 assert(rt>=0);
5318 return_address=start+i*4+8;
5319 #ifdef REG_PREFETCH
5320 if(temp>=0)
5321 {
5322 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5323 }
5324 #endif
5325 emit_movimm(return_address,rt); // PC into link register
5326 #ifdef IMM_PREFETCH
5327 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5328 #endif
5329}
5330
57871462 5331void rjump_assemble(int i,struct regstat *i_regs)
5332{
5333 signed char *i_regmap=i_regs->regmap;
5334 int temp;
5335 int rs,cc,adj;
eba830cd 5336 int ra_done=0;
57871462 5337 rs=get_reg(branch_regs[i].regmap,rs1[i]);
5338 assert(rs>=0);
5339 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5340 // Delay slot abuse, make a copy of the branch address register
5341 temp=get_reg(branch_regs[i].regmap,RTEMP);
5342 assert(temp>=0);
5343 assert(regs[i].regmap[temp]==RTEMP);
5344 emit_mov(rs,temp);
5345 rs=temp;
5346 }
5347 address_generation(i+1,i_regs,regs[i].regmap_entry);
5348 #ifdef REG_PREFETCH
5349 if(rt1[i]==31)
5350 {
5351 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5352 int return_address=start+i*4+8;
5353 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5354 }
5355 }
5356 #endif
5357 #ifdef USE_MINI_HT
5358 if(rs1[i]==31) {
5359 int rh=get_reg(regs[i].regmap,RHASH);
5360 if(rh>=0) do_preload_rhash(rh);
5361 }
5362 #endif
eba830cd 5363 if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5364 rjump_assemble_write_ra(i);
5365 ra_done=1;
57871462 5366 }
d5910d5d 5367 ds_assemble(i+1,i_regs);
5368 uint64_t bc_unneeded=branch_regs[i].u;
5369 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5370 bc_unneeded|=1|(1LL<<rt1[i]);
5371 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5372 bc_unneeded&=~(1LL<<rs1[i]);
5373 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5374 bc_unneeded,bc_unneeded_upper);
5375 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
eba830cd 5376 if(!ra_done&&rt1[i]!=0)
5377 rjump_assemble_write_ra(i);
57871462 5378 cc=get_reg(branch_regs[i].regmap,CCREG);
5379 assert(cc==HOST_CCREG);
5380 #ifdef USE_MINI_HT
5381 int rh=get_reg(branch_regs[i].regmap,RHASH);
5382 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5383 if(rs1[i]==31) {
5384 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5385 do_preload_rhtbl(ht);
5386 do_rhash(rs,rh);
5387 }
5388 #endif
5389 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5390 #ifdef DESTRUCTIVE_WRITEBACK
5391 if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5392 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5393 emit_loadreg(rs1[i],rs);
5394 }
5395 }
5396 #endif
5397 #ifdef REG_PREFETCH
5398 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5399 #endif
5400 #ifdef USE_MINI_HT
5401 if(rs1[i]==31) {
5402 do_miniht_load(ht,rh);
5403 }
5404 #endif
5405 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5406 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5407 //assert(adj==0);
5408 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5409 add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5410 emit_jns(0);
5411 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5412 #ifdef USE_MINI_HT
5413 if(rs1[i]==31) {
5414 do_miniht_jump(rs,rh,ht);
5415 }
5416 else
5417 #endif
5418 {
5419 //if(rs!=EAX) emit_mov(rs,EAX);
5420 //emit_jmp((int)jump_vaddr_eax);
5421 emit_jmp(jump_vaddr_reg[rs]);
5422 }
5423 /* Check hash table
5424 temp=!rs;
5425 emit_mov(rs,temp);
5426 emit_shrimm(rs,16,rs);
5427 emit_xor(temp,rs,rs);
5428 emit_movzwl_reg(rs,rs);
5429 emit_shlimm(rs,4,rs);
5430 emit_cmpmem_indexed((int)hash_table,rs,temp);
5431 emit_jne((int)out+14);
5432 emit_readword_indexed((int)hash_table+4,rs,rs);
5433 emit_jmpreg(rs);
5434 emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5435 emit_addimm_no_flags(8,rs);
5436 emit_jeq((int)out-17);
5437 // No hit on hash table, call compiler
5438 emit_pushreg(temp);
5439//DEBUG >
5440#ifdef DEBUG_CYCLE_COUNT
5441 emit_readword((int)&last_count,ECX);
5442 emit_add(HOST_CCREG,ECX,HOST_CCREG);
5443 emit_readword((int)&next_interupt,ECX);
5444 emit_writeword(HOST_CCREG,(int)&Count);
5445 emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5446 emit_writeword(ECX,(int)&last_count);
5447#endif
5448//DEBUG <
5449 emit_storereg(CCREG,HOST_CCREG);
5450 emit_call((int)get_addr);
5451 emit_loadreg(CCREG,HOST_CCREG);
5452 emit_addimm(ESP,4,ESP);
5453 emit_jmpreg(EAX);*/
5454 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5455 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5456 #endif
5457}
5458
5459void cjump_assemble(int i,struct regstat *i_regs)
5460{
5461 signed char *i_regmap=i_regs->regmap;
5462 int cc;
5463 int match;
5464 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5465 assem_debug("match=%d\n",match);
5466 int s1h,s1l,s2h,s2l;
5467 int prev_cop1_usable=cop1_usable;
5468 int unconditional=0,nop=0;
5469 int only32=0;
57871462 5470 int invert=0;
5471 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5472 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5473 if(!match) invert=1;
5474 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5475 if(i>(ba[i]-start)>>2) invert=1;
5476 #endif
e1190b87 5477
5478 if(ooo[i]) {
57871462 5479 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5480 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5481 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5482 s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5483 }
5484 else {
5485 s1l=get_reg(i_regmap,rs1[i]);
5486 s1h=get_reg(i_regmap,rs1[i]|64);
5487 s2l=get_reg(i_regmap,rs2[i]);
5488 s2h=get_reg(i_regmap,rs2[i]|64);
5489 }
5490 if(rs1[i]==0&&rs2[i]==0)
5491 {
5492 if(opcode[i]&1) nop=1;
5493 else unconditional=1;
5494 //assert(opcode[i]!=5);
5495 //assert(opcode[i]!=7);
5496 //assert(opcode[i]!=0x15);
5497 //assert(opcode[i]!=0x17);
5498 }
5499 else if(rs1[i]==0)
5500 {
5501 s1l=s2l;s1h=s2h;
5502 s2l=s2h=-1;
5503 only32=(regs[i].was32>>rs2[i])&1;
5504 }
5505 else if(rs2[i]==0)
5506 {
5507 s2l=s2h=-1;
5508 only32=(regs[i].was32>>rs1[i])&1;
5509 }
5510 else {
5511 only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5512 }
5513
e1190b87 5514 if(ooo[i]) {
57871462 5515 // Out of order execution (delay slot first)
5516 //printf("OOOE\n");
5517 address_generation(i+1,i_regs,regs[i].regmap_entry);
5518 ds_assemble(i+1,i_regs);
5519 int adj;
5520 uint64_t bc_unneeded=branch_regs[i].u;
5521 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5522 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5523 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5524 bc_unneeded|=1;
5525 bc_unneeded_upper|=1;
5526 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5527 bc_unneeded,bc_unneeded_upper);
5528 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5529 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5530 cc=get_reg(branch_regs[i].regmap,CCREG);
5531 assert(cc==HOST_CCREG);
5532 if(unconditional)
5533 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5534 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5535 //assem_debug("cycle count (adj)\n");
5536 if(unconditional) {
5537 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5538 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5539 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5540 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5541 if(internal)
5542 assem_debug("branch: internal\n");
5543 else
5544 assem_debug("branch: external\n");
5545 if(internal&&is_ds[(ba[i]-start)>>2]) {
5546 ds_assemble_entry(i);
5547 }
5548 else {
5549 add_to_linker((int)out,ba[i],internal);
5550 emit_jmp(0);
5551 }
5552 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5553 if(((u_int)out)&7) emit_addnop(0);
5554 #endif
5555 }
5556 }
5557 else if(nop) {
5558 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5559 int jaddr=(int)out;
5560 emit_jns(0);
5561 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5562 }
5563 else {
5564 int taken=0,nottaken=0,nottaken1=0;
5565 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5566 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5567 if(!only32)
5568 {
5569 assert(s1h>=0);
5570 if(opcode[i]==4) // BEQ
5571 {
5572 if(s2h>=0) emit_cmp(s1h,s2h);
5573 else emit_test(s1h,s1h);
5574 nottaken1=(int)out;
5575 emit_jne(1);
5576 }
5577 if(opcode[i]==5) // BNE
5578 {
5579 if(s2h>=0) emit_cmp(s1h,s2h);
5580 else emit_test(s1h,s1h);
5581 if(invert) taken=(int)out;
5582 else add_to_linker((int)out,ba[i],internal);
5583 emit_jne(0);
5584 }
5585 if(opcode[i]==6) // BLEZ
5586 {
5587 emit_test(s1h,s1h);
5588 if(invert) taken=(int)out;
5589 else add_to_linker((int)out,ba[i],internal);
5590 emit_js(0);
5591 nottaken1=(int)out;
5592 emit_jne(1);
5593 }
5594 if(opcode[i]==7) // BGTZ
5595 {
5596 emit_test(s1h,s1h);
5597 nottaken1=(int)out;
5598 emit_js(1);
5599 if(invert) taken=(int)out;
5600 else add_to_linker((int)out,ba[i],internal);
5601 emit_jne(0);
5602 }
5603 } // if(!only32)
5604
5605 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5606 assert(s1l>=0);
5607 if(opcode[i]==4) // BEQ
5608 {
5609 if(s2l>=0) emit_cmp(s1l,s2l);
5610 else emit_test(s1l,s1l);
5611 if(invert){
5612 nottaken=(int)out;
5613 emit_jne(1);
5614 }else{
5615 add_to_linker((int)out,ba[i],internal);
5616 emit_jeq(0);
5617 }
5618 }
5619 if(opcode[i]==5) // BNE
5620 {
5621 if(s2l>=0) emit_cmp(s1l,s2l);
5622 else emit_test(s1l,s1l);
5623 if(invert){
5624 nottaken=(int)out;
5625 emit_jeq(1);
5626 }else{
5627 add_to_linker((int)out,ba[i],internal);
5628 emit_jne(0);
5629 }
5630 }
5631 if(opcode[i]==6) // BLEZ
5632 {
5633 emit_cmpimm(s1l,1);
5634 if(invert){
5635 nottaken=(int)out;
5636 emit_jge(1);
5637 }else{
5638 add_to_linker((int)out,ba[i],internal);
5639 emit_jl(0);
5640 }
5641 }
5642 if(opcode[i]==7) // BGTZ
5643 {
5644 emit_cmpimm(s1l,1);
5645 if(invert){
5646 nottaken=(int)out;
5647 emit_jl(1);
5648 }else{
5649 add_to_linker((int)out,ba[i],internal);
5650 emit_jge(0);
5651 }
5652 }
5653 if(invert) {
5654 if(taken) set_jump_target(taken,(int)out);
5655 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5656 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5657 if(adj) {
5658 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5659 add_to_linker((int)out,ba[i],internal);
5660 }else{
5661 emit_addnop(13);
5662 add_to_linker((int)out,ba[i],internal*2);
5663 }
5664 emit_jmp(0);
5665 }else
5666 #endif
5667 {
5668 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5669 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5670 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5671 if(internal)
5672 assem_debug("branch: internal\n");
5673 else
5674 assem_debug("branch: external\n");
5675 if(internal&&is_ds[(ba[i]-start)>>2]) {
5676 ds_assemble_entry(i);
5677 }
5678 else {
5679 add_to_linker((int)out,ba[i],internal);
5680 emit_jmp(0);
5681 }
5682 }
5683 set_jump_target(nottaken,(int)out);
5684 }
5685
5686 if(nottaken1) set_jump_target(nottaken1,(int)out);
5687 if(adj) {
5688 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5689 }
5690 } // (!unconditional)
5691 } // if(ooo)
5692 else
5693 {
5694 // In-order execution (branch first)
5695 //if(likely[i]) printf("IOL\n");
5696 //else
5697 //printf("IOE\n");
5698 int taken=0,nottaken=0,nottaken1=0;
5699 if(!unconditional&&!nop) {
5700 if(!only32)
5701 {
5702 assert(s1h>=0);
5703 if((opcode[i]&0x2f)==4) // BEQ
5704 {
5705 if(s2h>=0) emit_cmp(s1h,s2h);
5706 else emit_test(s1h,s1h);
5707 nottaken1=(int)out;
5708 emit_jne(2);
5709 }
5710 if((opcode[i]&0x2f)==5) // BNE
5711 {
5712 if(s2h>=0) emit_cmp(s1h,s2h);
5713 else emit_test(s1h,s1h);
5714 taken=(int)out;
5715 emit_jne(1);
5716 }
5717 if((opcode[i]&0x2f)==6) // BLEZ
5718 {
5719 emit_test(s1h,s1h);
5720 taken=(int)out;
5721 emit_js(1);
5722 nottaken1=(int)out;
5723 emit_jne(2);
5724 }
5725 if((opcode[i]&0x2f)==7) // BGTZ
5726 {
5727 emit_test(s1h,s1h);
5728 nottaken1=(int)out;
5729 emit_js(2);
5730 taken=(int)out;
5731 emit_jne(1);
5732 }
5733 } // if(!only32)
5734
5735 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5736 assert(s1l>=0);
5737 if((opcode[i]&0x2f)==4) // BEQ
5738 {
5739 if(s2l>=0) emit_cmp(s1l,s2l);
5740 else emit_test(s1l,s1l);
5741 nottaken=(int)out;
5742 emit_jne(2);
5743 }
5744 if((opcode[i]&0x2f)==5) // BNE
5745 {
5746 if(s2l>=0) emit_cmp(s1l,s2l);
5747 else emit_test(s1l,s1l);
5748 nottaken=(int)out;
5749 emit_jeq(2);
5750 }
5751 if((opcode[i]&0x2f)==6) // BLEZ
5752 {
5753 emit_cmpimm(s1l,1);
5754 nottaken=(int)out;
5755 emit_jge(2);
5756 }
5757 if((opcode[i]&0x2f)==7) // BGTZ
5758 {
5759 emit_cmpimm(s1l,1);
5760 nottaken=(int)out;
5761 emit_jl(2);
5762 }
5763 } // if(!unconditional)
5764 int adj;
5765 uint64_t ds_unneeded=branch_regs[i].u;
5766 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5767 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5768 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5769 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5770 ds_unneeded|=1;
5771 ds_unneeded_upper|=1;
5772 // branch taken
5773 if(!nop) {
5774 if(taken) set_jump_target(taken,(int)out);
5775 assem_debug("1:\n");
5776 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5777 ds_unneeded,ds_unneeded_upper);
5778 // load regs
5779 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5780 address_generation(i+1,&branch_regs[i],0);
5781 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5782 ds_assemble(i+1,&branch_regs[i]);
5783 cc=get_reg(branch_regs[i].regmap,CCREG);
5784 if(cc==-1) {
5785 emit_loadreg(CCREG,cc=HOST_CCREG);
5786 // CHECK: Is the following instruction (fall thru) allocated ok?
5787 }
5788 assert(cc==HOST_CCREG);
5789 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5790 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5791 assem_debug("cycle count (adj)\n");
5792 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5793 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5794 if(internal)
5795 assem_debug("branch: internal\n");
5796 else
5797 assem_debug("branch: external\n");
5798 if(internal&&is_ds[(ba[i]-start)>>2]) {
5799 ds_assemble_entry(i);
5800 }
5801 else {
5802 add_to_linker((int)out,ba[i],internal);
5803 emit_jmp(0);
5804 }
5805 }
5806 // branch not taken
5807 cop1_usable=prev_cop1_usable;
5808 if(!unconditional) {
5809 if(nottaken1) set_jump_target(nottaken1,(int)out);
5810 set_jump_target(nottaken,(int)out);
5811 assem_debug("2:\n");
5812 if(!likely[i]) {
5813 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5814 ds_unneeded,ds_unneeded_upper);
5815 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5816 address_generation(i+1,&branch_regs[i],0);
5817 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5818 ds_assemble(i+1,&branch_regs[i]);
5819 }
5820 cc=get_reg(branch_regs[i].regmap,CCREG);
5821 if(cc==-1&&!likely[i]) {
5822 // Cycle count isn't in a register, temporarily load it then write it out
5823 emit_loadreg(CCREG,HOST_CCREG);
5824 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5825 int jaddr=(int)out;
5826 emit_jns(0);
5827 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5828 emit_storereg(CCREG,HOST_CCREG);
5829 }
5830 else{
5831 cc=get_reg(i_regmap,CCREG);
5832 assert(cc==HOST_CCREG);
5833 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5834 int jaddr=(int)out;
5835 emit_jns(0);
5836 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5837 }
5838 }
5839 }
5840}
5841
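// Assemble a REGIMM branch (BLTZ/BGEZ and the linking BxxZAL forms),
// covering both out-of-order (delay slot first) and in-order code paths.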
5842void sjump_assemble(int i,struct regstat *i_regs)
5843{
5844 signed char *i_regmap=i_regs->regmap;
5845 int cc;
5846 int match;
5847 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5848 assem_debug("smatch=%d\n",match);
5849 int s1h,s1l;
5850 int prev_cop1_usable=cop1_usable;
5851 int unconditional=0,nevertaken=0;
5852 int only32=0;
57871462 5853 int invert=0;
5854 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5855 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5856 if(!match) invert=1;
5857 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5858 if(i>(ba[i]-start)>>2) invert=1;
5859 #endif
5860
5861 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
df894a3a 5862 //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
57871462 5863
e1190b87 5864 if(ooo[i]) {
57871462 5865 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5866 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5867 }
5868 else {
5869 s1l=get_reg(i_regmap,rs1[i]);
5870 s1h=get_reg(i_regmap,rs1[i]|64);
5871 }
5872 if(rs1[i]==0)
5873 {
5874 if(opcode2[i]&1) unconditional=1;
5875 else nevertaken=1;
5876 // These are never taken (r0 is never less than zero)
5877 //assert(opcode2[i]!=0);
5878 //assert(opcode2[i]!=2);
5879 //assert(opcode2[i]!=0x10);
5880 //assert(opcode2[i]!=0x12);
5881 }
5882 else {
5883 only32=(regs[i].was32>>rs1[i])&1;
5884 }
5885
e1190b87 5886 if(ooo[i]) {
57871462 5887 // Out of order execution (delay slot first)
5888 //printf("OOOE\n");
5889 address_generation(i+1,i_regs,regs[i].regmap_entry);
5890 ds_assemble(i+1,i_regs);
5891 int adj;
5892 uint64_t bc_unneeded=branch_regs[i].u;
5893 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5894 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5895 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5896 bc_unneeded|=1;
5897 bc_unneeded_upper|=1;
5898 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5899 bc_unneeded,bc_unneeded_upper);
5900 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5901 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5902 if(rt1[i]==31) {
5903 int rt,return_address;
57871462 5904 rt=get_reg(branch_regs[i].regmap,31);
5905 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5906 if(rt>=0) {
5907 // Save the PC even if the branch is not taken
5908 return_address=start+i*4+8;
5909 emit_movimm(return_address,rt); // PC into link register
5910 #ifdef IMM_PREFETCH
5911 if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5912 #endif
5913 }
5914 }
5915 cc=get_reg(branch_regs[i].regmap,CCREG);
5916 assert(cc==HOST_CCREG);
5917 if(unconditional)
5918 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5919 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5920 assem_debug("cycle count (adj)\n");
5921 if(unconditional) {
5922 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5923 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5924 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5925 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5926 if(internal)
5927 assem_debug("branch: internal\n");
5928 else
5929 assem_debug("branch: external\n");
5930 if(internal&&is_ds[(ba[i]-start)>>2]) {
5931 ds_assemble_entry(i);
5932 }
5933 else {
5934 add_to_linker((int)out,ba[i],internal);
5935 emit_jmp(0);
5936 }
5937 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5938 if(((u_int)out)&7) emit_addnop(0);
5939 #endif
5940 }
5941 }
5942 else if(nevertaken) {
5943 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5944 int jaddr=(int)out;
5945 emit_jns(0);
5946 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5947 }
5948 else {
5949 int nottaken=0;
5950 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5951 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5952 if(!only32)
5953 {
5954 assert(s1h>=0);
df894a3a 5955 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5956 {
5957 emit_test(s1h,s1h);
5958 if(invert){
5959 nottaken=(int)out;
5960 emit_jns(1);
5961 }else{
5962 add_to_linker((int)out,ba[i],internal);
5963 emit_js(0);
5964 }
5965 }
df894a3a 5966 if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5967 {
5968 emit_test(s1h,s1h);
5969 if(invert){
5970 nottaken=(int)out;
5971 emit_js(1);
5972 }else{
5973 add_to_linker((int)out,ba[i],internal);
5974 emit_jns(0);
5975 }
5976 }
5977 } // if(!only32)
5978 else
5979 {
5980 assert(s1l>=0);
df894a3a 5981 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
57871462 5982 {
5983 emit_test(s1l,s1l);
5984 if(invert){
5985 nottaken=(int)out;
5986 emit_jns(1);
5987 }else{
5988 add_to_linker((int)out,ba[i],internal);
5989 emit_js(0);
5990 }
5991 }
df894a3a 5992 if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
57871462 5993 {
5994 emit_test(s1l,s1l);
5995 if(invert){
5996 nottaken=(int)out;
5997 emit_js(1);
5998 }else{
5999 add_to_linker((int)out,ba[i],internal);
6000 emit_jns(0);
6001 }
6002 }
6003 } // if(!only32)
6004
6005 if(invert) {
6006 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6007 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
6008 if(adj) {
6009 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6010 add_to_linker((int)out,ba[i],internal);
6011 }else{
6012 emit_addnop(13);
6013 add_to_linker((int)out,ba[i],internal*2);
6014 }
6015 emit_jmp(0);
6016 }else
6017 #endif
6018 {
6019 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6020 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6021 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6022 if(internal)
6023 assem_debug("branch: internal\n");
6024 else
6025 assem_debug("branch: external\n");
6026 if(internal&&is_ds[(ba[i]-start)>>2]) {
6027 ds_assemble_entry(i);
6028 }
6029 else {
6030 add_to_linker((int)out,ba[i],internal);
6031 emit_jmp(0);
6032 }
6033 }
6034 set_jump_target(nottaken,(int)out);
6035 }
6036
6037 if(adj) {
6038 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6039 }
6040 } // (!unconditional)
6041 } // if(ooo)
6042 else
6043 {
6044 // In-order execution (branch first)
6045 //printf("IOE\n");
6046 int nottaken=0;
a6491170 6047 if(rt1[i]==31) {
6048 int rt,return_address;
a6491170 6049 rt=get_reg(branch_regs[i].regmap,31);
6050 if(rt>=0) {
6051 // Save the PC even if the branch is not taken
6052 return_address=start+i*4+8;
6053 emit_movimm(return_address,rt); // PC into link register
6054 #ifdef IMM_PREFETCH
6055 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
6056 #endif
6057 }
6058 }
57871462 6059 if(!unconditional) {
6060 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6061 if(!only32)
6062 {
6063 assert(s1h>=0);
a6491170 6064 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
57871462 6065 {
6066 emit_test(s1h,s1h);
6067 nottaken=(int)out;
6068 emit_jns(1);
6069 }
a6491170 6070 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
57871462 6071 {
6072 emit_test(s1h,s1h);
6073 nottaken=(int)out;
6074 emit_js(1);
6075 }
6076 } // if(!only32)
6077 else
6078 {
6079 assert(s1l>=0);
a6491170 6080 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
57871462 6081 {
6082 emit_test(s1l,s1l);
6083 nottaken=(int)out;
6084 emit_jns(1);
6085 }
a6491170 6086 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
57871462 6087 {
6088 emit_test(s1l,s1l);
6089 nottaken=(int)out;
6090 emit_js(1);
6091 }
6092 }
6093 } // if(!unconditional)
6094 int adj;
6095 uint64_t ds_unneeded=branch_regs[i].u;
6096 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6097 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6098 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6099 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6100 ds_unneeded|=1;
6101 ds_unneeded_upper|=1;
6102 // branch taken
6103 if(!nevertaken) {
6104 //assem_debug("1:\n");
6105 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6106 ds_unneeded,ds_unneeded_upper);
6107 // load regs
6108 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6109 address_generation(i+1,&branch_regs[i],0);
6110 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6111 ds_assemble(i+1,&branch_regs[i]);
6112 cc=get_reg(branch_regs[i].regmap,CCREG);
6113 if(cc==-1) {
6114 emit_loadreg(CCREG,cc=HOST_CCREG);
6115 // CHECK: Is the following instruction (fall thru) allocated ok?
6116 }
6117 assert(cc==HOST_CCREG);
6118 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6119 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6120 assem_debug("cycle count (adj)\n");
6121 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6122 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6123 if(internal)
6124 assem_debug("branch: internal\n");
6125 else
6126 assem_debug("branch: external\n");
6127 if(internal&&is_ds[(ba[i]-start)>>2]) {
6128 ds_assemble_entry(i);
6129 }
6130 else {
6131 add_to_linker((int)out,ba[i],internal);
6132 emit_jmp(0);
6133 }
6134 }
6135 // branch not taken
6136 cop1_usable=prev_cop1_usable;
6137 if(!unconditional) {
6138 set_jump_target(nottaken,(int)out);
6139 assem_debug("1:\n");
6140 if(!likely[i]) {
6141 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6142 ds_unneeded,ds_unneeded_upper);
6143 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6144 address_generation(i+1,&branch_regs[i],0);
6145 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6146 ds_assemble(i+1,&branch_regs[i]);
6147 }
6148 cc=get_reg(branch_regs[i].regmap,CCREG);
6149 if(cc==-1&&!likely[i]) {
6150 // Cycle count isn't in a register, temporarily load it then write it out
6151 emit_loadreg(CCREG,HOST_CCREG);
6152 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6153 int jaddr=(int)out;
6154 emit_jns(0);
6155 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6156 emit_storereg(CCREG,HOST_CCREG);
6157 }
6158 else{
6159 cc=get_reg(i_regmap,CCREG);
6160 assert(cc==HOST_CCREG);
6161 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6162 int jaddr=(int)out;
6163 emit_jns(0);
6164 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6165 }
6166 }
6167 }
6168}
6169
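// Assemble a COP1 condition branch (BC1F/BC1T). A coprocessor-unusable
// check (FP_STUB) is emitted before the FP condition bit is tested.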
6170void fjump_assemble(int i,struct regstat *i_regs)
6171{
6172 signed char *i_regmap=i_regs->regmap;
6173 int cc;
6174 int match;
6175 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6176 assem_debug("fmatch=%d\n",match);
6177 int fs,cs;
6178 int eaddr;
57871462 6179 int invert=0;
6180 int internal=internal_branch(branch_regs[i].is32,ba[i]);
6181 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 6182 if(!match) invert=1;
6183 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6184 if(i>(ba[i]-start)>>2) invert=1;
6185 #endif
6186
e1190b87 6187 if(ooo[i]) {
57871462 6188 fs=get_reg(branch_regs[i].regmap,FSREG);
6189 address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6190 }
6191 else {
6192 fs=get_reg(i_regmap,FSREG);
6193 }
6194
6195 // Check cop1 unusable
6196 if(!cop1_usable) {
6197 cs=get_reg(i_regmap,CSREG);
6198 assert(cs>=0);
6199 emit_testimm(cs,0x20000000);
6200 eaddr=(int)out;
6201 emit_jeq(0);
6202 add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6203 cop1_usable=1;
6204 }
6205
e1190b87 6206 if(ooo[i]) {
57871462 6207 // Out of order execution (delay slot first)
6208 //printf("OOOE\n");
6209 ds_assemble(i+1,i_regs);
6210 int adj;
6211 uint64_t bc_unneeded=branch_regs[i].u;
6212 uint64_t bc_unneeded_upper=branch_regs[i].uu;
6213 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6214 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6215 bc_unneeded|=1;
6216 bc_unneeded_upper|=1;
6217 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6218 bc_unneeded,bc_unneeded_upper);
6219 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6220 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6221 cc=get_reg(branch_regs[i].regmap,CCREG);
6222 assert(cc==HOST_CCREG);
6223 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6224 assem_debug("cycle count (adj)\n");
6225 if(1) {
6226 int nottaken=0;
6227 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6228 if(1) {
6229 assert(fs>=0);
6230 emit_testimm(fs,0x800000);
6231 if(source[i]&0x10000) // BC1T
6232 {
6233 if(invert){
6234 nottaken=(int)out;
6235 emit_jeq(1);
6236 }else{
6237 add_to_linker((int)out,ba[i],internal);
6238 emit_jne(0);
6239 }
6240 }
6241 else // BC1F
6242 if(invert){
6243 nottaken=(int)out;
6244 emit_jne(1);
6245 }else{
6246 add_to_linker((int)out,ba[i],internal);
6247 emit_jeq(0);
6248 }
6249 {
6250 }
6251 } // if(!only32)
6252
6253 if(invert) {
6254 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6255 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6256 else if(match) emit_addnop(13);
6257 #endif
6258 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6259 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6260 if(internal)
6261 assem_debug("branch: internal\n");
6262 else
6263 assem_debug("branch: external\n");
6264 if(internal&&is_ds[(ba[i]-start)>>2]) {
6265 ds_assemble_entry(i);
6266 }
6267 else {
6268 add_to_linker((int)out,ba[i],internal);
6269 emit_jmp(0);
6270 }
6271 set_jump_target(nottaken,(int)out);
6272 }
6273
6274 if(adj) {
6275 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6276 }
6277 } // (!unconditional)
6278 } // if(ooo)
6279 else
6280 {
6281 // In-order execution (branch first)
6282 //printf("IOE\n");
6283 int nottaken=0;
6284 if(1) {
6285 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6286 if(1) {
6287 assert(fs>=0);
6288 emit_testimm(fs,0x800000);
6289 if(source[i]&0x10000) // BC1T
6290 {
6291 nottaken=(int)out;
6292 emit_jeq(1);
6293 }
6294 else // BC1F
6295 {
6296 nottaken=(int)out;
6297 emit_jne(1);
6298 }
6299 }
6300 } // if(!unconditional)
6301 int adj;
6302 uint64_t ds_unneeded=branch_regs[i].u;
6303 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6304 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6305 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6306 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6307 ds_unneeded|=1;
6308 ds_unneeded_upper|=1;
6309 // branch taken
6310 //assem_debug("1:\n");
6311 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6312 ds_unneeded,ds_unneeded_upper);
6313 // load regs
6314 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6315 address_generation(i+1,&branch_regs[i],0);
6316 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6317 ds_assemble(i+1,&branch_regs[i]);
6318 cc=get_reg(branch_regs[i].regmap,CCREG);
6319 if(cc==-1) {
6320 emit_loadreg(CCREG,cc=HOST_CCREG);
6321 // CHECK: Is the following instruction (fall thru) allocated ok?
6322 }
6323 assert(cc==HOST_CCREG);
6324 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6325 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6326 assem_debug("cycle count (adj)\n");
6327 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6328 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6329 if(internal)
6330 assem_debug("branch: internal\n");
6331 else
6332 assem_debug("branch: external\n");
6333 if(internal&&is_ds[(ba[i]-start)>>2]) {
6334 ds_assemble_entry(i);
6335 }
6336 else {
6337 add_to_linker((int)out,ba[i],internal);
6338 emit_jmp(0);
6339 }
6340
6341 // branch not taken
6342 if(1) { // <- FIXME (don't need this)
6343 set_jump_target(nottaken,(int)out);
6344 assem_debug("1:\n");
6345 if(!likely[i]) {
6346 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6347 ds_unneeded,ds_unneeded_upper);
6348 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6349 address_generation(i+1,&branch_regs[i],0);
6350 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6351 ds_assemble(i+1,&branch_regs[i]);
6352 }
6353 cc=get_reg(branch_regs[i].regmap,CCREG);
6354 if(cc==-1&&!likely[i]) {
6355 // Cycle count isn't in a register, temporarily load it then write it out
6356 emit_loadreg(CCREG,HOST_CCREG);
6357 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6358 int jaddr=(int)out;
6359 emit_jns(0);
6360 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6361 emit_storereg(CCREG,HOST_CCREG);
6362 }
6363 else{
6364 cc=get_reg(i_regmap,CCREG);
6365 assert(cc==HOST_CCREG);
6366 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6367 int jaddr=(int)out;
6368 emit_jns(0);
6369 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6370 }
6371 }
6372 }
6373}
6374
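// Assemble a branch whose delay slot falls at the start of the next page:
// the branch target is kept in HOST_BTREG and control transfers to the
// delay-slot entry of the next block (assembled by pagespan_ds() below).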
6375static void pagespan_assemble(int i,struct regstat *i_regs)
6376{
6377 int s1l=get_reg(i_regs->regmap,rs1[i]);
6378 int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6379 int s2l=get_reg(i_regs->regmap,rs2[i]);
6380 int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6381 void *nt_branch=NULL;
6382 int taken=0;
6383 int nottaken=0;
6384 int unconditional=0;
6385 if(rs1[i]==0)
6386 {
6387 s1l=s2l;s1h=s2h;
6388 s2l=s2h=-1;
6389 }
6390 else if(rs2[i]==0)
6391 {
6392 s2l=s2h=-1;
6393 }
6394 if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6395 s1h=s2h=-1;
6396 }
6397 int hr=0;
6398 int addr,alt,ntaddr;
6399 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6400 else {
6401 while(hr<HOST_REGS)
6402 {
6403 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6404 (i_regs->regmap[hr]&63)!=rs1[i] &&
6405 (i_regs->regmap[hr]&63)!=rs2[i] )
6406 {
6407 addr=hr++;break;
6408 }
6409 hr++;
6410 }
6411 }
6412 while(hr<HOST_REGS)
6413 {
6414 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6415 (i_regs->regmap[hr]&63)!=rs1[i] &&
6416 (i_regs->regmap[hr]&63)!=rs2[i] )
6417 {
6418 alt=hr++;break;
6419 }
6420 hr++;
6421 }
6422 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6423 {
6424 while(hr<HOST_REGS)
6425 {
6426 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6427 (i_regs->regmap[hr]&63)!=rs1[i] &&
6428 (i_regs->regmap[hr]&63)!=rs2[i] )
6429 {
6430 ntaddr=hr;break;
6431 }
6432 hr++;
6433 }
6434 }
6435 assert(hr<HOST_REGS);
6436 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6437 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6438 }
6439 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6440 if(opcode[i]==2) // J
6441 {
6442 unconditional=1;
6443 }
6444 if(opcode[i]==3) // JAL
6445 {
6446 // TODO: mini_ht
6447 int rt=get_reg(i_regs->regmap,31);
6448 emit_movimm(start+i*4+8,rt);
6449 unconditional=1;
6450 }
6451 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6452 {
6453 emit_mov(s1l,addr);
6454 if(opcode2[i]==9) // JALR
6455 {
5067f341 6456 int rt=get_reg(i_regs->regmap,rt1[i]);
57871462 6457 emit_movimm(start+i*4+8,rt);
6458 }
6459 }
6460 if((opcode[i]&0x3f)==4) // BEQ
6461 {
6462 if(rs1[i]==rs2[i])
6463 {
6464 unconditional=1;
6465 }
6466 else
6467 #ifdef HAVE_CMOV_IMM
6468 if(s1h<0) {
6469 if(s2l>=0) emit_cmp(s1l,s2l);
6470 else emit_test(s1l,s1l);
6471 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6472 }
6473 else
6474 #endif
6475 {
6476 assert(s1l>=0);
6477 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6478 if(s1h>=0) {
6479 if(s2h>=0) emit_cmp(s1h,s2h);
6480 else emit_test(s1h,s1h);
6481 emit_cmovne_reg(alt,addr);
6482 }
6483 if(s2l>=0) emit_cmp(s1l,s2l);
6484 else emit_test(s1l,s1l);
6485 emit_cmovne_reg(alt,addr);
6486 }
6487 }
6488 if((opcode[i]&0x3f)==5) // BNE
6489 {
6490 #ifdef HAVE_CMOV_IMM
6491 if(s1h<0) {
6492 if(s2l>=0) emit_cmp(s1l,s2l);
6493 else emit_test(s1l,s1l);
6494 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6495 }
6496 else
6497 #endif
6498 {
6499 assert(s1l>=0);
6500 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6501 if(s1h>=0) {
6502 if(s2h>=0) emit_cmp(s1h,s2h);
6503 else emit_test(s1h,s1h);
6504 emit_cmovne_reg(alt,addr);
6505 }
6506 if(s2l>=0) emit_cmp(s1l,s2l);
6507 else emit_test(s1l,s1l);
6508 emit_cmovne_reg(alt,addr);
6509 }
6510 }
6511 if((opcode[i]&0x3f)==0x14) // BEQL
6512 {
6513 if(s1h>=0) {
6514 if(s2h>=0) emit_cmp(s1h,s2h);
6515 else emit_test(s1h,s1h);
6516 nottaken=(int)out;
6517 emit_jne(0);
6518 }
6519 if(s2l>=0) emit_cmp(s1l,s2l);
6520 else emit_test(s1l,s1l);
6521 if(nottaken) set_jump_target(nottaken,(int)out);
6522 nottaken=(int)out;
6523 emit_jne(0);
6524 }
6525 if((opcode[i]&0x3f)==0x15) // BNEL
6526 {
6527 if(s1h>=0) {
6528 if(s2h>=0) emit_cmp(s1h,s2h);
6529 else emit_test(s1h,s1h);
6530 taken=(int)out;
6531 emit_jne(0);
6532 }
6533 if(s2l>=0) emit_cmp(s1l,s2l);
6534 else emit_test(s1l,s1l);
6535 nottaken=(int)out;
6536 emit_jeq(0);
6537 if(taken) set_jump_target(taken,(int)out);
6538 }
6539 if((opcode[i]&0x3f)==6) // BLEZ
6540 {
6541 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6542 emit_cmpimm(s1l,1);
6543 if(s1h>=0) emit_mov(addr,ntaddr);
6544 emit_cmovl_reg(alt,addr);
6545 if(s1h>=0) {
6546 emit_test(s1h,s1h);
6547 emit_cmovne_reg(ntaddr,addr);
6548 emit_cmovs_reg(alt,addr);
6549 }
6550 }
6551 if((opcode[i]&0x3f)==7) // BGTZ
6552 {
6553 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6554 emit_cmpimm(s1l,1);
6555 if(s1h>=0) emit_mov(addr,alt);
6556 emit_cmovl_reg(ntaddr,addr);
6557 if(s1h>=0) {
6558 emit_test(s1h,s1h);
6559 emit_cmovne_reg(alt,addr);
6560 emit_cmovs_reg(ntaddr,addr);
6561 }
6562 }
6563 if((opcode[i]&0x3f)==0x16) // BLEZL
6564 {
6565 assert((opcode[i]&0x3f)!=0x16);
6566 }
6567 if((opcode[i]&0x3f)==0x17) // BGTZL
6568 {
6569 assert((opcode[i]&0x3f)!=0x17);
6570 }
6571 assert(opcode[i]!=1); // BLTZ/BGEZ
6572
6573 //FIXME: Check CSREG
6574 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6575 if((source[i]&0x30000)==0) // BC1F
6576 {
6577 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6578 emit_testimm(s1l,0x800000);
6579 emit_cmovne_reg(alt,addr);
6580 }
6581 if((source[i]&0x30000)==0x10000) // BC1T
6582 {
6583 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6584 emit_testimm(s1l,0x800000);
6585 emit_cmovne_reg(alt,addr);
6586 }
6587 if((source[i]&0x30000)==0x20000) // BC1FL
6588 {
6589 emit_testimm(s1l,0x800000);
6590 nottaken=(int)out;
6591 emit_jne(0);
6592 }
6593 if((source[i]&0x30000)==0x30000) // BC1TL
6594 {
6595 emit_testimm(s1l,0x800000);
6596 nottaken=(int)out;
6597 emit_jeq(0);
6598 }
6599 }
6600
6601 assert(i_regs->regmap[HOST_CCREG]==CCREG);
6602 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6603 if(likely[i]||unconditional)
6604 {
6605 emit_movimm(ba[i],HOST_BTREG);
6606 }
6607 else if(addr!=HOST_BTREG)
6608 {
6609 emit_mov(addr,HOST_BTREG);
6610 }
6611 void *branch_addr=out;
6612 emit_jmp(0);
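 // Note: the +5 (delay slot address + 1) appears intentional; it matches the
 // start+1 vaddr that pagespan_ds() registers for the delay-slot entry.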
6613 int target_addr=start+i*4+5;
6614 void *stub=out;
6615 void *compiled_target_addr=check_addr(target_addr);
6616 emit_extjump_ds((int)branch_addr,target_addr);
6617 if(compiled_target_addr) {
6618 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6619 add_link(target_addr,stub);
6620 }
6621 else set_jump_target((int)branch_addr,(int)stub);
6622 if(likely[i]) {
6623 // Not-taken path
6624 set_jump_target((int)nottaken,(int)out);
6625 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6626 void *branch_addr=out;
6627 emit_jmp(0);
6628 int target_addr=start+i*4+8;
6629 void *stub=out;
6630 void *compiled_target_addr=check_addr(target_addr);
6631 emit_extjump_ds((int)branch_addr,target_addr);
6632 if(compiled_target_addr) {
6633 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6634 add_link(target_addr,stub);
6635 }
6636 else set_jump_target((int)branch_addr,(int)stub);
6637 }
6638}
6639
6640// Assemble the delay slot for the above
6641static void pagespan_ds()
6642{
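 // Instruction 0 of this block is the delay slot of a page-spanning branch:
 // assemble it, then dispatch to the branch target saved in BTREG.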
6643 assem_debug("initial delay slot:\n");
6644 u_int vaddr=start+1;
94d23bb9 6645 u_int page=get_page(vaddr);
6646 u_int vpage=get_vpage(vaddr);
57871462 6647 ll_add(jump_dirty+vpage,vaddr,(void *)out);
6648 do_dirty_stub_ds();
6649 ll_add(jump_in+page,vaddr,(void *)out);
6650 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6651 if(regs[0].regmap[HOST_CCREG]!=CCREG)
6652 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6653 if(regs[0].regmap[HOST_BTREG]!=BTREG)
6654 emit_writeword(HOST_BTREG,(int)&branch_target);
6655 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6656 address_generation(0,&regs[0],regs[0].regmap_entry);
b9b61529 6657 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
57871462 6658 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6659 cop1_usable=0;
6660 is_delayslot=0;
6661 switch(itype[0]) {
6662 case ALU:
6663 alu_assemble(0,&regs[0]);break;
6664 case IMM16:
6665 imm16_assemble(0,&regs[0]);break;
6666 case SHIFT:
6667 shift_assemble(0,&regs[0]);break;
6668 case SHIFTIMM:
6669 shiftimm_assemble(0,&regs[0]);break;
6670 case LOAD:
6671 load_assemble(0,&regs[0]);break;
6672 case LOADLR:
6673 loadlr_assemble(0,&regs[0]);break;
6674 case STORE:
6675 store_assemble(0,&regs[0]);break;
6676 case STORELR:
6677 storelr_assemble(0,&regs[0]);break;
6678 case COP0:
6679 cop0_assemble(0,&regs[0]);break;
6680 case COP1:
6681 cop1_assemble(0,&regs[0]);break;
6682 case C1LS:
6683 c1ls_assemble(0,&regs[0]);break;
b9b61529 6684 case COP2:
6685 cop2_assemble(0,&regs[0]);break;
6686 case C2LS:
6687 c2ls_assemble(0,&regs[0]);break;
6688 case C2OP:
6689 c2op_assemble(0,&regs[0]);break;
57871462 6690 case FCONV:
6691 fconv_assemble(0,&regs[0]);break;
6692 case FLOAT:
6693 float_assemble(0,&regs[0]);break;
6694 case FCOMP:
6695 fcomp_assemble(0,&regs[0]);break;
6696 case MULTDIV:
6697 multdiv_assemble(0,&regs[0]);break;
6698 case MOV:
6699 mov_assemble(0,&regs[0]);break;
6700 case SYSCALL:
7139f3c8 6701 case HLECALL:
1e973cb0 6702 case INTCALL:
57871462 6703 case SPAN:
6704 case UJUMP:
6705 case RJUMP:
6706 case CJUMP:
6707 case SJUMP:
6708 case FJUMP:
6709 printf("Jump in the delay slot. This is probably a bug.\n");
6710 }
6711 int btaddr=get_reg(regs[0].regmap,BTREG);
6712 if(btaddr<0) {
6713 btaddr=get_reg(regs[0].regmap,-1);
6714 emit_readword((int)&branch_target,btaddr);
6715 }
6716 assert(btaddr!=HOST_CCREG);
6717 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6718#ifdef HOST_IMM8
6719 emit_movimm(start+4,HOST_TEMPREG);
6720 emit_cmp(btaddr,HOST_TEMPREG);
6721#else
6722 emit_cmpimm(btaddr,start+4);
6723#endif
6724 int branch=(int)out;
6725 emit_jeq(0);
6726 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6727 emit_jmp(jump_vaddr_reg[btaddr]);
6728 set_jump_target(branch,(int)out);
6729 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6730 load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6731}
6732
6733// Basic liveness analysis for MIPS registers
6734void unneeded_registers(int istart,int iend,int r)
6735{
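 // u/uu are bitmasks of GPRs whose lower/upper 32-bit halves are dead
 // (unneeded) from this point on; gte_u tracks the same for GTE regs.
 // Bit 0 (r0) is always set, since r0 never needs to be preserved.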
6736 int i;
bedfea38 6737 uint64_t u,uu,gte_u,b,bu,gte_bu;
6738 uint64_t temp_u,temp_uu,temp_gte_u;
57871462 6739 uint64_t tdep;
6740 if(iend==slen-1) {
6741 u=1;uu=1;
6742 }else{
6743 u=unneeded_reg[iend+1];
6744 uu=unneeded_reg_upper[iend+1];
6745 u=1;uu=1;
6746 }
bedfea38 6747 gte_u=temp_gte_u=0;
6748
57871462 6749 for (i=iend;i>=istart;i--)
6750 {
6751 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6752 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6753 {
6754 // If subroutine call, flag return address as a possible branch target
6755 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6756
6757 if(ba[i]<start || ba[i]>=(start+slen*4))
6758 {
6759 // Branch out of this block, flush all regs
6760 u=1;
6761 uu=1;
bedfea38 6762 gte_u=0;
57871462 6763 /* Hexagon hack
6764 if(itype[i]==UJUMP&&rt1[i]==31)
6765 {
6766 uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6767 }
6768 if(itype[i]==RJUMP&&rs1[i]==31)
6769 {
6770 uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6771 }
4cb76aa4 6772 if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
57871462 6773 if(itype[i]==UJUMP&&rt1[i]==31)
6774 {
6775 //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6776 uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6777 }
6778 if(itype[i]==RJUMP&&rs1[i]==31)
6779 {
6780 //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6781 uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6782 }
6783 }*/
6784 branch_unneeded_reg[i]=u;
6785 branch_unneeded_reg_upper[i]=uu;
6786 // Merge in delay slot
6787 tdep=(~uu>>rt1[i+1])&1;
6788 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6789 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6790 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6791 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6792 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6793 u|=1;uu|=1;
bedfea38 6794 gte_u|=gte_rt[i+1];
6795 gte_u&=~gte_rs[i+1];
57871462 6796 // If branch is "likely" (and conditional)
6797 // then we skip the delay slot on the fall-thru path
6798 if(likely[i]) {
6799 if(i<slen-1) {
6800 u&=unneeded_reg[i+2];
6801 uu&=unneeded_reg_upper[i+2];
bedfea38 6802 gte_u&=gte_unneeded[i+2];
57871462 6803 }
6804 else
6805 {
6806 u=1;
6807 uu=1;
bedfea38 6808 gte_u=0;
57871462 6809 }
6810 }
6811 }
6812 else
6813 {
6814 // Internal branch, flag target
6815 bt[(ba[i]-start)>>2]=1;
6816 if(ba[i]<=start+i*4) {
6817 // Backward branch
6818 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6819 {
6820 // Unconditional branch
6821 temp_u=1;temp_uu=1;
bedfea38 6822 temp_gte_u=0;
57871462 6823 } else {
6824 // Conditional branch (not taken case)
6825 temp_u=unneeded_reg[i+2];
6826 temp_uu=unneeded_reg_upper[i+2];
bedfea38 6827 temp_gte_u&=gte_unneeded[i+2];
57871462 6828 }
6829 // Merge in delay slot
6830 tdep=(~temp_uu>>rt1[i+1])&1;
6831 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6832 temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6833 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6834 temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6835 temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6836 temp_u|=1;temp_uu|=1;
bedfea38 6837 temp_gte_u|=gte_rt[i+1];
6838 temp_gte_u&=~gte_rs[i+1];
57871462 6839 // If branch is "likely" (and conditional)
6840 // then we skip the delay slot on the fall-thru path
6841 if(likely[i]) {
6842 if(i<slen-1) {
6843 temp_u&=unneeded_reg[i+2];
6844 temp_uu&=unneeded_reg_upper[i+2];
bedfea38 6845 temp_gte_u&=gte_unneeded[i+2];
57871462 6846 }
6847 else
6848 {
6849 temp_u=1;
6850 temp_uu=1;
bedfea38 6851 temp_gte_u=0;
57871462 6852 }
6853 }
6854 tdep=(~temp_uu>>rt1[i])&1;
6855 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6856 temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6857 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6858 temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6859 temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6860 temp_u|=1;temp_uu|=1;
bedfea38 6861 temp_gte_u|=gte_rt[i];
6862 temp_gte_u&=~gte_rs[i];
57871462 6863 unneeded_reg[i]=temp_u;
6864 unneeded_reg_upper[i]=temp_uu;
bedfea38 6865 gte_unneeded[i]=temp_gte_u;
57871462 6866 // Only go three levels deep. This recursion can take an
6867 // excessive amount of time if there are a lot of nested loops.
6868 if(r<2) {
6869 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6870 }else{
6871 unneeded_reg[(ba[i]-start)>>2]=1;
6872 unneeded_reg_upper[(ba[i]-start)>>2]=1;
bedfea38 6873 gte_unneeded[(ba[i]-start)>>2]=0;
57871462 6874 }
6875 } /*else*/ if(1) {
6876 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6877 {
6878 // Unconditional branch
6879 u=unneeded_reg[(ba[i]-start)>>2];
6880 uu=unneeded_reg_upper[(ba[i]-start)>>2];
bedfea38 6881 gte_u=gte_unneeded[(ba[i]-start)>>2];
57871462 6882 branch_unneeded_reg[i]=u;
6883 branch_unneeded_reg_upper[i]=uu;
6884 //u=1;
6885 //uu=1;
6886 //branch_unneeded_reg[i]=u;
6887 //branch_unneeded_reg_upper[i]=uu;
6888 // Merge in delay slot
6889 tdep=(~uu>>rt1[i+1])&1;
6890 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6891 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6892 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6893 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6894 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6895 u|=1;uu|=1;
bedfea38 6896 gte_u|=gte_rt[i+1];
6897 gte_u&=~gte_rs[i+1];
57871462 6898 } else {
6899 // Conditional branch
6900 b=unneeded_reg[(ba[i]-start)>>2];
6901 bu=unneeded_reg_upper[(ba[i]-start)>>2];
bedfea38 6902 gte_bu=gte_unneeded[(ba[i]-start)>>2];
57871462 6903 branch_unneeded_reg[i]=b;
6904 branch_unneeded_reg_upper[i]=bu;
6905 //b=1;
6906 //bu=1;
6907 //branch_unneeded_reg[i]=b;
6908 //branch_unneeded_reg_upper[i]=bu;
6909 // Branch delay slot
6910 tdep=(~uu>>rt1[i+1])&1;
6911 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6912 bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6913 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6914 bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6915 bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6916 b|=1;bu|=1;
bedfea38 6917 gte_bu|=gte_rt[i+1];
6918 gte_bu&=~gte_rs[i+1];
57871462 6919 // If branch is "likely" then we skip the
6920 // delay slot on the fall-thru path
6921 if(likely[i]) {
6922 u=b;
6923 uu=bu;
bedfea38 6924 gte_u=gte_bu;
57871462 6925 if(i<slen-1) {
6926 u&=unneeded_reg[i+2];
6927 uu&=unneeded_reg_upper[i+2];
bedfea38 6928 gte_u&=gte_unneeded[i+2];
57871462 6929 //u=1;
6930 //uu=1;
6931 }
6932 } else {
6933 u&=b;
6934 uu&=bu;
bedfea38 6935 gte_u&=gte_bu;
57871462 6936 //u=1;
6937 //uu=1;
6938 }
6939 if(i<slen-1) {
6940 branch_unneeded_reg[i]&=unneeded_reg[i+2];
6941 branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6942 //branch_unneeded_reg[i]=1;
6943 //branch_unneeded_reg_upper[i]=1;
6944 } else {
6945 branch_unneeded_reg[i]=1;
6946 branch_unneeded_reg_upper[i]=1;
6947 }
6948 }
6949 }
6950 }
6951 }
1e973cb0 6952 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 6953 {
6954 // SYSCALL instruction (software interrupt)
6955 u=1;
6956 uu=1;
6957 }
6958 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6959 {
6960 // ERET instruction (return from interrupt)
6961 u=1;
6962 uu=1;
6963 }
6964 //u=uu=1; // DEBUG
6965 tdep=(~uu>>rt1[i])&1;
6966 // Written registers are unneeded
6967 u|=1LL<<rt1[i];
6968 u|=1LL<<rt2[i];
6969 uu|=1LL<<rt1[i];
6970 uu|=1LL<<rt2[i];
bedfea38 6971 gte_u|=gte_rt[i];
57871462 6972 // Accessed registers are needed
6973 u&=~(1LL<<rs1[i]);
6974 u&=~(1LL<<rs2[i]);
6975 uu&=~(1LL<<us1[i]);
6976 uu&=~(1LL<<us2[i]);
bedfea38 6977 gte_u&=~gte_rs[i];
57871462 6978 // Source-target dependencies
6979 uu&=~(tdep<<dep1[i]);
6980 uu&=~(tdep<<dep2[i]);
6981 // R0 is always unneeded
6982 u|=1;uu|=1;
6983 // Save it
6984 unneeded_reg[i]=u;
6985 unneeded_reg_upper[i]=uu;
bedfea38 6986 gte_unneeded[i]=gte_u;
57871462 6987 /*
6988 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6989 printf("U:");
6990 int r;
6991 for(r=1;r<=CCREG;r++) {
6992 if((unneeded_reg[i]>>r)&1) {
6993 if(r==HIREG) printf(" HI");
6994 else if(r==LOREG) printf(" LO");
6995 else printf(" r%d",r);
6996 }
6997 }
6998 printf(" UU:");
6999 for(r=1;r<=CCREG;r++) {
7000 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
7001 if(r==HIREG) printf(" HI");
7002 else if(r==LOREG) printf(" LO");
7003 else printf(" r%d",r);
7004 }
7005 }
7006 printf("\n");*/
7007 }
252c20fc 7008#ifdef FORCE32
7009 for (i=iend;i>=istart;i--)
7010 {
7011 unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
7012 }
7013#endif
57871462 7014}
7015
7016// Identify registers which are likely to contain 32-bit values
7017// This is used to predict whether any branches will jump to a
7018// location with 64-bit values in registers.
7019static void provisional_32bit()
7020{
7021 int i,j;
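 // is32: bit r set means GPR r is predicted to hold a 32-bit (sign-extended)
 // value at this point; bit 0 (r0) is always set.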
7022 uint64_t is32=1;
7023 uint64_t lastbranch=1;
7024
7025 for(i=0;i<slen;i++)
7026 {
7027 if(i>0) {
7028 if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
7029 if(i>1) is32=lastbranch;
7030 else is32=1;
7031 }
7032 }
7033 if(i>1)
7034 {
7035 if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
7036 if(likely[i-2]) {
7037 if(i>2) is32=lastbranch;
7038 else is32=1;
7039 }
7040 }
7041 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7042 {
7043 if(rs1[i-2]==0||rs2[i-2]==0)
7044 {
7045 if(rs1[i-2]) {
7046 is32|=1LL<<rs1[i-2];
7047 }
7048 if(rs2[i-2]) {
7049 is32|=1LL<<rs2[i-2];
7050 }
7051 }
7052 }
7053 }
7054 // If something jumps here with 64-bit values
7055 // then promote those registers to 64 bits
7056 if(bt[i])
7057 {
7058 uint64_t temp_is32=is32;
7059 for(j=i-1;j>=0;j--)
7060 {
7061 if(ba[j]==start+i*4)
7062 //temp_is32&=branch_regs[j].is32;
7063 temp_is32&=p32[j];
7064 }
7065 for(j=i;j<slen;j++)
7066 {
7067 if(ba[j]==start+i*4)
7068 temp_is32=1;
7069 }
7070 is32=temp_is32;
7071 }
7072 int type=itype[i];
7073 int op=opcode[i];
7074 int op2=opcode2[i];
7075 int rt=rt1[i];
7076 int s1=rs1[i];
7077 int s2=rs2[i];
7078 if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7079 // Branches don't write registers, consider the delay slot instead.
7080 type=itype[i+1];
7081 op=opcode[i+1];
7082 op2=opcode2[i+1];
7083 rt=rt1[i+1];
7084 s1=rs1[i+1];
7085 s2=rs2[i+1];
7086 lastbranch=is32;
7087 }
7088 switch(type) {
7089 case LOAD:
7090 if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
7091 opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
7092 is32&=~(1LL<<rt);
7093 else
7094 is32|=1LL<<rt;
7095 break;
7096 case STORE:
7097 case STORELR:
7098 break;
7099 case LOADLR:
7100 if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
7101 if(op==0x22) is32|=1LL<<rt; // LWL
7102 break;
7103 case IMM16:
7104 if (op==0x08||op==0x09|| // ADDI/ADDIU
7105 op==0x0a||op==0x0b|| // SLTI/SLTIU
7106 op==0x0c|| // ANDI
7107 op==0x0f) // LUI
7108 {
7109 is32|=1LL<<rt;
7110 }
7111 if(op==0x18||op==0x19) { // DADDI/DADDIU
7112 is32&=~(1LL<<rt);
7113 //if(imm[i]==0)
7114 // is32|=((is32>>s1)&1LL)<<rt;
7115 }
7116 if(op==0x0d||op==0x0e) { // ORI/XORI
7117 uint64_t sr=((is32>>s1)&1LL);
7118 is32&=~(1LL<<rt);
7119 is32|=sr<<rt;
7120 }
7121 break;
7122 case UJUMP:
7123 break;
7124 case RJUMP:
7125 break;
7126 case CJUMP:
7127 break;
7128 case SJUMP:
7129 break;
7130 case FJUMP:
7131 break;
7132 case ALU:
7133 if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
7134 is32|=1LL<<rt;
7135 }
7136 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7137 is32|=1LL<<rt;
7138 }
7139 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7140 uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
7141 is32&=~(1LL<<rt);
7142 is32|=sr<<rt;
7143 }
7144 else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
7145 if(s1==0&&s2==0) {
7146 is32|=1LL<<rt;
7147 }
7148 else if(s2==0) {
7149 uint64_t sr=((is32>>s1)&1LL);
7150 is32&=~(1LL<<rt);
7151 is32|=sr<<rt;
7152 }
7153 else if(s1==0) {
7154 uint64_t sr=((is32>>s2)&1LL);
7155 is32&=~(1LL<<rt);
7156 is32|=sr<<rt;
7157 }
7158 else {
7159 is32&=~(1LL<<rt);
7160 }
7161 }
7162 else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
7163 if(s1==0&&s2==0) {
7164 is32|=1LL<<rt;
7165 }
7166 else if(s2==0) {
7167 uint64_t sr=((is32>>s1)&1LL);
7168 is32&=~(1LL<<rt);
7169 is32|=sr<<rt;
7170 }
7171 else {
7172 is32&=~(1LL<<rt);
7173 }
7174 }
7175 break;
7176 case MULTDIV:
7177 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7178 is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7179 }
7180 else {
7181 is32|=(1LL<<HIREG)|(1LL<<LOREG);
7182 }
7183 break;
7184 case MOV:
7185 {
7186 uint64_t sr=((is32>>s1)&1LL);
7187 is32&=~(1LL<<rt);
7188 is32|=sr<<rt;
7189 }
7190 break;
7191 case SHIFT:
7192 if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7193 else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7194 break;
7195 case SHIFTIMM:
7196 is32|=1LL<<rt;
7197 // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7198 if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7199 break;
7200 case COP0:
7201 if(op2==0) is32|=1LL<<rt; // MFC0
7202 break;
7203 case COP1:
b9b61529 7204 case COP2:
57871462 7205 if(op2==0) is32|=1LL<<rt; // MFC1
7206 if(op2==1) is32&=~(1LL<<rt); // DMFC1
7207 if(op2==2) is32|=1LL<<rt; // CFC1
7208 break;
7209 case C1LS:
b9b61529 7210 case C2LS:
57871462 7211 break;
7212 case FLOAT:
7213 case FCONV:
7214 break;
7215 case FCOMP:
7216 break;
b9b61529 7217 case C2OP:
57871462 7218 case SYSCALL:
7139f3c8 7219 case HLECALL:
57871462 7220 break;
7221 default:
7222 break;
7223 }
7224 is32|=1;
7225 p32[i]=is32;
7226
7227 if(i>0)
7228 {
7229 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7230 {
7231 if(rt1[i-1]==31) // JAL/JALR
7232 {
7233 // Subroutine call will return here, don't alloc any registers
7234 is32=1;
7235 }
7236 else if(i+1<slen)
7237 {
7238 // Internal branch will jump here, match registers to caller
7239 is32=0x3FFFFFFFFLL;
7240 }
7241 }
7242 }
7243 }
7244}
7245
7246// Identify registers which may be assumed to contain 32-bit values
7247// and where optimizations will rely on this.
7248// This is used to determine whether backward branches can safely
7249// jump to a location with 64-bit values in registers.
7250static void provisional_r32()
7251{
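 // r32: registers that later code will rely on holding 32-bit values,
 // propagated backwards from branch targets and register uses.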
7252 u_int r32=0;
7253 int i;
7254
7255 for (i=slen-1;i>=0;i--)
7256 {
7257 int hr;
7258 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7259 {
7260 if(ba[i]<start || ba[i]>=(start+slen*4))
7261 {
7262 // Branch out of this block, don't need anything
7263 r32=0;
7264 }
7265 else
7266 {
7267 // Internal branch
7268 // Need whatever matches the target
7269 // (and doesn't get overwritten by the delay slot instruction)
7270 r32=0;
7271 int t=(ba[i]-start)>>2;
7272 if(ba[i]>start+i*4) {
7273 // Forward branch
7274 //if(!(requires_32bit[t]&~regs[i].was32))
7275 // r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7276 if(!(pr32[t]&~regs[i].was32))
7277 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7278 }else{
7279 // Backward branch
7280 if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7281 r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7282 }
7283 }
7284 // Conditional branch may need registers for following instructions
7285 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7286 {
7287 if(i<slen-2) {
7288 //r32|=requires_32bit[i+2];
7289 r32|=pr32[i+2];
7290 r32&=regs[i].was32;
7291 // Mark this address as a branch target since it may be called
7292 // upon return from interrupt
7293 //bt[i+2]=1;
7294 }
7295 }
7296 // Merge in delay slot
7297 if(!likely[i]) {
7298 // These are overwritten unless the branch is "likely"
7299 // and the delay slot is nullified if not taken
7300 r32&=~(1LL<<rt1[i+1]);
7301 r32&=~(1LL<<rt2[i+1]);
7302 }
7303 // Assume these are needed (delay slot)
7304 if(us1[i+1]>0)
7305 {
7306 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7307 }
7308 if(us2[i+1]>0)
7309 {
7310 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7311 }
7312 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7313 {
7314 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7315 }
7316 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7317 {
7318 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7319 }
7320 }
1e973cb0 7321 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 7322 {
7323 // SYSCALL instruction (software interrupt)
7324 r32=0;
7325 }
7326 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7327 {
7328 // ERET instruction (return from interrupt)
7329 r32=0;
7330 }
7331 // Check 32 bits
7332 r32&=~(1LL<<rt1[i]);
7333 r32&=~(1LL<<rt2[i]);
7334 if(us1[i]>0)
7335 {
7336 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7337 }
7338 if(us2[i]>0)
7339 {
7340 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7341 }
7342 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7343 {
7344 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7345 }
7346 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7347 {
7348 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7349 }
7350 //requires_32bit[i]=r32;
7351 pr32[i]=r32;
7352
7353 // Dirty registers which are 32-bit, require 32-bit input
7354 // as they will be written as 32-bit values
7355 for(hr=0;hr<HOST_REGS;hr++)
7356 {
7357 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7358 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7359 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7360 pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7361 //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7362 }
7363 }
7364 }
7365 }
7366}
7367
7368// Write back dirty registers as soon as we will no longer modify them,
7369// so that we don't end up with lots of writes at the branches.
7370void clean_registers(int istart,int iend,int wr)
7371{
7372 int i;
7373 int r;
7374 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7375 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7376 if(iend==slen-1) {
7377 will_dirty_i=will_dirty_next=0;
7378 wont_dirty_i=wont_dirty_next=0;
7379 }else{
7380 will_dirty_i=will_dirty_next=will_dirty[iend+1];
7381 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7382 }
7383 for (i=iend;i>=istart;i--)
7384 {
7385 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7386 {
7387 if(ba[i]<start || ba[i]>=(start+slen*4))
7388 {
7389 // Branch out of this block, flush all regs
7390 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7391 {
7392 // Unconditional branch
7393 will_dirty_i=0;
7394 wont_dirty_i=0;
7395 // Merge in delay slot (will dirty)
7396 for(r=0;r<HOST_REGS;r++) {
7397 if(r!=EXCLUDE_REG) {
7398 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7399 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7400 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7401 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7402 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7403 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7404 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7405 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7406 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7407 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7408 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7409 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7410 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7411 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7412 }
7413 }
7414 }
7415 else
7416 {
7417 // Conditional branch
7418 will_dirty_i=0;
7419 wont_dirty_i=wont_dirty_next;
7420 // Merge in delay slot (will dirty)
7421 for(r=0;r<HOST_REGS;r++) {
7422 if(r!=EXCLUDE_REG) {
7423 if(!likely[i]) {
7424 // Might not dirty if likely branch is not taken
7425 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7426 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7427 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7428 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7429 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7430 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7431 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7432 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7433 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7434 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7435 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7436 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7437 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7438 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7439 }
7440 }
7441 }
7442 }
7443 // Merge in delay slot (wont dirty)
7444 for(r=0;r<HOST_REGS;r++) {
7445 if(r!=EXCLUDE_REG) {
7446 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7447 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7448 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7449 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7450 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7451 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7452 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7453 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7454 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7455 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7456 }
7457 }
7458 if(wr) {
7459 #ifndef DESTRUCTIVE_WRITEBACK
7460 branch_regs[i].dirty&=wont_dirty_i;
7461 #endif
7462 branch_regs[i].dirty|=will_dirty_i;
7463 }
7464 }
7465 else
7466 {
7467 // Internal branch
7468 if(ba[i]<=start+i*4) {
7469 // Backward branch
7470 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7471 {
7472 // Unconditional branch
7473 temp_will_dirty=0;
7474 temp_wont_dirty=0;
7475 // Merge in delay slot (will dirty)
7476 for(r=0;r<HOST_REGS;r++) {
7477 if(r!=EXCLUDE_REG) {
7478 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7479 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7480 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7481 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7482 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7483 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7484 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7485 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7486 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7487 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7488 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7489 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7490 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7491 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7492 }
7493 }
7494 } else {
7495 // Conditional branch (not taken case)
7496 temp_will_dirty=will_dirty_next;
7497 temp_wont_dirty=wont_dirty_next;
7498 // Merge in delay slot (will dirty)
7499 for(r=0;r<HOST_REGS;r++) {
7500 if(r!=EXCLUDE_REG) {
7501 if(!likely[i]) {
7502 // Will not dirty if likely branch is not taken
7503 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7504 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7505 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7506 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7507 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7508 if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7509 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7510 //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7511 //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7512 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7513 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7514 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7515 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7516 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7517 }
7518 }
7519 }
7520 }
7521 // Merge in delay slot (wont dirty)
7522 for(r=0;r<HOST_REGS;r++) {
7523 if(r!=EXCLUDE_REG) {
7524 if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7525 if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7526 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7527 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7528 if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7529 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7530 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7531 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7532 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7533 if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7534 }
7535 }
7536 // Deal with changed mappings
7537 if(i<iend) {
7538 for(r=0;r<HOST_REGS;r++) {
7539 if(r!=EXCLUDE_REG) {
7540 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7541 temp_will_dirty&=~(1<<r);
7542 temp_wont_dirty&=~(1<<r);
7543 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7544 temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7545 temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7546 } else {
7547 temp_will_dirty|=1<<r;
7548 temp_wont_dirty|=1<<r;
7549 }
7550 }
7551 }
7552 }
7553 }
7554 if(wr) {
7555 will_dirty[i]=temp_will_dirty;
7556 wont_dirty[i]=temp_wont_dirty;
7557 clean_registers((ba[i]-start)>>2,i-1,0);
7558 }else{
7559 // Limit recursion. It can take an excessive amount
7560 // of time if there are a lot of nested loops.
7561 will_dirty[(ba[i]-start)>>2]=0;
7562 wont_dirty[(ba[i]-start)>>2]=-1;
7563 }
7564 }
7565 /*else*/ if(1)
7566 {
7567 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7568 {
7569 // Unconditional branch
7570 will_dirty_i=0;
7571 wont_dirty_i=0;
7572 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7573 for(r=0;r<HOST_REGS;r++) {
7574 if(r!=EXCLUDE_REG) {
7575 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7576 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7577 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7578 }
e3234ecf 7579 if(branch_regs[i].regmap[r]>=0) {
7580 will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7581 wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7582 }
57871462 7583 }
7584 }
7585 //}
7586 // Merge in delay slot
7587 for(r=0;r<HOST_REGS;r++) {
7588 if(r!=EXCLUDE_REG) {
7589 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7590 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7591 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7592 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7593 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7594 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7595 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7596 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7597 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7598 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7599 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7600 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7601 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7602 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7603 }
7604 }
7605 } else {
7606 // Conditional branch
7607 will_dirty_i=will_dirty_next;
7608 wont_dirty_i=wont_dirty_next;
7609 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7610 for(r=0;r<HOST_REGS;r++) {
7611 if(r!=EXCLUDE_REG) {
e3234ecf 7612 signed char target_reg=branch_regs[i].regmap[r];
7613 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
57871462 7614 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7615 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7616 }
e3234ecf 7617 else if(target_reg>=0) {
7618 will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7619 wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
57871462 7620 }
7621 // Treat delay slot as part of branch too
7622 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7623 will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7624 wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7625 }
7626 else
7627 {
7628 will_dirty[i+1]&=~(1<<r);
7629 }*/
7630 }
7631 }
7632 //}
7633 // Merge in delay slot
7634 for(r=0;r<HOST_REGS;r++) {
7635 if(r!=EXCLUDE_REG) {
7636 if(!likely[i]) {
7637 // Might not dirty if likely branch is not taken
7638 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7639 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7640 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7641 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7642 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7643 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7644 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7645 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7646 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7647 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7648 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7649 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7650 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7651 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7652 }
7653 }
7654 }
7655 }
e3234ecf 7656 // Merge in delay slot (won't dirty)
57871462 7657 for(r=0;r<HOST_REGS;r++) {
7658 if(r!=EXCLUDE_REG) {
7659 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7660 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7661 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7662 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7663 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7664 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7665 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7666 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7667 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7668 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7669 }
7670 }
7671 if(wr) {
7672 #ifndef DESTRUCTIVE_WRITEBACK
7673 branch_regs[i].dirty&=wont_dirty_i;
7674 #endif
7675 branch_regs[i].dirty|=will_dirty_i;
7676 }
7677 }
7678 }
7679 }
1e973cb0 7680 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 7681 {
7682 // SYSCALL instruction (software interrupt)
7683 will_dirty_i=0;
7684 wont_dirty_i=0;
7685 }
7686 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7687 {
7688 // ERET instruction (return from interrupt)
7689 will_dirty_i=0;
7690 wont_dirty_i=0;
7691 }
7692 will_dirty_next=will_dirty_i;
7693 wont_dirty_next=wont_dirty_i;
7694 for(r=0;r<HOST_REGS;r++) {
7695 if(r!=EXCLUDE_REG) {
7696 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7697 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7698 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7699 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7700 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7701 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7702 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7703 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7704 if(i>istart) {
7705 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7706 {
7707 // Don't store a register immediately after writing it,
 7708 // as it may prevent dual-issue.
7709 if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7710 if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7711 }
7712 }
7713 }
7714 }
7715 // Save it
7716 will_dirty[i]=will_dirty_i;
7717 wont_dirty[i]=wont_dirty_i;
7718 // Mark registers that won't be dirtied as not dirty
7719 if(wr) {
7720 /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7721 for(r=0;r<HOST_REGS;r++) {
7722 if((will_dirty_i>>r)&1) {
7723 printf(" r%d",r);
7724 }
7725 }
7726 printf("\n");*/
7727
7728 //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7729 regs[i].dirty|=will_dirty_i;
7730 #ifndef DESTRUCTIVE_WRITEBACK
7731 regs[i].dirty&=wont_dirty_i;
7732 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7733 {
7734 if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7735 for(r=0;r<HOST_REGS;r++) {
7736 if(r!=EXCLUDE_REG) {
7737 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7738 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
 7739 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7740 }
7741 }
7742 }
7743 }
7744 else
7745 {
7746 if(i<iend) {
7747 for(r=0;r<HOST_REGS;r++) {
7748 if(r!=EXCLUDE_REG) {
7749 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7750 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
 7751 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7752 }
7753 }
7754 }
7755 }
7756 #endif
7757 //}
7758 }
7759 // Deal with changed mappings
7760 temp_will_dirty=will_dirty_i;
7761 temp_wont_dirty=wont_dirty_i;
7762 for(r=0;r<HOST_REGS;r++) {
7763 if(r!=EXCLUDE_REG) {
7764 int nr;
7765 if(regs[i].regmap[r]==regmap_pre[i][r]) {
7766 if(wr) {
7767 #ifndef DESTRUCTIVE_WRITEBACK
7768 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7769 #endif
7770 regs[i].wasdirty|=will_dirty_i&(1<<r);
7771 }
7772 }
f776eb14 7773 else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
57871462 7774 // Register moved to a different register
7775 will_dirty_i&=~(1<<r);
7776 wont_dirty_i&=~(1<<r);
7777 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7778 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7779 if(wr) {
7780 #ifndef DESTRUCTIVE_WRITEBACK
7781 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7782 #endif
7783 regs[i].wasdirty|=will_dirty_i&(1<<r);
7784 }
7785 }
7786 else {
7787 will_dirty_i&=~(1<<r);
7788 wont_dirty_i&=~(1<<r);
7789 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7790 will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7791 wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7792 } else {
7793 wont_dirty_i|=1<<r;
 7794 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7795 }
7796 }
7797 }
7798 }
7799 }
7800}
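
/* Illustrative sketch (added for clarity, not part of the dynarec): the
   "Register moved to a different register" case above carries the recorded
   will-dirty/wont-dirty status along with the value by copying one bit of
   the saved mask from the old host-register slot (nr) into the new one (r).
   A standalone restatement of that bit transfer, assuming <=32 host regs: */
static unsigned int move_dirty_bit(unsigned int cur, unsigned int saved,
                                   int nr, int r)
{
  cur &= ~(1u << r);                 /* forget what slot r used to say    */
  cur |= ((saved >> nr) & 1u) << r;  /* inherit the bit slot nr had saved */
  return cur;
}
/* will_dirty_i and wont_dirty_i would each be passed through this with the
   temp_* copies taken before the loop. */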
7801
4600ba03 7802#ifdef DISASM
57871462 7803 /* disassembly */
7804void disassemble_inst(int i)
7805{
7806 if (bt[i]) printf("*"); else printf(" ");
7807 switch(itype[i]) {
7808 case UJUMP:
7809 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7810 case CJUMP:
7811 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7812 case SJUMP:
7813 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7814 case FJUMP:
7815 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7816 case RJUMP:
74426039 7817 if (opcode[i]==0x9&&rt1[i]!=31)
5067f341 7818 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7819 else
7820 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7821 break;
57871462 7822 case SPAN:
7823 printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7824 case IMM16:
7825 if(opcode[i]==0xf) //LUI
7826 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7827 else
7828 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7829 break;
7830 case LOAD:
7831 case LOADLR:
7832 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7833 break;
7834 case STORE:
7835 case STORELR:
7836 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7837 break;
7838 case ALU:
7839 case SHIFT:
7840 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7841 break;
7842 case MULTDIV:
7843 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7844 break;
7845 case SHIFTIMM:
7846 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7847 break;
7848 case MOV:
7849 if((opcode2[i]&0x1d)==0x10)
7850 printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7851 else if((opcode2[i]&0x1d)==0x11)
7852 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7853 else
7854 printf (" %x: %s\n",start+i*4,insn[i]);
7855 break;
7856 case COP0:
7857 if(opcode2[i]==0)
7858 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7859 else if(opcode2[i]==4)
7860 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7861 else printf (" %x: %s\n",start+i*4,insn[i]);
7862 break;
7863 case COP1:
7864 if(opcode2[i]<3)
7865 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7866 else if(opcode2[i]>3)
7867 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7868 else printf (" %x: %s\n",start+i*4,insn[i]);
7869 break;
b9b61529 7870 case COP2:
7871 if(opcode2[i]<3)
7872 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7873 else if(opcode2[i]>3)
7874 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7875 else printf (" %x: %s\n",start+i*4,insn[i]);
7876 break;
57871462 7877 case C1LS:
7878 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7879 break;
b9b61529 7880 case C2LS:
7881 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7882 break;
1e973cb0 7883 case INTCALL:
7884 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7885 break;
57871462 7886 default:
7887 //printf (" %s %8x\n",insn[i],source[i]);
7888 printf (" %x: %s\n",start+i*4,insn[i]);
7889 }
7890}
4600ba03 7891#else
7892static void disassemble_inst(int i) {}
7893#endif // DISASM
57871462 7894
dc990066 7895// clear the state completely, instead of just marking
7896// things invalid like invalidate_all_pages() does
7897void new_dynarec_clear_full()
57871462 7898{
57871462 7899 int n;
35775df7 7900 out=(u_char *)BASE_ADDR;
7901 memset(invalid_code,1,sizeof(invalid_code));
7902 memset(hash_table,0xff,sizeof(hash_table));
57871462 7903 memset(mini_ht,-1,sizeof(mini_ht));
7904 memset(restore_candidate,0,sizeof(restore_candidate));
dc990066 7905 memset(shadow,0,sizeof(shadow));
57871462 7906 copy=shadow;
7907 expirep=16384; // Expiry pointer, +2 blocks
7908 pending_exception=0;
7909 literalcount=0;
57871462 7910 stop_after_jal=0;
9be4ba64 7911 inv_code_start=inv_code_end=~0;
bedfea38 7912 gte_reads_flags=0;
57871462 7913 // TLB
af4ee1fe 7914#ifndef DISABLE_TLB
57871462 7915 using_tlb=0;
af4ee1fe 7916#endif
dadf55f2 7917 sp_in_mirror=0;
57871462 7918 for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7919 memory_map[n]=-1;
7920 for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7921 memory_map[n]=((u_int)rdram-0x80000000)>>2;
7922 for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7923 memory_map[n]=-1;
dc990066 7924 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7925 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7926 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7927}
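
/* Illustrative sketch (added for clarity, not part of the dynarec):
   memory_map[], (re)filled above, holds one entry per 4KB guest page.  Each
   entry is (host page base - guest page base)>>2, or -1 for an unmapped
   page, so guest-to-host translation is a table lookup plus an add -- the
   same arithmetic used later when new_recompile_block() derives 'source'
   on the TLB path.  Standalone restatement, assuming the 32-bit pointer
   model the rest of this file relies on; the parameter names are mine: */
static void *guest_to_host(unsigned int guest_addr, const int *map_tbl)
{
  int entry = map_tbl[guest_addr >> 12];
  if (entry < 0)
    return 0;                                  /* page not mapped */
  /* the table stores a word offset, so scale it back up by 4 */
  return (void *)(guest_addr + ((unsigned int)entry << 2));
}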
7928
7929void new_dynarec_init()
7930{
7931 printf("Init new dynarec\n");
7932 out=(u_char *)BASE_ADDR;
7933 if (mmap (out, 1<<TARGET_SIZE_2,
7934 PROT_READ | PROT_WRITE | PROT_EXEC,
7935 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 7936 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7937#ifdef MUPEN64
7938 rdword=&readmem_dword;
7939 fake_pc.f.r.rs=&readmem_dword;
7940 fake_pc.f.r.rt=&readmem_dword;
7941 fake_pc.f.r.rd=&readmem_dword;
7942#endif
7943 int n;
7944 new_dynarec_clear_full();
7945#ifdef HOST_IMM8
7946 // Copy this into local area so we don't have to put it in every literal pool
7947 invc_ptr=invalid_code;
7948#endif
24385cae 7949#ifdef MUPEN64
57871462 7950 for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7951 writemem[n] = write_nomem_new;
7952 writememb[n] = write_nomemb_new;
7953 writememh[n] = write_nomemh_new;
24385cae 7954#ifndef FORCE32
57871462 7955 writememd[n] = write_nomemd_new;
24385cae 7956#endif
57871462 7957 readmem[n] = read_nomem_new;
7958 readmemb[n] = read_nomemb_new;
7959 readmemh[n] = read_nomemh_new;
24385cae 7960#ifndef FORCE32
57871462 7961 readmemd[n] = read_nomemd_new;
24385cae 7962#endif
57871462 7963 }
7964 for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7965 writemem[n] = write_rdram_new;
7966 writememb[n] = write_rdramb_new;
7967 writememh[n] = write_rdramh_new;
24385cae 7968#ifndef FORCE32
57871462 7969 writememd[n] = write_rdramd_new;
24385cae 7970#endif
57871462 7971 }
7972 for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7973 writemem[n] = write_nomem_new;
7974 writememb[n] = write_nomemb_new;
7975 writememh[n] = write_nomemh_new;
24385cae 7976#ifndef FORCE32
57871462 7977 writememd[n] = write_nomemd_new;
24385cae 7978#endif
57871462 7979 readmem[n] = read_nomem_new;
7980 readmemb[n] = read_nomemb_new;
7981 readmemh[n] = read_nomemh_new;
24385cae 7982#ifndef FORCE32
57871462 7983 readmemd[n] = read_nomemd_new;
24385cae 7984#endif
57871462 7985 }
24385cae 7986#endif
57871462 7987 tlb_hacks();
7988 arch_init();
7989}
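
/* Illustrative sketch (added for clarity, not part of the dynarec): the
   translation cache set up above is one fixed read/write/execute mapping.
   The same allocation in isolation, using the portable failure check --
   mmap() reports failure as MAP_FAILED, not as a null or negative pointer.
   <sys/mman.h> is already included at the top of this file; the function
   name and parameters below are placeholders: */
static void *alloc_translation_cache(void *fixed_base, size_t size)
{
  void *p = mmap(fixed_base, size,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                 -1, 0);
  return (p == MAP_FAILED) ? 0 : p;
}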
7990
7991void new_dynarec_cleanup()
7992{
7993 int n;
7994 if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7995 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7996 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7997 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7998 #ifdef ROM_COPY
7999 if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
8000 #endif
8001}
8002
8003int new_recompile_block(int addr)
8004{
8005/*
8006 if(addr==0x800cd050) {
8007 int block;
8008 for(block=0x80000;block<0x80800;block++) invalidate_block(block);
8009 int n;
8010 for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
8011 }
8012*/
8013 //if(Count==365117028) tracedebug=1;
8014 assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8015 //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8016 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
8017 //if(debug)
8018 //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
8019 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
8020 /*if(Count>=312978186) {
8021 rlist();
8022 }*/
8023 //rlist();
8024 start = (u_int)addr&~3;
8025 //assert(((u_int)addr&1)==0);
2f546f9a 8026 new_dynarec_did_compile=1;
7139f3c8 8027#ifdef PCSX
dadf55f2 8028 if(!sp_in_mirror&&(signed int)(psxRegs.GPR.n.sp&0xffe00000)>0x80200000&&
8029 0x10000<=psxRegs.GPR.n.sp&&(psxRegs.GPR.n.sp&~0xe0e00000)<RAM_SIZE) {
c2e3bd42 8030 printf("SP hack enabled (%08x), @%08x\n", psxRegs.GPR.n.sp, psxRegs.pc);
dadf55f2 8031 sp_in_mirror=1;
8032 }
9ad4d757 8033 if (Config.HLE && start == 0x80001000) // hlecall
560e4a12 8034 {
7139f3c8 8035 // XXX: is this enough? Maybe check hleSoftCall?
bb5285ef 8036 u_int beginning=(u_int)out;
7139f3c8 8037 u_int page=get_page(start);
7139f3c8 8038 invalid_code[start>>12]=0;
8039 emit_movimm(start,0);
8040 emit_writeword(0,(int)&pcaddr);
bb5285ef 8041 emit_jmp((int)new_dyna_leave);
15776b68 8042 literal_pool(0);
bb5285ef 8043#ifdef __arm__
8044 __clear_cache((void *)beginning,out);
8045#endif
9ad4d757 8046 ll_add(jump_in+page,start,(void *)beginning);
7139f3c8 8047 return 0;
8048 }
560e4a12 8049 else if ((u_int)addr < 0x00200000 ||
8050 (0xa0000000 <= addr && addr < 0xa0200000)) {
7139f3c8 8051 // used for BIOS calls mostly?
560e4a12 8052 source = (u_int *)((u_int)rdram+(start&0x1fffff));
8053 pagelimit = (addr&0xa0000000)|0x00200000;
8054 }
8055 else if (!Config.HLE && (
8056/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
8057 (0xbfc00000 <= addr && addr < 0xbfc80000))) {
8058 // BIOS
8059 source = (u_int *)((u_int)psxR+(start&0x7ffff));
8060 pagelimit = (addr&0xfff00000)|0x80000;
7139f3c8 8061 }
8062 else
8063#endif
3d624f89 8064#ifdef MUPEN64
57871462 8065 if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
8066 source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
8067 pagelimit = 0xa4001000;
8068 }
3d624f89 8069 else
8070#endif
4cb76aa4 8071 if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
57871462 8072 source = (u_int *)((u_int)rdram+start-0x80000000);
4cb76aa4 8073 pagelimit = 0x80000000+RAM_SIZE;
57871462 8074 }
90ae6d4e 8075#ifndef DISABLE_TLB
57871462 8076 else if ((signed int)addr >= (signed int)0xC0000000) {
8077 //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
8078 //if(tlb_LUT_r[start>>12])
8079 //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
8080 if((signed int)memory_map[start>>12]>=0) {
8081 source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
8082 pagelimit=(start+4096)&0xFFFFF000;
8083 int map=memory_map[start>>12];
8084 int i;
8085 for(i=0;i<5;i++) {
8086 //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
8087 if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
8088 }
8089 assem_debug("pagelimit=%x\n",pagelimit);
8090 assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
8091 }
8092 else {
8093 assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
8094 //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
560e4a12 8095 return -1; // Caller will invoke exception handler
57871462 8096 }
8097 //printf("source= %x\n",(int)source);
8098 }
90ae6d4e 8099#endif
57871462 8100 else {
8101 printf("Compile at bogus memory address: %x \n", (int)addr);
8102 exit(1);
8103 }
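  /* Sketch, compiled out (added for clarity): the PS1 exposes its RAM
     (RAM_SIZE bytes, normally 2MB) at 0x00000000, 0x80000000 and 0xa0000000,
     which is why the KUSEG/KSEG1 branch above can reduce a guest PC to an
     offset inside rdram by masking the low 21 bits, while the KSEG0 branch
     subtracts the segment base instead.  The example value is made up. */
#if 0
  {
    unsigned int pc = 0xa0000500;            /* hypothetical KSEG1 address */
    unsigned int ram_offs = pc & 0x1fffff;   /* 0x500: offset into rdram   */
    (void)ram_offs;
  }
#endif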
8104
8105 /* Pass 1: disassemble */
8106 /* Pass 2: register dependencies, branch targets */
8107 /* Pass 3: register allocation */
8108 /* Pass 4: branch dependencies */
8109 /* Pass 5: pre-alloc */
8110 /* Pass 6: optimize clean/dirty state */
8111 /* Pass 7: flag 32-bit registers */
8112 /* Pass 8: assembly */
8113 /* Pass 9: linker */
8114 /* Pass 10: garbage collection / free memory */
8115
8116 int i,j;
8117 int done=0;
8118 unsigned int type,op,op2;
8119
8120 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
8121
8122 /* Pass 1 disassembly */
8123
8124 for(i=0;!done;i++) {
e1190b87 8125 bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
8126 minimum_free_regs[i]=0;
57871462 8127 opcode[i]=op=source[i]>>26;
8128 switch(op)
8129 {
8130 case 0x00: strcpy(insn[i],"special"); type=NI;
8131 op2=source[i]&0x3f;
8132 switch(op2)
8133 {
8134 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
8135 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
8136 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
8137 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
8138 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
8139 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
8140 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
8141 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
8142 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
8143 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
8144 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
8145 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
8146 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
8147 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
8148 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
57871462 8149 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
8150 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
8151 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
8152 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
57871462 8153 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
8154 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
8155 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
8156 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
8157 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
8158 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
8159 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
8160 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
8161 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
8162 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
57871462 8163 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
8164 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
8165 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
8166 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
8167 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
8168 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7f2607ea 8169#ifndef FORCE32
8170 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
8171 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
8172 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
8173 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
8174 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
8175 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
8176 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
8177 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
8178 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
8179 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
8180 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
57871462 8181 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
8182 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
8183 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
8184 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
8185 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
8186 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7f2607ea 8187#endif
57871462 8188 }
8189 break;
8190 case 0x01: strcpy(insn[i],"regimm"); type=NI;
8191 op2=(source[i]>>16)&0x1f;
8192 switch(op2)
8193 {
8194 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
8195 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8196 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8197 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8198 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8199 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8200 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8201 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8202 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8203 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8204 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8205 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8206 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8207 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8208 }
8209 break;
8210 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8211 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8212 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8213 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8214 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8215 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8216 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8217 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8218 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8219 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8220 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8221 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8222 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8223 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8224 case 0x10: strcpy(insn[i],"cop0"); type=NI;
8225 op2=(source[i]>>21)&0x1f;
8226 switch(op2)
8227 {
8228 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8229 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8230 case 0x10: strcpy(insn[i],"tlb"); type=NI;
8231 switch(source[i]&0x3f)
8232 {
8233 case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8234 case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8235 case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8236 case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
576bbd8f 8237#ifdef PCSX
8238 case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8239#else
57871462 8240 case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
576bbd8f 8241#endif
57871462 8242 }
8243 }
8244 break;
8245 case 0x11: strcpy(insn[i],"cop1"); type=NI;
8246 op2=(source[i]>>21)&0x1f;
8247 switch(op2)
8248 {
8249 case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8250 case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8251 case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8252 case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8253 case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8254 case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8255 case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8256 switch((source[i]>>16)&0x3)
8257 {
8258 case 0x00: strcpy(insn[i],"BC1F"); break;
8259 case 0x01: strcpy(insn[i],"BC1T"); break;
8260 case 0x02: strcpy(insn[i],"BC1FL"); break;
8261 case 0x03: strcpy(insn[i],"BC1TL"); break;
8262 }
8263 break;
8264 case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8265 switch(source[i]&0x3f)
8266 {
8267 case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8268 case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8269 case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8270 case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8271 case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8272 case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8273 case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8274 case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8275 case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8276 case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8277 case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8278 case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8279 case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8280 case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8281 case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8282 case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8283 case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8284 case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8285 case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8286 case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8287 case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8288 case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8289 case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8290 case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8291 case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8292 case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8293 case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8294 case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8295 case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8296 case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8297 case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8298 case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8299 case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8300 case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8301 case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8302 }
8303 break;
8304 case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8305 switch(source[i]&0x3f)
8306 {
8307 case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8308 case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8309 case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8310 case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8311 case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8312 case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8313 case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8314 case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8315 case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8316 case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8317 case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8318 case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8319 case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8320 case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8321 case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8322 case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8323 case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8324 case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8325 case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8326 case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8327 case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8328 case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8329 case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8330 case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8331 case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8332 case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8333 case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8334 case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8335 case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8336 case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8337 case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8338 case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8339 case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8340 case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8341 case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8342 }
8343 break;
8344 case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8345 switch(source[i]&0x3f)
8346 {
8347 case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8348 case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8349 }
8350 break;
8351 case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8352 switch(source[i]&0x3f)
8353 {
8354 case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8355 case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8356 }
8357 break;
8358 }
8359 break;
909168d6 8360#ifndef FORCE32
57871462 8361 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8362 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8363 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8364 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8365 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8366 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8367 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8368 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
996cc15d 8369#endif
57871462 8370 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8371 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8372 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8373 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8374 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8375 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8376 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
64bd6f82 8377#ifndef FORCE32
57871462 8378 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
64bd6f82 8379#endif
57871462 8380 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8381 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8382 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8383 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
996cc15d 8384#ifndef FORCE32
57871462 8385 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8386 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
996cc15d 8387#endif
57871462 8388 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8389 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8390 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8391 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
996cc15d 8392#ifndef FORCE32
57871462 8393 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8394 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8395 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
996cc15d 8396#endif
57871462 8397 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8398 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
996cc15d 8399#ifndef FORCE32
57871462 8400 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8401 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8402 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
996cc15d 8403#endif
b9b61529 8404#ifdef PCSX
8405 case 0x12: strcpy(insn[i],"COP2"); type=NI;
8406 op2=(source[i]>>21)&0x1f;
bedfea38 8407 //if (op2 & 0x10) {
8408 if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
c7abc864 8409 if (gte_handlers[source[i]&0x3f]!=NULL) {
bedfea38 8410 if (gte_regnames[source[i]&0x3f]!=NULL)
8411 strcpy(insn[i],gte_regnames[source[i]&0x3f]);
8412 else
8413 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
c7abc864 8414 type=C2OP;
8415 }
8416 }
8417 else switch(op2)
b9b61529 8418 {
8419 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8420 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8421 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8422 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
b9b61529 8423 }
8424 break;
8425 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8426 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8427 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8428#endif
90ae6d4e 8429 default: strcpy(insn[i],"???"); type=NI;
75dec299 8430 printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
90ae6d4e 8431 break;
57871462 8432 }
8433 itype[i]=type;
8434 opcode2[i]=op2;
8435 /* Get registers/immediates */
8436 lt1[i]=0;
8437 us1[i]=0;
8438 us2[i]=0;
8439 dep1[i]=0;
8440 dep2[i]=0;
bedfea38 8441 gte_rs[i]=gte_rt[i]=0;
57871462 8442 switch(type) {
8443 case LOAD:
8444 rs1[i]=(source[i]>>21)&0x1f;
8445 rs2[i]=0;
8446 rt1[i]=(source[i]>>16)&0x1f;
8447 rt2[i]=0;
8448 imm[i]=(short)source[i];
8449 break;
8450 case STORE:
8451 case STORELR:
8452 rs1[i]=(source[i]>>21)&0x1f;
8453 rs2[i]=(source[i]>>16)&0x1f;
8454 rt1[i]=0;
8455 rt2[i]=0;
8456 imm[i]=(short)source[i];
8457 if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8458 break;
8459 case LOADLR:
8460 // LWL/LWR only load part of the register,
8461 // therefore the target register must be treated as a source too
8462 rs1[i]=(source[i]>>21)&0x1f;
8463 rs2[i]=(source[i]>>16)&0x1f;
8464 rt1[i]=(source[i]>>16)&0x1f;
8465 rt2[i]=0;
8466 imm[i]=(short)source[i];
8467 if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8468 if(op==0x26) dep1[i]=rt1[i]; // LWR
8469 break;
8470 case IMM16:
8471 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8472 else rs1[i]=(source[i]>>21)&0x1f;
8473 rs2[i]=0;
8474 rt1[i]=(source[i]>>16)&0x1f;
8475 rt2[i]=0;
8476 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8477 imm[i]=(unsigned short)source[i];
8478 }else{
8479 imm[i]=(short)source[i];
8480 }
8481 if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8482 if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8483 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8484 break;
8485 case UJUMP:
8486 rs1[i]=0;
8487 rs2[i]=0;
8488 rt1[i]=0;
8489 rt2[i]=0;
8490 // The JAL instruction writes to r31.
8491 if (op&1) {
8492 rt1[i]=31;
8493 }
8494 rs2[i]=CCREG;
8495 break;
8496 case RJUMP:
8497 rs1[i]=(source[i]>>21)&0x1f;
8498 rs2[i]=0;
8499 rt1[i]=0;
8500 rt2[i]=0;
5067f341 8501 // The JALR instruction writes to rd.
57871462 8502 if (op2&1) {
5067f341 8503 rt1[i]=(source[i]>>11)&0x1f;
57871462 8504 }
8505 rs2[i]=CCREG;
8506 break;
8507 case CJUMP:
8508 rs1[i]=(source[i]>>21)&0x1f;
8509 rs2[i]=(source[i]>>16)&0x1f;
8510 rt1[i]=0;
8511 rt2[i]=0;
8512 if(op&2) { // BGTZ/BLEZ
8513 rs2[i]=0;
8514 }
8515 us1[i]=rs1[i];
8516 us2[i]=rs2[i];
8517 likely[i]=op>>4;
8518 break;
8519 case SJUMP:
8520 rs1[i]=(source[i]>>21)&0x1f;
8521 rs2[i]=CCREG;
8522 rt1[i]=0;
8523 rt2[i]=0;
8524 us1[i]=rs1[i];
8525 if(op2&0x10) { // BxxAL
8526 rt1[i]=31;
8527 // NOTE: If the branch is not taken, r31 is still overwritten
8528 }
8529 likely[i]=(op2&2)>>1;
8530 break;
8531 case FJUMP:
8532 rs1[i]=FSREG;
8533 rs2[i]=CSREG;
8534 rt1[i]=0;
8535 rt2[i]=0;
8536 likely[i]=((source[i])>>17)&1;
8537 break;
8538 case ALU:
8539 rs1[i]=(source[i]>>21)&0x1f; // source
8540 rs2[i]=(source[i]>>16)&0x1f; // subtract amount
8541 rt1[i]=(source[i]>>11)&0x1f; // destination
8542 rt2[i]=0;
8543 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8544 us1[i]=rs1[i];us2[i]=rs2[i];
8545 }
8546 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8547 dep1[i]=rs1[i];dep2[i]=rs2[i];
8548 }
8549 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8550 dep1[i]=rs1[i];dep2[i]=rs2[i];
8551 }
8552 break;
8553 case MULTDIV:
8554 rs1[i]=(source[i]>>21)&0x1f; // source
8555 rs2[i]=(source[i]>>16)&0x1f; // divisor
8556 rt1[i]=HIREG;
8557 rt2[i]=LOREG;
8558 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8559 us1[i]=rs1[i];us2[i]=rs2[i];
8560 }
8561 break;
8562 case MOV:
8563 rs1[i]=0;
8564 rs2[i]=0;
8565 rt1[i]=0;
8566 rt2[i]=0;
8567 if(op2==0x10) rs1[i]=HIREG; // MFHI
8568 if(op2==0x11) rt1[i]=HIREG; // MTHI
8569 if(op2==0x12) rs1[i]=LOREG; // MFLO
8570 if(op2==0x13) rt1[i]=LOREG; // MTLO
8571 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8572 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8573 dep1[i]=rs1[i];
8574 break;
8575 case SHIFT:
8576 rs1[i]=(source[i]>>16)&0x1f; // target of shift
8577 rs2[i]=(source[i]>>21)&0x1f; // shift amount
8578 rt1[i]=(source[i]>>11)&0x1f; // destination
8579 rt2[i]=0;
8580 // DSLLV/DSRLV/DSRAV are 64-bit
8581 if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8582 break;
8583 case SHIFTIMM:
8584 rs1[i]=(source[i]>>16)&0x1f;
8585 rs2[i]=0;
8586 rt1[i]=(source[i]>>11)&0x1f;
8587 rt2[i]=0;
8588 imm[i]=(source[i]>>6)&0x1f;
8589 // DSxx32 instructions
8590 if(op2>=0x3c) imm[i]|=0x20;
8591 // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8592 if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8593 break;
8594 case COP0:
8595 rs1[i]=0;
8596 rs2[i]=0;
8597 rt1[i]=0;
8598 rt2[i]=0;
8599 if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8600 if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8601 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8602 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8603 break;
8604 case COP1:
8605 rs1[i]=0;
8606 rs2[i]=0;
8607 rt1[i]=0;
8608 rt2[i]=0;
8609 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8610 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8611 if(op2==5) us1[i]=rs1[i]; // DMTC1
8612 rs2[i]=CSREG;
8613 break;
bedfea38 8614 case COP2:
8615 rs1[i]=0;
8616 rs2[i]=0;
8617 rt1[i]=0;
8618 rt2[i]=0;
8619 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
8620 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
8621 rs2[i]=CSREG;
8622 int gr=(source[i]>>11)&0x1F;
8623 switch(op2)
8624 {
8625 case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
8626 case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
8627 case 0x02: gte_rs[i]=1ll<<(gr+32); // CFC2
8628 if(gr==31&&!gte_reads_flags) {
d3f3bf09 8629 assem_debug("gte flag read encountered @%08x\n",addr + i*4);
bedfea38 8630 gte_reads_flags=1;
8631 }
8632 break;
8633 case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
8634 }
8635 break;
57871462 8636 case C1LS:
8637 rs1[i]=(source[i]>>21)&0x1F;
8638 rs2[i]=CSREG;
8639 rt1[i]=0;
8640 rt2[i]=0;
8641 imm[i]=(short)source[i];
8642 break;
b9b61529 8643 case C2LS:
8644 rs1[i]=(source[i]>>21)&0x1F;
8645 rs2[i]=0;
8646 rt1[i]=0;
8647 rt2[i]=0;
8648 imm[i]=(short)source[i];
bedfea38 8649 if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
8650 else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
8651 break;
8652 case C2OP:
8653 rs1[i]=0;
8654 rs2[i]=0;
8655 rt1[i]=0;
8656 rt2[i]=0;
8657 gte_rt[i]=1ll<<63; // every op changes flags
8658 // TODO: other regs?
b9b61529 8659 break;
57871462 8660 case FLOAT:
8661 case FCONV:
8662 rs1[i]=0;
8663 rs2[i]=CSREG;
8664 rt1[i]=0;
8665 rt2[i]=0;
8666 break;
8667 case FCOMP:
8668 rs1[i]=FSREG;
8669 rs2[i]=CSREG;
8670 rt1[i]=FSREG;
8671 rt2[i]=0;
8672 break;
8673 case SYSCALL:
7139f3c8 8674 case HLECALL:
1e973cb0 8675 case INTCALL:
57871462 8676 rs1[i]=CCREG;
8677 rs2[i]=0;
8678 rt1[i]=0;
8679 rt2[i]=0;
8680 break;
8681 default:
8682 rs1[i]=0;
8683 rs2[i]=0;
8684 rt1[i]=0;
8685 rt2[i]=0;
8686 }
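    /* Sketch, compiled out (added for clarity): a concrete decode of one
       R-type word with the same field positions used throughout the switch
       above.  The example word is made up: 0x01094021 is ADDU with rs=8,
       rt=9, rd=8. */
#if 0
    {
      unsigned int w  = 0x01094021;
      unsigned int rs = (w >> 21) & 0x1f;    /* 8           */
      unsigned int rt = (w >> 16) & 0x1f;    /* 9           */
      unsigned int rd = (w >> 11) & 0x1f;    /* 8           */
      unsigned int fn = w & 0x3f;            /* 0x21 = ADDU */
      (void)rs; (void)rt; (void)rd; (void)fn;
    }
#endif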
8687 /* Calculate branch target addresses */
8688 if(type==UJUMP)
8689 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8690 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8691 ba[i]=start+i*4+8; // Ignore never taken branch
8692 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8693 ba[i]=start+i*4+8; // Ignore never taken branch
8694 else if(type==CJUMP||type==SJUMP||type==FJUMP)
8695 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8696 else ba[i]=-1;
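    /* Sketch, compiled out (added for clarity): what the shift pairs above
       compute.  For J/JAL, <<6 then >>4 is simply (source[i]&0x03FFFFFF)<<2,
       pasted under the top nibble of the delay-slot PC.  For conditional
       branches, <<16 then a signed >>14 yields the sign-extended 16-bit
       offset times 4.  The example word and offset are made up. */
#if 0
    {
      unsigned int pc   = 0x80010000;                  /* branch address   */
      unsigned int jins = 0x0c004321;                  /* hypothetical JAL */
      unsigned int jtgt = ((pc+4)&0xF0000000)|((jins&0x03FFFFFF)<<2); /* 0x80010c84 */
      short        boff = -9;                 /* hypothetical BEQ offset   */
      unsigned int btgt = pc+4+(int)boff*4;            /* 0x8000ffe0       */
      (void)jtgt; (void)btgt;
    }
#endif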
26869094 8697#ifdef PCSX
3e535354 8698 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
8699 int do_in_intrp=0;
8700 // branch in delay slot?
8701 if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
 8702 // don't handle it; make the first branch call the interpreter if it's hit
8703 printf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
8704 do_in_intrp=1;
8705 }
8706 // basic load delay detection
8707 else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
8708 int t=(ba[i-1]-start)/4;
8709 if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
8710 // jump target wants DS result - potential load delay effect
8711 printf("load delay @%08x (%08x)\n", addr + i*4, addr);
8712 do_in_intrp=1;
8713 bt[t+1]=1; // expected return from interpreter
8714 }
8715 else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
8716 !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
8717 // v0 overwrite like this is a sign of trouble, bail out
8718 printf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
8719 do_in_intrp=1;
8720 }
8721 }
3e535354 8722 if(do_in_intrp) {
8723 rs1[i-1]=CCREG;
8724 rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
26869094 8725 ba[i-1]=-1;
8726 itype[i-1]=INTCALL;
8727 done=2;
3e535354 8728 i--; // don't compile the DS
26869094 8729 }
3e535354 8730 }
26869094 8731#endif
3e535354 8732 /* Is this the end of the block? */
8733 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
5067f341 8734 if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
1e973cb0 8735 done=2;
57871462 8736 }
8737 else {
8738 if(stop_after_jal) done=1;
8739 // Stop on BREAK
8740 if((source[i+1]&0xfc00003f)==0x0d) done=1;
8741 }
8742 // Don't recompile stuff that's already compiled
8743 if(check_addr(start+i*4+4)) done=1;
8744 // Don't get too close to the limit
8745 if(i>MAXBLOCK/2) done=1;
8746 }
75dec299 8747 if(itype[i]==SYSCALL&&stop_after_jal) done=1;
1e973cb0 8748 if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8749 if(done==2) {
8750 // Does the block continue due to a branch?
8751 for(j=i-1;j>=0;j--)
8752 {
2a706964 8753 if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
1e973cb0 8754 if(ba[j]==start+i*4+4) done=j=0;
8755 if(ba[j]==start+i*4+8) done=j=0;
8756 }
8757 }
75dec299 8758 //assert(i<MAXBLOCK-1);
57871462 8759 if(start+i*4==pagelimit-4) done=1;
8760 assert(start+i*4<pagelimit);
8761 if (i==MAXBLOCK-1) done=1;
8762 // Stop if we're compiling junk
8763 if(itype[i]==NI&&opcode[i]==0x11) {
8764 done=stop_after_jal=1;
8765 printf("Disabled speculative precompilation\n");
8766 }
8767 }
8768 slen=i;
8769 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8770 if(start+i*4==pagelimit) {
8771 itype[i-1]=SPAN;
8772 }
8773 }
8774 assert(slen>0);
8775
8776 /* Pass 2 - Register dependencies and branch targets */
8777
8778 unneeded_registers(0,slen-1,0);
8779
8780 /* Pass 3 - Register allocation */
8781
8782 struct regstat current; // Current register allocations/status
8783 current.is32=1;
8784 current.dirty=0;
8785 current.u=unneeded_reg[0];
8786 current.uu=unneeded_reg_upper[0];
8787 clear_all_regs(current.regmap);
8788 alloc_reg(&current,0,CCREG);
8789 dirty_reg(&current,CCREG);
8790 current.isconst=0;
8791 current.wasconst=0;
8792 int ds=0;
8793 int cc=0;
5194fb95 8794 int hr=-1;
6ebf4adf 8795
8796#ifndef FORCE32
57871462 8797 provisional_32bit();
6ebf4adf 8798#endif
57871462 8799 if((u_int)addr&1) {
8800 // First instruction is delay slot
8801 cc=-1;
8802 bt[1]=1;
8803 ds=1;
8804 unneeded_reg[0]=1;
8805 unneeded_reg_upper[0]=1;
8806 current.regmap[HOST_BTREG]=BTREG;
8807 }
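  /* Sketch, compiled out (added for clarity): bit 0 of the incoming address
     acts as a "block starts in a delay slot" flag, which is why 'start' was
     computed as addr&~3 near the top of this function and bit 0 is tested
     just above.  The example value is made up. */
#if 0
  {
    unsigned int tagged = 0x80010009;        /* hypothetical tagged vaddr  */
    unsigned int pc     = tagged & ~3u;      /* 0x80010008: real address   */
    int in_ds           = tagged & 1;        /* nonzero: starts in a DS    */
    (void)pc; (void)in_ds;
  }
#endif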
8808
8809 for(i=0;i<slen;i++)
8810 {
8811 if(bt[i])
8812 {
8813 int hr;
8814 for(hr=0;hr<HOST_REGS;hr++)
8815 {
8816 // Is this really necessary?
8817 if(current.regmap[hr]==0) current.regmap[hr]=-1;
8818 }
8819 current.isconst=0;
8820 }
8821 if(i>1)
8822 {
8823 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8824 {
8825 if(rs1[i-2]==0||rs2[i-2]==0)
8826 {
8827 if(rs1[i-2]) {
8828 current.is32|=1LL<<rs1[i-2];
8829 int hr=get_reg(current.regmap,rs1[i-2]|64);
8830 if(hr>=0) current.regmap[hr]=-1;
8831 }
8832 if(rs2[i-2]) {
8833 current.is32|=1LL<<rs2[i-2];
8834 int hr=get_reg(current.regmap,rs2[i-2]|64);
8835 if(hr>=0) current.regmap[hr]=-1;
8836 }
8837 }
8838 }
8839 }
6ebf4adf 8840#ifndef FORCE32
57871462 8841 // If something jumps here with 64-bit values
8842 // then promote those registers to 64 bits
8843 if(bt[i])
8844 {
8845 uint64_t temp_is32=current.is32;
8846 for(j=i-1;j>=0;j--)
8847 {
8848 if(ba[j]==start+i*4)
8849 temp_is32&=branch_regs[j].is32;
8850 }
8851 for(j=i;j<slen;j++)
8852 {
8853 if(ba[j]==start+i*4)
8854 //temp_is32=1;
8855 temp_is32&=p32[j];
8856 }
8857 if(temp_is32!=current.is32) {
8858 //printf("dumping 32-bit regs (%x)\n",start+i*4);
311301dc 8859 #ifndef DESTRUCTIVE_WRITEBACK
8860 if(ds)
8861 #endif
57871462 8862 for(hr=0;hr<HOST_REGS;hr++)
8863 {
8864 int r=current.regmap[hr];
8865 if(r>0&&r<64)
8866 {
8867 if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8868 temp_is32|=1LL<<r;
8869 //printf("restore %d\n",r);
8870 }
8871 }
8872 }
57871462 8873 current.is32=temp_is32;
8874 }
8875 }
6ebf4adf 8876#else
24385cae 8877 current.is32=-1LL;
8878#endif
8879
57871462 8880 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8881 regs[i].wasconst=current.isconst;
8882 regs[i].was32=current.is32;
8883 regs[i].wasdirty=current.dirty;
6ebf4adf 8884 #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
57871462 8885 // To change a dirty register from 32 to 64 bits, we must write
8886 // it out during the previous cycle (for branches, 2 cycles)
8887 if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8888 {
8889 uint64_t temp_is32=current.is32;
8890 for(j=i-1;j>=0;j--)
8891 {
8892 if(ba[j]==start+i*4+4)
8893 temp_is32&=branch_regs[j].is32;
8894 }
8895 for(j=i;j<slen;j++)
8896 {
8897 if(ba[j]==start+i*4+4)
8898 //temp_is32=1;
8899 temp_is32&=p32[j];
8900 }
8901 if(temp_is32!=current.is32) {
8902 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8903 for(hr=0;hr<HOST_REGS;hr++)
8904 {
8905 int r=current.regmap[hr];
8906 if(r>0)
8907 {
8908 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8909 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8910 {
8911 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8912 {
8913 //printf("dump %d/r%d\n",hr,r);
8914 current.regmap[hr]=-1;
8915 if(get_reg(current.regmap,r|64)>=0)
8916 current.regmap[get_reg(current.regmap,r|64)]=-1;
8917 }
8918 }
8919 }
8920 }
8921 }
8922 }
8923 }
8924 else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8925 {
8926 uint64_t temp_is32=current.is32;
8927 for(j=i-1;j>=0;j--)
8928 {
8929 if(ba[j]==start+i*4+8)
8930 temp_is32&=branch_regs[j].is32;
8931 }
8932 for(j=i;j<slen;j++)
8933 {
8934 if(ba[j]==start+i*4+8)
8935 //temp_is32=1;
8936 temp_is32&=p32[j];
8937 }
8938 if(temp_is32!=current.is32) {
8939 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8940 for(hr=0;hr<HOST_REGS;hr++)
8941 {
8942 int r=current.regmap[hr];
8943 if(r>0)
8944 {
8945 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8946 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8947 {
8948 //printf("dump %d/r%d\n",hr,r);
8949 current.regmap[hr]=-1;
8950 if(get_reg(current.regmap,r|64)>=0)
8951 current.regmap[get_reg(current.regmap,r|64)]=-1;
8952 }
8953 }
8954 }
8955 }
8956 }
8957 }
8958 #endif
8959 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8960 if(i+1<slen) {
8961 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8962 current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8963 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8964 current.u|=1;
8965 current.uu|=1;
8966 } else {
8967 current.u=1;
8968 current.uu=1;
8969 }
8970 } else {
8971 if(i+1<slen) {
8972 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8973 current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8974 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8975 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8976 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8977 current.u|=1;
8978 current.uu|=1;
8979 } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8980 }
8981 is_ds[i]=ds;
8982 if(ds) {
8983 ds=0; // Skip delay slot, already allocated as part of branch
8984 // ...but we need to alloc it in case something jumps here
8985 if(i+1<slen) {
8986 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8987 current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8988 }else{
8989 current.u=branch_unneeded_reg[i-1];
8990 current.uu=branch_unneeded_reg_upper[i-1];
8991 }
8992 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8993 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8994 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8995 current.u|=1;
8996 current.uu|=1;
8997 struct regstat temp;
8998 memcpy(&temp,&current,sizeof(current));
8999 temp.wasdirty=temp.dirty;
9000 temp.was32=temp.is32;
9001 // TODO: Take into account unconditional branches, as below
9002 delayslot_alloc(&temp,i);
9003 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
9004 regs[i].wasdirty=temp.wasdirty;
9005 regs[i].was32=temp.was32;
9006 regs[i].dirty=temp.dirty;
9007 regs[i].is32=temp.is32;
9008 regs[i].isconst=0;
9009 regs[i].wasconst=0;
9010 current.isconst=0;
9011 // Create entry (branch target) regmap
9012 for(hr=0;hr<HOST_REGS;hr++)
9013 {
9014 int r=temp.regmap[hr];
9015 if(r>=0) {
9016 if(r!=regmap_pre[i][hr]) {
9017 regs[i].regmap_entry[hr]=-1;
9018 }
9019 else
9020 {
9021 if(r<64){
9022 if((current.u>>r)&1) {
9023 regs[i].regmap_entry[hr]=-1;
9024 regs[i].regmap[hr]=-1;
9025 //Don't clear regs in the delay slot as the branch might need them
9026 //current.regmap[hr]=-1;
9027 }else
9028 regs[i].regmap_entry[hr]=r;
9029 }
9030 else {
9031 if((current.uu>>(r&63))&1) {
9032 regs[i].regmap_entry[hr]=-1;
9033 regs[i].regmap[hr]=-1;
9034 //Don't clear regs in the delay slot as the branch might need them
9035 //current.regmap[hr]=-1;
9036 }else
9037 regs[i].regmap_entry[hr]=r;
9038 }
9039 }
9040 } else {
9041 // First instruction expects CCREG to be allocated
9042 if(i==0&&hr==HOST_CCREG)
9043 regs[i].regmap_entry[hr]=CCREG;
9044 else
9045 regs[i].regmap_entry[hr]=-1;
9046 }
9047 }
9048 }
9049 else { // Not delay slot
9050 switch(itype[i]) {
9051 case UJUMP:
9052 //current.isconst=0; // DEBUG
9053 //current.wasconst=0; // DEBUG
9054 //regs[i].wasconst=0; // DEBUG
9055 clear_const(&current,rt1[i]);
9056 alloc_cc(&current,i);
9057 dirty_reg(&current,CCREG);
9058 if (rt1[i]==31) {
9059 alloc_reg(&current,i,31);
9060 dirty_reg(&current,31);
4ef8f67d 9061 //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
9062 //assert(rt1[i+1]!=rt1[i]);
57871462 9063 #ifdef REG_PREFETCH
9064 alloc_reg(&current,i,PTEMP);
9065 #endif
9066 //current.is32|=1LL<<rt1[i];
9067 }
269bb29a 9068 ooo[i]=1;
9069 delayslot_alloc(&current,i+1);
57871462 9070 //current.isconst=0; // DEBUG
9071 ds=1;
9072 //printf("i=%d, isconst=%x\n",i,current.isconst);
9073 break;
9074 case RJUMP:
9075 //current.isconst=0;
9076 //current.wasconst=0;
9077 //regs[i].wasconst=0;
9078 clear_const(&current,rs1[i]);
9079 clear_const(&current,rt1[i]);
9080 alloc_cc(&current,i);
9081 dirty_reg(&current,CCREG);
9082 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
9083 alloc_reg(&current,i,rs1[i]);
5067f341 9084 if (rt1[i]!=0) {
9085 alloc_reg(&current,i,rt1[i]);
9086 dirty_reg(&current,rt1[i]);
68b3faee 9087 assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
076655d1 9088 assert(rt1[i+1]!=rt1[i]);
57871462 9089 #ifdef REG_PREFETCH
9090 alloc_reg(&current,i,PTEMP);
9091 #endif
9092 }
9093 #ifdef USE_MINI_HT
9094 if(rs1[i]==31) { // JALR
9095 alloc_reg(&current,i,RHASH);
9096 #ifndef HOST_IMM_ADDR32
9097 alloc_reg(&current,i,RHTBL);
9098 #endif
9099 }
9100 #endif
9101 delayslot_alloc(&current,i+1);
9102 } else {
9103 // The delay slot overwrites our source register,
9104 // allocate a temporary register to hold the old value.
9105 current.isconst=0;
9106 current.wasconst=0;
9107 regs[i].wasconst=0;
9108 delayslot_alloc(&current,i+1);
9109 current.isconst=0;
9110 alloc_reg(&current,i,RTEMP);
9111 }
9112 //current.isconst=0; // DEBUG
e1190b87 9113 ooo[i]=1;
57871462 9114 ds=1;
9115 break;
9116 case CJUMP:
9117 //current.isconst=0;
9118 //current.wasconst=0;
9119 //regs[i].wasconst=0;
9120 clear_const(&current,rs1[i]);
9121 clear_const(&current,rs2[i]);
9122 if((opcode[i]&0x3E)==4) // BEQ/BNE
9123 {
9124 alloc_cc(&current,i);
9125 dirty_reg(&current,CCREG);
9126 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9127 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9128 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9129 {
9130 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9131 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9132 }
9133 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
9134 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
9135 // The delay slot overwrites one of our conditions.
9136 // Allocate the branch condition registers instead.
57871462 9137 current.isconst=0;
9138 current.wasconst=0;
9139 regs[i].wasconst=0;
9140 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9141 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9142 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9143 {
9144 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9145 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9146 }
9147 }
e1190b87 9148 else
9149 {
9150 ooo[i]=1;
9151 delayslot_alloc(&current,i+1);
9152 }
57871462 9153 }
9154 else
9155 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
9156 {
9157 alloc_cc(&current,i);
9158 dirty_reg(&current,CCREG);
9159 alloc_reg(&current,i,rs1[i]);
9160 if(!(current.is32>>rs1[i]&1))
9161 {
9162 alloc_reg64(&current,i,rs1[i]);
9163 }
9164 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
9165 // The delay slot overwrites one of our conditions.
9166 // Allocate the branch condition registers instead.
57871462 9167 current.isconst=0;
9168 current.wasconst=0;
9169 regs[i].wasconst=0;
9170 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9171 if(!((current.is32>>rs1[i])&1))
9172 {
9173 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9174 }
9175 }
e1190b87 9176 else
9177 {
9178 ooo[i]=1;
9179 delayslot_alloc(&current,i+1);
9180 }
57871462 9181 }
9182 else
9183 // Don't alloc the delay slot yet because we might not execute it
9184 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
9185 {
9186 current.isconst=0;
9187 current.wasconst=0;
9188 regs[i].wasconst=0;
9189 alloc_cc(&current,i);
9190 dirty_reg(&current,CCREG);
9191 alloc_reg(&current,i,rs1[i]);
9192 alloc_reg(&current,i,rs2[i]);
9193 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9194 {
9195 alloc_reg64(&current,i,rs1[i]);
9196 alloc_reg64(&current,i,rs2[i]);
9197 }
9198 }
9199 else
9200 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
9201 {
9202 current.isconst=0;
9203 current.wasconst=0;
9204 regs[i].wasconst=0;
9205 alloc_cc(&current,i);
9206 dirty_reg(&current,CCREG);
9207 alloc_reg(&current,i,rs1[i]);
9208 if(!(current.is32>>rs1[i]&1))
9209 {
9210 alloc_reg64(&current,i,rs1[i]);
9211 }
9212 }
9213 ds=1;
9214 //current.isconst=0;
9215 break;
9216 case SJUMP:
9217 //current.isconst=0;
9218 //current.wasconst=0;
9219 //regs[i].wasconst=0;
9220 clear_const(&current,rs1[i]);
9221 clear_const(&current,rt1[i]);
9222 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
9223 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
9224 {
9225 alloc_cc(&current,i);
9226 dirty_reg(&current,CCREG);
9227 alloc_reg(&current,i,rs1[i]);
9228 if(!(current.is32>>rs1[i]&1))
9229 {
9230 alloc_reg64(&current,i,rs1[i]);
9231 }
9232 if (rt1[i]==31) { // BLTZAL/BGEZAL
9233 alloc_reg(&current,i,31);
9234 dirty_reg(&current,31);
57871462 9235 //#ifdef REG_PREFETCH
9236 //alloc_reg(&current,i,PTEMP);
9237 //#endif
9238 //current.is32|=1LL<<rt1[i];
9239 }
e1190b87 9240 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
9241 ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
57871462 9242 // Allocate the branch condition registers instead.
57871462 9243 current.isconst=0;
9244 current.wasconst=0;
9245 regs[i].wasconst=0;
9246 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9247 if(!((current.is32>>rs1[i])&1))
9248 {
9249 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9250 }
9251 }
e1190b87 9252 else
9253 {
9254 ooo[i]=1;
9255 delayslot_alloc(&current,i+1);
9256 }
57871462 9257 }
9258 else
9259 // Don't alloc the delay slot yet because we might not execute it
9260 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9261 {
9262 current.isconst=0;
9263 current.wasconst=0;
9264 regs[i].wasconst=0;
9265 alloc_cc(&current,i);
9266 dirty_reg(&current,CCREG);
9267 alloc_reg(&current,i,rs1[i]);
9268 if(!(current.is32>>rs1[i]&1))
9269 {
9270 alloc_reg64(&current,i,rs1[i]);
9271 }
9272 }
9273 ds=1;
9274 //current.isconst=0;
9275 break;
9276 case FJUMP:
9277 current.isconst=0;
9278 current.wasconst=0;
9279 regs[i].wasconst=0;
9280 if(likely[i]==0) // BC1F/BC1T
9281 {
9282 // TODO: Theoretically we can run out of registers here on x86.
9283 // The delay slot can allocate up to six, and we need to check
9284 // CSREG before executing the delay slot. Possibly we can drop
9285 // the cycle count and then reload it after checking that the
9286        // FPU is in a usable state, or simply not do out-of-order execution.
9287 alloc_cc(&current,i);
9288 dirty_reg(&current,CCREG);
9289 alloc_reg(&current,i,FSREG);
9290 alloc_reg(&current,i,CSREG);
9291 if(itype[i+1]==FCOMP) {
9292 // The delay slot overwrites the branch condition.
9293 // Allocate the branch condition registers instead.
57871462 9294 alloc_cc(&current,i);
9295 dirty_reg(&current,CCREG);
9296 alloc_reg(&current,i,CSREG);
9297 alloc_reg(&current,i,FSREG);
9298 }
9299 else {
e1190b87 9300 ooo[i]=1;
57871462 9301 delayslot_alloc(&current,i+1);
9302 alloc_reg(&current,i+1,CSREG);
9303 }
9304 }
9305 else
9306 // Don't alloc the delay slot yet because we might not execute it
9307 if(likely[i]) // BC1FL/BC1TL
9308 {
9309 alloc_cc(&current,i);
9310 dirty_reg(&current,CCREG);
9311 alloc_reg(&current,i,CSREG);
9312 alloc_reg(&current,i,FSREG);
9313 }
9314 ds=1;
9315 current.isconst=0;
9316 break;
9317 case IMM16:
9318 imm16_alloc(&current,i);
9319 break;
9320 case LOAD:
9321 case LOADLR:
9322 load_alloc(&current,i);
9323 break;
9324 case STORE:
9325 case STORELR:
9326 store_alloc(&current,i);
9327 break;
9328 case ALU:
9329 alu_alloc(&current,i);
9330 break;
9331 case SHIFT:
9332 shift_alloc(&current,i);
9333 break;
9334 case MULTDIV:
9335 multdiv_alloc(&current,i);
9336 break;
9337 case SHIFTIMM:
9338 shiftimm_alloc(&current,i);
9339 break;
9340 case MOV:
9341 mov_alloc(&current,i);
9342 break;
9343 case COP0:
9344 cop0_alloc(&current,i);
9345 break;
9346 case COP1:
b9b61529 9347 case COP2:
57871462 9348 cop1_alloc(&current,i);
9349 break;
9350 case C1LS:
9351 c1ls_alloc(&current,i);
9352 break;
b9b61529 9353 case C2LS:
9354 c2ls_alloc(&current,i);
9355 break;
9356 case C2OP:
9357 c2op_alloc(&current,i);
9358 break;
57871462 9359 case FCONV:
9360 fconv_alloc(&current,i);
9361 break;
9362 case FLOAT:
9363 float_alloc(&current,i);
9364 break;
9365 case FCOMP:
9366 fcomp_alloc(&current,i);
9367 break;
9368 case SYSCALL:
7139f3c8 9369 case HLECALL:
1e973cb0 9370 case INTCALL:
57871462 9371 syscall_alloc(&current,i);
9372 break;
9373 case SPAN:
9374 pagespan_alloc(&current,i);
9375 break;
9376 }
9377
9378 // Drop the upper half of registers that have become 32-bit
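    // Here is32 marks guest registers whose value is a sign-extended 32-bit
    // quantity, and uu is the "upper half unneeded" mask: a destination that
    // becomes 32-bit no longer needs its old upper half, while the upper
    // halves of sources (us1/us2) and of anything a needed result depends on
    // (dep1/dep2) stay needed.  Bit 0 remains set since r0's upper half is
    // never needed.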
9379 current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9380 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9381 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9382 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9383 current.uu|=1;
9384 } else {
9385 current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9386 current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9387 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9388 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9389 current.uu|=1;
9390 }
9391
9392 // Create entry (branch target) regmap
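    // regmap_entry[hr] records which guest register is expected in host
    // register hr at the start of this instruction, i.e. what a branch
    // jumping here has to provide; regmap[hr] is the mapping after this
    // instruction's own allocation.  Newly created values (temporaries and
    // registers with no previous mapping) and unneeded registers get -1,
    // so they impose no requirement on branch targets.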
9393 for(hr=0;hr<HOST_REGS;hr++)
9394 {
9395 int r,or,er;
9396 r=current.regmap[hr];
9397 if(r>=0) {
9398 if(r!=regmap_pre[i][hr]) {
9399 // TODO: delay slot (?)
9400 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9401 if(or<0||(r&63)>=TEMPREG){
9402 regs[i].regmap_entry[hr]=-1;
9403 }
9404 else
9405 {
9406 // Just move it to a different register
9407 regs[i].regmap_entry[hr]=r;
9408 // If it was dirty before, it's still dirty
9409 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9410 }
9411 }
9412 else
9413 {
9414 // Unneeded
9415 if(r==0){
9416 regs[i].regmap_entry[hr]=0;
9417 }
9418 else
9419 if(r<64){
9420 if((current.u>>r)&1) {
9421 regs[i].regmap_entry[hr]=-1;
9422 //regs[i].regmap[hr]=-1;
9423 current.regmap[hr]=-1;
9424 }else
9425 regs[i].regmap_entry[hr]=r;
9426 }
9427 else {
9428 if((current.uu>>(r&63))&1) {
9429 regs[i].regmap_entry[hr]=-1;
9430 //regs[i].regmap[hr]=-1;
9431 current.regmap[hr]=-1;
9432 }else
9433 regs[i].regmap_entry[hr]=r;
9434 }
9435 }
9436 } else {
9437 // Branches expect CCREG to be allocated at the target
9438 if(regmap_pre[i][hr]==CCREG)
9439 regs[i].regmap_entry[hr]=CCREG;
9440 else
9441 regs[i].regmap_entry[hr]=-1;
9442 }
9443 }
9444 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9445 }
9446 /* Branch post-alloc */
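    // The instruction at i-1 was a branch and i is its delay slot.
    // branch_regs[i-1] captures the register state assumed on the taken
    // path: the branch's own sources are kept live, CCREG is allocated for
    // the cycle count, and for "likely" branches the delay slot is allocated
    // on the taken side only while the not-taken path in 'current' just
    // keeps CCREG.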
9447 if(i>0)
9448 {
9449 current.was32=current.is32;
9450 current.wasdirty=current.dirty;
9451 switch(itype[i-1]) {
9452 case UJUMP:
9453 memcpy(&branch_regs[i-1],&current,sizeof(current));
9454 branch_regs[i-1].isconst=0;
9455 branch_regs[i-1].wasconst=0;
9456 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9457 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9458 alloc_cc(&branch_regs[i-1],i-1);
9459 dirty_reg(&branch_regs[i-1],CCREG);
9460 if(rt1[i-1]==31) { // JAL
9461 alloc_reg(&branch_regs[i-1],i-1,31);
9462 dirty_reg(&branch_regs[i-1],31);
9463 branch_regs[i-1].is32|=1LL<<31;
9464 }
9465 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9466 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9467 break;
9468 case RJUMP:
9469 memcpy(&branch_regs[i-1],&current,sizeof(current));
9470 branch_regs[i-1].isconst=0;
9471 branch_regs[i-1].wasconst=0;
9472 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9473 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9474 alloc_cc(&branch_regs[i-1],i-1);
9475 dirty_reg(&branch_regs[i-1],CCREG);
9476 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
5067f341 9477 if(rt1[i-1]!=0) { // JALR
9478 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9479 dirty_reg(&branch_regs[i-1],rt1[i-1]);
9480 branch_regs[i-1].is32|=1LL<<rt1[i-1];
57871462 9481 }
9482 #ifdef USE_MINI_HT
9483 if(rs1[i-1]==31) { // JALR
9484 alloc_reg(&branch_regs[i-1],i-1,RHASH);
9485 #ifndef HOST_IMM_ADDR32
9486 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9487 #endif
9488 }
9489 #endif
9490 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9491 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9492 break;
9493 case CJUMP:
9494 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9495 {
9496 alloc_cc(&current,i-1);
9497 dirty_reg(&current,CCREG);
9498 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9499 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9500 // The delay slot overwrote one of our conditions
9501 // Delay slot goes after the test (in order)
9502 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9503 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9504 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9505 current.u|=1;
9506 current.uu|=1;
9507 delayslot_alloc(&current,i);
9508 current.isconst=0;
9509 }
9510 else
9511 {
9512 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9513 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9514 // Alloc the branch condition registers
9515 if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9516 if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9517 if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9518 {
9519 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9520 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9521 }
9522 }
9523 memcpy(&branch_regs[i-1],&current,sizeof(current));
9524 branch_regs[i-1].isconst=0;
9525 branch_regs[i-1].wasconst=0;
9526 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9527 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9528 }
9529 else
9530 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9531 {
9532 alloc_cc(&current,i-1);
9533 dirty_reg(&current,CCREG);
9534 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9535 // The delay slot overwrote the branch condition
9536 // Delay slot goes after the test (in order)
9537 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9538 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9539 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9540 current.u|=1;
9541 current.uu|=1;
9542 delayslot_alloc(&current,i);
9543 current.isconst=0;
9544 }
9545 else
9546 {
9547 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9548 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9549 // Alloc the branch condition register
9550 alloc_reg(&current,i-1,rs1[i-1]);
9551 if(!(current.is32>>rs1[i-1]&1))
9552 {
9553 alloc_reg64(&current,i-1,rs1[i-1]);
9554 }
9555 }
9556 memcpy(&branch_regs[i-1],&current,sizeof(current));
9557 branch_regs[i-1].isconst=0;
9558 branch_regs[i-1].wasconst=0;
9559 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9560 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9561 }
9562 else
9563 // Alloc the delay slot in case the branch is taken
9564 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9565 {
9566 memcpy(&branch_regs[i-1],&current,sizeof(current));
9567 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9568 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9569 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9570 alloc_cc(&branch_regs[i-1],i);
9571 dirty_reg(&branch_regs[i-1],CCREG);
9572 delayslot_alloc(&branch_regs[i-1],i);
9573 branch_regs[i-1].isconst=0;
9574 alloc_reg(&current,i,CCREG); // Not taken path
9575 dirty_reg(&current,CCREG);
9576 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9577 }
9578 else
9579 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9580 {
9581 memcpy(&branch_regs[i-1],&current,sizeof(current));
9582 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9583 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9584 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9585 alloc_cc(&branch_regs[i-1],i);
9586 dirty_reg(&branch_regs[i-1],CCREG);
9587 delayslot_alloc(&branch_regs[i-1],i);
9588 branch_regs[i-1].isconst=0;
9589 alloc_reg(&current,i,CCREG); // Not taken path
9590 dirty_reg(&current,CCREG);
9591 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9592 }
9593 break;
9594 case SJUMP:
9595 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9596 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9597 {
9598 alloc_cc(&current,i-1);
9599 dirty_reg(&current,CCREG);
9600 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9601 // The delay slot overwrote the branch condition
9602 // Delay slot goes after the test (in order)
9603 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9604 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9605 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9606 current.u|=1;
9607 current.uu|=1;
9608 delayslot_alloc(&current,i);
9609 current.isconst=0;
9610 }
9611 else
9612 {
9613 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9614 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9615 // Alloc the branch condition register
9616 alloc_reg(&current,i-1,rs1[i-1]);
9617 if(!(current.is32>>rs1[i-1]&1))
9618 {
9619 alloc_reg64(&current,i-1,rs1[i-1]);
9620 }
9621 }
9622 memcpy(&branch_regs[i-1],&current,sizeof(current));
9623 branch_regs[i-1].isconst=0;
9624 branch_regs[i-1].wasconst=0;
9625 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9626 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9627 }
9628 else
9629 // Alloc the delay slot in case the branch is taken
9630 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9631 {
9632 memcpy(&branch_regs[i-1],&current,sizeof(current));
9633 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9634 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9635 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9636 alloc_cc(&branch_regs[i-1],i);
9637 dirty_reg(&branch_regs[i-1],CCREG);
9638 delayslot_alloc(&branch_regs[i-1],i);
9639 branch_regs[i-1].isconst=0;
9640 alloc_reg(&current,i,CCREG); // Not taken path
9641 dirty_reg(&current,CCREG);
9642 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9643 }
9644 // FIXME: BLTZAL/BGEZAL
9645 if(opcode2[i-1]&0x10) { // BxxZAL
9646 alloc_reg(&branch_regs[i-1],i-1,31);
9647 dirty_reg(&branch_regs[i-1],31);
9648 branch_regs[i-1].is32|=1LL<<31;
9649 }
9650 break;
9651 case FJUMP:
9652 if(likely[i-1]==0) // BC1F/BC1T
9653 {
9654 alloc_cc(&current,i-1);
9655 dirty_reg(&current,CCREG);
9656 if(itype[i]==FCOMP) {
9657 // The delay slot overwrote the branch condition
9658 // Delay slot goes after the test (in order)
9659 delayslot_alloc(&current,i);
9660 current.isconst=0;
9661 }
9662 else
9663 {
9664 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9665 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9666 // Alloc the branch condition register
9667 alloc_reg(&current,i-1,FSREG);
9668 }
9669 memcpy(&branch_regs[i-1],&current,sizeof(current));
9670 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9671 }
9672 else // BC1FL/BC1TL
9673 {
9674 // Alloc the delay slot in case the branch is taken
9675 memcpy(&branch_regs[i-1],&current,sizeof(current));
9676 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9677 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9678 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9679 alloc_cc(&branch_regs[i-1],i);
9680 dirty_reg(&branch_regs[i-1],CCREG);
9681 delayslot_alloc(&branch_regs[i-1],i);
9682 branch_regs[i-1].isconst=0;
9683 alloc_reg(&current,i,CCREG); // Not taken path
9684 dirty_reg(&current,CCREG);
9685 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9686 }
9687 break;
9688 }
9689
9690 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9691 {
9692 if(rt1[i-1]==31) // JAL/JALR
9693 {
9694 // Subroutine call will return here, don't alloc any registers
9695 current.is32=1;
9696 current.dirty=0;
9697 clear_all_regs(current.regmap);
9698 alloc_reg(&current,i,CCREG);
9699 dirty_reg(&current,CCREG);
9700 }
9701 else if(i+1<slen)
9702 {
9703 // Internal branch will jump here, match registers to caller
9704 current.is32=0x3FFFFFFFFLL;
9705 current.dirty=0;
9706 clear_all_regs(current.regmap);
9707 alloc_reg(&current,i,CCREG);
9708 dirty_reg(&current,CCREG);
9709 for(j=i-1;j>=0;j--)
9710 {
9711 if(ba[j]==start+i*4+4) {
9712 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9713 current.is32=branch_regs[j].is32;
9714 current.dirty=branch_regs[j].dirty;
9715 break;
9716 }
9717 }
9718 while(j>=0) {
9719 if(ba[j]==start+i*4+4) {
9720 for(hr=0;hr<HOST_REGS;hr++) {
9721 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9722 current.regmap[hr]=-1;
9723 }
9724 current.is32&=branch_regs[j].is32;
9725 current.dirty&=branch_regs[j].dirty;
9726 }
9727 }
9728 j--;
9729 }
9730 }
9731 }
9732 }
9733
9734 // Count cycles in between branches
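  // ccadj[i] is the (pre-CLOCK_DIVIDER) cycle count accumulated since the
  // last branch, sampled before this instruction is charged.  The counter
  // resets at the instruction following a branch and at syscalls; for PCSX,
  // stores and C1LS are charged 2 cycles, C2LS 4, everything else 1.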
9735 ccadj[i]=cc;
7139f3c8 9736 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
57871462 9737 {
9738 cc=0;
9739 }
fb407447 9740#ifdef PCSX
9741 else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
9742 {
9743 cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9744 }
9745 else if(itype[i]==C2LS)
9746 {
9747 cc+=4;
9748 }
9749#endif
57871462 9750 else
9751 {
9752 cc++;
9753 }
9754
9755 flush_dirty_uppers(&current);
9756 if(!is_ds[i]) {
9757 regs[i].is32=current.is32;
9758 regs[i].dirty=current.dirty;
9759 regs[i].isconst=current.isconst;
9760 memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9761 }
9762 for(hr=0;hr<HOST_REGS;hr++) {
9763 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9764 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9765 regs[i].wasconst&=~(1<<hr);
9766 }
9767 }
9768 }
9769 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9770 }
9771
9772 /* Pass 4 - Cull unused host registers */
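  // Backward scan.  nr is a bitmask of host registers whose contents are
  // still needed: sources of this instruction (and of a branch's delay
  // slot), the cycle counter at branches and branch targets, and INVCP
  // around stores stay marked; registers holding overwritten or unneeded
  // guest registers are cleared.  needed_reg[i] saves the result and
  // anything outside it is deallocated below.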
9773
9774 uint64_t nr=0;
9775
9776 for (i=slen-1;i>=0;i--)
9777 {
9778 int hr;
9779 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9780 {
9781 if(ba[i]<start || ba[i]>=(start+slen*4))
9782 {
9783 // Branch out of this block, don't need anything
9784 nr=0;
9785 }
9786 else
9787 {
9788 // Internal branch
9789 // Need whatever matches the target
9790 nr=0;
9791 int t=(ba[i]-start)>>2;
9792 for(hr=0;hr<HOST_REGS;hr++)
9793 {
9794 if(regs[i].regmap_entry[hr]>=0) {
9795 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9796 }
9797 }
9798 }
9799 // Conditional branch may need registers for following instructions
9800 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9801 {
9802 if(i<slen-2) {
9803 nr|=needed_reg[i+2];
9804 for(hr=0;hr<HOST_REGS;hr++)
9805 {
9806 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9807 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9808 }
9809 }
9810 }
9811 // Don't need stuff which is overwritten
f5955059 9812 //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9813 //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
57871462 9814 // Merge in delay slot
9815 for(hr=0;hr<HOST_REGS;hr++)
9816 {
9817 if(!likely[i]) {
9818 // These are overwritten unless the branch is "likely"
9819 // and the delay slot is nullified if not taken
9820 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9821 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9822 }
9823 if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9824 if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9825 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9826 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9827 if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9828 if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9829 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9830 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9831 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9832 if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9833 if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9834 }
9835 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9836 if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9837 if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9838 }
b9b61529 9839 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
57871462 9840 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9841 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9842 }
9843 }
9844 }
1e973cb0 9845 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 9846 {
9847 // SYSCALL instruction (software interrupt)
9848 nr=0;
9849 }
9850 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9851 {
9852 // ERET instruction (return from interrupt)
9853 nr=0;
9854 }
9855 else // Non-branch
9856 {
9857 if(i<slen-1) {
9858 for(hr=0;hr<HOST_REGS;hr++) {
9859 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9860 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9861 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9862 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9863 }
9864 }
9865 }
9866 for(hr=0;hr<HOST_REGS;hr++)
9867 {
9868 // Overwritten registers are not needed
9869 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9870 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9871 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9872 // Source registers are needed
9873 if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9874 if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9875 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9876 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9877 if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9878 if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9879 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9880 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9881 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9882 if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9883 if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9884 }
9885 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9886 if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9887 if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9888 }
b9b61529 9889 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
57871462 9890 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9891 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9892 }
9893 // Don't store a register immediately after writing it,
9894      // as doing so may prevent dual-issue.
9895 // But do so if this is a branch target, otherwise we
9896 // might have to load the register before the branch.
9897 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9898 if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9899 (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9900 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9901 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9902 }
9903 if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9904 (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9905 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9906 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9907 }
9908 }
9909 }
9910 // Cycle count is needed at branches. Assume it is needed at the target too.
9911 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9912 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9913 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9914 }
9915 // Save it
9916 needed_reg[i]=nr;
9917
9918 // Deallocate unneeded registers
9919 for(hr=0;hr<HOST_REGS;hr++)
9920 {
9921 if(!((nr>>hr)&1)) {
9922 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9923 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9924 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9925 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9926 {
9927 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9928 {
9929 if(likely[i]) {
9930 regs[i].regmap[hr]=-1;
9931 regs[i].isconst&=~(1<<hr);
79c75f1b 9932 if(i<slen-2) {
9933 regmap_pre[i+2][hr]=-1;
9934 regs[i+2].wasconst&=~(1<<hr);
9935 }
57871462 9936 }
9937 }
9938 }
9939 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9940 {
9941 int d1=0,d2=0,map=0,temp=0;
9942 if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9943 {
9944 d1=dep1[i+1];
9945 d2=dep2[i+1];
9946 }
9947 if(using_tlb) {
9948 if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9949 itype[i+1]==STORE || itype[i+1]==STORELR ||
b9b61529 9950 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9951 map=TLREG;
9952 } else
b9b61529 9953 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9954 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 9955 map=INVCP;
9956 }
9957 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
b9b61529 9958 itype[i+1]==C1LS || itype[i+1]==C2LS)
57871462 9959 temp=FTEMP;
9960 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9961 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9962 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9963 (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9964 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9965 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9966 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9967 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9968 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9969 regs[i].regmap[hr]!=map )
9970 {
9971 regs[i].regmap[hr]=-1;
9972 regs[i].isconst&=~(1<<hr);
9973 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9974 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9975 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9976 (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9977 (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9978 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9979 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9980 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9981 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9982 branch_regs[i].regmap[hr]!=map)
9983 {
9984 branch_regs[i].regmap[hr]=-1;
9985 branch_regs[i].regmap_entry[hr]=-1;
9986 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9987 {
9988 if(!likely[i]&&i<slen-2) {
9989 regmap_pre[i+2][hr]=-1;
79c75f1b 9990 regs[i+2].wasconst&=~(1<<hr);
57871462 9991 }
9992 }
9993 }
9994 }
9995 }
9996 else
9997 {
9998 // Non-branch
9999 if(i>0)
10000 {
10001 int d1=0,d2=0,map=-1,temp=-1;
10002 if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
10003 {
10004 d1=dep1[i];
10005 d2=dep2[i];
10006 }
10007 if(using_tlb) {
10008 if(itype[i]==LOAD || itype[i]==LOADLR ||
10009 itype[i]==STORE || itype[i]==STORELR ||
b9b61529 10010 itype[i]==C1LS || itype[i]==C2LS)
57871462 10011 map=TLREG;
b9b61529 10012 } else if(itype[i]==STORE || itype[i]==STORELR ||
10013 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
57871462 10014 map=INVCP;
10015 }
10016 if(itype[i]==LOADLR || itype[i]==STORELR ||
b9b61529 10017 itype[i]==C1LS || itype[i]==C2LS)
57871462 10018 temp=FTEMP;
10019 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10020 (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
10021 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10022 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
10023 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
10024 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
10025 {
10026 if(i<slen-1&&!is_ds[i]) {
10027 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
10028 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
10029 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
10030 {
10031 printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
10032 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
10033 }
10034 regmap_pre[i+1][hr]=-1;
10035 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
79c75f1b 10036 regs[i+1].wasconst&=~(1<<hr);
57871462 10037 }
10038 regs[i].regmap[hr]=-1;
10039 regs[i].isconst&=~(1<<hr);
10040 }
10041 }
10042 }
10043 }
10044 }
10045 }
10046
10047 /* Pass 5 - Pre-allocate registers */
10048
10049 // If a register is allocated during a loop, try to allocate it for the
10050 // entire loop, if possible. This avoids loading/storing registers
10051 // inside of the loop.
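  // For example (hypothetical guest code), in a loop such as
  //   loop: lw    r2,0(r4)
  //         addu  r3,r3,r2
  //         addiu r4,r4,4
  //         bne   r4,r5,loop
  //         nop
  // once r4 is given a host register somewhere in the body, extending that
  // mapping back to the branch target lets every iteration reuse it instead
  // of reloading and spilling it around the backward branch.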
198df76f 10052
57871462 10053 signed char f_regmap[HOST_REGS];
10054 clear_all_regs(f_regmap);
10055 for(i=0;i<slen-1;i++)
10056 {
10057 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10058 {
10059 if(ba[i]>=start && ba[i]<(start+i*4))
10060 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
10061 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
10062 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
10063 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
b9b61529 10064 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
10065 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
57871462 10066 {
10067 int t=(ba[i]-start)>>2;
10068 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
198df76f 10069 if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
57871462 10070 for(hr=0;hr<HOST_REGS;hr++)
10071 {
10072 if(regs[i].regmap[hr]>64) {
10073 if(!((regs[i].dirty>>hr)&1))
10074 f_regmap[hr]=regs[i].regmap[hr];
10075 else f_regmap[hr]=-1;
10076 }
b372a952 10077 else if(regs[i].regmap[hr]>=0) {
10078 if(f_regmap[hr]!=regs[i].regmap[hr]) {
10079 // dealloc old register
10080 int n;
10081 for(n=0;n<HOST_REGS;n++)
10082 {
10083 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10084 }
10085 // and alloc new one
10086 f_regmap[hr]=regs[i].regmap[hr];
10087 }
10088 }
57871462 10089 if(branch_regs[i].regmap[hr]>64) {
10090 if(!((branch_regs[i].dirty>>hr)&1))
10091 f_regmap[hr]=branch_regs[i].regmap[hr];
10092 else f_regmap[hr]=-1;
10093 }
b372a952 10094 else if(branch_regs[i].regmap[hr]>=0) {
10095 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
10096 // dealloc old register
10097 int n;
10098 for(n=0;n<HOST_REGS;n++)
10099 {
10100 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
10101 }
10102 // and alloc new one
10103 f_regmap[hr]=branch_regs[i].regmap[hr];
10104 }
10105 }
e1190b87 10106 if(ooo[i]) {
10107 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
10108 f_regmap[hr]=branch_regs[i].regmap[hr];
10109 }else{
10110 if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
57871462 10111 f_regmap[hr]=branch_regs[i].regmap[hr];
10112 }
10113 // Avoid dirty->clean transition
e1190b87 10114 #ifdef DESTRUCTIVE_WRITEBACK
57871462 10115 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
e1190b87 10116 #endif
10117 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
10118          // case above, but it's always a good idea. We can't hoist the
10119 // load if the register was already allocated, so there's no point
10120 // wasting time analyzing most of these cases. It only "succeeds"
10121 // when the mapping was different and the load can be replaced with
10122 // a mov, which is of negligible benefit. So such cases are
10123 // skipped below.
57871462 10124 if(f_regmap[hr]>0) {
198df76f 10125 if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
57871462 10126 int r=f_regmap[hr];
10127 for(j=t;j<=i;j++)
10128 {
10129 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10130 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
10131 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
10132 if(r>63) {
10133 // NB This can exclude the case where the upper-half
10134 // register is lower numbered than the lower-half
10135 // register. Not sure if it's worth fixing...
10136 if(get_reg(regs[j].regmap,r&63)<0) break;
e1190b87 10137 if(get_reg(regs[j].regmap_entry,r&63)<0) break;
57871462 10138 if(regs[j].is32&(1LL<<(r&63))) break;
10139 }
10140 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
10141 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10142 int k;
10143 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
10144 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
10145 if(r>63) {
10146 if(get_reg(regs[i].regmap,r&63)<0) break;
10147 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
10148 }
10149 k=i;
10150 while(k>1&&regs[k-1].regmap[hr]==-1) {
e1190b87 10151 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10152 //printf("no free regs for store %x\n",start+(k-1)*4);
10153 break;
57871462 10154 }
57871462 10155 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
10156 //printf("no-match due to different register\n");
10157 break;
10158 }
10159 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
10160 //printf("no-match due to branch\n");
10161 break;
10162 }
10163 // call/ret fast path assumes no registers allocated
198df76f 10164 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
57871462 10165 break;
10166 }
10167 if(r>63) {
10168 // NB This can exclude the case where the upper-half
10169 // register is lower numbered than the lower-half
10170 // register. Not sure if it's worth fixing...
10171 if(get_reg(regs[k-1].regmap,r&63)<0) break;
10172 if(regs[k-1].is32&(1LL<<(r&63))) break;
10173 }
10174 k--;
10175 }
10176 if(i<slen-1) {
10177 if((regs[k].is32&(1LL<<f_regmap[hr]))!=
10178 (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
10179 //printf("bad match after branch\n");
10180 break;
10181 }
10182 }
10183 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
10184 //printf("Extend r%d, %x ->\n",hr,start+k*4);
10185 while(k<i) {
10186 regs[k].regmap_entry[hr]=f_regmap[hr];
10187 regs[k].regmap[hr]=f_regmap[hr];
10188 regmap_pre[k+1][hr]=f_regmap[hr];
10189 regs[k].wasdirty&=~(1<<hr);
10190 regs[k].dirty&=~(1<<hr);
10191 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
10192 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
10193 regs[k].wasconst&=~(1<<hr);
10194 regs[k].isconst&=~(1<<hr);
10195 k++;
10196 }
10197 }
10198 else {
10199 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
10200 break;
10201 }
10202 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
10203 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
10204 //printf("OK fill %x (r%d)\n",start+i*4,hr);
10205 regs[i].regmap_entry[hr]=f_regmap[hr];
10206 regs[i].regmap[hr]=f_regmap[hr];
10207 regs[i].wasdirty&=~(1<<hr);
10208 regs[i].dirty&=~(1<<hr);
10209 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
10210 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
10211 regs[i].wasconst&=~(1<<hr);
10212 regs[i].isconst&=~(1<<hr);
10213 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
10214 branch_regs[i].wasdirty&=~(1<<hr);
10215 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
10216 branch_regs[i].regmap[hr]=f_regmap[hr];
10217 branch_regs[i].dirty&=~(1<<hr);
10218 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
10219 branch_regs[i].wasconst&=~(1<<hr);
10220 branch_regs[i].isconst&=~(1<<hr);
10221 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
10222 regmap_pre[i+2][hr]=f_regmap[hr];
10223 regs[i+2].wasdirty&=~(1<<hr);
10224 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
10225 assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
10226 (regs[i+2].was32&(1LL<<f_regmap[hr])));
10227 }
10228 }
10229 }
10230 for(k=t;k<j;k++) {
e1190b87 10231 // Alloc register clean at beginning of loop,
10232 // but may dirty it in pass 6
57871462 10233 regs[k].regmap_entry[hr]=f_regmap[hr];
10234 regs[k].regmap[hr]=f_regmap[hr];
57871462 10235 regs[k].dirty&=~(1<<hr);
10236 regs[k].wasconst&=~(1<<hr);
10237 regs[k].isconst&=~(1<<hr);
e1190b87 10238 if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
10239 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
10240 branch_regs[k].regmap[hr]=f_regmap[hr];
10241 branch_regs[k].dirty&=~(1<<hr);
10242 branch_regs[k].wasconst&=~(1<<hr);
10243 branch_regs[k].isconst&=~(1<<hr);
10244 if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
10245 regmap_pre[k+2][hr]=f_regmap[hr];
10246 regs[k+2].wasdirty&=~(1<<hr);
10247 assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
10248 (regs[k+2].was32&(1LL<<f_regmap[hr])));
10249 }
10250 }
10251 else
10252 {
10253 regmap_pre[k+1][hr]=f_regmap[hr];
10254 regs[k+1].wasdirty&=~(1<<hr);
10255 }
57871462 10256 }
10257 if(regs[j].regmap[hr]==f_regmap[hr])
10258 regs[j].regmap_entry[hr]=f_regmap[hr];
10259 break;
10260 }
10261 if(j==i) break;
10262 if(regs[j].regmap[hr]>=0)
10263 break;
10264 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10265 //printf("no-match due to different register\n");
10266 break;
10267 }
10268 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10269 //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10270 break;
10271 }
e1190b87 10272 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10273 {
10274 // Stop on unconditional branch
10275 break;
10276 }
10277 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10278 {
10279 if(ooo[j]) {
10280 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
10281 break;
10282 }else{
10283 if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
10284 break;
10285 }
10286 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10287 //printf("no-match due to different register (branch)\n");
57871462 10288 break;
10289 }
10290 }
e1190b87 10291 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10292 //printf("No free regs for store %x\n",start+j*4);
10293 break;
10294 }
57871462 10295 if(f_regmap[hr]>=64) {
10296 if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10297 break;
10298 }
10299 else
10300 {
10301 if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10302 break;
10303 }
10304 }
10305 }
10306 }
10307 }
10308 }
10309 }
10310 }
10311 }else{
198df76f 10312      // Non-branch or undetermined branch target
57871462 10313 for(hr=0;hr<HOST_REGS;hr++)
10314 {
10315 if(hr!=EXCLUDE_REG) {
10316 if(regs[i].regmap[hr]>64) {
10317 if(!((regs[i].dirty>>hr)&1))
10318 f_regmap[hr]=regs[i].regmap[hr];
10319 }
b372a952 10320 else if(regs[i].regmap[hr]>=0) {
10321 if(f_regmap[hr]!=regs[i].regmap[hr]) {
10322 // dealloc old register
10323 int n;
10324 for(n=0;n<HOST_REGS;n++)
10325 {
10326 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10327 }
10328 // and alloc new one
10329 f_regmap[hr]=regs[i].regmap[hr];
10330 }
10331 }
57871462 10332 }
10333 }
10334 // Try to restore cycle count at branch targets
10335 if(bt[i]) {
10336 for(j=i;j<slen-1;j++) {
10337 if(regs[j].regmap[HOST_CCREG]!=-1) break;
e1190b87 10338 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10339 //printf("no free regs for store %x\n",start+j*4);
10340 break;
57871462 10341 }
57871462 10342 }
10343 if(regs[j].regmap[HOST_CCREG]==CCREG) {
10344 int k=i;
10345 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10346 while(k<j) {
10347 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10348 regs[k].regmap[HOST_CCREG]=CCREG;
10349 regmap_pre[k+1][HOST_CCREG]=CCREG;
10350 regs[k+1].wasdirty|=1<<HOST_CCREG;
10351 regs[k].dirty|=1<<HOST_CCREG;
10352 regs[k].wasconst&=~(1<<HOST_CCREG);
10353 regs[k].isconst&=~(1<<HOST_CCREG);
10354 k++;
10355 }
10356 regs[j].regmap_entry[HOST_CCREG]=CCREG;
10357 }
10358 // Work backwards from the branch target
10359 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10360 {
10361 //printf("Extend backwards\n");
10362 int k;
10363 k=i;
10364 while(regs[k-1].regmap[HOST_CCREG]==-1) {
e1190b87 10365 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10366 //printf("no free regs for store %x\n",start+(k-1)*4);
10367 break;
57871462 10368 }
57871462 10369 k--;
10370 }
10371 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10372 //printf("Extend CC, %x ->\n",start+k*4);
10373 while(k<=i) {
10374 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10375 regs[k].regmap[HOST_CCREG]=CCREG;
10376 regmap_pre[k+1][HOST_CCREG]=CCREG;
10377 regs[k+1].wasdirty|=1<<HOST_CCREG;
10378 regs[k].dirty|=1<<HOST_CCREG;
10379 regs[k].wasconst&=~(1<<HOST_CCREG);
10380 regs[k].isconst&=~(1<<HOST_CCREG);
10381 k++;
10382 }
10383 }
10384 else {
10385 //printf("Fail Extend CC, %x ->\n",start+k*4);
10386 }
10387 }
10388 }
10389 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10390 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10391 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
e1190b87 10392 itype[i]!=FCONV&&itype[i]!=FCOMP)
57871462 10393 {
10394 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10395 }
10396 }
10397 }
10398
d61de97e 10399 // Cache memory offset or tlb map pointer if a register is available
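  // Greedy heuristic: 'reg' is MMREG (tlb map pointer) or ROREG (ram
  // offset).  For each host register, score[] counts how many upcoming
  // loads/stores in [loop_start,end] could use it to hold that pointer,
  // with earliest_available[] marking where the register becomes free.
  // The best-scoring register (score > 1) is pre-allocated over the whole
  // range, including back to a loop's branch target for loop preload.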
10400 #ifndef HOST_IMM_ADDR32
10401 #ifndef RAM_OFFSET
10402 if(using_tlb)
10403 #endif
10404 {
10405 int earliest_available[HOST_REGS];
10406 int loop_start[HOST_REGS];
10407 int score[HOST_REGS];
10408 int end[HOST_REGS];
10409 int reg=using_tlb?MMREG:ROREG;
10410
10411 // Init
10412 for(hr=0;hr<HOST_REGS;hr++) {
10413 score[hr]=0;earliest_available[hr]=0;
10414 loop_start[hr]=MAXBLOCK;
10415 }
10416 for(i=0;i<slen-1;i++)
10417 {
10418 // Can't do anything if no registers are available
10419 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
10420 for(hr=0;hr<HOST_REGS;hr++) {
10421 score[hr]=0;earliest_available[hr]=i+1;
10422 loop_start[hr]=MAXBLOCK;
10423 }
10424 }
10425 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10426 if(!ooo[i]) {
10427 if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
10428 for(hr=0;hr<HOST_REGS;hr++) {
10429 score[hr]=0;earliest_available[hr]=i+1;
10430 loop_start[hr]=MAXBLOCK;
10431 }
10432 }
198df76f 10433 }else{
10434 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
10435 for(hr=0;hr<HOST_REGS;hr++) {
10436 score[hr]=0;earliest_available[hr]=i+1;
10437 loop_start[hr]=MAXBLOCK;
10438 }
10439 }
d61de97e 10440 }
10441 }
10442 // Mark unavailable registers
10443 for(hr=0;hr<HOST_REGS;hr++) {
10444 if(regs[i].regmap[hr]>=0) {
10445 score[hr]=0;earliest_available[hr]=i+1;
10446 loop_start[hr]=MAXBLOCK;
10447 }
10448 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10449 if(branch_regs[i].regmap[hr]>=0) {
10450 score[hr]=0;earliest_available[hr]=i+2;
10451 loop_start[hr]=MAXBLOCK;
10452 }
10453 }
10454 }
10455 // No register allocations after unconditional jumps
10456 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10457 {
10458 for(hr=0;hr<HOST_REGS;hr++) {
10459 score[hr]=0;earliest_available[hr]=i+2;
10460 loop_start[hr]=MAXBLOCK;
10461 }
10462 i++; // Skip delay slot too
10463 //printf("skip delay slot: %x\n",start+i*4);
10464 }
10465 else
10466 // Possible match
10467 if(itype[i]==LOAD||itype[i]==LOADLR||
10468 itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
10469 for(hr=0;hr<HOST_REGS;hr++) {
10470 if(hr!=EXCLUDE_REG) {
10471 end[hr]=i-1;
10472 for(j=i;j<slen-1;j++) {
10473 if(regs[j].regmap[hr]>=0) break;
10474 if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10475 if(branch_regs[j].regmap[hr]>=0) break;
10476 if(ooo[j]) {
10477 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
10478 }else{
10479 if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
10480 }
10481 }
10482 else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
10483 if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10484 int t=(ba[j]-start)>>2;
10485 if(t<j&&t>=earliest_available[hr]) {
198df76f 10486 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
10487 // Score a point for hoisting loop invariant
10488 if(t<loop_start[hr]) loop_start[hr]=t;
10489 //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
10490 score[hr]++;
10491 end[hr]=j;
10492 }
d61de97e 10493 }
10494 else if(t<j) {
10495 if(regs[t].regmap[hr]==reg) {
10496 // Score a point if the branch target matches this register
10497 score[hr]++;
10498 end[hr]=j;
10499 }
10500 }
10501 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
10502 itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
10503 score[hr]++;
10504 end[hr]=j;
10505 }
10506 }
10507 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10508 {
10509 // Stop on unconditional branch
10510 break;
10511 }
10512 else
10513 if(itype[j]==LOAD||itype[j]==LOADLR||
10514 itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
10515 score[hr]++;
10516 end[hr]=j;
10517 }
10518 }
10519 }
10520 }
10521 // Find highest score and allocate that register
10522 int maxscore=0;
10523 for(hr=0;hr<HOST_REGS;hr++) {
10524 if(hr!=EXCLUDE_REG) {
10525 if(score[hr]>score[maxscore]) {
10526 maxscore=hr;
10527 //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
10528 }
10529 }
10530 }
10531 if(score[maxscore]>1)
10532 {
10533 if(i<loop_start[maxscore]) loop_start[maxscore]=i;
10534 for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
10535 //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
10536 assert(regs[j].regmap[maxscore]<0);
10537 if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
10538 regs[j].regmap[maxscore]=reg;
10539 regs[j].dirty&=~(1<<maxscore);
10540 regs[j].wasconst&=~(1<<maxscore);
10541 regs[j].isconst&=~(1<<maxscore);
10542 if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10543 branch_regs[j].regmap[maxscore]=reg;
10544 branch_regs[j].wasdirty&=~(1<<maxscore);
10545 branch_regs[j].dirty&=~(1<<maxscore);
10546 branch_regs[j].wasconst&=~(1<<maxscore);
10547 branch_regs[j].isconst&=~(1<<maxscore);
10548 if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
10549 regmap_pre[j+2][maxscore]=reg;
10550 regs[j+2].wasdirty&=~(1<<maxscore);
10551 }
10552 // loop optimization (loop_preload)
10553 int t=(ba[j]-start)>>2;
198df76f 10554 if(t==loop_start[maxscore]) {
10555 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
10556 regs[t].regmap_entry[maxscore]=reg;
10557 }
d61de97e 10558 }
10559 else
10560 {
10561 if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
10562 regmap_pre[j+1][maxscore]=reg;
10563 regs[j+1].wasdirty&=~(1<<maxscore);
10564 }
10565 }
10566 }
10567 i=j-1;
10568 if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
10569 for(hr=0;hr<HOST_REGS;hr++) {
10570            score[hr]=0;earliest_available[hr]=i+1;
10571 loop_start[hr]=MAXBLOCK;
10572 }
10573 }
10574 }
10575 }
10576 }
10577 #endif
10578
57871462 10579 // This allocates registers (if possible) one instruction prior
10580 // to use, which can avoid a load-use penalty on certain CPUs.
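  // For example (hypothetical), if instruction i+1 is  addu r3,r1,r2  and
  // r1's host register is only allocated at i+1, pulling the allocation
  // back to instruction i lets r1 be loaded from the register file one
  // slot earlier, so the add does not stall on the load result.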
10581 for(i=0;i<slen-1;i++)
10582 {
10583 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10584 {
10585 if(!bt[i+1])
10586 {
b9b61529 10587 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10588 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
57871462 10589 {
10590 if(rs1[i+1]) {
10591 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10592 {
10593 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10594 {
10595 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10596 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10597 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10598 regs[i].isconst&=~(1<<hr);
10599 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10600 constmap[i][hr]=constmap[i+1][hr];
10601 regs[i+1].wasdirty&=~(1<<hr);
10602 regs[i].dirty&=~(1<<hr);
10603 }
10604 }
10605 }
10606 if(rs2[i+1]) {
10607 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10608 {
10609 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10610 {
10611 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10612 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10613 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10614 regs[i].isconst&=~(1<<hr);
10615 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10616 constmap[i][hr]=constmap[i+1][hr];
10617 regs[i+1].wasdirty&=~(1<<hr);
10618 regs[i].dirty&=~(1<<hr);
10619 }
10620 }
10621 }
198df76f 10622 // Preload target address for load instruction (non-constant)
57871462 10623 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10624 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10625 {
10626 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10627 {
10628 regs[i].regmap[hr]=rs1[i+1];
10629 regmap_pre[i+1][hr]=rs1[i+1];
10630 regs[i+1].regmap_entry[hr]=rs1[i+1];
10631 regs[i].isconst&=~(1<<hr);
10632 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10633 constmap[i][hr]=constmap[i+1][hr];
10634 regs[i+1].wasdirty&=~(1<<hr);
10635 regs[i].dirty&=~(1<<hr);
10636 }
10637 }
10638 }
198df76f 10639 // Load source into target register
57871462 10640 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10641 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10642 {
10643 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10644 {
10645 regs[i].regmap[hr]=rs1[i+1];
10646 regmap_pre[i+1][hr]=rs1[i+1];
10647 regs[i+1].regmap_entry[hr]=rs1[i+1];
10648 regs[i].isconst&=~(1<<hr);
10649 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10650 constmap[i][hr]=constmap[i+1][hr];
10651 regs[i+1].wasdirty&=~(1<<hr);
10652 regs[i].dirty&=~(1<<hr);
10653 }
10654 }
10655 }
198df76f 10656 // Preload map address
57871462 10657 #ifndef HOST_IMM_ADDR32
b9b61529 10658 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
57871462 10659 hr=get_reg(regs[i+1].regmap,TLREG);
10660 if(hr>=0) {
10661 int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10662 if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10663 int nr;
10664 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10665 {
10666 regs[i].regmap[hr]=MGEN1+((i+1)&1);
10667 regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10668 regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10669 regs[i].isconst&=~(1<<hr);
10670 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10671 constmap[i][hr]=constmap[i+1][hr];
10672 regs[i+1].wasdirty&=~(1<<hr);
10673 regs[i].dirty&=~(1<<hr);
10674 }
10675 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10676 {
10677 // move it to another register
10678 regs[i+1].regmap[hr]=-1;
10679 regmap_pre[i+2][hr]=-1;
10680 regs[i+1].regmap[nr]=TLREG;
10681 regmap_pre[i+2][nr]=TLREG;
10682 regs[i].regmap[nr]=MGEN1+((i+1)&1);
10683 regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10684 regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10685 regs[i].isconst&=~(1<<nr);
10686 regs[i+1].isconst&=~(1<<nr);
10687 regs[i].dirty&=~(1<<nr);
10688 regs[i+1].wasdirty&=~(1<<nr);
10689 regs[i+1].dirty&=~(1<<nr);
10690 regs[i+2].wasdirty&=~(1<<nr);
10691 }
10692 }
10693 }
10694 }
10695 #endif
198df76f 10696 // Address for store instruction (non-constant)
b9b61529 10697 if(itype[i+1]==STORE||itype[i+1]==STORELR
10698 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
57871462 10699 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10700 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10701 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10702 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10703 assert(hr>=0);
10704 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10705 {
10706 regs[i].regmap[hr]=rs1[i+1];
10707 regmap_pre[i+1][hr]=rs1[i+1];
10708 regs[i+1].regmap_entry[hr]=rs1[i+1];
10709 regs[i].isconst&=~(1<<hr);
10710 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10711 constmap[i][hr]=constmap[i+1][hr];
10712 regs[i+1].wasdirty&=~(1<<hr);
10713 regs[i].dirty&=~(1<<hr);
10714 }
10715 }
10716 }
b9b61529 10717 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
57871462 10718 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10719 int nr;
10720 hr=get_reg(regs[i+1].regmap,FTEMP);
10721 assert(hr>=0);
10722 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10723 {
10724 regs[i].regmap[hr]=rs1[i+1];
10725 regmap_pre[i+1][hr]=rs1[i+1];
10726 regs[i+1].regmap_entry[hr]=rs1[i+1];
10727 regs[i].isconst&=~(1<<hr);
10728 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10729 constmap[i][hr]=constmap[i+1][hr];
10730 regs[i+1].wasdirty&=~(1<<hr);
10731 regs[i].dirty&=~(1<<hr);
10732 }
10733 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10734 {
10735 // move it to another register
10736 regs[i+1].regmap[hr]=-1;
10737 regmap_pre[i+2][hr]=-1;
10738 regs[i+1].regmap[nr]=FTEMP;
10739 regmap_pre[i+2][nr]=FTEMP;
10740 regs[i].regmap[nr]=rs1[i+1];
10741 regmap_pre[i+1][nr]=rs1[i+1];
10742 regs[i+1].regmap_entry[nr]=rs1[i+1];
10743 regs[i].isconst&=~(1<<nr);
10744 regs[i+1].isconst&=~(1<<nr);
10745 regs[i].dirty&=~(1<<nr);
10746 regs[i+1].wasdirty&=~(1<<nr);
10747 regs[i+1].dirty&=~(1<<nr);
10748 regs[i+2].wasdirty&=~(1<<nr);
10749 }
10750 }
10751 }
b9b61529 10752        if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
57871462 10753 if(itype[i+1]==LOAD)
10754 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
b9b61529 10755 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
57871462 10756 hr=get_reg(regs[i+1].regmap,FTEMP);
b9b61529 10757 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
57871462 10758 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10759 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10760 }
10761 if(hr>=0&&regs[i].regmap[hr]<0) {
10762 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10763 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10764 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10765 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10766 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10767 regs[i].isconst&=~(1<<hr);
10768 regs[i+1].wasdirty&=~(1<<hr);
10769 regs[i].dirty&=~(1<<hr);
10770 }
10771 }
10772 }
10773 }
10774 }
10775 }
10776 }
10777
10778 /* Pass 6 - Optimize clean/dirty state */
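  // clean_registers() settles where dirty (modified) guest registers are
  // written back to the register file, so the clean/dirty state seen at
  // branch targets is consistent and redundant writebacks are avoided.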
10779 clean_registers(0,slen-1,1);
10780
10781 /* Pass 7 - Identify 32-bit registers */
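// Walk the block backwards to compute requires_32bit[]: the registers that
// must hold sign-extended 32-bit values on entry to each instruction.
// FORCE32 (PSX) builds skip this pass, since every register is 32-bit.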
a28c6ce8 10782#ifndef FORCE32
57871462 10783 provisional_r32();
10784
10785 u_int r32=0;
10786
10787 for (i=slen-1;i>=0;i--)
10788 {
10789 int hr;
10790 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10791 {
10792 if(ba[i]<start || ba[i]>=(start+slen*4))
10793 {
10794 // Branch out of this block, don't need anything
10795 r32=0;
10796 }
10797 else
10798 {
10799 // Internal branch
10800 // Need whatever matches the target
10801 // (and doesn't get overwritten by the delay slot instruction)
10802 r32=0;
10803 int t=(ba[i]-start)>>2;
10804 if(ba[i]>start+i*4) {
10805 // Forward branch
10806 if(!(requires_32bit[t]&~regs[i].was32))
10807 r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10808 }else{
10809 // Backward branch
10810 //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10811 // r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10812 if(!(pr32[t]&~regs[i].was32))
10813 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10814 }
10815 }
10816 // Conditional branch may need registers for following instructions
10817 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10818 {
10819 if(i<slen-2) {
10820 r32|=requires_32bit[i+2];
10821 r32&=regs[i].was32;
10822 // Mark this address as a branch target since it may be called
10823 // upon return from interrupt
10824 bt[i+2]=1;
10825 }
10826 }
10827 // Merge in delay slot
10828 if(!likely[i]) {
10829 // These are overwritten unless the branch is "likely"
10830 // and the delay slot is nullified if not taken
10831 r32&=~(1LL<<rt1[i+1]);
10832 r32&=~(1LL<<rt2[i+1]);
10833 }
10834 // Assume these are needed (delay slot)
10835 if(us1[i+1]>0)
10836 {
10837 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10838 }
10839 if(us2[i+1]>0)
10840 {
10841 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10842 }
10843 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10844 {
10845 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10846 }
10847 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10848 {
10849 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10850 }
10851 }
1e973cb0 10852 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
57871462 10853 {
10854 // SYSCALL instruction (software interrupt)
10855 r32=0;
10856 }
10857 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10858 {
10859 // ERET instruction (return from interrupt)
10860 r32=0;
10861 }
10862 // Check 32 bits
10863 r32&=~(1LL<<rt1[i]);
10864 r32&=~(1LL<<rt2[i]);
10865 if(us1[i]>0)
10866 {
10867 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10868 }
10869 if(us2[i]>0)
10870 {
10871 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10872 }
10873 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10874 {
10875 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10876 }
10877 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10878 {
10879 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10880 }
10881 requires_32bit[i]=r32;
10882
10886 10883 // Dirty registers which are 32-bit require 32-bit input
10884 // as they will be written as 32-bit values
10885 for(hr=0;hr<HOST_REGS;hr++)
10886 {
10887 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10888 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10889 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10890 requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10891 }
10892 }
10893 }
10894 //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10895 }
04fd948a 10896#else
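// FORCE32 builds: all registers are 32-bit, so only the branch-target
// marking below is still required.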
10897 for (i=slen-1;i>=0;i--)
10898 {
10899 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10900 {
10901 // Conditional branch
10902 if((source[i]>>16)!=0x1000&&i<slen-2) {
10903 // Mark this address as a branch target since it may be called
10904 // upon return from interrupt
10905 bt[i+2]=1;
10906 }
10907 }
10908 }
a28c6ce8 10909#endif
57871462 10910
10911 if(itype[slen-1]==SPAN) {
10912 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10913 }
4600ba03 10914
10915#ifdef DISASM
57871462 10916 /* Debug/disassembly */
57871462 10917 for(i=0;i<slen;i++)
10918 {
10919 printf("U:");
10920 int r;
10921 for(r=1;r<=CCREG;r++) {
10922 if((unneeded_reg[i]>>r)&1) {
10923 if(r==HIREG) printf(" HI");
10924 else if(r==LOREG) printf(" LO");
10925 else printf(" r%d",r);
10926 }
10927 }
90ae6d4e 10928#ifndef FORCE32
57871462 10929 printf(" UU:");
10930 for(r=1;r<=CCREG;r++) {
10931 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10932 if(r==HIREG) printf(" HI");
10933 else if(r==LOREG) printf(" LO");
10934 else printf(" r%d",r);
10935 }
10936 }
10937 printf(" 32:");
10938 for(r=0;r<=CCREG;r++) {
10939 //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10940 if((regs[i].was32>>r)&1) {
10941 if(r==CCREG) printf(" CC");
10942 else if(r==HIREG) printf(" HI");
10943 else if(r==LOREG) printf(" LO");
10944 else printf(" r%d",r);
10945 }
10946 }
90ae6d4e 10947#endif
57871462 10948 printf("\n");
10949 #if defined(__i386__) || defined(__x86_64__)
10950 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10951 #endif
10952 #ifdef __arm__
10953 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10954 #endif
10955 printf("needs: ");
10956 if(needed_reg[i]&1) printf("eax ");
10957 if((needed_reg[i]>>1)&1) printf("ecx ");
10958 if((needed_reg[i]>>2)&1) printf("edx ");
10959 if((needed_reg[i]>>3)&1) printf("ebx ");
10960 if((needed_reg[i]>>5)&1) printf("ebp ");
10961 if((needed_reg[i]>>6)&1) printf("esi ");
10962 if((needed_reg[i]>>7)&1) printf("edi ");
10963 printf("r:");
10964 for(r=0;r<=CCREG;r++) {
10965 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10966 if((requires_32bit[i]>>r)&1) {
10967 if(r==CCREG) printf(" CC");
10968 else if(r==HIREG) printf(" HI");
10969 else if(r==LOREG) printf(" LO");
10970 else printf(" r%d",r);
10971 }
10972 }
10973 printf("\n");
10974 /*printf("pr:");
10975 for(r=0;r<=CCREG;r++) {
10976 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10977 if((pr32[i]>>r)&1) {
10978 if(r==CCREG) printf(" CC");
10979 else if(r==HIREG) printf(" HI");
10980 else if(r==LOREG) printf(" LO");
10981 else printf(" r%d",r);
10982 }
10983 }
10984 if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10985 printf("\n");*/
10986 #if defined(__i386__) || defined(__x86_64__)
10987 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10988 printf("dirty: ");
10989 if(regs[i].wasdirty&1) printf("eax ");
10990 if((regs[i].wasdirty>>1)&1) printf("ecx ");
10991 if((regs[i].wasdirty>>2)&1) printf("edx ");
10992 if((regs[i].wasdirty>>3)&1) printf("ebx ");
10993 if((regs[i].wasdirty>>5)&1) printf("ebp ");
10994 if((regs[i].wasdirty>>6)&1) printf("esi ");
10995 if((regs[i].wasdirty>>7)&1) printf("edi ");
10996 #endif
10997 #ifdef __arm__
10998 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10999 printf("dirty: ");
11000 if(regs[i].wasdirty&1) printf("r0 ");
11001 if((regs[i].wasdirty>>1)&1) printf("r1 ");
11002 if((regs[i].wasdirty>>2)&1) printf("r2 ");
11003 if((regs[i].wasdirty>>3)&1) printf("r3 ");
11004 if((regs[i].wasdirty>>4)&1) printf("r4 ");
11005 if((regs[i].wasdirty>>5)&1) printf("r5 ");
11006 if((regs[i].wasdirty>>6)&1) printf("r6 ");
11007 if((regs[i].wasdirty>>7)&1) printf("r7 ");
11008 if((regs[i].wasdirty>>8)&1) printf("r8 ");
11009 if((regs[i].wasdirty>>9)&1) printf("r9 ");
11010 if((regs[i].wasdirty>>10)&1) printf("r10 ");
11011 if((regs[i].wasdirty>>12)&1) printf("r12 ");
11012 #endif
11013 printf("\n");
11014 disassemble_inst(i);
11015 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
11016 #if defined(__i386__) || defined(__x86_64__)
11017 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
11018 if(regs[i].dirty&1) printf("eax ");
11019 if((regs[i].dirty>>1)&1) printf("ecx ");
11020 if((regs[i].dirty>>2)&1) printf("edx ");
11021 if((regs[i].dirty>>3)&1) printf("ebx ");
11022 if((regs[i].dirty>>5)&1) printf("ebp ");
11023 if((regs[i].dirty>>6)&1) printf("esi ");
11024 if((regs[i].dirty>>7)&1) printf("edi ");
11025 #endif
11026 #ifdef __arm__
11027 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
11028 if(regs[i].dirty&1) printf("r0 ");
11029 if((regs[i].dirty>>1)&1) printf("r1 ");
11030 if((regs[i].dirty>>2)&1) printf("r2 ");
11031 if((regs[i].dirty>>3)&1) printf("r3 ");
11032 if((regs[i].dirty>>4)&1) printf("r4 ");
11033 if((regs[i].dirty>>5)&1) printf("r5 ");
11034 if((regs[i].dirty>>6)&1) printf("r6 ");
11035 if((regs[i].dirty>>7)&1) printf("r7 ");
11036 if((regs[i].dirty>>8)&1) printf("r8 ");
11037 if((regs[i].dirty>>9)&1) printf("r9 ");
11038 if((regs[i].dirty>>10)&1) printf("r10 ");
11039 if((regs[i].dirty>>12)&1) printf("r12 ");
11040 #endif
11041 printf("\n");
11042 if(regs[i].isconst) {
11043 printf("constants: ");
11044 #if defined(__i386__) || defined(__x86_64__)
11045 if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
11046 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
11047 if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
11048 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
11049 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
11050 if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
11051 if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
11052 #endif
11053 #ifdef __arm__
11054 if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
11055 if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
11056 if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
11057 if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
11058 if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
11059 if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
11060 if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
11061 if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
11062 if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
11063 if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
11064 if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
11065 if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
11066 #endif
11067 printf("\n");
11068 }
90ae6d4e 11069#ifndef FORCE32
57871462 11070 printf(" 32:");
11071 for(r=0;r<=CCREG;r++) {
11072 if((regs[i].is32>>r)&1) {
11073 if(r==CCREG) printf(" CC");
11074 else if(r==HIREG) printf(" HI");
11075 else if(r==LOREG) printf(" LO");
11076 else printf(" r%d",r);
11077 }
11078 }
11079 printf("\n");
90ae6d4e 11080#endif
57871462 11081 /*printf(" p32:");
11082 for(r=0;r<=CCREG;r++) {
11083 if((p32[i]>>r)&1) {
11084 if(r==CCREG) printf(" CC");
11085 else if(r==HIREG) printf(" HI");
11086 else if(r==LOREG) printf(" LO");
11087 else printf(" r%d",r);
11088 }
11089 }
11090 if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
11091 else printf("\n");*/
11092 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
11093 #if defined(__i386__) || defined(__x86_64__)
11094 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
11095 if(branch_regs[i].dirty&1) printf("eax ");
11096 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
11097 if((branch_regs[i].dirty>>2)&1) printf("edx ");
11098 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
11099 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
11100 if((branch_regs[i].dirty>>6)&1) printf("esi ");
11101 if((branch_regs[i].dirty>>7)&1) printf("edi ");
11102 #endif
11103 #ifdef __arm__
11104 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
11105 if(branch_regs[i].dirty&1) printf("r0 ");
11106 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
11107 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
11108 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
11109 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
11110 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
11111 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
11112 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
11113 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
11114 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
11115 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
11116 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
11117 #endif
90ae6d4e 11118#ifndef FORCE32
57871462 11119 printf(" 32:");
11120 for(r=0;r<=CCREG;r++) {
11121 if((branch_regs[i].is32>>r)&1) {
11122 if(r==CCREG) printf(" CC");
11123 else if(r==HIREG) printf(" HI");
11124 else if(r==LOREG) printf(" LO");
11125 else printf(" r%d",r);
11126 }
11127 }
11128 printf("\n");
90ae6d4e 11129#endif
57871462 11130 }
11131 }
4600ba03 11132#endif // DISASM
57871462 11133
11134 /* Pass 8 - Assembly */
11135 linkcount=0;stubcount=0;
11136 ds=0;is_delayslot=0;
11137 cop1_usable=0;
11138 uint64_t is32_pre=0;
11139 u_int dirty_pre=0;
11140 u_int beginning=(u_int)out;
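// A start address with bit 0 set denotes a block beginning in the delay slot
// of a page-spanning branch; emit its special entry.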
11141 if((u_int)addr&1) {
11142 ds=1;
11143 pagespan_ds();
11144 }
9ad4d757 11145 u_int instr_addr0_override=0;
11146
11147#ifdef PCSX
11148 if (start == 0x80030000) {
11149 // nasty hack for fastbios thing
96186eba 11150 // override block entry to this code
9ad4d757 11151 instr_addr0_override=(u_int)out;
11152 emit_movimm(start,0);
96186eba 11153 // abuse io address var as a flag that we
11154 // have already returned here once
11155 emit_readword((int)&address,1);
9ad4d757 11156 emit_writeword(0,(int)&pcaddr);
96186eba 11157 emit_writeword(0,(int)&address);
9ad4d757 11158 emit_cmp(0,1);
11159 emit_jne((int)new_dyna_leave);
11160 }
11161#endif
57871462 11162 for(i=0;i<slen;i++)
11163 {
11164 //if(ds) printf("ds: ");
4600ba03 11165 disassemble_inst(i);
57871462 11166 if(ds) {
11167 ds=0; // Skip delay slot
11168 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
11169 instr_addr[i]=0;
11170 } else {
11171 #ifndef DESTRUCTIVE_WRITEBACK
11172 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11173 {
11174 wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
11175 unneeded_reg[i],unneeded_reg_upper[i]);
11176 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
11177 unneeded_reg[i],unneeded_reg_upper[i]);
11178 }
f776eb14 11179 if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
11180 is32_pre=branch_regs[i].is32;
11181 dirty_pre=branch_regs[i].dirty;
11182 }else{
11183 is32_pre=regs[i].is32;
11184 dirty_pre=regs[i].dirty;
11185 }
57871462 11186 #endif
11187 // write back
11188 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11189 {
11190 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
11191 unneeded_reg[i],unneeded_reg_upper[i]);
11192 loop_preload(regmap_pre[i],regs[i].regmap_entry);
11193 }
11194 // branch target entry point
11195 instr_addr[i]=(u_int)out;
11196 assem_debug("<->\n");
11197 // load regs
11198 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
11199 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
11200 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
11201 address_generation(i,&regs[i],regs[i].regmap_entry);
11202 load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
11203 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11204 {
11205 // Load the delay slot registers if necessary
4ef8f67d 11206 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
57871462 11207 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
4ef8f67d 11208 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
57871462 11209 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
b9b61529 11210 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
57871462 11211 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11212 }
11213 else if(i+1<slen)
11214 {
11215 // Preload registers for following instruction
11216 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
11217 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
11218 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11219 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
11220 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
11221 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11222 }
11223 // TODO: if(is_ooo(i)) address_generation(i+1);
11224 if(itype[i]==CJUMP||itype[i]==FJUMP)
11225 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
b9b61529 11226 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
57871462 11227 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11228 if(bt[i]) cop1_usable=0;
11229 // assemble
11230 switch(itype[i]) {
11231 case ALU:
11232 alu_assemble(i,&regs[i]);break;
11233 case IMM16:
11234 imm16_assemble(i,&regs[i]);break;
11235 case SHIFT:
11236 shift_assemble(i,&regs[i]);break;
11237 case SHIFTIMM:
11238 shiftimm_assemble(i,&regs[i]);break;
11239 case LOAD:
11240 load_assemble(i,&regs[i]);break;
11241 case LOADLR:
11242 loadlr_assemble(i,&regs[i]);break;
11243 case STORE:
11244 store_assemble(i,&regs[i]);break;
11245 case STORELR:
11246 storelr_assemble(i,&regs[i]);break;
11247 case COP0:
11248 cop0_assemble(i,&regs[i]);break;
11249 case COP1:
11250 cop1_assemble(i,&regs[i]);break;
11251 case C1LS:
11252 c1ls_assemble(i,&regs[i]);break;
b9b61529 11253 case COP2:
11254 cop2_assemble(i,&regs[i]);break;
11255 case C2LS:
11256 c2ls_assemble(i,&regs[i]);break;
11257 case C2OP:
11258 c2op_assemble(i,&regs[i]);break;
57871462 11259 case FCONV:
11260 fconv_assemble(i,&regs[i]);break;
11261 case FLOAT:
11262 float_assemble(i,&regs[i]);break;
11263 case FCOMP:
11264 fcomp_assemble(i,&regs[i]);break;
11265 case MULTDIV:
11266 multdiv_assemble(i,&regs[i]);break;
11267 case MOV:
11268 mov_assemble(i,&regs[i]);break;
11269 case SYSCALL:
11270 syscall_assemble(i,&regs[i]);break;
7139f3c8 11271 case HLECALL:
11272 hlecall_assemble(i,&regs[i]);break;
1e973cb0 11273 case INTCALL:
11274 intcall_assemble(i,&regs[i]);break;
57871462 11275 case UJUMP:
11276 ujump_assemble(i,&regs[i]);ds=1;break;
11277 case RJUMP:
11278 rjump_assemble(i,&regs[i]);ds=1;break;
11279 case CJUMP:
11280 cjump_assemble(i,&regs[i]);ds=1;break;
11281 case SJUMP:
11282 sjump_assemble(i,&regs[i]);ds=1;break;
11283 case FJUMP:
11284 fjump_assemble(i,&regs[i]);ds=1;break;
11285 case SPAN:
11286 pagespan_assemble(i,&regs[i]);break;
11287 }
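// After an unconditional jump the literal pool can be dumped in place;
// otherwise only flush it when it is getting full, branching over the
// emitted constants.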
11288 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
11289 literal_pool(1024);
11290 else
11291 literal_pool_jumpover(256);
11292 }
11293 }
11294 //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
11295 // If the block did not end with an unconditional branch,
11296 // add a jump to the next instruction.
11297 if(i>1) {
11298 if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
11299 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11300 assert(i==slen);
11301 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
11302 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11303 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11304 emit_loadreg(CCREG,HOST_CCREG);
11305 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11306 }
11307 else if(!likely[i-2])
11308 {
11309 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
11310 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
11311 }
11312 else
11313 {
11314 store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
11315 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
11316 }
11317 add_to_linker((int)out,start+i*4,0);
11318 emit_jmp(0);
11319 }
11320 }
11321 else
11322 {
11323 assert(i>0);
11324 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11325 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11326 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11327 emit_loadreg(CCREG,HOST_CCREG);
11328 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11329 add_to_linker((int)out,start+i*4,0);
11330 emit_jmp(0);
11331 }
11332
11333 // TODO: delay slot stubs?
11334 // Stubs
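// Emit the out-of-line code recorded during assembly: slow-path memory
// accesses, cycle-count checks, code-invalidation checks, coprocessor-unusable
// exceptions and unaligned store handlers.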
11335 for(i=0;i<stubcount;i++)
11336 {
11337 switch(stubs[i][0])
11338 {
11339 case LOADB_STUB:
11340 case LOADH_STUB:
11341 case LOADW_STUB:
11342 case LOADD_STUB:
11343 case LOADBU_STUB:
11344 case LOADHU_STUB:
11345 do_readstub(i);break;
11346 case STOREB_STUB:
11347 case STOREH_STUB:
11348 case STOREW_STUB:
11349 case STORED_STUB:
11350 do_writestub(i);break;
11351 case CC_STUB:
11352 do_ccstub(i);break;
11353 case INVCODE_STUB:
11354 do_invstub(i);break;
11355 case FP_STUB:
11356 do_cop1stub(i);break;
11357 case STORELR_STUB:
11358 do_unalignedwritestub(i);break;
11359 }
11360 }
11361
9ad4d757 11362 if (instr_addr0_override)
11363 instr_addr[0] = instr_addr0_override;
11364
57871462 11365 /* Pass 9 - Linker */
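// Patch each recorded branch: external targets jump to an already-compiled
// block if check_addr() finds one (keeping the extjump stub for later
// relinking), otherwise to the stub; internal branches are patched straight
// to the target instruction's entry point.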
11366 for(i=0;i<linkcount;i++)
11367 {
11368 assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
11369 literal_pool(64);
11370 if(!link_addr[i][2])
11371 {
11372 void *stub=out;
11373 void *addr=check_addr(link_addr[i][1]);
11374 emit_extjump(link_addr[i][0],link_addr[i][1]);
11375 if(addr) {
11376 set_jump_target(link_addr[i][0],(int)addr);
11377 add_link(link_addr[i][1],stub);
11378 }
11379 else set_jump_target(link_addr[i][0],(int)stub);
11380 }
11381 else
11382 {
11383 // Internal branch
11384 int target=(link_addr[i][1]-start)>>2;
11385 assert(target>=0&&target<slen);
11386 assert(instr_addr[target]);
11387 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11388 //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
11389 //#else
11390 set_jump_target(link_addr[i][0],instr_addr[target]);
11391 //#endif
11392 }
11393 }
11394 // External Branch Targets (jump_in)
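// Register an entry point for the block start and every branch target: a
// dirty stub that revalidates the code is emitted and added to
// jump_in/jump_dirty, and any existing hash-table entries for the address
// are updated to point at it.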
11395 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
11396 for(i=0;i<slen;i++)
11397 {
11398 if(bt[i]||i==0)
11399 {
11400 if(instr_addr[i]) // TODO - delay slots (=null)
11401 {
11402 u_int vaddr=start+i*4;
94d23bb9 11403 u_int page=get_page(vaddr);
11404 u_int vpage=get_vpage(vaddr);
57871462 11405 literal_pool(256);
11406 //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
a28c6ce8 11407#ifndef FORCE32
57871462 11408 if(!requires_32bit[i])
a28c6ce8 11409#else
11410 if(1)
11411#endif
57871462 11412 {
11413 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11414 assem_debug("jump_in: %x\n",start+i*4);
11415 ll_add(jump_dirty+vpage,vaddr,(void *)out);
11416 int entry_point=do_dirty_stub(i);
11417 ll_add(jump_in+page,vaddr,(void *)entry_point);
11418 // If there was an existing entry in the hash table,
11419 // replace it with the new address.
11420 // Don't add new entries. We'll insert the
11421 // ones that actually get used in check_addr().
11422 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
11423 if(ht_bin[0]==vaddr) {
11424 ht_bin[1]=entry_point;
11425 }
11426 if(ht_bin[2]==vaddr) {
11427 ht_bin[3]=entry_point;
11428 }
11429 }
11430 else
11431 {
11432 u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
11433 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11434 assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
11435 //int entry_point=(int)out;
11436 ////assem_debug("entry_point: %x\n",entry_point);
11437 //load_regs_entry(i);
11438 //if(entry_point==(int)out)
11439 // entry_point=instr_addr[i];
11440 //else
11441 // emit_jmp(instr_addr[i]);
11442 //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11443 ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
11444 int entry_point=do_dirty_stub(i);
11445 ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11446 }
11447 }
11448 }
11449 }
11450 // Write out the literal pool if necessary
11451 literal_pool(0);
11452 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11453 // Align code
11454 if(((u_int)out)&7) emit_addnop(13);
11455 #endif
11456 assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
11457 //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
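// Keep a copy of the source MIPS code so the dirty stub can later detect
// whether the original code has been overwritten.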
11458 memcpy(copy,source,slen*4);
11459 copy+=slen*4;
11460
11461 #ifdef __arm__
11462 __clear_cache((void *)beginning,out);
11463 #endif
11464
11465 // If we're within 256K of the end of the buffer,
11466 // start over from the beginning. (Is 256K enough?)
11467 if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11468
11469 // Trap writes to any of the pages we compiled
11470 for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11471 invalid_code[i]=0;
90ae6d4e 11472#ifndef DISABLE_TLB
57871462 11473 memory_map[i]|=0x40000000;
11474 if((signed int)start>=(signed int)0xC0000000) {
11475 assert(using_tlb);
11476 j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11477 invalid_code[j]=0;
11478 memory_map[j]|=0x40000000;
11479 //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11480 }
90ae6d4e 11481#endif
57871462 11482 }
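// Reset the cached address range known to contain no compiled code, used to
// fast-path invalidation checks.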
9be4ba64 11483 inv_code_start=inv_code_end=~0;
b12c9fb8 11484#ifdef PCSX
b96d3df7 11485 // for PCSX we need to mark all mirrors too
b12c9fb8 11486 if(get_page(start)<(RAM_SIZE>>12))
11487 for(i=start>>12;i<=(start+slen*4)>>12;i++)
b96d3df7 11488 invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
11489 invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
11490 invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
b12c9fb8 11491#endif
57871462 11492
11493 /* Pass 10 - Free memory by expiring oldest blocks */
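// The translation cache behaves as a ring buffer: expirep sweeps ahead of the
// output pointer and, in four sub-phases, clears jump_in/jump_dirty lists,
// stale jump_out pointers, hash-table entries and jump_out lists for the
// region about to be reused.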
11494
11495 int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
11496 while(expirep!=end)
11497 {
11498 int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11499 int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11500 inv_debug("EXP: Phase %d\n",expirep);
11501 switch((expirep>>11)&3)
11502 {
11503 case 0:
11504 // Clear jump_in and jump_dirty
11505 ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11506 ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11507 ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11508 ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11509 break;
11510 case 1:
11511 // Clear pointers
11512 ll_kill_pointers(jump_out[expirep&2047],base,shift);
11513 ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11514 break;
11515 case 2:
11516 // Clear hash table
11517 for(i=0;i<32;i++) {
11518 int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11519 if((ht_bin[3]>>shift)==(base>>shift) ||
11520 ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11521 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11522 ht_bin[2]=ht_bin[3]=-1;
11523 }
11524 if((ht_bin[1]>>shift)==(base>>shift) ||
11525 ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11526 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11527 ht_bin[0]=ht_bin[2];
11528 ht_bin[1]=ht_bin[3];
11529 ht_bin[2]=ht_bin[3]=-1;
11530 }
11531 }
11532 break;
11533 case 3:
11534 // Clear jump_out
dd3a91a1 11535 #ifdef __arm__
11536 if((expirep&2047)==0)
11537 do_clear_cache();
11538 #endif
57871462 11539 ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11540 ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11541 break;
11542 }
11543 expirep=(expirep+1)&65535;
11544 }
11545 return 0;
11546}
b9b61529 11547
11548// vim:shiftwidth=2:expandtab