drc: merge Ari64's patch: 07_clear_cache
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2010 Ari64 *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
24
25#include "emu_if.h" //emulator interface
26
27#include <sys/mman.h>
28
29#ifdef __i386__
30#include "assem_x86.h"
31#endif
32#ifdef __x86_64__
33#include "assem_x64.h"
34#endif
35#ifdef __arm__
36#include "assem_arm.h"
37#endif
38
39#define MAXBLOCK 4096
40#define MAX_OUTPUT_BLOCK_SIZE 262144
41#define CLOCK_DIVIDER 2
42
43struct regstat
44{
45 signed char regmap_entry[HOST_REGS];
46 signed char regmap[HOST_REGS];
47 uint64_t was32;
48 uint64_t is32;
49 uint64_t wasdirty;
50 uint64_t dirty;
51 uint64_t u;
52 uint64_t uu;
53 u_int wasconst;
54 u_int isconst;
55 uint64_t constmap[HOST_REGS];
56};
57
58struct ll_entry
59{
60 u_int vaddr;
61 u_int reg32;
62 void *addr;
63 struct ll_entry *next;
64};
65
66 u_int start;
67 u_int *source;
68 u_int pagelimit;
69 char insn[MAXBLOCK][10];
70 u_char itype[MAXBLOCK];
71 u_char opcode[MAXBLOCK];
72 u_char opcode2[MAXBLOCK];
73 u_char bt[MAXBLOCK];
74 u_char rs1[MAXBLOCK];
75 u_char rs2[MAXBLOCK];
76 u_char rt1[MAXBLOCK];
77 u_char rt2[MAXBLOCK];
78 u_char us1[MAXBLOCK];
79 u_char us2[MAXBLOCK];
80 u_char dep1[MAXBLOCK];
81 u_char dep2[MAXBLOCK];
82 u_char lt1[MAXBLOCK];
83 int imm[MAXBLOCK];
84 u_int ba[MAXBLOCK];
85 char likely[MAXBLOCK];
86 char is_ds[MAXBLOCK];
87 uint64_t unneeded_reg[MAXBLOCK];
88 uint64_t unneeded_reg_upper[MAXBLOCK];
89 uint64_t branch_unneeded_reg[MAXBLOCK];
90 uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91 uint64_t p32[MAXBLOCK];
92 uint64_t pr32[MAXBLOCK];
93 signed char regmap_pre[MAXBLOCK][HOST_REGS];
94 signed char regmap[MAXBLOCK][HOST_REGS];
95 signed char regmap_entry[MAXBLOCK][HOST_REGS];
96 uint64_t constmap[MAXBLOCK][HOST_REGS];
97 uint64_t known_value[HOST_REGS];
98 u_int known_reg;
99 struct regstat regs[MAXBLOCK];
100 struct regstat branch_regs[MAXBLOCK];
101 u_int needed_reg[MAXBLOCK];
102 uint64_t requires_32bit[MAXBLOCK];
103 u_int wont_dirty[MAXBLOCK];
104 u_int will_dirty[MAXBLOCK];
105 int ccadj[MAXBLOCK];
106 int slen;
107 u_int instr_addr[MAXBLOCK];
108 u_int link_addr[MAXBLOCK][3];
109 int linkcount;
110 u_int stubs[MAXBLOCK*3][8];
111 int stubcount;
112 u_int literals[1024][2];
113 int literalcount;
114 int is_delayslot;
115 int cop1_usable;
116 u_char *out;
117 struct ll_entry *jump_in[4096];
118 struct ll_entry *jump_out[4096];
119 struct ll_entry *jump_dirty[4096];
120 u_int hash_table[65536][4] __attribute__((aligned(16)));
121 char shadow[1048576] __attribute__((aligned(16)));
122 void *copy;
123 int expirep;
124 u_int using_tlb;
125 u_int stop_after_jal;
126 extern u_char restore_candidate[512];
127 extern int cycle_count;
128
129 /* registers that may be allocated */
130 /* 1-31 gpr */
131#define HIREG 32 // hi
132#define LOREG 33 // lo
133#define FSREG 34 // FPU status (FCSR)
134#define CSREG 35 // Coprocessor status
135#define CCREG 36 // Cycle count
136#define INVCP 37 // Pointer to invalid_code
137#define TEMPREG 38
138#define FTEMP 38 // FPU/LDL/LDR temporary register
139#define PTEMP 39 // Prefetch temporary register
140#define TLREG 40 // TLB mapping offset
141#define RHASH 41 // Return address hash
142#define RHTBL 42 // Return address hash table address
143#define RTEMP 43 // JR/JALR address register
144#define MAXREG 43
145#define AGEN1 44 // Address generation temporary register
146#define AGEN2 45 // Address generation temporary register
147#define MGEN1 46 // Maptable address generation temporary register
148#define MGEN2 47 // Maptable address generation temporary register
149#define BTREG 48 // Branch target temporary register
150
151 /* instruction types */
152#define NOP 0 // No operation
153#define LOAD 1 // Load
154#define STORE 2 // Store
155#define LOADLR 3 // Unaligned load
156#define STORELR 4 // Unaligned store
157#define MOV 5 // Move
158#define ALU 6 // Arithmetic/logic
159#define MULTDIV 7 // Multiply/divide
160#define SHIFT 8 // Shift by register
161#define SHIFTIMM 9 // Shift by immediate
162#define IMM16 10 // 16-bit immediate
163#define RJUMP 11 // Unconditional jump to register
164#define UJUMP 12 // Unconditional jump
165#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166#define SJUMP 14 // Conditional branch (regimm format)
167#define COP0 15 // Coprocessor 0
168#define COP1 16 // Coprocessor 1
169#define C1LS 17 // Coprocessor 1 load/store
170#define FJUMP 18 // Conditional branch (floating point)
171#define FLOAT 19 // Floating point unit
172#define FCONV 20 // Convert integer to float
173#define FCOMP 21 // Floating point compare (sets FSREG)
174#define SYSCALL 22 // SYSCALL
175#define OTHER 23 // Other
176#define SPAN 24 // Branch/delay slot spans 2 pages
177#define NI 25 // Not implemented
178#define HLECALL 26 // PCSX fake opcodes for HLE
179#define COP2 27 // Coprocessor 2 move
180#define C2LS 28 // Coprocessor 2 load/store
181#define C2OP 29 // Coprocessor 2 operation
182#define INTCALL 30 // Call interpreter to handle rare corner cases
183
184 /* stubs */
185#define CC_STUB 1
186#define FP_STUB 2
187#define LOADB_STUB 3
188#define LOADH_STUB 4
189#define LOADW_STUB 5
190#define LOADD_STUB 6
191#define LOADBU_STUB 7
192#define LOADHU_STUB 8
193#define STOREB_STUB 9
194#define STOREH_STUB 10
195#define STOREW_STUB 11
196#define STORED_STUB 12
197#define STORELR_STUB 13
198#define INVCODE_STUB 14
199
200 /* branch codes */
201#define TAKEN 1
202#define NOTTAKEN 2
203#define NULLDS 3
204
205// asm linkage
206int new_recompile_block(int addr);
207void *get_addr_ht(u_int vaddr);
208void invalidate_block(u_int block);
209void invalidate_addr(u_int addr);
210void remove_hash(int vaddr);
211void jump_vaddr();
212void dyna_linker();
213void dyna_linker_ds();
214void verify_code();
215void verify_code_vm();
216void verify_code_ds();
217void cc_interrupt();
218void fp_exception();
219void fp_exception_ds();
220void jump_syscall();
221void jump_syscall_hle();
222void jump_eret();
223void jump_hlecall();
224void jump_intcall();
225void new_dyna_leave();
226
227// TLB
228void TLBWI_new();
229void TLBWR_new();
230void read_nomem_new();
231void read_nomemb_new();
232void read_nomemh_new();
233void read_nomemd_new();
234void write_nomem_new();
235void write_nomemb_new();
236void write_nomemh_new();
237void write_nomemd_new();
238void write_rdram_new();
239void write_rdramb_new();
240void write_rdramh_new();
241void write_rdramd_new();
242extern u_int memory_map[1048576];
243
244// Needed by assembler
245void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
246void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
247void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
248void load_all_regs(signed char i_regmap[]);
249void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
250void load_regs_entry(int t);
251void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
252
253int tracedebug=0;
254
255//#define DEBUG_CYCLE_COUNT 1
256
257void nullf() {}
258//#define assem_debug printf
259//#define inv_debug printf
260#define assem_debug nullf
261#define inv_debug nullf
262
263static void tlb_hacks()
264{
265#ifndef DISABLE_TLB
266 // Goldeneye hack
267 if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
268 {
269 u_int addr;
270 int n;
271 switch (ROM_HEADER->Country_code&0xFF)
272 {
273 case 0x45: // U
274 addr=0x34b30;
275 break;
276 case 0x4A: // J
277 addr=0x34b70;
278 break;
279 case 0x50: // E
280 addr=0x329f0;
281 break;
282 default:
283 // Unknown country code
284 addr=0;
285 break;
286 }
287 u_int rom_addr=(u_int)rom;
288 #ifdef ROM_COPY
289 // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
290 // in the lower 4G of memory to use this hack. Copy it if necessary.
291 if((void *)rom>(void *)0xffffffff) {
292 munmap(ROM_COPY, 67108864);
293 if(mmap(ROM_COPY, 12582912,
294 PROT_READ | PROT_WRITE,
295 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
296 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
297 memcpy(ROM_COPY,rom,12582912);
298 rom_addr=(u_int)ROM_COPY;
299 }
300 #endif
301 if(addr) {
302 for(n=0x7F000;n<0x80000;n++) {
303 memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
304 }
305 }
306 }
307#endif
308}
309
310static u_int get_page(u_int vaddr)
311{
312#ifndef PCSX
313 u_int page=(vaddr^0x80000000)>>12;
314#else
315 u_int page=vaddr&~0xe0000000;
316 if (page < 0x1000000)
317 page &= ~0x0e00000; // RAM mirrors
318 page>>=12;
319#endif
320#ifndef DISABLE_TLB
321 if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
322#endif
323 if(page>2048) page=2048+(page&2047);
324 return page;
325}
326
327static u_int get_vpage(u_int vaddr)
328{
329 u_int vpage=(vaddr^0x80000000)>>12;
330#ifndef DISABLE_TLB
331 if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
332#endif
333 if(vpage>2048) vpage=2048+(vpage&2047);
334 return vpage;
335}
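
// Illustrative sketch (not part of the original source): on the PCSX build
// (the #ifdef PCSX path above), the KUSEG/KSEG0/KSEG1 segments and the 2MB
// RAM mirrors all fold onto the same page index, so a write seen through one
// mirror also invalidates blocks compiled from another.
#if 0
static void get_page_example(void)
{
  assert(get_page(0x00001234)==get_page(0x80001234)); // KUSEG vs KSEG0
  assert(get_page(0x80001234)==get_page(0xa0001234)); // KSEG0 vs KSEG1
  assert(get_page(0x80001234)==get_page(0x80201234)); // 2MB RAM mirror
}
#endif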
336
337// Get address from virtual address
338// This is called from the recompiled JR/JALR instructions
339void *get_addr(u_int vaddr)
340{
341 u_int page=get_page(vaddr);
342 u_int vpage=get_vpage(vaddr);
343 struct ll_entry *head;
344 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
345 head=jump_in[page];
346 while(head!=NULL) {
347 if(head->vaddr==vaddr&&head->reg32==0) {
348 //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
349 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
350 ht_bin[3]=ht_bin[1];
351 ht_bin[2]=ht_bin[0];
352 ht_bin[1]=(int)head->addr;
353 ht_bin[0]=vaddr;
354 return head->addr;
355 }
356 head=head->next;
357 }
358 head=jump_dirty[vpage];
359 while(head!=NULL) {
360 if(head->vaddr==vaddr&&head->reg32==0) {
361 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
362 // Don't restore blocks which are about to expire from the cache
363 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
364 if(verify_dirty(head->addr)) {
365 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
366 invalid_code[vaddr>>12]=0;
367 memory_map[vaddr>>12]|=0x40000000;
368 if(vpage<2048) {
369#ifndef DISABLE_TLB
370 if(tlb_LUT_r[vaddr>>12]) {
371 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
372 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
373 }
374#endif
375 restore_candidate[vpage>>3]|=1<<(vpage&7);
376 }
377 else restore_candidate[page>>3]|=1<<(page&7);
378 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
379 if(ht_bin[0]==vaddr) {
380 ht_bin[1]=(int)head->addr; // Replace existing entry
381 }
382 else
383 {
384 ht_bin[3]=ht_bin[1];
385 ht_bin[2]=ht_bin[0];
386 ht_bin[1]=(int)head->addr;
387 ht_bin[0]=vaddr;
388 }
389 return head->addr;
390 }
391 }
392 head=head->next;
393 }
394 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
395 int r=new_recompile_block(vaddr);
396 if(r==0) return get_addr(vaddr);
397 // Execute in unmapped page, generate page fault exception
398 Status|=2;
399 Cause=(vaddr<<31)|0x8;
400 EPC=(vaddr&1)?vaddr-5:vaddr;
401 BadVAddr=(vaddr&~1);
402 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
403 EntryHi=BadVAddr&0xFFFFE000;
404 return get_addr_ht(0x80000000);
405}
406// Look up address in hash table first
407void *get_addr_ht(u_int vaddr)
408{
409 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
410 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
411 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
412 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
413 return get_addr(vaddr);
414}
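
// Sketch of the hash_table bin layout used by get_addr()/get_addr_ht() above
// (illustrative only, not from the original source): each bin holds two
// (vaddr, compiled address) pairs, and a hit found via the page lists is
// promoted to slot 0, pushing the previous slot 0 down to slot 1.
#if 0
static void ht_insert_sketch(u_int vaddr,void *addr)
{
  u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  ht_bin[3]=ht_bin[1];      // old slot 0 becomes slot 1
  ht_bin[2]=ht_bin[0];
  ht_bin[1]=(u_int)addr;    // new entry goes into slot 0
  ht_bin[0]=vaddr;
}
#endif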
415
416void *get_addr_32(u_int vaddr,u_int flags)
417{
418#ifdef FORCE32
419 return get_addr(vaddr);
420#else
421 //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
422 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
423 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
424 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
425 u_int page=get_page(vaddr);
426 u_int vpage=get_vpage(vaddr);
427 struct ll_entry *head;
428 head=jump_in[page];
429 while(head!=NULL) {
430 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
431 //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
432 if(head->reg32==0) {
433 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
434 if(ht_bin[0]==-1) {
435 ht_bin[1]=(int)head->addr;
436 ht_bin[0]=vaddr;
437 }else if(ht_bin[2]==-1) {
438 ht_bin[3]=(int)head->addr;
439 ht_bin[2]=vaddr;
440 }
441 //ht_bin[3]=ht_bin[1];
442 //ht_bin[2]=ht_bin[0];
443 //ht_bin[1]=(int)head->addr;
444 //ht_bin[0]=vaddr;
445 }
446 return head->addr;
447 }
448 head=head->next;
449 }
450 head=jump_dirty[vpage];
451 while(head!=NULL) {
452 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
453 //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
454 // Don't restore blocks which are about to expire from the cache
455 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
456 if(verify_dirty(head->addr)) {
457 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
458 invalid_code[vaddr>>12]=0;
459 memory_map[vaddr>>12]|=0x40000000;
460 if(vpage<2048) {
461#ifndef DISABLE_TLB
462 if(tlb_LUT_r[vaddr>>12]) {
463 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
464 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
465 }
466#endif
467 restore_candidate[vpage>>3]|=1<<(vpage&7);
468 }
469 else restore_candidate[page>>3]|=1<<(page&7);
470 if(head->reg32==0) {
471 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
472 if(ht_bin[0]==-1) {
473 ht_bin[1]=(int)head->addr;
474 ht_bin[0]=vaddr;
475 }else if(ht_bin[2]==-1) {
476 ht_bin[3]=(int)head->addr;
477 ht_bin[2]=vaddr;
478 }
479 //ht_bin[3]=ht_bin[1];
480 //ht_bin[2]=ht_bin[0];
481 //ht_bin[1]=(int)head->addr;
482 //ht_bin[0]=vaddr;
483 }
484 return head->addr;
485 }
486 }
487 head=head->next;
488 }
489 //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
490 int r=new_recompile_block(vaddr);
491 if(r==0) return get_addr(vaddr);
492 // Execute in unmapped page, generate page fault exception
493 Status|=2;
494 Cause=(vaddr<<31)|0x8;
495 EPC=(vaddr&1)?vaddr-5:vaddr;
496 BadVAddr=(vaddr&~1);
497 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
498 EntryHi=BadVAddr&0xFFFFE000;
499 return get_addr_ht(0x80000000);
500#endif
501}
502
503void clear_all_regs(signed char regmap[])
504{
505 int hr;
506 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
507}
508
509signed char get_reg(signed char regmap[],int r)
510{
511 int hr;
512 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
513 return -1;
514}
515
516// Find a register that is available for two consecutive cycles
517signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
518{
519 int hr;
520 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
521 return -1;
522}
523
524int count_free_regs(signed char regmap[])
525{
526 int count=0;
527 int hr;
528 for(hr=0;hr<HOST_REGS;hr++)
529 {
530 if(hr!=EXCLUDE_REG) {
531 if(regmap[hr]<0) count++;
532 }
533 }
534 return count;
535}
536
537void dirty_reg(struct regstat *cur,signed char reg)
538{
539 int hr;
540 if(!reg) return;
541 for (hr=0;hr<HOST_REGS;hr++) {
542 if((cur->regmap[hr]&63)==reg) {
543 cur->dirty|=1<<hr;
544 }
545 }
546}
547
548// If we dirty the lower half of a 64-bit register which is now being
549// sign-extended, we need to dump the upper half.
550// Note: Do this only after completion of the instruction, because
551// some instructions may need to read the full 64-bit value even if
552// overwriting it (e.g. SLTI, DSRA32).
553static void flush_dirty_uppers(struct regstat *cur)
554{
555 int hr,reg;
556 for (hr=0;hr<HOST_REGS;hr++) {
557 if((cur->dirty>>hr)&1) {
558 reg=cur->regmap[hr];
559 if(reg>=64)
560 if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
561 }
562 }
563}
564
565void set_const(struct regstat *cur,signed char reg,uint64_t value)
566{
567 int hr;
568 if(!reg) return;
569 for (hr=0;hr<HOST_REGS;hr++) {
570 if(cur->regmap[hr]==reg) {
571 cur->isconst|=1<<hr;
572 cur->constmap[hr]=value;
573 }
574 else if((cur->regmap[hr]^64)==reg) {
575 cur->isconst|=1<<hr;
576 cur->constmap[hr]=value>>32;
577 }
578 }
579}
580
581void clear_const(struct regstat *cur,signed char reg)
582{
583 int hr;
584 if(!reg) return;
585 for (hr=0;hr<HOST_REGS;hr++) {
586 if((cur->regmap[hr]&63)==reg) {
587 cur->isconst&=~(1<<hr);
588 }
589 }
590}
591
592int is_const(struct regstat *cur,signed char reg)
593{
594 int hr;
595 if(!reg) return 1;
596 for (hr=0;hr<HOST_REGS;hr++) {
597 if((cur->regmap[hr]&63)==reg) {
598 return (cur->isconst>>hr)&1;
599 }
600 }
601 return 0;
602}
603uint64_t get_const(struct regstat *cur,signed char reg)
604{
605 int hr;
606 if(!reg) return 0;
607 for (hr=0;hr<HOST_REGS;hr++) {
608 if(cur->regmap[hr]==reg) {
609 return cur->constmap[hr];
610 }
611 }
612 printf("Unknown constant in r%d\n",reg);
613 exit(1);
614}
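
// Sketch of how the constant-tracking helpers above are used (illustrative
// only; the register number and values are made up): a host register holding
// a known value is flagged in isconst and its value kept in constmap, which
// lets immediate instructions be folded at compile time.
#if 0
static void const_track_sketch(struct regstat *cur)
{
  set_const(cur,2,0x80010000);   // e.g. after LUI r2,0x8001 (if r2 is mapped)
  if(is_const(cur,2)) {
    uint64_t v=get_const(cur,2);
    set_const(cur,2,v|0x1f80);   // fold a following ORI r2,r2,0x1f80
  }
  clear_const(cur,2);            // r2 later written by a non-constant op
}
#endif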
615
616// Least soon needed registers
617// Look at the next ten instructions and see which registers
618// will be used. Try not to reallocate these.
619void lsn(u_char hsn[], int i, int *preferred_reg)
620{
621 int j;
622 int b=-1;
623 for(j=0;j<9;j++)
624 {
625 if(i+j>=slen) {
626 j=slen-i-1;
627 break;
628 }
629 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
630 {
631 // Don't go past an unconditional jump
632 j++;
633 break;
634 }
635 }
636 for(;j>=0;j--)
637 {
638 if(rs1[i+j]) hsn[rs1[i+j]]=j;
639 if(rs2[i+j]) hsn[rs2[i+j]]=j;
640 if(rt1[i+j]) hsn[rt1[i+j]]=j;
641 if(rt2[i+j]) hsn[rt2[i+j]]=j;
642 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
643 // Stores can allocate zero
644 hsn[rs1[i+j]]=j;
645 hsn[rs2[i+j]]=j;
646 }
647 // On some architectures stores need invc_ptr
648 #if defined(HOST_IMM8)
649 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
650 hsn[INVCP]=j;
651 }
652 #endif
653 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
654 {
655 hsn[CCREG]=j;
656 b=j;
657 }
658 }
659 if(b>=0)
660 {
661 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
662 {
663 // Follow first branch
664 int t=(ba[i+b]-start)>>2;
665 j=7-b;if(t+j>=slen) j=slen-t-1;
666 for(;j>=0;j--)
667 {
668 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
669 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
670 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
671 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
672 }
673 }
674 // TODO: preferred register based on backward branch
675 }
676 // Delay slot should preferably not overwrite branch conditions or cycle count
677 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
678 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
679 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
680 hsn[CCREG]=1;
681 // ...or hash tables
682 hsn[RHASH]=1;
683 hsn[RHTBL]=1;
684 }
685 // Coprocessor load/store needs FTEMP, even if not declared
686 if(itype[i]==C1LS||itype[i]==C2LS) {
687 hsn[FTEMP]=0;
688 }
689 // Load L/R also uses FTEMP as a temporary register
690 if(itype[i]==LOADLR) {
691 hsn[FTEMP]=0;
692 }
693 // Also SWL/SWR/SDL/SDR
694 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
695 hsn[FTEMP]=0;
696 }
697 // Don't remove the TLB registers either
698 if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
699 hsn[TLREG]=0;
700 }
701 // Don't remove the miniht registers
702 if(itype[i]==UJUMP||itype[i]==RJUMP)
703 {
704 hsn[RHASH]=0;
705 hsn[RHTBL]=0;
706 }
707}
708
709// We only want to allocate registers if we're going to use them again soon
710int needed_again(int r, int i)
711{
712 int j;
713 int b=-1;
714 int rn=10;
715 int hr;
716 u_char hsn[MAXREG+1];
717 int preferred_reg;
718
719 memset(hsn,10,sizeof(hsn));
720 lsn(hsn,i,&preferred_reg);
721
722 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
723 {
724 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
725 return 0; // Don't need any registers if exiting the block
726 }
727 for(j=0;j<9;j++)
728 {
729 if(i+j>=slen) {
730 j=slen-i-1;
731 break;
732 }
733 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
734 {
735 // Don't go past an unconditional jump
736 j++;
737 break;
738 }
739 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
740 {
741 break;
742 }
743 }
744 for(;j>=1;j--)
745 {
746 if(rs1[i+j]==r) rn=j;
747 if(rs2[i+j]==r) rn=j;
748 if((unneeded_reg[i+j]>>r)&1) rn=10;
749 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
750 {
751 b=j;
752 }
753 }
754 /*
755 if(b>=0)
756 {
757 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
758 {
759 // Follow first branch
760 int o=rn;
761 int t=(ba[i+b]-start)>>2;
762 j=7-b;if(t+j>=slen) j=slen-t-1;
763 for(;j>=0;j--)
764 {
765 if(!((unneeded_reg[t+j]>>r)&1)) {
766 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
767 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
768 }
769 else rn=o;
770 }
771 }
772 }*/
773 for(hr=0;hr<HOST_REGS;hr++) {
774 if(hr!=EXCLUDE_REG) {
775 if(rn<hsn[hr]) return 1;
776 }
777 }
778 return 0;
779}
780
781// Try to match register allocations at the end of a loop with those
782// at the beginning
783int loop_reg(int i, int r, int hr)
784{
785 int j,k;
786 for(j=0;j<9;j++)
787 {
788 if(i+j>=slen) {
789 j=slen-i-1;
790 break;
791 }
792 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
793 {
794 // Don't go past an unconditional jump
795 j++;
796 break;
797 }
798 }
799 k=0;
800 if(i>0){
801 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
802 k--;
803 }
804 for(;k<j;k++)
805 {
806 if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
807 if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
808 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
809 {
810 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
811 {
812 int t=(ba[i+k]-start)>>2;
813 int reg=get_reg(regs[t].regmap_entry,r);
814 if(reg>=0) return reg;
815 //reg=get_reg(regs[t+1].regmap_entry,r);
816 //if(reg>=0) return reg;
817 }
818 }
819 }
820 return hr;
821}
822
823
824// Allocate every register, preserving source/target regs
825void alloc_all(struct regstat *cur,int i)
826{
827 int hr;
828
829 for(hr=0;hr<HOST_REGS;hr++) {
830 if(hr!=EXCLUDE_REG) {
831 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
832 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
833 {
834 cur->regmap[hr]=-1;
835 cur->dirty&=~(1<<hr);
836 }
837 // Don't need zeros
838 if((cur->regmap[hr]&63)==0)
839 {
840 cur->regmap[hr]=-1;
841 cur->dirty&=~(1<<hr);
842 }
843 }
844 }
845}
846
847
848void div64(int64_t dividend,int64_t divisor)
849{
850 lo=dividend/divisor;
851 hi=dividend%divisor;
852 //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
853 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
854}
855void divu64(uint64_t dividend,uint64_t divisor)
856{
857 lo=dividend/divisor;
858 hi=dividend%divisor;
859 //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
860 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
861}
862
863void mult64(uint64_t m1,uint64_t m2)
864{
865 unsigned long long int op1, op2, op3, op4;
866 unsigned long long int result1, result2, result3, result4;
867 unsigned long long int temp1, temp2, temp3, temp4;
868 int sign = 0;
869
870 if ((int64_t)m1 < 0) // compare as signed: the operands carry signed 64-bit values
871 {
872 op2 = -m1;
873 sign = 1 - sign;
874 }
875 else op2 = m1;
876 if ((int64_t)m2 < 0)
877 {
878 op4 = -m2;
879 sign = 1 - sign;
880 }
881 else op4 = m2;
882
883 op1 = op2 & 0xFFFFFFFF;
884 op2 = (op2 >> 32) & 0xFFFFFFFF;
885 op3 = op4 & 0xFFFFFFFF;
886 op4 = (op4 >> 32) & 0xFFFFFFFF;
887
888 temp1 = op1 * op3;
889 temp2 = (temp1 >> 32) + op1 * op4;
890 temp3 = op2 * op3;
891 temp4 = (temp3 >> 32) + op2 * op4;
892
893 result1 = temp1 & 0xFFFFFFFF;
894 result2 = temp2 + (temp3 & 0xFFFFFFFF);
895 result3 = (result2 >> 32) + temp4;
896 result4 = (result3 >> 32);
897
898 lo = result1 | (result2 << 32);
899 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
900 if (sign)
901 {
902 hi = ~hi;
903 if (!lo) hi++;
904 else lo = ~lo + 1;
905 }
906}
907
908void multu64(uint64_t m1,uint64_t m2)
909{
910 unsigned long long int op1, op2, op3, op4;
911 unsigned long long int result1, result2, result3, result4;
912 unsigned long long int temp1, temp2, temp3, temp4;
913
914 op1 = m1 & 0xFFFFFFFF;
915 op2 = (m1 >> 32) & 0xFFFFFFFF;
916 op3 = m2 & 0xFFFFFFFF;
917 op4 = (m2 >> 32) & 0xFFFFFFFF;
918
919 temp1 = op1 * op3;
920 temp2 = (temp1 >> 32) + op1 * op4;
921 temp3 = op2 * op3;
922 temp4 = (temp3 >> 32) + op2 * op4;
923
924 result1 = temp1 & 0xFFFFFFFF;
925 result2 = temp2 + (temp3 & 0xFFFFFFFF);
926 result3 = (result2 >> 32) + temp4;
927 result4 = (result3 >> 32);
928
929 lo = result1 | (result2 << 32);
930 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
931
932 //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
933 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
934}
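
// Illustrative self-check of multu64() against 128-bit compiler arithmetic
// (not part of the original source; assumes a GCC/Clang host that provides
// unsigned __int128). It only demonstrates what the partial-product code
// above computes: hi:lo = m1*m2.
#if 0
static void multu64_selftest(void)
{
  uint64_t m1=0x123456789abcdef0ULL, m2=0xfedcba9876543210ULL;
  unsigned __int128 p=(unsigned __int128)m1*m2;
  multu64(m1,m2);
  assert((uint64_t)lo==(uint64_t)p);
  assert((uint64_t)hi==(uint64_t)(p>>64));
}
#endif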
935
936uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
937{
938 if(bits) {
939 original<<=64-bits;
940 original>>=64-bits;
941 loaded<<=bits;
942 original|=loaded;
943 }
944 else original=loaded;
945 return original;
946}
947uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
948{
949 if(bits^56) {
950 original>>=64-(bits^56);
951 original<<=64-(bits^56);
952 loaded>>=bits^56;
953 original|=loaded;
954 }
955 else original=loaded;
956 return original;
957}
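
// Worked example of the LDL merge above (illustrative, not from the original
// source): with bits=16 the low 16 bits of the old register value are kept
// and the loaded data is shifted in above them.
#if 0
static void ldl_merge_example(void)
{
  uint64_t r=ldl_merge(0x1111222233334444ULL,0x00000000aaaabbbbULL,16);
  assert(r==0x0000aaaabbbb4444ULL);
}
#endif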
958
959#ifdef __i386__
960#include "assem_x86.c"
961#endif
962#ifdef __x86_64__
963#include "assem_x64.c"
964#endif
965#ifdef __arm__
966#include "assem_arm.c"
967#endif
968
969// Add virtual address mapping to linked list
970void ll_add(struct ll_entry **head,int vaddr,void *addr)
971{
972 struct ll_entry *new_entry;
973 new_entry=malloc(sizeof(struct ll_entry));
974 assert(new_entry!=NULL);
975 new_entry->vaddr=vaddr;
976 new_entry->reg32=0;
977 new_entry->addr=addr;
978 new_entry->next=*head;
979 *head=new_entry;
980}
981
982// Add virtual address mapping for 32-bit compiled block
983void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
984{
985 ll_add(head,vaddr,addr);
986#ifndef FORCE32
987 (*head)->reg32=reg32;
988#endif
989}
990
991// Check if an address is already compiled
992// but don't return addresses which are about to expire from the cache
993void *check_addr(u_int vaddr)
994{
995 u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
996 if(ht_bin[0]==vaddr) {
997 if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
998 if(isclean(ht_bin[1])) return (void *)ht_bin[1];
999 }
1000 if(ht_bin[2]==vaddr) {
1001 if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1002 if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1003 }
1004 u_int page=get_page(vaddr);
1005 struct ll_entry *head;
1006 head=jump_in[page];
1007 while(head!=NULL) {
1008 if(head->vaddr==vaddr&&head->reg32==0) {
1009 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1010 // Update existing entry with current address
1011 if(ht_bin[0]==vaddr) {
1012 ht_bin[1]=(int)head->addr;
1013 return head->addr;
1014 }
1015 if(ht_bin[2]==vaddr) {
1016 ht_bin[3]=(int)head->addr;
1017 return head->addr;
1018 }
1019 // Insert into hash table with low priority.
1020 // Don't evict existing entries, as they are probably
1021 // addresses that are being accessed frequently.
1022 if(ht_bin[0]==-1) {
1023 ht_bin[1]=(int)head->addr;
1024 ht_bin[0]=vaddr;
1025 }else if(ht_bin[2]==-1) {
1026 ht_bin[3]=(int)head->addr;
1027 ht_bin[2]=vaddr;
1028 }
1029 return head->addr;
1030 }
1031 }
1032 head=head->next;
1033 }
1034 return 0;
1035}
1036
1037void remove_hash(int vaddr)
1038{
1039 //printf("remove hash: %x\n",vaddr);
1040 int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1041 if(ht_bin[2]==vaddr) {
1042 ht_bin[2]=ht_bin[3]=-1;
1043 }
1044 if(ht_bin[0]==vaddr) {
1045 ht_bin[0]=ht_bin[2];
1046 ht_bin[1]=ht_bin[3];
1047 ht_bin[2]=ht_bin[3]=-1;
1048 }
1049}
1050
1051void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1052{
1053 struct ll_entry *next;
1054 while(*head) {
1055 if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1056 ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1057 {
1058 inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1059 remove_hash((*head)->vaddr);
1060 next=(*head)->next;
1061 free(*head);
1062 *head=next;
1063 }
1064 else
1065 {
1066 head=&((*head)->next);
1067 }
1068 }
1069}
1070
1071// Remove all entries from linked list
1072void ll_clear(struct ll_entry **head)
1073{
1074 struct ll_entry *cur;
1075 struct ll_entry *next;
1076 if(cur=*head) {
1077 *head=0;
1078 while(cur) {
1079 next=cur->next;
1080 free(cur);
1081 cur=next;
1082 }
1083 }
1084}
1085
1086// Dereference the pointers and kill the jump if it points into the given range
1087void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1088{
1089 while(head) {
1090 int ptr=get_pointer(head->addr);
1091 inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1092 if(((ptr>>shift)==(addr>>shift)) ||
1093 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1094 {
1095 inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1096 u_int host_addr=(u_int)kill_pointer(head->addr);
1097 #ifdef __arm__
1098 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1099 #endif
1100 }
1101 head=head->next;
1102 }
1103}
1104
1105// This is called when we write to a compiled block (see do_invstub)
1106void invalidate_page(u_int page)
1107{
1108 struct ll_entry *head;
1109 struct ll_entry *next;
1110 head=jump_in[page];
1111 jump_in[page]=0;
1112 while(head!=NULL) {
1113 inv_debug("INVALIDATE: %x\n",head->vaddr);
1114 remove_hash(head->vaddr);
1115 next=head->next;
1116 free(head);
1117 head=next;
1118 }
1119 head=jump_out[page];
1120 jump_out[page]=0;
1121 while(head!=NULL) {
1122 inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1123 u_int host_addr=(u_int)kill_pointer(head->addr);
1124 #ifdef __arm__
1125 needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1126 #endif
1127 next=head->next;
1128 free(head);
1129 head=next;
1130 }
1131}
1132void invalidate_block(u_int block)
1133{
1134 u_int page=get_page(block<<12);
1135 u_int vpage=get_vpage(block<<12);
1136 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1137 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1138 u_int first,last;
1139 first=last=page;
1140 struct ll_entry *head;
1141 head=jump_dirty[vpage];
1142 //printf("page=%d vpage=%d\n",page,vpage);
1143 while(head!=NULL) {
1144 u_int start,end;
1145 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1146 get_bounds((int)head->addr,&start,&end);
1147 //printf("start: %x end: %x\n",start,end);
1148 if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1149 if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1150 if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1151 if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1152 }
1153 }
1154#ifndef DISABLE_TLB
1155 if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1156 if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1157 if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1158 if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1159 }
1160 }
1161#endif
1162 }
1163 head=head->next;
1164 }
1165 //printf("first=%d last=%d\n",first,last);
1166 invalidate_page(page);
1167 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1168 assert(last<page+5);
1169 // Invalidate the adjacent pages if a block crosses a 4K boundary
1170 while(first<page) {
1171 invalidate_page(first);
1172 first++;
1173 }
1174 for(first=page+1;first<last;first++) {
1175 invalidate_page(first);
1176 }
1177 #ifdef __arm__
1178 do_clear_cache();
1179 #endif
1180
1181 // Don't trap writes
1182 invalid_code[block]=1;
1183#ifndef DISABLE_TLB
1184 // If there is a valid TLB entry for this page, remove write protect
1185 if(tlb_LUT_w[block]) {
1186 assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1187 // CHECK: Is this right?
1188 memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1189 u_int real_block=tlb_LUT_w[block]>>12;
1190 invalid_code[real_block]=1;
1191 if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1192 }
1193 else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1194#endif
1195
1196 #ifdef USE_MINI_HT
1197 memset(mini_ht,-1,sizeof(mini_ht));
1198 #endif
1199}
1200void invalidate_addr(u_int addr)
1201{
1202 invalidate_block(addr>>12);
1203}
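
// Typical invalidation flow (sketch, not part of the original source): when a
// store hits a page that holds compiled code, the write handler or invstub
// passes the written address here, which drops every block compiled from that
// 4K page (and its neighbours if a block crossed the page boundary).
#if 0
static void on_write_to_code(u_int addr)
{
  if(!invalid_code[addr>>12])
    invalidate_addr(addr);   // == invalidate_block(addr>>12)
}
#endif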
1204// This is called when loading a save state.
1205// Anything could have changed, so invalidate everything.
1206void invalidate_all_pages()
1207{
1208 u_int page,n;
1209 for(page=0;page<4096;page++)
1210 invalidate_page(page);
1211 for(page=0;page<1048576;page++)
1212 if(!invalid_code[page]) {
1213 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1214 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1215 }
1216 #ifdef __arm__
1217 __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1218 #endif
1219 #ifdef USE_MINI_HT
1220 memset(mini_ht,-1,sizeof(mini_ht));
1221 #endif
1222 #ifndef DISABLE_TLB
1223 // TLB
1224 for(page=0;page<0x100000;page++) {
1225 if(tlb_LUT_r[page]) {
1226 memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1227 if(!tlb_LUT_w[page]||!invalid_code[page])
1228 memory_map[page]|=0x40000000; // Write protect
1229 }
1230 else memory_map[page]=-1;
1231 if(page==0x80000) page=0xC0000;
1232 }
1233 tlb_hacks();
1234 #endif
1235}
1236
1237// Add an entry to jump_out after making a link
1238void add_link(u_int vaddr,void *src)
1239{
1240 u_int page=get_page(vaddr);
1241 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1242 ll_add(jump_out+page,vaddr,src);
1243 //int ptr=get_pointer(src);
1244 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1245}
1246
1247// If a code block was found to be unmodified (bit was set in
1248// restore_candidate) and it remains unmodified (bit is clear
1249// in invalid_code) then move the entries for that 4K page from
1250// the dirty list to the clean list.
1251void clean_blocks(u_int page)
1252{
1253 struct ll_entry *head;
1254 inv_debug("INV: clean_blocks page=%d\n",page);
1255 head=jump_dirty[page];
1256 while(head!=NULL) {
1257 if(!invalid_code[head->vaddr>>12]) {
1258 // Don't restore blocks which are about to expire from the cache
1259 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1260 u_int start,end;
1261 if(verify_dirty((int)head->addr)) {
1262 //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1263 u_int i;
1264 u_int inv=0;
1265 get_bounds((int)head->addr,&start,&end);
1266 if(start-(u_int)rdram<RAM_SIZE) {
1267 for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1268 inv|=invalid_code[i];
1269 }
1270 }
1271 if((signed int)head->vaddr>=(signed int)0xC0000000) {
1272 u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1273 //printf("addr=%x start=%x end=%x\n",addr,start,end);
1274 if(addr<start||addr>=end) inv=1;
1275 }
1276 else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1277 inv=1;
1278 }
1279 if(!inv) {
1280 void * clean_addr=(void *)get_clean_addr((int)head->addr);
1281 if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1282 u_int ppage=page;
1283#ifndef DISABLE_TLB
1284 if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1285#endif
1286 inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1287 //printf("page=%x, addr=%x\n",page,head->vaddr);
1288 //assert(head->vaddr>>12==(page|0x80000));
1289 ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1290 int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1291 if(!head->reg32) {
1292 if(ht_bin[0]==head->vaddr) {
1293 ht_bin[1]=(int)clean_addr; // Replace existing entry
1294 }
1295 if(ht_bin[2]==head->vaddr) {
1296 ht_bin[3]=(int)clean_addr; // Replace existing entry
1297 }
1298 }
1299 }
1300 }
1301 }
1302 }
1303 }
1304 head=head->next;
1305 }
1306}
1307
1308
1309void mov_alloc(struct regstat *current,int i)
1310{
1311 // Note: Don't need to actually alloc the source registers
1312 if((~current->is32>>rs1[i])&1) {
1313 //alloc_reg64(current,i,rs1[i]);
1314 alloc_reg64(current,i,rt1[i]);
1315 current->is32&=~(1LL<<rt1[i]);
1316 } else {
1317 //alloc_reg(current,i,rs1[i]);
1318 alloc_reg(current,i,rt1[i]);
1319 current->is32|=(1LL<<rt1[i]);
1320 }
1321 clear_const(current,rs1[i]);
1322 clear_const(current,rt1[i]);
1323 dirty_reg(current,rt1[i]);
1324}
1325
1326void shiftimm_alloc(struct regstat *current,int i)
1327{
1328 clear_const(current,rs1[i]);
1329 clear_const(current,rt1[i]);
1330 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1331 {
1332 if(rt1[i]) {
1333 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1334 else lt1[i]=rs1[i];
1335 alloc_reg(current,i,rt1[i]);
1336 current->is32|=1LL<<rt1[i];
1337 dirty_reg(current,rt1[i]);
1338 }
1339 }
1340 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1341 {
1342 if(rt1[i]) {
1343 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1344 alloc_reg64(current,i,rt1[i]);
1345 current->is32&=~(1LL<<rt1[i]);
1346 dirty_reg(current,rt1[i]);
1347 }
1348 }
1349 if(opcode2[i]==0x3c) // DSLL32
1350 {
1351 if(rt1[i]) {
1352 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1353 alloc_reg64(current,i,rt1[i]);
1354 current->is32&=~(1LL<<rt1[i]);
1355 dirty_reg(current,rt1[i]);
1356 }
1357 }
1358 if(opcode2[i]==0x3e) // DSRL32
1359 {
1360 if(rt1[i]) {
1361 alloc_reg64(current,i,rs1[i]);
1362 if(imm[i]==32) {
1363 alloc_reg64(current,i,rt1[i]);
1364 current->is32&=~(1LL<<rt1[i]);
1365 } else {
1366 alloc_reg(current,i,rt1[i]);
1367 current->is32|=1LL<<rt1[i];
1368 }
1369 dirty_reg(current,rt1[i]);
1370 }
1371 }
1372 if(opcode2[i]==0x3f) // DSRA32
1373 {
1374 if(rt1[i]) {
1375 alloc_reg64(current,i,rs1[i]);
1376 alloc_reg(current,i,rt1[i]);
1377 current->is32|=1LL<<rt1[i];
1378 dirty_reg(current,rt1[i]);
1379 }
1380 }
1381}
1382
1383void shift_alloc(struct regstat *current,int i)
1384{
1385 if(rt1[i]) {
1386 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1387 {
1388 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1389 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1390 alloc_reg(current,i,rt1[i]);
1391 if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1392 current->is32|=1LL<<rt1[i];
1393 } else { // DSLLV/DSRLV/DSRAV
1394 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1395 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1396 alloc_reg64(current,i,rt1[i]);
1397 current->is32&=~(1LL<<rt1[i]);
1398 if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1399 alloc_reg_temp(current,i,-1);
1400 }
1401 clear_const(current,rs1[i]);
1402 clear_const(current,rs2[i]);
1403 clear_const(current,rt1[i]);
1404 dirty_reg(current,rt1[i]);
1405 }
1406}
1407
1408void alu_alloc(struct regstat *current,int i)
1409{
1410 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1411 if(rt1[i]) {
1412 if(rs1[i]&&rs2[i]) {
1413 alloc_reg(current,i,rs1[i]);
1414 alloc_reg(current,i,rs2[i]);
1415 }
1416 else {
1417 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1418 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1419 }
1420 alloc_reg(current,i,rt1[i]);
1421 }
1422 current->is32|=1LL<<rt1[i];
1423 }
1424 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1425 if(rt1[i]) {
1426 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1427 {
1428 alloc_reg64(current,i,rs1[i]);
1429 alloc_reg64(current,i,rs2[i]);
1430 alloc_reg(current,i,rt1[i]);
1431 } else {
1432 alloc_reg(current,i,rs1[i]);
1433 alloc_reg(current,i,rs2[i]);
1434 alloc_reg(current,i,rt1[i]);
1435 }
1436 }
1437 current->is32|=1LL<<rt1[i];
1438 }
1439 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1440 if(rt1[i]) {
1441 if(rs1[i]&&rs2[i]) {
1442 alloc_reg(current,i,rs1[i]);
1443 alloc_reg(current,i,rs2[i]);
1444 }
1445 else
1446 {
1447 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1448 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1449 }
1450 alloc_reg(current,i,rt1[i]);
1451 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1452 {
1453 if(!((current->uu>>rt1[i])&1)) {
1454 alloc_reg64(current,i,rt1[i]);
1455 }
1456 if(get_reg(current->regmap,rt1[i]|64)>=0) {
1457 if(rs1[i]&&rs2[i]) {
1458 alloc_reg64(current,i,rs1[i]);
1459 alloc_reg64(current,i,rs2[i]);
1460 }
1461 else
1462 {
1463 // Is it really worth it to keep 64-bit values in registers?
1464 #ifdef NATIVE_64BIT
1465 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1466 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1467 #endif
1468 }
1469 }
1470 current->is32&=~(1LL<<rt1[i]);
1471 } else {
1472 current->is32|=1LL<<rt1[i];
1473 }
1474 }
1475 }
1476 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1477 if(rt1[i]) {
1478 if(rs1[i]&&rs2[i]) {
1479 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1480 alloc_reg64(current,i,rs1[i]);
1481 alloc_reg64(current,i,rs2[i]);
1482 alloc_reg64(current,i,rt1[i]);
1483 } else {
1484 alloc_reg(current,i,rs1[i]);
1485 alloc_reg(current,i,rs2[i]);
1486 alloc_reg(current,i,rt1[i]);
1487 }
1488 }
1489 else {
1490 alloc_reg(current,i,rt1[i]);
1491 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1492 // DADD used as move, or zeroing
1493 // If we have a 64-bit source, then make the target 64 bits too
1494 if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1495 if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1496 alloc_reg64(current,i,rt1[i]);
1497 } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1498 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1499 alloc_reg64(current,i,rt1[i]);
1500 }
1501 if(opcode2[i]>=0x2e&&rs2[i]) {
1502 // DSUB used as negation - 64-bit result
1503 // If we have a 32-bit register, extend it to 64 bits
1504 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1505 alloc_reg64(current,i,rt1[i]);
1506 }
1507 }
1508 }
1509 if(rs1[i]&&rs2[i]) {
1510 current->is32&=~(1LL<<rt1[i]);
1511 } else if(rs1[i]) {
1512 current->is32&=~(1LL<<rt1[i]);
1513 if((current->is32>>rs1[i])&1)
1514 current->is32|=1LL<<rt1[i];
1515 } else if(rs2[i]) {
1516 current->is32&=~(1LL<<rt1[i]);
1517 if((current->is32>>rs2[i])&1)
1518 current->is32|=1LL<<rt1[i];
1519 } else {
1520 current->is32|=1LL<<rt1[i];
1521 }
1522 }
1523 }
1524 clear_const(current,rs1[i]);
1525 clear_const(current,rs2[i]);
1526 clear_const(current,rt1[i]);
1527 dirty_reg(current,rt1[i]);
1528}
1529
1530void imm16_alloc(struct regstat *current,int i)
1531{
1532 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1533 else lt1[i]=rs1[i];
1534 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1535 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1536 current->is32&=~(1LL<<rt1[i]);
1537 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1538 // TODO: Could preserve the 32-bit flag if the immediate is zero
1539 alloc_reg64(current,i,rt1[i]);
1540 alloc_reg64(current,i,rs1[i]);
1541 }
1542 clear_const(current,rs1[i]);
1543 clear_const(current,rt1[i]);
1544 }
1545 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1546 if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1547 current->is32|=1LL<<rt1[i];
1548 clear_const(current,rs1[i]);
1549 clear_const(current,rt1[i]);
1550 }
1551 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1552 if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1553 if(rs1[i]!=rt1[i]) {
1554 if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1555 alloc_reg64(current,i,rt1[i]);
1556 current->is32&=~(1LL<<rt1[i]);
1557 }
1558 }
1559 else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1560 if(is_const(current,rs1[i])) {
1561 int v=get_const(current,rs1[i]);
1562 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1563 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1564 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1565 }
1566 else clear_const(current,rt1[i]);
1567 }
1568 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1569 if(is_const(current,rs1[i])) {
1570 int v=get_const(current,rs1[i]);
1571 set_const(current,rt1[i],v+imm[i]);
1572 }
1573 else clear_const(current,rt1[i]);
1574 current->is32|=1LL<<rt1[i];
1575 }
1576 else {
1577 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1578 current->is32|=1LL<<rt1[i];
1579 }
1580 dirty_reg(current,rt1[i]);
1581}
1582
1583void load_alloc(struct regstat *current,int i)
1584{
1585 clear_const(current,rt1[i]);
1586 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1587 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1588 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1589 if(rt1[i]) {
1590 alloc_reg(current,i,rt1[i]);
1591 if(get_reg(current->regmap,rt1[i])<0) {
1592 // dummy load, but we still need a register to calculate the address
1593 alloc_reg_temp(current,i,-1);
1594 }
1595 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1596 {
1597 current->is32&=~(1LL<<rt1[i]);
1598 alloc_reg64(current,i,rt1[i]);
1599 }
1600 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1601 {
1602 current->is32&=~(1LL<<rt1[i]);
1603 alloc_reg64(current,i,rt1[i]);
1604 alloc_all(current,i);
1605 alloc_reg64(current,i,FTEMP);
1606 }
1607 else current->is32|=1LL<<rt1[i];
1608 dirty_reg(current,rt1[i]);
1609 // If using TLB, need a register for pointer to the mapping table
1610 if(using_tlb) alloc_reg(current,i,TLREG);
1611 // LWL/LWR need a temporary register for the old value
1612 if(opcode[i]==0x22||opcode[i]==0x26)
1613 {
1614 alloc_reg(current,i,FTEMP);
1615 alloc_reg_temp(current,i,-1);
1616 }
1617 }
1618 else
1619 {
1620 // Load to r0 (dummy load)
1621 // but we still need a register to calculate the address
1622 if(opcode[i]==0x22||opcode[i]==0x26)
1623 {
1624 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1625 }
1626 alloc_reg_temp(current,i,-1);
1627 if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1628 {
1629 alloc_all(current,i);
1630 alloc_reg64(current,i,FTEMP);
1631 }
1632 }
1633}
1634
1635void store_alloc(struct regstat *current,int i)
1636{
1637 clear_const(current,rs2[i]);
1638 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1639 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1640 alloc_reg(current,i,rs2[i]);
1641 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1642 alloc_reg64(current,i,rs2[i]);
1643 if(rs2[i]) alloc_reg(current,i,FTEMP);
1644 }
1645 // If using TLB, need a register for pointer to the mapping table
1646 if(using_tlb) alloc_reg(current,i,TLREG);
1647 #if defined(HOST_IMM8)
1648 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1649 else alloc_reg(current,i,INVCP);
1650 #endif
1651 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1652 alloc_reg(current,i,FTEMP);
1653 }
1654 // We need a temporary register for address generation
1655 alloc_reg_temp(current,i,-1);
1656}
1657
1658void c1ls_alloc(struct regstat *current,int i)
1659{
1660 //clear_const(current,rs1[i]); // FIXME
1661 clear_const(current,rt1[i]);
1662 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1663 alloc_reg(current,i,CSREG); // Status
1664 alloc_reg(current,i,FTEMP);
1665 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1666 alloc_reg64(current,i,FTEMP);
1667 }
1668 // If using TLB, need a register for pointer to the mapping table
1669 if(using_tlb) alloc_reg(current,i,TLREG);
1670 #if defined(HOST_IMM8)
1671 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1672 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1673 alloc_reg(current,i,INVCP);
1674 #endif
1675 // We need a temporary register for address generation
1676 alloc_reg_temp(current,i,-1);
1677}
1678
1679void c2ls_alloc(struct regstat *current,int i)
1680{
1681 clear_const(current,rt1[i]);
1682 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1683 alloc_reg(current,i,FTEMP);
1684 // If using TLB, need a register for pointer to the mapping table
1685 if(using_tlb) alloc_reg(current,i,TLREG);
1686 #if defined(HOST_IMM8)
1687 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1688 else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1689 alloc_reg(current,i,INVCP);
1690 #endif
1691 // We need a temporary register for address generation
1692 alloc_reg_temp(current,i,-1);
1693}
1694
1695#ifndef multdiv_alloc
1696void multdiv_alloc(struct regstat *current,int i)
1697{
1698 // case 0x18: MULT
1699 // case 0x19: MULTU
1700 // case 0x1A: DIV
1701 // case 0x1B: DIVU
1702 // case 0x1C: DMULT
1703 // case 0x1D: DMULTU
1704 // case 0x1E: DDIV
1705 // case 0x1F: DDIVU
1706 clear_const(current,rs1[i]);
1707 clear_const(current,rs2[i]);
1708 if(rs1[i]&&rs2[i])
1709 {
1710 if((opcode2[i]&4)==0) // 32-bit
1711 {
1712 current->u&=~(1LL<<HIREG);
1713 current->u&=~(1LL<<LOREG);
1714 alloc_reg(current,i,HIREG);
1715 alloc_reg(current,i,LOREG);
1716 alloc_reg(current,i,rs1[i]);
1717 alloc_reg(current,i,rs2[i]);
1718 current->is32|=1LL<<HIREG;
1719 current->is32|=1LL<<LOREG;
1720 dirty_reg(current,HIREG);
1721 dirty_reg(current,LOREG);
1722 }
1723 else // 64-bit
1724 {
1725 current->u&=~(1LL<<HIREG);
1726 current->u&=~(1LL<<LOREG);
1727 current->uu&=~(1LL<<HIREG);
1728 current->uu&=~(1LL<<LOREG);
1729 alloc_reg64(current,i,HIREG);
1730 //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1731 alloc_reg64(current,i,rs1[i]);
1732 alloc_reg64(current,i,rs2[i]);
1733 alloc_all(current,i);
1734 current->is32&=~(1LL<<HIREG);
1735 current->is32&=~(1LL<<LOREG);
1736 dirty_reg(current,HIREG);
1737 dirty_reg(current,LOREG);
1738 }
1739 }
1740 else
1741 {
1742 // Multiply by zero is zero.
1743 // MIPS does not have a divide by zero exception.
1744 // The result is undefined, so we return zero.
1745 alloc_reg(current,i,HIREG);
1746 alloc_reg(current,i,LOREG);
1747 current->is32|=1LL<<HIREG;
1748 current->is32|=1LL<<LOREG;
1749 dirty_reg(current,HIREG);
1750 dirty_reg(current,LOREG);
1751 }
1752}
1753#endif
1754
1755void cop0_alloc(struct regstat *current,int i)
1756{
1757 if(opcode2[i]==0) // MFC0
1758 {
1759 if(rt1[i]) {
1760 clear_const(current,rt1[i]);
1761 alloc_all(current,i);
1762 alloc_reg(current,i,rt1[i]);
1763 current->is32|=1LL<<rt1[i];
1764 dirty_reg(current,rt1[i]);
1765 }
1766 }
1767 else if(opcode2[i]==4) // MTC0
1768 {
1769 if(rs1[i]){
1770 clear_const(current,rs1[i]);
1771 alloc_reg(current,i,rs1[i]);
1772 alloc_all(current,i);
1773 }
1774 else {
1775 alloc_all(current,i); // FIXME: Keep r0
1776 current->u&=~1LL;
1777 alloc_reg(current,i,0);
1778 }
1779 }
1780 else
1781 {
1782 // TLBR/TLBWI/TLBWR/TLBP/ERET
1783 assert(opcode2[i]==0x10);
1784 alloc_all(current,i);
1785 }
1786}
1787
1788void cop1_alloc(struct regstat *current,int i)
1789{
1790 alloc_reg(current,i,CSREG); // Load status
1791 if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1792 {
1793 if(rt1[i]){
1794 clear_const(current,rt1[i]);
1795 if(opcode2[i]==1) {
1796 alloc_reg64(current,i,rt1[i]); // DMFC1
1797 current->is32&=~(1LL<<rt1[i]);
1798 }else{
1799 alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1800 current->is32|=1LL<<rt1[i];
1801 }
1802 dirty_reg(current,rt1[i]);
1803 }
1804 alloc_reg_temp(current,i,-1);
1805 }
1806 else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1807 {
1808 if(rs1[i]){
1809 clear_const(current,rs1[i]);
1810 if(opcode2[i]==5)
1811 alloc_reg64(current,i,rs1[i]); // DMTC1
1812 else
1813 alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1814 alloc_reg_temp(current,i,-1);
1815 }
1816 else {
1817 current->u&=~1LL;
1818 alloc_reg(current,i,0);
1819 alloc_reg_temp(current,i,-1);
1820 }
1821 }
1822}
1823void fconv_alloc(struct regstat *current,int i)
1824{
1825 alloc_reg(current,i,CSREG); // Load status
1826 alloc_reg_temp(current,i,-1);
1827}
1828void float_alloc(struct regstat *current,int i)
1829{
1830 alloc_reg(current,i,CSREG); // Load status
1831 alloc_reg_temp(current,i,-1);
1832}
1833void c2op_alloc(struct regstat *current,int i)
1834{
1835 alloc_reg_temp(current,i,-1);
1836}
1837void fcomp_alloc(struct regstat *current,int i)
1838{
1839 alloc_reg(current,i,CSREG); // Load status
1840 alloc_reg(current,i,FSREG); // Load flags
1841 dirty_reg(current,FSREG); // Flag will be modified
1842 alloc_reg_temp(current,i,-1);
1843}
1844
1845void syscall_alloc(struct regstat *current,int i)
1846{
1847 alloc_cc(current,i);
1848 dirty_reg(current,CCREG);
1849 alloc_all(current,i);
1850 current->isconst=0;
1851}
1852
1853void delayslot_alloc(struct regstat *current,int i)
1854{
1855 switch(itype[i]) {
1856 case UJUMP:
1857 case CJUMP:
1858 case SJUMP:
1859 case RJUMP:
1860 case FJUMP:
1861 case SYSCALL:
1862 case HLECALL:
1863 case SPAN:
1864 assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
1865 printf("Disabled speculative precompilation\n");
1866 stop_after_jal=1;
1867 break;
1868 case IMM16:
1869 imm16_alloc(current,i);
1870 break;
1871 case LOAD:
1872 case LOADLR:
1873 load_alloc(current,i);
1874 break;
1875 case STORE:
1876 case STORELR:
1877 store_alloc(current,i);
1878 break;
1879 case ALU:
1880 alu_alloc(current,i);
1881 break;
1882 case SHIFT:
1883 shift_alloc(current,i);
1884 break;
1885 case MULTDIV:
1886 multdiv_alloc(current,i);
1887 break;
1888 case SHIFTIMM:
1889 shiftimm_alloc(current,i);
1890 break;
1891 case MOV:
1892 mov_alloc(current,i);
1893 break;
1894 case COP0:
1895 cop0_alloc(current,i);
1896 break;
1897 case COP1:
1898 case COP2:
1899 cop1_alloc(current,i);
1900 break;
1901 case C1LS:
1902 c1ls_alloc(current,i);
1903 break;
1904 case C2LS:
1905 c2ls_alloc(current,i);
1906 break;
1907 case FCONV:
1908 fconv_alloc(current,i);
1909 break;
1910 case FLOAT:
1911 float_alloc(current,i);
1912 break;
1913 case FCOMP:
1914 fcomp_alloc(current,i);
1915 break;
1916 case C2OP:
1917 c2op_alloc(current,i);
1918 break;
1919 }
1920}
1921
1922// Special case where a branch and delay slot span two pages in virtual memory
1923static void pagespan_alloc(struct regstat *current,int i)
1924{
1925 current->isconst=0;
1926 current->wasconst=0;
1927 regs[i].wasconst=0;
1928 alloc_all(current,i);
1929 alloc_cc(current,i);
1930 dirty_reg(current,CCREG);
1931 if(opcode[i]==3) // JAL
1932 {
1933 alloc_reg(current,i,31);
1934 dirty_reg(current,31);
1935 }
1936 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1937 {
1938 alloc_reg(current,i,rs1[i]);
1939 if (rt1[i]!=0) {
1940 alloc_reg(current,i,rt1[i]);
1941 dirty_reg(current,rt1[i]);
1942 }
1943 }
1944 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1945 {
1946 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1947 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1948 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1949 {
1950 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1951 if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1952 }
1953 }
1954 else
1955 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1956 {
1957 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1958 if(!((current->is32>>rs1[i])&1))
1959 {
1960 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1961 }
1962 }
1963 else
1964 if(opcode[i]==0x11) // BC1
1965 {
1966 alloc_reg(current,i,FSREG);
1967 alloc_reg(current,i,CSREG);
1968 }
1969 //else ...
1970}
1971
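// Queue an out-of-line stub to be generated after the main block:
// 'type' selects the stub kind (LOADB_STUB, STOREW_STUB, ...), 'addr' is
// the address of the branch that will be patched to reach the stub,
// 'retaddr' is where the stub returns to, and a..e are type-specific
// parameters (typically instruction index, address register, regstat
// pointer, cycle adjustment and register list).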
1972void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1973{
1974 stubs[stubcount][0]=type;
1975 stubs[stubcount][1]=addr;
1976 stubs[stubcount][2]=retaddr;
1977 stubs[stubcount][3]=a;
1978 stubs[stubcount][4]=b;
1979 stubs[stubcount][5]=c;
1980 stubs[stubcount][6]=d;
1981 stubs[stubcount][7]=e;
1982 stubcount++;
1983}
1984
1985// Write out a single register
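// (a regmap entry of r|64 holds the upper 32 bits of guest register r;
// values known to be 32-bit (is32) are written back by sign-extending
// the low half instead)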
1986void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1987{
1988 int hr;
1989 for(hr=0;hr<HOST_REGS;hr++) {
1990 if(hr!=EXCLUDE_REG) {
1991 if((regmap[hr]&63)==r) {
1992 if((dirty>>hr)&1) {
1993 if(regmap[hr]<64) {
1994 emit_storereg(r,hr);
1995#ifndef FORCE32
1996 if((is32>>regmap[hr])&1) {
1997 emit_sarimm(hr,31,hr);
1998 emit_storereg(r|64,hr);
1999 }
2000#endif
2001 }else{
2002 emit_storereg(r|64,hr);
2003 }
2004 }
2005 }
2006 }
2007 }
2008}
2009
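// Debug helpers: mchecksum/rchecksum compute simple checksums of RDRAM and
// of the guest register file, rlist dumps the registers, and memdebug
// prints a trace when the cycle count is in a hard-coded range. They are
// referenced mainly from the tracing code that is commented out further down.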
2010int mchecksum()
2011{
2012 //if(!tracedebug) return 0;
2013 int i;
2014 int sum=0;
2015 for(i=0;i<2097152;i++) {
2016 unsigned int temp=sum;
2017 sum<<=1;
2018 sum|=(~temp)>>31;
2019 sum^=((u_int *)rdram)[i];
2020 }
2021 return sum;
2022}
2023int rchecksum()
2024{
2025 int i;
2026 int sum=0;
2027 for(i=0;i<64;i++)
2028 sum^=((u_int *)reg)[i];
2029 return sum;
2030}
2031void rlist()
2032{
2033 int i;
2034 printf("TRACE: ");
2035 for(i=0;i<32;i++)
2036 printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2037 printf("\n");
2038#ifndef DISABLE_COP1
2039 printf("TRACE: ");
2040 for(i=0;i<32;i++)
2041 printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2042 printf("\n");
2043#endif
2044}
2045
2046void enabletrace()
2047{
2048 tracedebug=1;
2049}
2050
2051void memdebug(int i)
2052{
2053 //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2054 //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2055 //rlist();
2056 //if(tracedebug) {
2057 //if(Count>=-2084597794) {
2058 if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2059 //if(0) {
2060 printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2061 //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2062 //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2063 rlist();
2064 #ifdef __i386__
2065 printf("TRACE: %x\n",(&i)[-1]);
2066 #endif
2067 #ifdef __arm__
2068 int j;
2069 printf("TRACE: %x \n",(&j)[10]);
2070 printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2071 #endif
2072 //fflush(stdout);
2073 }
2074 //printf("TRACE: %x\n",(&i)[-1]);
2075}
2076
2077void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2078{
2079 printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2080}
2081
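// Assemble register-register ALU operations (ADD/SUB, DADD/DSUB, SLT/SLTU,
// AND/OR/XOR/NOR). 64-bit guest values live in two host registers: the low
// word is mapped as r and the high word as r|64; if no high register is
// mapped, or was32 says both sources are 32-bit, the cheaper 32-bit path
// is used.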
2082void alu_assemble(int i,struct regstat *i_regs)
2083{
2084 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2085 if(rt1[i]) {
2086 signed char s1,s2,t;
2087 t=get_reg(i_regs->regmap,rt1[i]);
2088 if(t>=0) {
2089 s1=get_reg(i_regs->regmap,rs1[i]);
2090 s2=get_reg(i_regs->regmap,rs2[i]);
2091 if(rs1[i]&&rs2[i]) {
2092 assert(s1>=0);
2093 assert(s2>=0);
2094 if(opcode2[i]&2) emit_sub(s1,s2,t);
2095 else emit_add(s1,s2,t);
2096 }
2097 else if(rs1[i]) {
2098 if(s1>=0) emit_mov(s1,t);
2099 else emit_loadreg(rs1[i],t);
2100 }
2101 else if(rs2[i]) {
2102 if(s2>=0) {
2103 if(opcode2[i]&2) emit_neg(s2,t);
2104 else emit_mov(s2,t);
2105 }
2106 else {
2107 emit_loadreg(rs2[i],t);
2108 if(opcode2[i]&2) emit_neg(t,t);
2109 }
2110 }
2111 else emit_zeroreg(t);
2112 }
2113 }
2114 }
2115 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2116 if(rt1[i]) {
2117 signed char s1l,s2l,s1h,s2h,tl,th;
2118 tl=get_reg(i_regs->regmap,rt1[i]);
2119 th=get_reg(i_regs->regmap,rt1[i]|64);
2120 if(tl>=0) {
2121 s1l=get_reg(i_regs->regmap,rs1[i]);
2122 s2l=get_reg(i_regs->regmap,rs2[i]);
2123 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2124 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2125 if(rs1[i]&&rs2[i]) {
2126 assert(s1l>=0);
2127 assert(s2l>=0);
2128 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2129 else emit_adds(s1l,s2l,tl);
2130 if(th>=0) {
2131 #ifdef INVERTED_CARRY
2132 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2133 #else
2134 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2135 #endif
2136            else emit_adc(s1h,s2h,th); // the carry from the low-word add must propagate into the high word
2137 }
2138 }
2139 else if(rs1[i]) {
2140 if(s1l>=0) emit_mov(s1l,tl);
2141 else emit_loadreg(rs1[i],tl);
2142 if(th>=0) {
2143 if(s1h>=0) emit_mov(s1h,th);
2144 else emit_loadreg(rs1[i]|64,th);
2145 }
2146 }
2147 else if(rs2[i]) {
2148 if(s2l>=0) {
2149 if(opcode2[i]&2) emit_negs(s2l,tl);
2150 else emit_mov(s2l,tl);
2151 }
2152 else {
2153 emit_loadreg(rs2[i],tl);
2154 if(opcode2[i]&2) emit_negs(tl,tl);
2155 }
2156 if(th>=0) {
2157 #ifdef INVERTED_CARRY
2158 if(s2h>=0) emit_mov(s2h,th);
2159 else emit_loadreg(rs2[i]|64,th);
2160 if(opcode2[i]&2) {
2161 emit_adcimm(-1,th); // x86 has inverted carry flag
2162 emit_not(th,th);
2163 }
2164 #else
2165 if(opcode2[i]&2) {
2166 if(s2h>=0) emit_rscimm(s2h,0,th);
2167 else {
2168 emit_loadreg(rs2[i]|64,th);
2169 emit_rscimm(th,0,th);
2170 }
2171 }else{
2172 if(s2h>=0) emit_mov(s2h,th);
2173 else emit_loadreg(rs2[i]|64,th);
2174 }
2175 #endif
2176 }
2177 }
2178 else {
2179 emit_zeroreg(tl);
2180 if(th>=0) emit_zeroreg(th);
2181 }
2182 }
2183 }
2184 }
2185 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2186 if(rt1[i]) {
2187 signed char s1l,s1h,s2l,s2h,t;
2188 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2189 {
2190 t=get_reg(i_regs->regmap,rt1[i]);
2191 //assert(t>=0);
2192 if(t>=0) {
2193 s1l=get_reg(i_regs->regmap,rs1[i]);
2194 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2195 s2l=get_reg(i_regs->regmap,rs2[i]);
2196 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2197 if(rs2[i]==0) // rx<r0
2198 {
2199 assert(s1h>=0);
2200 if(opcode2[i]==0x2a) // SLT
2201 emit_shrimm(s1h,31,t);
2202 else // SLTU (unsigned can not be less than zero)
2203 emit_zeroreg(t);
2204 }
2205 else if(rs1[i]==0) // r0<rx
2206 {
2207 assert(s2h>=0);
2208 if(opcode2[i]==0x2a) // SLT
2209 emit_set_gz64_32(s2h,s2l,t);
2210 else // SLTU (set if not zero)
2211 emit_set_nz64_32(s2h,s2l,t);
2212 }
2213 else {
2214 assert(s1l>=0);assert(s1h>=0);
2215 assert(s2l>=0);assert(s2h>=0);
2216 if(opcode2[i]==0x2a) // SLT
2217 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2218 else // SLTU
2219 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2220 }
2221 }
2222 } else {
2223 t=get_reg(i_regs->regmap,rt1[i]);
2224 //assert(t>=0);
2225 if(t>=0) {
2226 s1l=get_reg(i_regs->regmap,rs1[i]);
2227 s2l=get_reg(i_regs->regmap,rs2[i]);
2228 if(rs2[i]==0) // rx<r0
2229 {
2230 assert(s1l>=0);
2231 if(opcode2[i]==0x2a) // SLT
2232 emit_shrimm(s1l,31,t);
2233 else // SLTU (unsigned can not be less than zero)
2234 emit_zeroreg(t);
2235 }
2236 else if(rs1[i]==0) // r0<rx
2237 {
2238 assert(s2l>=0);
2239 if(opcode2[i]==0x2a) // SLT
2240 emit_set_gz32(s2l,t);
2241 else // SLTU (set if not zero)
2242 emit_set_nz32(s2l,t);
2243 }
2244 else{
2245 assert(s1l>=0);assert(s2l>=0);
2246 if(opcode2[i]==0x2a) // SLT
2247 emit_set_if_less32(s1l,s2l,t);
2248 else // SLTU
2249 emit_set_if_carry32(s1l,s2l,t);
2250 }
2251 }
2252 }
2253 }
2254 }
2255 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2256 if(rt1[i]) {
2257 signed char s1l,s1h,s2l,s2h,th,tl;
2258 tl=get_reg(i_regs->regmap,rt1[i]);
2259 th=get_reg(i_regs->regmap,rt1[i]|64);
2260 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2261 {
2262 assert(tl>=0);
2263 if(tl>=0) {
2264 s1l=get_reg(i_regs->regmap,rs1[i]);
2265 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2266 s2l=get_reg(i_regs->regmap,rs2[i]);
2267 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2268 if(rs1[i]&&rs2[i]) {
2269 assert(s1l>=0);assert(s1h>=0);
2270 assert(s2l>=0);assert(s2h>=0);
2271 if(opcode2[i]==0x24) { // AND
2272 emit_and(s1l,s2l,tl);
2273 emit_and(s1h,s2h,th);
2274 } else
2275 if(opcode2[i]==0x25) { // OR
2276 emit_or(s1l,s2l,tl);
2277 emit_or(s1h,s2h,th);
2278 } else
2279 if(opcode2[i]==0x26) { // XOR
2280 emit_xor(s1l,s2l,tl);
2281 emit_xor(s1h,s2h,th);
2282 } else
2283 if(opcode2[i]==0x27) { // NOR
2284 emit_or(s1l,s2l,tl);
2285 emit_or(s1h,s2h,th);
2286 emit_not(tl,tl);
2287 emit_not(th,th);
2288 }
2289 }
2290 else
2291 {
2292 if(opcode2[i]==0x24) { // AND
2293 emit_zeroreg(tl);
2294 emit_zeroreg(th);
2295 } else
2296 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2297 if(rs1[i]){
2298 if(s1l>=0) emit_mov(s1l,tl);
2299 else emit_loadreg(rs1[i],tl);
2300 if(s1h>=0) emit_mov(s1h,th);
2301 else emit_loadreg(rs1[i]|64,th);
2302 }
2303 else
2304 if(rs2[i]){
2305 if(s2l>=0) emit_mov(s2l,tl);
2306 else emit_loadreg(rs2[i],tl);
2307 if(s2h>=0) emit_mov(s2h,th);
2308 else emit_loadreg(rs2[i]|64,th);
2309 }
2310 else{
2311 emit_zeroreg(tl);
2312 emit_zeroreg(th);
2313 }
2314 } else
2315 if(opcode2[i]==0x27) { // NOR
2316 if(rs1[i]){
2317 if(s1l>=0) emit_not(s1l,tl);
2318 else{
2319 emit_loadreg(rs1[i],tl);
2320 emit_not(tl,tl);
2321 }
2322 if(s1h>=0) emit_not(s1h,th);
2323 else{
2324 emit_loadreg(rs1[i]|64,th);
2325 emit_not(th,th);
2326 }
2327 }
2328 else
2329 if(rs2[i]){
2330 if(s2l>=0) emit_not(s2l,tl);
2331 else{
2332 emit_loadreg(rs2[i],tl);
2333 emit_not(tl,tl);
2334 }
2335 if(s2h>=0) emit_not(s2h,th);
2336 else{
2337 emit_loadreg(rs2[i]|64,th);
2338 emit_not(th,th);
2339 }
2340 }
2341 else {
2342 emit_movimm(-1,tl);
2343 emit_movimm(-1,th);
2344 }
2345 }
2346 }
2347 }
2348 }
2349 else
2350 {
2351 // 32 bit
2352 if(tl>=0) {
2353 s1l=get_reg(i_regs->regmap,rs1[i]);
2354 s2l=get_reg(i_regs->regmap,rs2[i]);
2355 if(rs1[i]&&rs2[i]) {
2356 assert(s1l>=0);
2357 assert(s2l>=0);
2358 if(opcode2[i]==0x24) { // AND
2359 emit_and(s1l,s2l,tl);
2360 } else
2361 if(opcode2[i]==0x25) { // OR
2362 emit_or(s1l,s2l,tl);
2363 } else
2364 if(opcode2[i]==0x26) { // XOR
2365 emit_xor(s1l,s2l,tl);
2366 } else
2367 if(opcode2[i]==0x27) { // NOR
2368 emit_or(s1l,s2l,tl);
2369 emit_not(tl,tl);
2370 }
2371 }
2372 else
2373 {
2374 if(opcode2[i]==0x24) { // AND
2375 emit_zeroreg(tl);
2376 } else
2377 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2378 if(rs1[i]){
2379 if(s1l>=0) emit_mov(s1l,tl);
2380 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2381 }
2382 else
2383 if(rs2[i]){
2384 if(s2l>=0) emit_mov(s2l,tl);
2385 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2386 }
2387 else emit_zeroreg(tl);
2388 } else
2389 if(opcode2[i]==0x27) { // NOR
2390 if(rs1[i]){
2391 if(s1l>=0) emit_not(s1l,tl);
2392 else {
2393 emit_loadreg(rs1[i],tl);
2394 emit_not(tl,tl);
2395 }
2396 }
2397 else
2398 if(rs2[i]){
2399 if(s2l>=0) emit_not(s2l,tl);
2400 else {
2401 emit_loadreg(rs2[i],tl);
2402 emit_not(tl,tl);
2403 }
2404 }
2405 else emit_movimm(-1,tl);
2406 }
2407 }
2408 }
2409 }
2410 }
2411 }
2412}
2413
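// Assemble immediate-type instructions (LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU, ANDI/ORI/XORI). Constant propagation is applied here: if the
// destination already holds the value (isconst) nothing is emitted, and if
// the source was a known constant (wasconst) the result is materialized
// directly from constmap instead of being computed at runtime.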
2414void imm16_assemble(int i,struct regstat *i_regs)
2415{
2416 if (opcode[i]==0x0f) { // LUI
2417 if(rt1[i]) {
2418 signed char t;
2419 t=get_reg(i_regs->regmap,rt1[i]);
2420 //assert(t>=0);
2421 if(t>=0) {
2422 if(!((i_regs->isconst>>t)&1))
2423 emit_movimm(imm[i]<<16,t);
2424 }
2425 }
2426 }
2427 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2428 if(rt1[i]) {
2429 signed char s,t;
2430 t=get_reg(i_regs->regmap,rt1[i]);
2431 s=get_reg(i_regs->regmap,rs1[i]);
2432 if(rs1[i]) {
2433 //assert(t>=0);
2434 //assert(s>=0);
2435 if(t>=0) {
2436 if(!((i_regs->isconst>>t)&1)) {
2437 if(s<0) {
2438 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2439 emit_addimm(t,imm[i],t);
2440 }else{
2441 if(!((i_regs->wasconst>>s)&1))
2442 emit_addimm(s,imm[i],t);
2443 else
2444 emit_movimm(constmap[i][s]+imm[i],t);
2445 }
2446 }
2447 }
2448 } else {
2449 if(t>=0) {
2450 if(!((i_regs->isconst>>t)&1))
2451 emit_movimm(imm[i],t);
2452 }
2453 }
2454 }
2455 }
2456 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2457 if(rt1[i]) {
2458 signed char sh,sl,th,tl;
2459 th=get_reg(i_regs->regmap,rt1[i]|64);
2460 tl=get_reg(i_regs->regmap,rt1[i]);
2461 sh=get_reg(i_regs->regmap,rs1[i]|64);
2462 sl=get_reg(i_regs->regmap,rs1[i]);
2463 if(tl>=0) {
2464 if(rs1[i]) {
2465 assert(sh>=0);
2466 assert(sl>=0);
2467 if(th>=0) {
2468 emit_addimm64_32(sh,sl,imm[i],th,tl);
2469 }
2470 else {
2471 emit_addimm(sl,imm[i],tl);
2472 }
2473 } else {
2474 emit_movimm(imm[i],tl);
2475 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2476 }
2477 }
2478 }
2479 }
2480 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2481 if(rt1[i]) {
2482 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2483 signed char sh,sl,t;
2484 t=get_reg(i_regs->regmap,rt1[i]);
2485 sh=get_reg(i_regs->regmap,rs1[i]|64);
2486 sl=get_reg(i_regs->regmap,rs1[i]);
2487 //assert(t>=0);
2488 if(t>=0) {
2489 if(rs1[i]>0) {
2490 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2491 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2492 if(opcode[i]==0x0a) { // SLTI
2493 if(sl<0) {
2494 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2495 emit_slti32(t,imm[i],t);
2496 }else{
2497 emit_slti32(sl,imm[i],t);
2498 }
2499 }
2500 else { // SLTIU
2501 if(sl<0) {
2502 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2503 emit_sltiu32(t,imm[i],t);
2504 }else{
2505 emit_sltiu32(sl,imm[i],t);
2506 }
2507 }
2508 }else{ // 64-bit
2509 assert(sl>=0);
2510 if(opcode[i]==0x0a) // SLTI
2511 emit_slti64_32(sh,sl,imm[i],t);
2512 else // SLTIU
2513 emit_sltiu64_32(sh,sl,imm[i],t);
2514 }
2515 }else{
2516          // SLTI(U) with r0 as the source register is pointless,
2517          // but it does turn up in real code
2518 if(opcode[i]==0x0a) // SLTI
2519 if(0<imm[i]) emit_movimm(1,t);
2520 else emit_zeroreg(t);
2521 else // SLTIU
2522 {
2523 if(imm[i]) emit_movimm(1,t);
2524 else emit_zeroreg(t);
2525 }
2526 }
2527 }
2528 }
2529 }
2530 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2531 if(rt1[i]) {
2532 signed char sh,sl,th,tl;
2533 th=get_reg(i_regs->regmap,rt1[i]|64);
2534 tl=get_reg(i_regs->regmap,rt1[i]);
2535 sh=get_reg(i_regs->regmap,rs1[i]|64);
2536 sl=get_reg(i_regs->regmap,rs1[i]);
2537 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2538 if(opcode[i]==0x0c) //ANDI
2539 {
2540 if(rs1[i]) {
2541 if(sl<0) {
2542 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2543 emit_andimm(tl,imm[i],tl);
2544 }else{
2545 if(!((i_regs->wasconst>>sl)&1))
2546 emit_andimm(sl,imm[i],tl);
2547 else
2548 emit_movimm(constmap[i][sl]&imm[i],tl);
2549 }
2550 }
2551 else
2552 emit_zeroreg(tl);
2553 if(th>=0) emit_zeroreg(th);
2554 }
2555 else
2556 {
2557 if(rs1[i]) {
2558 if(sl<0) {
2559 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2560 }
2561 if(th>=0) {
2562 if(sh<0) {
2563 emit_loadreg(rs1[i]|64,th);
2564 }else{
2565 emit_mov(sh,th);
2566 }
2567 }
2568 if(opcode[i]==0x0d) //ORI
2569 if(sl<0) {
2570 emit_orimm(tl,imm[i],tl);
2571 }else{
2572 if(!((i_regs->wasconst>>sl)&1))
2573 emit_orimm(sl,imm[i],tl);
2574 else
2575 emit_movimm(constmap[i][sl]|imm[i],tl);
2576 }
2577 if(opcode[i]==0x0e) //XORI
2578 if(sl<0) {
2579 emit_xorimm(tl,imm[i],tl);
2580 }else{
2581 if(!((i_regs->wasconst>>sl)&1))
2582 emit_xorimm(sl,imm[i],tl);
2583 else
2584 emit_movimm(constmap[i][sl]^imm[i],tl);
2585 }
2586 }
2587 else {
2588 emit_movimm(imm[i],tl);
2589 if(th>=0) emit_zeroreg(th);
2590 }
2591 }
2592 }
2593 }
2594 }
2595}
2596
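// Shift-by-immediate: SLL/SRL/SRA operate on the low word only, while the
// doubleword forms (DSLL/DSRL/DSRA and the *32 variants) shift across the
// high/low host register pair.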
2597void shiftimm_assemble(int i,struct regstat *i_regs)
2598{
2599 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2600 {
2601 if(rt1[i]) {
2602 signed char s,t;
2603 t=get_reg(i_regs->regmap,rt1[i]);
2604 s=get_reg(i_regs->regmap,rs1[i]);
2605 //assert(t>=0);
2606 if(t>=0){
2607 if(rs1[i]==0)
2608 {
2609 emit_zeroreg(t);
2610 }
2611 else
2612 {
2613 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2614 if(imm[i]) {
2615 if(opcode2[i]==0) // SLL
2616 {
2617 emit_shlimm(s<0?t:s,imm[i],t);
2618 }
2619 if(opcode2[i]==2) // SRL
2620 {
2621 emit_shrimm(s<0?t:s,imm[i],t);
2622 }
2623 if(opcode2[i]==3) // SRA
2624 {
2625 emit_sarimm(s<0?t:s,imm[i],t);
2626 }
2627 }else{
2628 // Shift by zero
2629 if(s>=0 && s!=t) emit_mov(s,t);
2630 }
2631 }
2632 }
2633 //emit_storereg(rt1[i],t); //DEBUG
2634 }
2635 }
2636 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2637 {
2638 if(rt1[i]) {
2639 signed char sh,sl,th,tl;
2640 th=get_reg(i_regs->regmap,rt1[i]|64);
2641 tl=get_reg(i_regs->regmap,rt1[i]);
2642 sh=get_reg(i_regs->regmap,rs1[i]|64);
2643 sl=get_reg(i_regs->regmap,rs1[i]);
2644 if(tl>=0) {
2645 if(rs1[i]==0)
2646 {
2647 emit_zeroreg(tl);
2648 if(th>=0) emit_zeroreg(th);
2649 }
2650 else
2651 {
2652 assert(sl>=0);
2653 assert(sh>=0);
2654 if(imm[i]) {
2655 if(opcode2[i]==0x38) // DSLL
2656 {
2657 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2658 emit_shlimm(sl,imm[i],tl);
2659 }
2660 if(opcode2[i]==0x3a) // DSRL
2661 {
2662 emit_shrdimm(sl,sh,imm[i],tl);
2663 if(th>=0) emit_shrimm(sh,imm[i],th);
2664 }
2665 if(opcode2[i]==0x3b) // DSRA
2666 {
2667 emit_shrdimm(sl,sh,imm[i],tl);
2668 if(th>=0) emit_sarimm(sh,imm[i],th);
2669 }
2670 }else{
2671 // Shift by zero
2672 if(sl!=tl) emit_mov(sl,tl);
2673 if(th>=0&&sh!=th) emit_mov(sh,th);
2674 }
2675 }
2676 }
2677 }
2678 }
2679 if(opcode2[i]==0x3c) // DSLL32
2680 {
2681 if(rt1[i]) {
2682 signed char sl,tl,th;
2683 tl=get_reg(i_regs->regmap,rt1[i]);
2684 th=get_reg(i_regs->regmap,rt1[i]|64);
2685 sl=get_reg(i_regs->regmap,rs1[i]);
2686 if(th>=0||tl>=0){
2687 assert(tl>=0);
2688 assert(th>=0);
2689 assert(sl>=0);
2690 emit_mov(sl,th);
2691 emit_zeroreg(tl);
2692 if(imm[i]>32)
2693 {
2694 emit_shlimm(th,imm[i]&31,th);
2695 }
2696 }
2697 }
2698 }
2699 if(opcode2[i]==0x3e) // DSRL32
2700 {
2701 if(rt1[i]) {
2702 signed char sh,tl,th;
2703 tl=get_reg(i_regs->regmap,rt1[i]);
2704 th=get_reg(i_regs->regmap,rt1[i]|64);
2705 sh=get_reg(i_regs->regmap,rs1[i]|64);
2706 if(tl>=0){
2707 assert(sh>=0);
2708 emit_mov(sh,tl);
2709 if(th>=0) emit_zeroreg(th);
2710 if(imm[i]>32)
2711 {
2712 emit_shrimm(tl,imm[i]&31,tl);
2713 }
2714 }
2715 }
2716 }
2717 if(opcode2[i]==0x3f) // DSRA32
2718 {
2719 if(rt1[i]) {
2720 signed char sh,tl;
2721 tl=get_reg(i_regs->regmap,rt1[i]);
2722 sh=get_reg(i_regs->regmap,rs1[i]|64);
2723 if(tl>=0){
2724 assert(sh>=0);
2725 emit_mov(sh,tl);
2726 if(imm[i]>32)
2727 {
2728 emit_sarimm(tl,imm[i]&31,tl);
2729 }
2730 }
2731 }
2732 }
2733}
2734
2735#ifndef shift_assemble
2736void shift_assemble(int i,struct regstat *i_regs)
2737{
2738 printf("Need shift_assemble for this architecture.\n");
2739 exit(1);
2740}
2741#endif
2742
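// Loads: the effective address is either the source register itself (when
// there is no offset and it is not a known constant) or the value prepared
// by address_generation in a temporary. Constant addresses inside RAM skip
// the range check; everything else falls back to a LOAD*_STUB or, for known
// out-of-range addresses, an inlined read stub.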
2743void load_assemble(int i,struct regstat *i_regs)
2744{
2745 int s,th,tl,addr,map=-1;
2746 int offset;
2747 int jaddr=0;
2748 int memtarget=0,c=0;
2749 u_int hr,reglist=0;
2750 th=get_reg(i_regs->regmap,rt1[i]|64);
2751 tl=get_reg(i_regs->regmap,rt1[i]);
2752 s=get_reg(i_regs->regmap,rs1[i]);
2753 offset=imm[i];
2754 for(hr=0;hr<HOST_REGS;hr++) {
2755 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2756 }
2757 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2758 if(s>=0) {
2759 c=(i_regs->wasconst>>s)&1;
2760 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2761 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2762 }
2763 //printf("load_assemble: c=%d\n",c);
2764 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2765 // FIXME: Even if the load is a NOP, we should check for pagefaults...
2766#ifdef PCSX
2767 if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2768 ||rt1[i]==0) {
2769 // could be FIFO, must perform the read
2770 // ||dummy read
2771 assem_debug("(forced read)\n");
2772 tl=get_reg(i_regs->regmap,-1);
2773 assert(tl>=0);
2774 }
2775#endif
2776 if(offset||s<0||c) addr=tl;
2777 else addr=s;
2778 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2779 if(tl>=0) {
2780 //printf("load_assemble: c=%d\n",c);
2781 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2782 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2783 reglist&=~(1<<tl);
2784 if(th>=0) reglist&=~(1<<th);
2785 if(!using_tlb) {
2786 if(!c) {
2787 #ifdef RAM_OFFSET
2788 map=get_reg(i_regs->regmap,ROREG);
2789 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2790 #endif
2791//#define R29_HACK 1
2792 #ifdef R29_HACK
2793 // Strmnnrmn's speed hack
2794 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2795 #endif
2796 {
2797 emit_cmpimm(addr,RAM_SIZE);
2798 jaddr=(int)out;
2799 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2800 // Hint to branch predictor that the branch is unlikely to be taken
2801 if(rs1[i]>=28)
2802 emit_jno_unlikely(0);
2803 else
2804 #endif
2805 emit_jno(0);
2806 }
2807 }
2808 }else{ // using tlb
2809 int x=0;
2810 if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2811 if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2812 map=get_reg(i_regs->regmap,TLREG);
2813 assert(map>=0);
2814 map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2815 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2816 }
2817 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2818 if (opcode[i]==0x20) { // LB
2819 if(!c||memtarget) {
2820 if(!dummy) {
2821 #ifdef HOST_IMM_ADDR32
2822 if(c)
2823 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2824 else
2825 #endif
2826 {
2827 //emit_xorimm(addr,3,tl);
2828 //gen_tlb_addr_r(tl,map);
2829 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2830 int x=0,a=tl;
2831#ifdef BIG_ENDIAN_MIPS
2832 if(!c) emit_xorimm(addr,3,tl);
2833 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2834#else
2835 if(!c) a=addr;
2836#endif
2837 emit_movsbl_indexed_tlb(x,a,map,tl);
2838 }
2839 }
2840 if(jaddr)
2841 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2842 }
2843 else
2844 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2845 }
2846 if (opcode[i]==0x21) { // LH
2847 if(!c||memtarget) {
2848 if(!dummy) {
2849 #ifdef HOST_IMM_ADDR32
2850 if(c)
2851 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2852 else
2853 #endif
2854 {
2855 int x=0,a=tl;
2856#ifdef BIG_ENDIAN_MIPS
2857 if(!c) emit_xorimm(addr,2,tl);
2858 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2859#else
2860 if(!c) a=addr;
2861#endif
2862 //#ifdef
2863 //emit_movswl_indexed_tlb(x,tl,map,tl);
2864 //else
2865 if(map>=0) {
2866 gen_tlb_addr_r(a,map);
2867 emit_movswl_indexed(x,a,tl);
2868 }else{
2869 #ifdef RAM_OFFSET
2870 emit_movswl_indexed(x,a,tl);
2871 #else
2872 emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2873 #endif
2874 }
2875 }
2876 }
2877 if(jaddr)
2878 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2879 }
2880 else
2881 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2882 }
2883 if (opcode[i]==0x23) { // LW
2884 if(!c||memtarget) {
2885 if(!dummy) {
2886 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2887 #ifdef HOST_IMM_ADDR32
2888 if(c)
2889 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2890 else
2891 #endif
2892 emit_readword_indexed_tlb(0,addr,map,tl);
2893 }
2894 if(jaddr)
2895 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2896 }
2897 else
2898 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2899 }
2900 if (opcode[i]==0x24) { // LBU
2901 if(!c||memtarget) {
2902 if(!dummy) {
2903 #ifdef HOST_IMM_ADDR32
2904 if(c)
2905 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2906 else
2907 #endif
2908 {
2909 //emit_xorimm(addr,3,tl);
2910 //gen_tlb_addr_r(tl,map);
2911 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2912 int x=0,a=tl;
2913#ifdef BIG_ENDIAN_MIPS
2914 if(!c) emit_xorimm(addr,3,tl);
2915 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2916#else
2917 if(!c) a=addr;
2918#endif
2919 emit_movzbl_indexed_tlb(x,a,map,tl);
2920 }
2921 }
2922 if(jaddr)
2923 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2924 }
2925 else
2926 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2927 }
2928 if (opcode[i]==0x25) { // LHU
2929 if(!c||memtarget) {
2930 if(!dummy) {
2931 #ifdef HOST_IMM_ADDR32
2932 if(c)
2933 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2934 else
2935 #endif
2936 {
2937 int x=0,a=tl;
2938#ifdef BIG_ENDIAN_MIPS
2939 if(!c) emit_xorimm(addr,2,tl);
2940 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2941#else
2942 if(!c) a=addr;
2943#endif
2944 //#ifdef
2945 //emit_movzwl_indexed_tlb(x,tl,map,tl);
2946 //#else
2947 if(map>=0) {
2948 gen_tlb_addr_r(a,map);
2949 emit_movzwl_indexed(x,a,tl);
2950 }else{
2951 #ifdef RAM_OFFSET
2952 emit_movzwl_indexed(x,a,tl);
2953 #else
2954 emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2955 #endif
2956 }
2957 }
2958 }
2959 if(jaddr)
2960 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2961 }
2962 else
2963 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2964 }
2965 if (opcode[i]==0x27) { // LWU
2966 assert(th>=0);
2967 if(!c||memtarget) {
2968 if(!dummy) {
2969 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2970 #ifdef HOST_IMM_ADDR32
2971 if(c)
2972 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2973 else
2974 #endif
2975 emit_readword_indexed_tlb(0,addr,map,tl);
2976 }
2977 if(jaddr)
2978 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2979 }
2980 else {
2981 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2982 }
2983 emit_zeroreg(th);
2984 }
2985 if (opcode[i]==0x37) { // LD
2986 if(!c||memtarget) {
2987 if(!dummy) {
2988 //gen_tlb_addr_r(tl,map);
2989 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2990 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2991 #ifdef HOST_IMM_ADDR32
2992 if(c)
2993 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2994 else
2995 #endif
2996 emit_readdword_indexed_tlb(0,addr,map,th,tl);
2997 }
2998 if(jaddr)
2999 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3000 }
3001 else
3002 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3003 }
3004 }
3005 //emit_storereg(rt1[i],tl); // DEBUG
3006 //if(opcode[i]==0x23)
3007 //if(opcode[i]==0x24)
3008 //if(opcode[i]==0x23||opcode[i]==0x24)
3009 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3010 {
3011 //emit_pusha();
3012 save_regs(0x100f);
3013 emit_readword((int)&last_count,ECX);
3014 #ifdef __i386__
3015 if(get_reg(i_regs->regmap,CCREG)<0)
3016 emit_loadreg(CCREG,HOST_CCREG);
3017 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3018 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3019 emit_writeword(HOST_CCREG,(int)&Count);
3020 #endif
3021 #ifdef __arm__
3022 if(get_reg(i_regs->regmap,CCREG)<0)
3023 emit_loadreg(CCREG,0);
3024 else
3025 emit_mov(HOST_CCREG,0);
3026 emit_add(0,ECX,0);
3027 emit_addimm(0,2*ccadj[i],0);
3028 emit_writeword(0,(int)&Count);
3029 #endif
3030 emit_call((int)memdebug);
3031 //emit_popa();
3032 restore_regs(0x100f);
3033 }/**/
3034}
3035
3036#ifndef loadlr_assemble
3037void loadlr_assemble(int i,struct regstat *i_regs)
3038{
3039 printf("Need loadlr_assemble for this architecture.\n");
3040 exit(1);
3041}
3042#endif
3043
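// Stores: in addition to the RAM range check and the STORE*_STUB slow path,
// every store that can hit RAM is checked against invalid_code[] and jumps
// to INVCODE_STUB when the target page may contain compiled code, so that
// self-modifying code invalidates the affected blocks.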
3044void store_assemble(int i,struct regstat *i_regs)
3045{
3046 int s,th,tl,map=-1;
3047 int addr,temp;
3048 int offset;
3049 int jaddr=0,jaddr2,type;
3050 int memtarget=0,c=0;
3051 int agr=AGEN1+(i&1);
3052 u_int hr,reglist=0;
3053 th=get_reg(i_regs->regmap,rs2[i]|64);
3054 tl=get_reg(i_regs->regmap,rs2[i]);
3055 s=get_reg(i_regs->regmap,rs1[i]);
3056 temp=get_reg(i_regs->regmap,agr);
3057 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3058 offset=imm[i];
3059 if(s>=0) {
3060 c=(i_regs->wasconst>>s)&1;
3061 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3062 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3063 }
3064 assert(tl>=0);
3065 assert(temp>=0);
3066 for(hr=0;hr<HOST_REGS;hr++) {
3067 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3068 }
3069 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3070 if(offset||s<0||c) addr=temp;
3071 else addr=s;
3072 if(!using_tlb) {
3073 if(!c) {
3074 #ifdef R29_HACK
3075 // Strmnnrmn's speed hack
3076 memtarget=1;
3077 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3078 #endif
3079 emit_cmpimm(addr,RAM_SIZE);
3080 #ifdef DESTRUCTIVE_SHIFT
3081 if(s==addr) emit_mov(s,temp);
3082 #endif
3083 #ifdef R29_HACK
3084 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3085 #endif
3086 {
3087 jaddr=(int)out;
3088 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3089 // Hint to branch predictor that the branch is unlikely to be taken
3090 if(rs1[i]>=28)
3091 emit_jno_unlikely(0);
3092 else
3093 #endif
3094 emit_jno(0);
3095 }
3096 }
3097 }else{ // using tlb
3098 int x=0;
3099 if (opcode[i]==0x28) x=3; // SB
3100 if (opcode[i]==0x29) x=2; // SH
3101 map=get_reg(i_regs->regmap,TLREG);
3102 assert(map>=0);
3103 map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3104 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3105 }
3106
3107 if (opcode[i]==0x28) { // SB
3108 if(!c||memtarget) {
3109 int x=0;
3110#ifdef BIG_ENDIAN_MIPS
3111 if(!c) emit_xorimm(addr,3,temp);
3112 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3113#else
3114 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3115 else if (addr!=temp) emit_mov(addr,temp);
3116#endif
3117 //gen_tlb_addr_w(temp,map);
3118 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3119 emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3120 }
3121 type=STOREB_STUB;
3122 }
3123 if (opcode[i]==0x29) { // SH
3124 if(!c||memtarget) {
3125 int x=0;
3126#ifdef BIG_ENDIAN_MIPS
3127 if(!c) emit_xorimm(addr,2,temp);
3128 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3129#else
3130 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3131 else if (addr!=temp) emit_mov(addr,temp);
3132#endif
3133 //#ifdef
3134 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3135 //#else
3136 if(map>=0) {
3137 gen_tlb_addr_w(temp,map);
3138 emit_writehword_indexed(tl,x,temp);
3139 }else
3140 emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3141 }
3142 type=STOREH_STUB;
3143 }
3144 if (opcode[i]==0x2B) { // SW
3145 if(!c||memtarget)
3146 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3147 emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3148 type=STOREW_STUB;
3149 }
3150 if (opcode[i]==0x3F) { // SD
3151 if(!c||memtarget) {
3152 if(rs2[i]) {
3153 assert(th>=0);
3154 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3155 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3156 emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3157 }else{
3158 // Store zero
3159 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3160 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3161 emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3162 }
3163 }
3164 type=STORED_STUB;
3165 }
3166 if(!using_tlb&&(!c||memtarget))
3167 // addr could be a temp, make sure it survives STORE*_STUB
3168 reglist|=1<<addr;
3169 if(jaddr) {
3170 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3171 } else if(!memtarget) {
3172 inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3173 }
3174 if(!using_tlb) {
3175 if(!c||memtarget) {
3176 #ifdef DESTRUCTIVE_SHIFT
3177 // The x86 shift operation is 'destructive'; it overwrites the
3178 // source register, so we need to make a copy first and use that.
3179 addr=temp;
3180 #endif
3181 #if defined(HOST_IMM8)
3182 int ir=get_reg(i_regs->regmap,INVCP);
3183 assert(ir>=0);
3184 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3185 #else
3186 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3187 #endif
3188 jaddr2=(int)out;
3189 emit_jne(0);
3190 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3191 }
3192 }
3193 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3194 //if(opcode[i]==0x2B || opcode[i]==0x28)
3195 //if(opcode[i]==0x2B || opcode[i]==0x29)
3196 //if(opcode[i]==0x2B)
3197 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3198 {
3199 //emit_pusha();
3200 save_regs(0x100f);
3201 emit_readword((int)&last_count,ECX);
3202 #ifdef __i386__
3203 if(get_reg(i_regs->regmap,CCREG)<0)
3204 emit_loadreg(CCREG,HOST_CCREG);
3205 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3206 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3207 emit_writeword(HOST_CCREG,(int)&Count);
3208 #endif
3209 #ifdef __arm__
3210 if(get_reg(i_regs->regmap,CCREG)<0)
3211 emit_loadreg(CCREG,0);
3212 else
3213 emit_mov(HOST_CCREG,0);
3214 emit_add(0,ECX,0);
3215 emit_addimm(0,2*ccadj[i],0);
3216 emit_writeword(0,(int)&Count);
3217 #endif
3218 emit_call((int)memdebug);
3219 //emit_popa();
3220 restore_regs(0x100f);
3221 }/**/
3222}
3223
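// Unaligned stores (SWL/SWR/SDL/SDR): the two low address bits are tested
// and one of four alignment cases is emitted; SDL/SDR additionally write
// the remaining word of the doubleword above or below the aligned address.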
3224void storelr_assemble(int i,struct regstat *i_regs)
3225{
3226 int s,th,tl;
3227 int temp;
3228 int temp2;
3229 int offset;
3230 int jaddr=0,jaddr2;
3231 int case1,case2,case3;
3232 int done0,done1,done2;
3233  int memtarget=0,c=0;
3234 int agr=AGEN1+(i&1);
3235 u_int hr,reglist=0;
3236 th=get_reg(i_regs->regmap,rs2[i]|64);
3237 tl=get_reg(i_regs->regmap,rs2[i]);
3238 s=get_reg(i_regs->regmap,rs1[i]);
3239 temp=get_reg(i_regs->regmap,agr);
3240 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3241 offset=imm[i];
3242 if(s>=0) {
3243 c=(i_regs->isconst>>s)&1;
3244 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3245 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3246 }
3247 assert(tl>=0);
3248 for(hr=0;hr<HOST_REGS;hr++) {
3249 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3250 }
3251 assert(temp>=0);
3252 if(!using_tlb) {
3253 if(!c) {
3254 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3255 if(!offset&&s!=temp) emit_mov(s,temp);
3256 jaddr=(int)out;
3257 emit_jno(0);
3258 }
3259 else
3260 {
3261 if(!memtarget||!rs1[i]) {
3262 jaddr=(int)out;
3263 emit_jmp(0);
3264 }
3265 }
3266 #ifdef RAM_OFFSET
3267 int map=get_reg(i_regs->regmap,ROREG);
3268 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3269 gen_tlb_addr_w(temp,map);
3270 #else
3271 if((u_int)rdram!=0x80000000)
3272 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3273 #endif
3274 }else{ // using tlb
3275 int map=get_reg(i_regs->regmap,TLREG);
3276 assert(map>=0);
3277 map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3278 if(!c&&!offset&&s>=0) emit_mov(s,temp);
3279 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3280 if(!jaddr&&!memtarget) {
3281 jaddr=(int)out;
3282 emit_jmp(0);
3283 }
3284 gen_tlb_addr_w(temp,map);
3285 }
3286
3287 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3288 temp2=get_reg(i_regs->regmap,FTEMP);
3289 if(!rs2[i]) temp2=th=tl;
3290 }
3291
3292#ifndef BIG_ENDIAN_MIPS
3293 emit_xorimm(temp,3,temp);
3294#endif
3295 emit_testimm(temp,2);
3296 case2=(int)out;
3297 emit_jne(0);
3298 emit_testimm(temp,1);
3299 case1=(int)out;
3300 emit_jne(0);
3301 // 0
3302 if (opcode[i]==0x2A) { // SWL
3303 emit_writeword_indexed(tl,0,temp);
3304 }
3305 if (opcode[i]==0x2E) { // SWR
3306 emit_writebyte_indexed(tl,3,temp);
3307 }
3308 if (opcode[i]==0x2C) { // SDL
3309 emit_writeword_indexed(th,0,temp);
3310 if(rs2[i]) emit_mov(tl,temp2);
3311 }
3312 if (opcode[i]==0x2D) { // SDR
3313 emit_writebyte_indexed(tl,3,temp);
3314 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3315 }
3316 done0=(int)out;
3317 emit_jmp(0);
3318 // 1
3319 set_jump_target(case1,(int)out);
3320 if (opcode[i]==0x2A) { // SWL
3321 // Write 3 msb into three least significant bytes
3322 if(rs2[i]) emit_rorimm(tl,8,tl);
3323 emit_writehword_indexed(tl,-1,temp);
3324 if(rs2[i]) emit_rorimm(tl,16,tl);
3325 emit_writebyte_indexed(tl,1,temp);
3326 if(rs2[i]) emit_rorimm(tl,8,tl);
3327 }
3328 if (opcode[i]==0x2E) { // SWR
3329 // Write two lsb into two most significant bytes
3330 emit_writehword_indexed(tl,1,temp);
3331 }
3332 if (opcode[i]==0x2C) { // SDL
3333 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3334 // Write 3 msb into three least significant bytes
3335 if(rs2[i]) emit_rorimm(th,8,th);
3336 emit_writehword_indexed(th,-1,temp);
3337 if(rs2[i]) emit_rorimm(th,16,th);
3338 emit_writebyte_indexed(th,1,temp);
3339 if(rs2[i]) emit_rorimm(th,8,th);
3340 }
3341 if (opcode[i]==0x2D) { // SDR
3342 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3343 // Write two lsb into two most significant bytes
3344 emit_writehword_indexed(tl,1,temp);
3345 }
3346 done1=(int)out;
3347 emit_jmp(0);
3348 // 2
3349 set_jump_target(case2,(int)out);
3350 emit_testimm(temp,1);
3351 case3=(int)out;
3352 emit_jne(0);
3353 if (opcode[i]==0x2A) { // SWL
3354 // Write two msb into two least significant bytes
3355 if(rs2[i]) emit_rorimm(tl,16,tl);
3356 emit_writehword_indexed(tl,-2,temp);
3357 if(rs2[i]) emit_rorimm(tl,16,tl);
3358 }
3359 if (opcode[i]==0x2E) { // SWR
3360 // Write 3 lsb into three most significant bytes
3361 emit_writebyte_indexed(tl,-1,temp);
3362 if(rs2[i]) emit_rorimm(tl,8,tl);
3363 emit_writehword_indexed(tl,0,temp);
3364 if(rs2[i]) emit_rorimm(tl,24,tl);
3365 }
3366 if (opcode[i]==0x2C) { // SDL
3367 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3368 // Write two msb into two least significant bytes
3369 if(rs2[i]) emit_rorimm(th,16,th);
3370 emit_writehword_indexed(th,-2,temp);
3371 if(rs2[i]) emit_rorimm(th,16,th);
3372 }
3373 if (opcode[i]==0x2D) { // SDR
3374 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3375 // Write 3 lsb into three most significant bytes
3376 emit_writebyte_indexed(tl,-1,temp);
3377 if(rs2[i]) emit_rorimm(tl,8,tl);
3378 emit_writehword_indexed(tl,0,temp);
3379 if(rs2[i]) emit_rorimm(tl,24,tl);
3380 }
3381 done2=(int)out;
3382 emit_jmp(0);
3383 // 3
3384 set_jump_target(case3,(int)out);
3385 if (opcode[i]==0x2A) { // SWL
3386 // Write msb into least significant byte
3387 if(rs2[i]) emit_rorimm(tl,24,tl);
3388 emit_writebyte_indexed(tl,-3,temp);
3389 if(rs2[i]) emit_rorimm(tl,8,tl);
3390 }
3391 if (opcode[i]==0x2E) { // SWR
3392 // Write entire word
3393 emit_writeword_indexed(tl,-3,temp);
3394 }
3395 if (opcode[i]==0x2C) { // SDL
3396 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3397 // Write msb into least significant byte
3398 if(rs2[i]) emit_rorimm(th,24,th);
3399 emit_writebyte_indexed(th,-3,temp);
3400 if(rs2[i]) emit_rorimm(th,8,th);
3401 }
3402 if (opcode[i]==0x2D) { // SDR
3403 if(rs2[i]) emit_mov(th,temp2);
3404 // Write entire word
3405 emit_writeword_indexed(tl,-3,temp);
3406 }
3407 set_jump_target(done0,(int)out);
3408 set_jump_target(done1,(int)out);
3409 set_jump_target(done2,(int)out);
3410 if (opcode[i]==0x2C) { // SDL
3411 emit_testimm(temp,4);
3412 done0=(int)out;
3413 emit_jne(0);
3414 emit_andimm(temp,~3,temp);
3415 emit_writeword_indexed(temp2,4,temp);
3416 set_jump_target(done0,(int)out);
3417 }
3418 if (opcode[i]==0x2D) { // SDR
3419 emit_testimm(temp,4);
3420 done0=(int)out;
3421 emit_jeq(0);
3422 emit_andimm(temp,~3,temp);
3423 emit_writeword_indexed(temp2,-4,temp);
3424 set_jump_target(done0,(int)out);
3425 }
3426 if(!c||!memtarget)
3427 add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3428 if(!using_tlb) {
3429 #ifdef RAM_OFFSET
3430 int map=get_reg(i_regs->regmap,ROREG);
3431 if(map<0) map=HOST_TEMPREG;
3432 gen_orig_addr_w(temp,map);
3433 #else
3434 emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3435 #endif
3436 #if defined(HOST_IMM8)
3437 int ir=get_reg(i_regs->regmap,INVCP);
3438 assert(ir>=0);
3439 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3440 #else
3441 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3442 #endif
3443 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3444 emit_callne(invalidate_addr_reg[temp]);
3445 #else
3446 jaddr2=(int)out;
3447 emit_jne(0);
3448 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3449 #endif
3450 }
3451 /*
3452 emit_pusha();
3453 //save_regs(0x100f);
3454 emit_readword((int)&last_count,ECX);
3455 if(get_reg(i_regs->regmap,CCREG)<0)
3456 emit_loadreg(CCREG,HOST_CCREG);
3457 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3458 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3459 emit_writeword(HOST_CCREG,(int)&Count);
3460 emit_call((int)memdebug);
3461 emit_popa();
3462 //restore_regs(0x100f);
3463 /**/
3464}
3465
3466void c1ls_assemble(int i,struct regstat *i_regs)
3467{
3468#ifndef DISABLE_COP1
3469 int s,th,tl;
3470 int temp,ar;
3471 int map=-1;
3472 int offset;
3473 int c=0;
3474 int jaddr,jaddr2=0,jaddr3,type;
3475 int agr=AGEN1+(i&1);
3476 u_int hr,reglist=0;
3477 th=get_reg(i_regs->regmap,FTEMP|64);
3478 tl=get_reg(i_regs->regmap,FTEMP);
3479 s=get_reg(i_regs->regmap,rs1[i]);
3480 temp=get_reg(i_regs->regmap,agr);
3481 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3482 offset=imm[i];
3483 assert(tl>=0);
3484 assert(rs1[i]>0);
3485 assert(temp>=0);
3486 for(hr=0;hr<HOST_REGS;hr++) {
3487 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3488 }
3489 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3490 if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3491 {
3492 // Loads use a temporary register which we need to save
3493 reglist|=1<<temp;
3494 }
3495 if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3496 ar=temp;
3497 else // LWC1/LDC1
3498 ar=tl;
3499 //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3500 //else c=(i_regs->wasconst>>s)&1;
3501 if(s>=0) c=(i_regs->wasconst>>s)&1;
3502 // Check cop1 unusable
3503 if(!cop1_usable) {
3504 signed char rs=get_reg(i_regs->regmap,CSREG);
3505 assert(rs>=0);
3506 emit_testimm(rs,0x20000000);
3507 jaddr=(int)out;
3508 emit_jeq(0);
3509 add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3510 cop1_usable=1;
3511 }
3512 if (opcode[i]==0x39) { // SWC1 (get float address)
3513 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3514 }
3515 if (opcode[i]==0x3D) { // SDC1 (get double address)
3516 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3517 }
3518 // Generate address + offset
3519 if(!using_tlb) {
3520 if(!c)
3521 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3522 }
3523 else
3524 {
3525 map=get_reg(i_regs->regmap,TLREG);
3526 assert(map>=0);
3527 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3528 map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3529 }
3530 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3531 map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3532 }
3533 }
3534 if (opcode[i]==0x39) { // SWC1 (read float)
3535 emit_readword_indexed(0,tl,tl);
3536 }
3537 if (opcode[i]==0x3D) { // SDC1 (read double)
3538 emit_readword_indexed(4,tl,th);
3539 emit_readword_indexed(0,tl,tl);
3540 }
3541 if (opcode[i]==0x31) { // LWC1 (get target address)
3542 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3543 }
3544 if (opcode[i]==0x35) { // LDC1 (get target address)
3545 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3546 }
3547 if(!using_tlb) {
3548 if(!c) {
3549 jaddr2=(int)out;
3550 emit_jno(0);
3551 }
3552 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3553 jaddr2=(int)out;
3554 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3555 }
3556 #ifdef DESTRUCTIVE_SHIFT
3557 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3558 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3559 }
3560 #endif
3561 }else{
3562 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3563 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3564 }
3565 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3566 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3567 }
3568 }
3569 if (opcode[i]==0x31) { // LWC1
3570 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3571 //gen_tlb_addr_r(ar,map);
3572 //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3573 #ifdef HOST_IMM_ADDR32
3574 if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3575 else
3576 #endif
3577 emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3578 type=LOADW_STUB;
3579 }
3580 if (opcode[i]==0x35) { // LDC1
3581 assert(th>=0);
3582 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3583 //gen_tlb_addr_r(ar,map);
3584 //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3585 //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3586 #ifdef HOST_IMM_ADDR32
3587 if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3588 else
3589 #endif
3590 emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3591 type=LOADD_STUB;
3592 }
3593 if (opcode[i]==0x39) { // SWC1
3594 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3595 emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3596 type=STOREW_STUB;
3597 }
3598 if (opcode[i]==0x3D) { // SDC1
3599 assert(th>=0);
3600 //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3601 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3602 emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3603 type=STORED_STUB;
3604 }
3605 if(!using_tlb) {
3606 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3607 #ifndef DESTRUCTIVE_SHIFT
3608 temp=offset||c||s<0?ar:s;
3609 #endif
3610 #if defined(HOST_IMM8)
3611 int ir=get_reg(i_regs->regmap,INVCP);
3612 assert(ir>=0);
3613 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3614 #else
3615 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3616 #endif
3617 jaddr3=(int)out;
3618 emit_jne(0);
3619 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3620 }
3621 }
3622 if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3623 if (opcode[i]==0x31) { // LWC1 (write float)
3624 emit_writeword_indexed(tl,0,temp);
3625 }
3626 if (opcode[i]==0x35) { // LDC1 (write double)
3627 emit_writeword_indexed(th,4,temp);
3628 emit_writeword_indexed(tl,0,temp);
3629 }
3630 //if(opcode[i]==0x39)
3631 /*if(opcode[i]==0x39||opcode[i]==0x31)
3632 {
3633 emit_pusha();
3634 emit_readword((int)&last_count,ECX);
3635 if(get_reg(i_regs->regmap,CCREG)<0)
3636 emit_loadreg(CCREG,HOST_CCREG);
3637 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3638 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3639 emit_writeword(HOST_CCREG,(int)&Count);
3640 emit_call((int)memdebug);
3641 emit_popa();
3642 }/**/
3643#else
3644 cop1_unusable(i, i_regs);
3645#endif
3646}
3647
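// GTE loads/stores (LWC2/SWC2): data moves between memory and a GTE data
// register through the FTEMP host register via cop2_get_dreg/cop2_put_dreg;
// SWC2 goes through the same invalid_code check as ordinary stores.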
3648void c2ls_assemble(int i,struct regstat *i_regs)
3649{
3650 int s,tl;
3651 int ar;
3652 int offset;
3653 int memtarget=0,c=0;
3654 int jaddr,jaddr2=0,jaddr3,type;
3655 int agr=AGEN1+(i&1);
3656 u_int hr,reglist=0;
3657 u_int copr=(source[i]>>16)&0x1f;
3658 s=get_reg(i_regs->regmap,rs1[i]);
3659 tl=get_reg(i_regs->regmap,FTEMP);
3660 offset=imm[i];
3661 assert(rs1[i]>0);
3662 assert(tl>=0);
3663 assert(!using_tlb);
3664
3665 for(hr=0;hr<HOST_REGS;hr++) {
3666 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3667 }
3668 if(i_regs->regmap[HOST_CCREG]==CCREG)
3669 reglist&=~(1<<HOST_CCREG);
3670
3671 // get the address
3672 if (opcode[i]==0x3a) { // SWC2
3673 ar=get_reg(i_regs->regmap,agr);
3674 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3675 reglist|=1<<ar;
3676 } else { // LWC2
3677 ar=tl;
3678 }
3679 if(s>=0) c=(i_regs->wasconst>>s)&1;
3680 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3681 if (!offset&&!c&&s>=0) ar=s;
3682 assert(ar>=0);
3683
3684 if (opcode[i]==0x3a) { // SWC2
3685 cop2_get_dreg(copr,tl,HOST_TEMPREG);
3686 type=STOREW_STUB;
3687 }
3688 else
3689 type=LOADW_STUB;
3690
3691 if(c&&!memtarget) {
3692 jaddr2=(int)out;
3693 emit_jmp(0); // inline_readstub/inline_writestub?
3694 }
3695 else {
3696 if(!c) {
3697 emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3698 jaddr2=(int)out;
3699 emit_jno(0);
3700 }
3701 if (opcode[i]==0x32) { // LWC2
3702 #ifdef HOST_IMM_ADDR32
3703 if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3704 else
3705 #endif
3706 emit_readword_indexed(0,ar,tl);
3707 }
3708 if (opcode[i]==0x3a) { // SWC2
3709 #ifdef DESTRUCTIVE_SHIFT
3710 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3711 #endif
3712 emit_writeword_indexed(tl,0,ar);
3713 }
3714 }
3715 if(jaddr2)
3716 add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3717 if (opcode[i]==0x3a) { // SWC2
3718#if defined(HOST_IMM8)
3719 int ir=get_reg(i_regs->regmap,INVCP);
3720 assert(ir>=0);
3721 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3722#else
3723 emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3724#endif
3725 jaddr3=(int)out;
3726 emit_jne(0);
3727 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3728 }
3729 if (opcode[i]==0x32) { // LWC2
3730 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3731 }
3732}
3733
3734#ifndef multdiv_assemble
3735void multdiv_assemble(int i,struct regstat *i_regs)
3736{
3737 printf("Need multdiv_assemble for this architecture.\n");
3738 exit(1);
3739}
3740#endif
3741
3742void mov_assemble(int i,struct regstat *i_regs)
3743{
3744 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3745 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3746 if(rt1[i]) {
3747 signed char sh,sl,th,tl;
3748 th=get_reg(i_regs->regmap,rt1[i]|64);
3749 tl=get_reg(i_regs->regmap,rt1[i]);
3750 //assert(tl>=0);
3751 if(tl>=0) {
3752 sh=get_reg(i_regs->regmap,rs1[i]|64);
3753 sl=get_reg(i_regs->regmap,rs1[i]);
3754 if(sl>=0) emit_mov(sl,tl);
3755 else emit_loadreg(rs1[i],tl);
3756 if(th>=0) {
3757 if(sh>=0) emit_mov(sh,th);
3758 else emit_loadreg(rs1[i]|64,th);
3759 }
3760 }
3761 }
3762}
3763
3764#ifndef fconv_assemble
3765void fconv_assemble(int i,struct regstat *i_regs)
3766{
3767 printf("Need fconv_assemble for this architecture.\n");
3768 exit(1);
3769}
3770#endif
3771
3772#if 0
3773void float_assemble(int i,struct regstat *i_regs)
3774{
3775 printf("Need float_assemble for this architecture.\n");
3776 exit(1);
3777}
3778#endif
3779
3780void syscall_assemble(int i,struct regstat *i_regs)
3781{
3782 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3783 assert(ccreg==HOST_CCREG);
3784 assert(!is_delayslot);
3785 emit_movimm(start+i*4,EAX); // Get PC
3786 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
3787 emit_jmp((int)jump_syscall_hle); // XXX
3788}
3789
3790void hlecall_assemble(int i,struct regstat *i_regs)
3791{
3792 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3793 assert(ccreg==HOST_CCREG);
3794 assert(!is_delayslot);
3795 emit_movimm(start+i*4+4,0); // Get PC
3796 emit_movimm((int)psxHLEt[source[i]&7],1);
3797 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3798 emit_jmp((int)jump_hlecall);
3799}
3800
3801void intcall_assemble(int i,struct regstat *i_regs)
3802{
3803 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3804 assert(ccreg==HOST_CCREG);
3805 assert(!is_delayslot);
3806 emit_movimm(start+i*4,0); // Get PC
3807 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3808 emit_jmp((int)jump_intcall);
3809}
3810
3811void ds_assemble(int i,struct regstat *i_regs)
3812{
3813 is_delayslot=1;
3814 switch(itype[i]) {
3815 case ALU:
3816 alu_assemble(i,i_regs);break;
3817 case IMM16:
3818 imm16_assemble(i,i_regs);break;
3819 case SHIFT:
3820 shift_assemble(i,i_regs);break;
3821 case SHIFTIMM:
3822 shiftimm_assemble(i,i_regs);break;
3823 case LOAD:
3824 load_assemble(i,i_regs);break;
3825 case LOADLR:
3826 loadlr_assemble(i,i_regs);break;
3827 case STORE:
3828 store_assemble(i,i_regs);break;
3829 case STORELR:
3830 storelr_assemble(i,i_regs);break;
3831 case COP0:
3832 cop0_assemble(i,i_regs);break;
3833 case COP1:
3834 cop1_assemble(i,i_regs);break;
3835 case C1LS:
3836 c1ls_assemble(i,i_regs);break;
3837 case COP2:
3838 cop2_assemble(i,i_regs);break;
3839 case C2LS:
3840 c2ls_assemble(i,i_regs);break;
3841 case C2OP:
3842 c2op_assemble(i,i_regs);break;
3843 case FCONV:
3844 fconv_assemble(i,i_regs);break;
3845 case FLOAT:
3846 float_assemble(i,i_regs);break;
3847 case FCOMP:
3848 fcomp_assemble(i,i_regs);break;
3849 case MULTDIV:
3850 multdiv_assemble(i,i_regs);break;
3851 case MOV:
3852 mov_assemble(i,i_regs);break;
3853 case SYSCALL:
3854 case HLECALL:
3855 case INTCALL:
3856 case SPAN:
3857 case UJUMP:
3858 case RJUMP:
3859 case CJUMP:
3860 case SJUMP:
3861 case FJUMP:
3862 printf("Jump in the delay slot. This is probably a bug.\n");
3863 }
3864 is_delayslot=0;
3865}
3866
3867// Is the branch target a valid internal jump?
3868int internal_branch(uint64_t i_is32,int addr)
3869{
3870 if(addr&1) return 0; // Indirect (register) jump
3871 if(addr>=start && addr<start+slen*4-4)
3872 {
3873 int t=(addr-start)>>2;
3874 // Delay slots are not valid branch targets
3875 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3876 // 64 -> 32 bit transition requires a recompile
3877 /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3878 {
3879 if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3880 else printf("optimizable: yes\n");
3881 }*/
3882 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3883#ifndef FORCE32
3884 if(requires_32bit[t]&~i_is32) return 0;
3885 else
3886#endif
3887 return 1;
3888 }
3889 return 0;
3890}
3891
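// Write back dirty guest registers whose mapping changes between 'pre' and
// 'entry' and would otherwise be lost, skipping values marked unneeded
// (u/uu); registers that merely moved to a different host register are
// copied instead of being reloaded from memory.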
3892#ifndef wb_invalidate
3893void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3894 uint64_t u,uint64_t uu)
3895{
3896 int hr;
3897 for(hr=0;hr<HOST_REGS;hr++) {
3898 if(hr!=EXCLUDE_REG) {
3899 if(pre[hr]!=entry[hr]) {
3900 if(pre[hr]>=0) {
3901 if((dirty>>hr)&1) {
3902 if(get_reg(entry,pre[hr])<0) {
3903 if(pre[hr]<64) {
3904 if(!((u>>pre[hr])&1)) {
3905 emit_storereg(pre[hr],hr);
3906 if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3907 emit_sarimm(hr,31,hr);
3908 emit_storereg(pre[hr]|64,hr);
3909 }
3910 }
3911 }else{
3912 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3913 emit_storereg(pre[hr],hr);
3914 }
3915 }
3916 }
3917 }
3918 }
3919 }
3920 }
3921 }
3922 // Move from one register to another (no writeback)
3923 for(hr=0;hr<HOST_REGS;hr++) {
3924 if(hr!=EXCLUDE_REG) {
3925 if(pre[hr]!=entry[hr]) {
3926 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3927 int nr;
3928 if((nr=get_reg(entry,pre[hr]))>=0) {
3929 emit_mov(hr,nr);
3930 }
3931 }
3932 }
3933 }
3934 }
3935}
3936#endif
3937
3938// Load the specified registers
3939// This only loads the registers given as arguments because
3940// we don't want to load things that will be overwritten
3941void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3942{
3943 int hr;
3944 // Load 32-bit regs
3945 for(hr=0;hr<HOST_REGS;hr++) {
3946 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3947 if(entry[hr]!=regmap[hr]) {
3948 if(regmap[hr]==rs1||regmap[hr]==rs2)
3949 {
3950 if(regmap[hr]==0) {
3951 emit_zeroreg(hr);
3952 }
3953 else
3954 {
3955 emit_loadreg(regmap[hr],hr);
3956 }
3957 }
3958 }
3959 }
3960 }
3961 //Load 64-bit regs
3962 for(hr=0;hr<HOST_REGS;hr++) {
3963 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3964 if(entry[hr]!=regmap[hr]) {
3965 if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3966 {
3967 assert(regmap[hr]!=64);
3968 if((is32>>(regmap[hr]&63))&1) {
3969 int lr=get_reg(regmap,regmap[hr]-64);
3970 if(lr>=0)
3971 emit_sarimm(lr,31,hr);
3972 else
3973 emit_loadreg(regmap[hr],hr);
3974 }
3975 else
3976 {
3977 emit_loadreg(regmap[hr],hr);
3978 }
3979 }
3980 }
3981 }
3982 }
3983}
3984
3985// Load registers prior to the start of a loop
3986// so that they are not loaded within the loop
3987static void loop_preload(signed char pre[],signed char entry[])
3988{
3989 int hr;
3990 for(hr=0;hr<HOST_REGS;hr++) {
3991 if(hr!=EXCLUDE_REG) {
3992 if(pre[hr]!=entry[hr]) {
3993 if(entry[hr]>=0) {
3994 if(get_reg(pre,entry[hr])<0) {
3995 assem_debug("loop preload:\n");
3996 //printf("loop preload: %d\n",hr);
3997 if(entry[hr]==0) {
3998 emit_zeroreg(hr);
3999 }
4000 else if(entry[hr]<TEMPREG)
4001 {
4002 emit_loadreg(entry[hr],hr);
4003 }
4004 else if(entry[hr]-64<TEMPREG)
4005 {
4006 emit_loadreg(entry[hr],hr);
4007 }
4008 }
4009 }
4010 }
4011 }
4012 }
4013}
4014
4015// Generate address for load/store instruction
4016// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
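// e.g. for "lw t0,16(a1)": if a1 is not a known constant, a1+16 is computed
// here into the register that will receive the result (or a temp), so that
// load_assemble only needs the range check; if a1 is a known constant the
// final address is materialized with emit_movimm (masked down for LWL/LWR
// and LDL/LDR).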
4017void address_generation(int i,struct regstat *i_regs,signed char entry[])
4018{
4019 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
4020 int ra;
4021 int agr=AGEN1+(i&1);
4022 int mgr=MGEN1+(i&1);
4023 if(itype[i]==LOAD) {
4024 ra=get_reg(i_regs->regmap,rt1[i]);
4025 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4026 assert(ra>=0);
4027 }
4028 if(itype[i]==LOADLR) {
4029 ra=get_reg(i_regs->regmap,FTEMP);
4030 }
4031 if(itype[i]==STORE||itype[i]==STORELR) {
4032 ra=get_reg(i_regs->regmap,agr);
4033 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4034 }
4035 if(itype[i]==C1LS||itype[i]==C2LS) {
4036 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
4037 ra=get_reg(i_regs->regmap,FTEMP);
4038 else { // SWC1/SDC1/SWC2/SDC2
4039 ra=get_reg(i_regs->regmap,agr);
4040 if(ra<0) ra=get_reg(i_regs->regmap,-1);
4041 }
4042 }
4043 int rs=get_reg(i_regs->regmap,rs1[i]);
4044 int rm=get_reg(i_regs->regmap,TLREG);
4045 if(ra>=0) {
4046 int offset=imm[i];
4047 int c=(i_regs->wasconst>>rs)&1;
4048 if(rs1[i]==0) {
4049 // Using r0 as a base address
4050 /*if(rm>=0) {
4051 if(!entry||entry[rm]!=mgr) {
4052 generate_map_const(offset,rm);
4053 } // else did it in the previous cycle
4054 }*/
4055 if(!entry||entry[ra]!=agr) {
4056 if (opcode[i]==0x22||opcode[i]==0x26) {
4057 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4058 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4059 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4060 }else{
4061 emit_movimm(offset,ra);
4062 }
4063 } // else did it in the previous cycle
4064 }
4065 else if(rs<0) {
4066 if(!entry||entry[ra]!=rs1[i])
4067 emit_loadreg(rs1[i],ra);
4068 //if(!entry||entry[ra]!=rs1[i])
4069 // printf("poor load scheduling!\n");
4070 }
4071 else if(c) {
4072 if(rm>=0) {
4073 if(!entry||entry[rm]!=mgr) {
4074 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4075 // Stores to memory go thru the mapper to detect self-modifying
4076 // code, loads don't.
4077 if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4078 (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4079 generate_map_const(constmap[i][rs]+offset,rm);
4080 }else{
4081 if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4082 generate_map_const(constmap[i][rs]+offset,rm);
4083 }
4084 }
4085 }
4086 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4087 if(!entry||entry[ra]!=agr) {
4088 if (opcode[i]==0x22||opcode[i]==0x26) {
4089 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4090 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4091 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4092 }else{
4093 #ifdef HOST_IMM_ADDR32
4094 if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4095 (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4096 #endif
4097 emit_movimm(constmap[i][rs]+offset,ra);
4098 }
4099 } // else did it in the previous cycle
4100 } // else load_consts already did it
4101 }
4102 if(offset&&!c&&rs1[i]) {
4103 if(rs>=0) {
4104 emit_addimm(rs,offset,ra);
4105 }else{
4106 emit_addimm(ra,offset,ra);
4107 }
4108 }
4109 }
4110 }
4111 // Preload constants for next instruction
4112 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4113 int agr,ra;
4114 #ifndef HOST_IMM_ADDR32
4115 // Mapper entry
4116 agr=MGEN1+((i+1)&1);
4117 ra=get_reg(i_regs->regmap,agr);
4118 if(ra>=0) {
4119 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4120 int offset=imm[i+1];
4121 int c=(regs[i+1].wasconst>>rs)&1;
4122 if(c) {
4123 if(itype[i+1]==STORE||itype[i+1]==STORELR
4124 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4125 // Stores to memory go thru the mapper to detect self-modifying
4126 // code, loads don't.
4127 if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4128 (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4129 generate_map_const(constmap[i+1][rs]+offset,ra);
4130 }else{
4131 if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4132 generate_map_const(constmap[i+1][rs]+offset,ra);
4133 }
4134 }
4135 /*else if(rs1[i]==0) {
4136 generate_map_const(offset,ra);
4137 }*/
4138 }
4139 #endif
4140 // Actual address
4141 agr=AGEN1+((i+1)&1);
4142 ra=get_reg(i_regs->regmap,agr);
4143 if(ra>=0) {
4144 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4145 int offset=imm[i+1];
4146 int c=(regs[i+1].wasconst>>rs)&1;
4147 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4148 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4149 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4150 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4151 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4152 }else{
4153 #ifdef HOST_IMM_ADDR32
4154 if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4155 (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4156 #endif
4157 emit_movimm(constmap[i+1][rs]+offset,ra);
4158 }
4159 }
4160 else if(rs1[i+1]==0) {
4161 // Using r0 as a base address
4162 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4163 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4164 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4165 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4166 }else{
4167 emit_movimm(offset,ra);
4168 }
4169 }
4170 }
4171 }
4172}
4173
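// Follow a constant host register forward through the block: while the
// same host register keeps holding the same constant and no branch target
// intervenes, return the value it will finally need to hold, so the
// constant only has to be materialized once.  Returns 0 if the constant
// turns out not to be needed in the register at all.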
4174int get_final_value(int hr, int i, int *value)
4175{
4176 int reg=regs[i].regmap[hr];
4177 while(i<slen-1) {
4178 if(regs[i+1].regmap[hr]!=reg) break;
4179 if(!((regs[i+1].isconst>>hr)&1)) break;
4180 if(bt[i+1]) break;
4181 i++;
4182 }
4183 if(i<slen-1) {
4184 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4185 *value=constmap[i][hr];
4186 return 1;
4187 }
4188 if(!bt[i+1]) {
4189 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4190 // Load in delay slot, out-of-order execution
4191 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4192 {
4193 #ifdef HOST_IMM_ADDR32
4194 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4195 #endif
4196 // Precompute load address
4197 *value=constmap[i][hr]+imm[i+2];
4198 return 1;
4199 }
4200 }
4201 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4202 {
4203 #ifdef HOST_IMM_ADDR32
4204 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4205 #endif
4206 // Precompute load address
4207 *value=constmap[i][hr]+imm[i+1];
4208 //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4209 return 1;
4210 }
4211 }
4212 }
4213 *value=constmap[i][hr];
4214 //printf("c=%x\n",(int)constmap[i][hr]);
4215 if(i==slen-1) return 1;
4216 if(reg<64) {
4217 return !((unneeded_reg[i+1]>>reg)&1);
4218 }else{
4219 return !((unneeded_reg_upper[i+1]>>reg)&1);
4220 }
4221}
4222
4223// Load registers with known constants
4224void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4225{
4226 int hr;
4227 // Load 32-bit regs
4228 for(hr=0;hr<HOST_REGS;hr++) {
4229 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4230 //if(entry[hr]!=regmap[hr]) {
4231 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4232 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4233 int value;
4234 if(get_final_value(hr,i,&value)) {
4235 if(value==0) {
4236 emit_zeroreg(hr);
4237 }
4238 else {
4239 emit_movimm(value,hr);
4240 }
4241 }
4242 }
4243 }
4244 }
4245 }
4246 // Load 64-bit regs
4247 for(hr=0;hr<HOST_REGS;hr++) {
4248 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4249 //if(entry[hr]!=regmap[hr]) {
4250 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4251 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4252 if((is32>>(regmap[hr]&63))&1) {
4253 int lr=get_reg(regmap,regmap[hr]-64);
4254 assert(lr>=0);
4255 emit_sarimm(lr,31,hr);
4256 }
4257 else
4258 {
4259 int value;
4260 if(get_final_value(hr,i,&value)) {
4261 if(value==0) {
4262 emit_zeroreg(hr);
4263 }
4264 else {
4265 emit_movimm(value,hr);
4266 }
4267 }
4268 }
4269 }
4270 }
4271 }
4272 }
4273}
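// Like load_consts, but reloads the constant for every dirty constant
// register at instruction i, without checking what the previous
// instruction left in the host registers.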
4274void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4275{
4276 int hr;
4277 // Load 32-bit regs
4278 for(hr=0;hr<HOST_REGS;hr++) {
4279 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4280 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4281 int value=constmap[i][hr];
4282 if(value==0) {
4283 emit_zeroreg(hr);
4284 }
4285 else {
4286 emit_movimm(value,hr);
4287 }
4288 }
4289 }
4290 }
4291 // Load 64-bit regs
4292 for(hr=0;hr<HOST_REGS;hr++) {
4293 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4294 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4295 if((is32>>(regmap[hr]&63))&1) {
4296 int lr=get_reg(regmap,regmap[hr]-64);
4297 assert(lr>=0);
4298 emit_sarimm(lr,31,hr);
4299 }
4300 else
4301 {
4302 int value=constmap[i][hr];
4303 if(value==0) {
4304 emit_zeroreg(hr);
4305 }
4306 else {
4307 emit_movimm(value,hr);
4308 }
4309 }
4310 }
4311 }
4312 }
4313}
4314
4315// Write out all dirty registers (except cycle count)
4316void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4317{
4318 int hr;
4319 for(hr=0;hr<HOST_REGS;hr++) {
4320 if(hr!=EXCLUDE_REG) {
4321 if(i_regmap[hr]>0) {
4322 if(i_regmap[hr]!=CCREG) {
4323 if((i_dirty>>hr)&1) {
4324 if(i_regmap[hr]<64) {
4325 emit_storereg(i_regmap[hr],hr);
4326#ifndef FORCE32
4327 if( ((i_is32>>i_regmap[hr])&1) ) {
4328 #ifdef DESTRUCTIVE_WRITEBACK
4329 emit_sarimm(hr,31,hr);
4330 emit_storereg(i_regmap[hr]|64,hr);
4331 #else
4332 emit_sarimm(hr,31,HOST_TEMPREG);
4333 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4334 #endif
4335 }
4336#endif
4337 }else{
4338 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4339 emit_storereg(i_regmap[hr],hr);
4340 }
4341 }
4342 }
4343 }
4344 }
4345 }
4346 }
4347}
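/* Illustrative sketch only (never compiled): how the per-host-register
   bitmaps used by the writeback routines above combine.  dirty is indexed
   by host register, is32 by guest register number; the helper name is
   made up for this example. */
#if 0
static int example_upper_half_is_sign_ext(signed char regmap[],
  uint64_t dirty,uint64_t is32,int hr)
{
  if(regmap[hr]<0) return 0;      // nothing mapped to this host register
  if(!((dirty>>hr)&1)) return 0;  // unmodified, nothing to write back
  // A known 32-bit value only needs its sign bits stored as the upper half
  return (is32>>(regmap[hr]&63))&1;
}
#endif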
4348// Write out dirty registers that we need to reload (pair with load_needed_regs)
4349// This writes the registers not written by store_regs_bt
4350void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4351{
4352 int hr;
4353 int t=(addr-start)>>2;
4354 for(hr=0;hr<HOST_REGS;hr++) {
4355 if(hr!=EXCLUDE_REG) {
4356 if(i_regmap[hr]>0) {
4357 if(i_regmap[hr]!=CCREG) {
4358 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4359 if((i_dirty>>hr)&1) {
4360 if(i_regmap[hr]<64) {
4361 emit_storereg(i_regmap[hr],hr);
4362#ifndef FORCE32
4363 if( ((i_is32>>i_regmap[hr])&1) ) {
4364 #ifdef DESTRUCTIVE_WRITEBACK
4365 emit_sarimm(hr,31,hr);
4366 emit_storereg(i_regmap[hr]|64,hr);
4367 #else
4368 emit_sarimm(hr,31,HOST_TEMPREG);
4369 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4370 #endif
4371 }
4372#endif
4373 }else{
4374 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4375 emit_storereg(i_regmap[hr],hr);
4376 }
4377 }
4378 }
4379 }
4380 }
4381 }
4382 }
4383 }
4384}
4385
4386// Load all registers (except cycle count)
4387void load_all_regs(signed char i_regmap[])
4388{
4389 int hr;
4390 for(hr=0;hr<HOST_REGS;hr++) {
4391 if(hr!=EXCLUDE_REG) {
4392 if(i_regmap[hr]==0) {
4393 emit_zeroreg(hr);
4394 }
4395 else
4396 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4397 {
4398 emit_loadreg(i_regmap[hr],hr);
4399 }
4400 }
4401 }
4402}
4403
4404// Load all current registers also needed by next instruction
4405void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4406{
4407 int hr;
4408 for(hr=0;hr<HOST_REGS;hr++) {
4409 if(hr!=EXCLUDE_REG) {
4410 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4411 if(i_regmap[hr]==0) {
4412 emit_zeroreg(hr);
4413 }
4414 else
4415 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4416 {
4417 emit_loadreg(i_regmap[hr],hr);
4418 }
4419 }
4420 }
4421 }
4422}
4423
4424// Load all regs, storing cycle count if necessary
4425void load_regs_entry(int t)
4426{
4427 int hr;
4428 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4429 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4430 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4431 emit_storereg(CCREG,HOST_CCREG);
4432 }
4433 // Load 32-bit regs
4434 for(hr=0;hr<HOST_REGS;hr++) {
4435 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4436 if(regs[t].regmap_entry[hr]==0) {
4437 emit_zeroreg(hr);
4438 }
4439 else if(regs[t].regmap_entry[hr]!=CCREG)
4440 {
4441 emit_loadreg(regs[t].regmap_entry[hr],hr);
4442 }
4443 }
4444 }
4445 // Load 64-bit regs
4446 for(hr=0;hr<HOST_REGS;hr++) {
4447 if(regs[t].regmap_entry[hr]>=64) {
4448 assert(regs[t].regmap_entry[hr]!=64);
4449 if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4450 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4451 if(lr<0) {
4452 emit_loadreg(regs[t].regmap_entry[hr],hr);
4453 }
4454 else
4455 {
4456 emit_sarimm(lr,31,hr);
4457 }
4458 }
4459 else
4460 {
4461 emit_loadreg(regs[t].regmap_entry[hr],hr);
4462 }
4463 }
4464 }
4465}
4466
4467// Store dirty registers prior to branch
4468void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4469{
4470 if(internal_branch(i_is32,addr))
4471 {
4472 int t=(addr-start)>>2;
4473 int hr;
4474 for(hr=0;hr<HOST_REGS;hr++) {
4475 if(hr!=EXCLUDE_REG) {
4476 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4477 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4478 if((i_dirty>>hr)&1) {
4479 if(i_regmap[hr]<64) {
4480 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4481 emit_storereg(i_regmap[hr],hr);
4482 if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4483 #ifdef DESTRUCTIVE_WRITEBACK
4484 emit_sarimm(hr,31,hr);
4485 emit_storereg(i_regmap[hr]|64,hr);
4486 #else
4487 emit_sarimm(hr,31,HOST_TEMPREG);
4488 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4489 #endif
4490 }
4491 }
4492 }else{
4493 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4494 emit_storereg(i_regmap[hr],hr);
4495 }
4496 }
4497 }
4498 }
4499 }
4500 }
4501 }
4502 }
4503 else
4504 {
4505 // Branch out of this block, write out all dirty regs
4506 wb_dirtys(i_regmap,i_is32,i_dirty);
4507 }
4508}
4509
4510// Load all needed registers for branch target
4511void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4512{
4513 //if(addr>=start && addr<(start+slen*4))
4514 if(internal_branch(i_is32,addr))
4515 {
4516 int t=(addr-start)>>2;
4517 int hr;
4518 // Store the cycle count before loading something else
4519 if(i_regmap[HOST_CCREG]!=CCREG) {
4520 assert(i_regmap[HOST_CCREG]==-1);
4521 }
4522 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4523 emit_storereg(CCREG,HOST_CCREG);
4524 }
4525 // Load 32-bit regs
4526 for(hr=0;hr<HOST_REGS;hr++) {
4527 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4528 #ifdef DESTRUCTIVE_WRITEBACK
4529 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4530 #else
4531 if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4532 #endif
4533 if(regs[t].regmap_entry[hr]==0) {
4534 emit_zeroreg(hr);
4535 }
4536 else if(regs[t].regmap_entry[hr]!=CCREG)
4537 {
4538 emit_loadreg(regs[t].regmap_entry[hr],hr);
4539 }
4540 }
4541 }
4542 }
4543 // Load 64-bit regs
4544 for(hr=0;hr<HOST_REGS;hr++) {
4545 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4546 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4547 assert(regs[t].regmap_entry[hr]!=64);
4548 if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4549 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4550 if(lr<0) {
4551 emit_loadreg(regs[t].regmap_entry[hr],hr);
4552 }
4553 else
4554 {
4555 emit_sarimm(lr,31,hr);
4556 }
4557 }
4558 else
4559 {
4560 emit_loadreg(regs[t].regmap_entry[hr],hr);
4561 }
4562 }
4563 else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4564 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4565 assert(lr>=0);
4566 emit_sarimm(lr,31,hr);
4567 }
4568 }
4569 }
4570 }
4571}
4572
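// Check whether the current register state (mapping, dirty bits and
// 32-bit status) is compatible with the expected entry state of the
// branch target at addr, so the branch can be assembled without extra
// fixups at the target.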
4573int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4574{
4575 if(addr>=start && addr<start+slen*4-4)
4576 {
4577 int t=(addr-start)>>2;
4578 int hr;
4579 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4580 for(hr=0;hr<HOST_REGS;hr++)
4581 {
4582 if(hr!=EXCLUDE_REG)
4583 {
4584 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4585 {
4586 if(regs[t].regmap_entry[hr]!=-1)
4587 {
4588 return 0;
4589 }
4590 else
4591 if((i_dirty>>hr)&1)
4592 {
4593 if(i_regmap[hr]<64)
4594 {
4595 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4596 return 0;
4597 }
4598 else
4599 {
4600 if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4601 return 0;
4602 }
4603 }
4604 }
4605 else // Same register but is it 32-bit or dirty?
4606 if(i_regmap[hr]>=0)
4607 {
4608 if(!((regs[t].dirty>>hr)&1))
4609 {
4610 if((i_dirty>>hr)&1)
4611 {
4612 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4613 {
4614 //printf("%x: dirty no match\n",addr);
4615 return 0;
4616 }
4617 }
4618 }
4619 if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4620 {
4621 //printf("%x: is32 no match\n",addr);
4622 return 0;
4623 }
4624 }
4625 }
4626 }
4627 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4628#ifndef FORCE32
4629 if(requires_32bit[t]&~i_is32) return 0;
4630#endif
4631 // Delay slots are not valid branch targets
4632 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4633 // Delay slots require additional processing, so do not match
4634 if(is_ds[t]) return 0;
4635 }
4636 else
4637 {
4638 int hr;
4639 for(hr=0;hr<HOST_REGS;hr++)
4640 {
4641 if(hr!=EXCLUDE_REG)
4642 {
4643 if(i_regmap[hr]>=0)
4644 {
4645 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4646 {
4647 if((i_dirty>>hr)&1)
4648 {
4649 return 0;
4650 }
4651 }
4652 }
4653 }
4654 }
4655 }
4656 return 1;
4657}
4658
4659// Used when a branch jumps into the delay slot of another branch
4660void ds_assemble_entry(int i)
4661{
4662 int t=(ba[i]-start)>>2;
4663 if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4664 assem_debug("Assemble delay slot at %x\n",ba[i]);
4665 assem_debug("<->\n");
4666 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4667 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4668 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4669 address_generation(t,&regs[t],regs[t].regmap_entry);
4670 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4671 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4672 cop1_usable=0;
4673 is_delayslot=0;
4674 switch(itype[t]) {
4675 case ALU:
4676 alu_assemble(t,&regs[t]);break;
4677 case IMM16:
4678 imm16_assemble(t,&regs[t]);break;
4679 case SHIFT:
4680 shift_assemble(t,&regs[t]);break;
4681 case SHIFTIMM:
4682 shiftimm_assemble(t,&regs[t]);break;
4683 case LOAD:
4684 load_assemble(t,&regs[t]);break;
4685 case LOADLR:
4686 loadlr_assemble(t,&regs[t]);break;
4687 case STORE:
4688 store_assemble(t,&regs[t]);break;
4689 case STORELR:
4690 storelr_assemble(t,&regs[t]);break;
4691 case COP0:
4692 cop0_assemble(t,&regs[t]);break;
4693 case COP1:
4694 cop1_assemble(t,&regs[t]);break;
4695 case C1LS:
4696 c1ls_assemble(t,&regs[t]);break;
4697 case COP2:
4698 cop2_assemble(t,&regs[t]);break;
4699 case C2LS:
4700 c2ls_assemble(t,&regs[t]);break;
4701 case C2OP:
4702 c2op_assemble(t,&regs[t]);break;
4703 case FCONV:
4704 fconv_assemble(t,&regs[t]);break;
4705 case FLOAT:
4706 float_assemble(t,&regs[t]);break;
4707 case FCOMP:
4708 fcomp_assemble(t,&regs[t]);break;
4709 case MULTDIV:
4710 multdiv_assemble(t,&regs[t]);break;
4711 case MOV:
4712 mov_assemble(t,&regs[t]);break;
4713 case SYSCALL:
4714 case HLECALL:
4715 case INTCALL:
4716 case SPAN:
4717 case UJUMP:
4718 case RJUMP:
4719 case CJUMP:
4720 case SJUMP:
4721 case FJUMP:
4722 printf("Jump in the delay slot. This is probably a bug.\n");
4723 }
4724 store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4725 load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4726 if(internal_branch(regs[t].is32,ba[i]+4))
4727 assem_debug("branch: internal\n");
4728 else
4729 assem_debug("branch: external\n");
4730 assert(internal_branch(regs[t].is32,ba[i]+4));
4731 add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4732 emit_jmp(0);
4733}
4734
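// Emit the cycle-count check for a branch: add this block's cycle cost
// (scaled by CLOCK_DIVIDER) and register a CC_STUB that is entered once
// the counter becomes non-negative so cc_interrupt can run.  Idle loops
// get special treatment.  *adj is set to the adjustment already applied
// at an internal target (ccadj of the target), -1 for a branch into a
// delay slot, and 0 otherwise.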
4735void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4736{
4737 int count;
4738 int jaddr;
4739 int idle=0;
4740 if(itype[i]==RJUMP)
4741 {
4742 *adj=0;
4743 }
4744 //if(ba[i]>=start && ba[i]<(start+slen*4))
4745 if(internal_branch(branch_regs[i].is32,ba[i]))
4746 {
4747 int t=(ba[i]-start)>>2;
4748 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4749 else *adj=ccadj[t];
4750 }
4751 else
4752 {
4753 *adj=0;
4754 }
4755 count=ccadj[i];
4756 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4757 // Idle loop
4758 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4759 idle=(int)out;
4760 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4761 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4762 jaddr=(int)out;
4763 emit_jmp(0);
4764 }
4765 else if(*adj==0||invert) {
4766 emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4767 jaddr=(int)out;
4768 emit_jns(0);
4769 }
4770 else
4771 {
4772 emit_cmpimm(HOST_CCREG,-2*(count+2));
4773 jaddr=(int)out;
4774 emit_jns(0);
4775 }
4776 add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4777}
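/* Minimal sketch (never compiled) of what the sequence emitted by do_cc
   amounts to at run time, assuming the negative cycle counter convention
   used above: the block's cost is added before the branch and control
   diverts to the CC_STUB (handled by do_ccstub below) once the counter
   becomes non-negative. */
#if 0
static int example_cycle_check(int cycle_counter,int count)
{
  cycle_counter+=CLOCK_DIVIDER*(count+2);  // emit_addimm_and_set_flags
  if(cycle_counter>=0) {
    // corresponds to the emit_jns(0) path into the CC_STUB
    return 1;  // run cc_interrupt, then continue
  }
  return 0;    // keep executing translated code
}
#endif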
4778
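// Out-of-line handler reached when the cycle count expires at a branch.
// As used here, the stub fields mean: stubs[n][1]=branch to patch,
// [2]=return address, [3]=cycle adjustment, [4]=instruction index,
// [5]=PC to store (or -1 to derive it from the branch condition),
// [6]=TAKEN/NOTTAKEN/NULLDS.  Writes back dirty registers, stores pcaddr,
// calls cc_interrupt, then reloads registers and returns.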
4779void do_ccstub(int n)
4780{
4781 literal_pool(256);
4782 assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4783 set_jump_target(stubs[n][1],(int)out);
4784 int i=stubs[n][4];
4785 if(stubs[n][6]==NULLDS) {
4786 // Delay slot instruction is nullified ("likely" branch)
4787 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4788 }
4789 else if(stubs[n][6]!=TAKEN) {
4790 wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4791 }
4792 else {
4793 if(internal_branch(branch_regs[i].is32,ba[i]))
4794 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4795 }
4796 if(stubs[n][5]!=-1)
4797 {
4798 // Save PC as return address
4799 emit_movimm(stubs[n][5],EAX);
4800 emit_writeword(EAX,(int)&pcaddr);
4801 }
4802 else
4803 {
4804 // Return address depends on which way the branch goes
4805 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4806 {
4807 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4808 int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4809 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4810 int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4811 if(rs1[i]==0)
4812 {
4813 s1l=s2l;s1h=s2h;
4814 s2l=s2h=-1;
4815 }
4816 else if(rs2[i]==0)
4817 {
4818 s2l=s2h=-1;
4819 }
4820 if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4821 s1h=s2h=-1;
4822 }
4823 assert(s1l>=0);
4824 #ifdef DESTRUCTIVE_WRITEBACK
4825 if(rs1[i]) {
4826 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4827 emit_loadreg(rs1[i],s1l);
4828 }
4829 else {
4830 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4831 emit_loadreg(rs2[i],s1l);
4832 }
4833 if(s2l>=0)
4834 if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4835 emit_loadreg(rs2[i],s2l);
4836 #endif
4837 int hr=0;
4838 int addr,alt,ntaddr;
4839 while(hr<HOST_REGS)
4840 {
4841 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4842 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4843 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4844 {
4845 addr=hr++;break;
4846 }
4847 hr++;
4848 }
4849 while(hr<HOST_REGS)
4850 {
4851 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4852 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4853 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4854 {
4855 alt=hr++;break;
4856 }
4857 hr++;
4858 }
4859 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4860 {
4861 while(hr<HOST_REGS)
4862 {
4863 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4864 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4865 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4866 {
4867 ntaddr=hr;break;
4868 }
4869 hr++;
4870 }
4871 assert(hr<HOST_REGS);
4872 }
4873 if((opcode[i]&0x2f)==4) // BEQ
4874 {
4875 #ifdef HAVE_CMOV_IMM
4876 if(s1h<0) {
4877 if(s2l>=0) emit_cmp(s1l,s2l);
4878 else emit_test(s1l,s1l);
4879 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4880 }
4881 else
4882 #endif
4883 {
4884 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4885 if(s1h>=0) {
4886 if(s2h>=0) emit_cmp(s1h,s2h);
4887 else emit_test(s1h,s1h);
4888 emit_cmovne_reg(alt,addr);
4889 }
4890 if(s2l>=0) emit_cmp(s1l,s2l);
4891 else emit_test(s1l,s1l);
4892 emit_cmovne_reg(alt,addr);
4893 }
4894 }
4895 if((opcode[i]&0x2f)==5) // BNE
4896 {
4897 #ifdef HAVE_CMOV_IMM
4898 if(s1h<0) {
4899 if(s2l>=0) emit_cmp(s1l,s2l);
4900 else emit_test(s1l,s1l);
4901 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4902 }
4903 else
4904 #endif
4905 {
4906 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4907 if(s1h>=0) {
4908 if(s2h>=0) emit_cmp(s1h,s2h);
4909 else emit_test(s1h,s1h);
4910 emit_cmovne_reg(alt,addr);
4911 }
4912 if(s2l>=0) emit_cmp(s1l,s2l);
4913 else emit_test(s1l,s1l);
4914 emit_cmovne_reg(alt,addr);
4915 }
4916 }
4917 if((opcode[i]&0x2f)==6) // BLEZ
4918 {
4919 //emit_movimm(ba[i],alt);
4920 //emit_movimm(start+i*4+8,addr);
4921 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4922 emit_cmpimm(s1l,1);
4923 if(s1h>=0) emit_mov(addr,ntaddr);
4924 emit_cmovl_reg(alt,addr);
4925 if(s1h>=0) {
4926 emit_test(s1h,s1h);
4927 emit_cmovne_reg(ntaddr,addr);
4928 emit_cmovs_reg(alt,addr);
4929 }
4930 }
4931 if((opcode[i]&0x2f)==7) // BGTZ
4932 {
4933 //emit_movimm(ba[i],addr);
4934 //emit_movimm(start+i*4+8,ntaddr);
4935 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4936 emit_cmpimm(s1l,1);
4937 if(s1h>=0) emit_mov(addr,alt);
4938 emit_cmovl_reg(ntaddr,addr);
4939 if(s1h>=0) {
4940 emit_test(s1h,s1h);
4941 emit_cmovne_reg(alt,addr);
4942 emit_cmovs_reg(ntaddr,addr);
4943 }
4944 }
4945 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4946 {
4947 //emit_movimm(ba[i],alt);
4948 //emit_movimm(start+i*4+8,addr);
4949 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4950 if(s1h>=0) emit_test(s1h,s1h);
4951 else emit_test(s1l,s1l);
4952 emit_cmovs_reg(alt,addr);
4953 }
4954 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4955 {
4956 //emit_movimm(ba[i],addr);
4957 //emit_movimm(start+i*4+8,alt);
4958 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4959 if(s1h>=0) emit_test(s1h,s1h);
4960 else emit_test(s1l,s1l);
4961 emit_cmovs_reg(alt,addr);
4962 }
4963 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4964 if(source[i]&0x10000) // BC1T
4965 {
4966 //emit_movimm(ba[i],alt);
4967 //emit_movimm(start+i*4+8,addr);
4968 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4969 emit_testimm(s1l,0x800000);
4970 emit_cmovne_reg(alt,addr);
4971 }
4972 else // BC1F
4973 {
4974 //emit_movimm(ba[i],addr);
4975 //emit_movimm(start+i*4+8,alt);
4976 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4977 emit_testimm(s1l,0x800000);
4978 emit_cmovne_reg(alt,addr);
4979 }
4980 }
4981 emit_writeword(addr,(int)&pcaddr);
4982 }
4983 else
4984 if(itype[i]==RJUMP)
4985 {
4986 int r=get_reg(branch_regs[i].regmap,rs1[i]);
4987 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4988 r=get_reg(branch_regs[i].regmap,RTEMP);
4989 }
4990 emit_writeword(r,(int)&pcaddr);
4991 }
4992 else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4993 }
4994 // Update cycle count
4995 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4996 if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4997 emit_call((int)cc_interrupt);
4998 if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4999 if(stubs[n][6]==TAKEN) {
5000 if(internal_branch(branch_regs[i].is32,ba[i]))
5001 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5002 else if(itype[i]==RJUMP) {
5003 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5004 emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5005 else
5006 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5007 }
5008 }else if(stubs[n][6]==NOTTAKEN) {
5009 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5010 else load_all_regs(branch_regs[i].regmap);
5011 }else if(stubs[n][6]==NULLDS) {
5012 // Delay slot instruction is nullified ("likely" branch)
5013 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5014 else load_all_regs(regs[i].regmap);
5015 }else{
5016 load_all_regs(branch_regs[i].regmap);
5017 }
5018 emit_jmp(stubs[n][2]); // return address
5019
5020 /* This works but uses a lot of memory...
5021 emit_readword((int)&last_count,ECX);
5022 emit_add(HOST_CCREG,ECX,EAX);
5023 emit_writeword(EAX,(int)&Count);
5024 emit_call((int)gen_interupt);
5025 emit_readword((int)&Count,HOST_CCREG);
5026 emit_readword((int)&next_interupt,EAX);
5027 emit_readword((int)&pending_exception,EBX);
5028 emit_writeword(EAX,(int)&last_count);
5029 emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5030 emit_test(EBX,EBX);
5031 int jne_instr=(int)out;
5032 emit_jne(0);
5033 if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5034 load_all_regs(branch_regs[i].regmap);
5035 emit_jmp(stubs[n][2]); // return address
5036 set_jump_target(jne_instr,(int)out);
5037 emit_readword((int)&pcaddr,EAX);
5038 // Call get_addr_ht instead of doing the hash table here.
5039 // This code is executed infrequently and takes up a lot of space
5040 // so smaller is better.
5041 emit_storereg(CCREG,HOST_CCREG);
5042 emit_pushreg(EAX);
5043 emit_call((int)get_addr_ht);
5044 emit_loadreg(CCREG,HOST_CCREG);
5045 emit_addimm(ESP,4,ESP);
5046 emit_jmpreg(EAX);*/
5047}
5048
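// Record a branch to be resolved by the linker: addr is the patch
// location in the output buffer and target the guest address to link to.
// Despite the parameter name, callers pass the internal_branch() result
// as the third field (doubled in the Cortex-A8 workaround case).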
5049int add_to_linker(int addr,int target,int ext)
5050{
5051 link_addr[linkcount][0]=addr;
5052 link_addr[linkcount][1]=target;
5053 link_addr[linkcount][2]=ext;
5054 linkcount++;
5055}
5056
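// Assemble an unconditional jump (J/JAL) together with its delay slot.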
5057void ujump_assemble(int i,struct regstat *i_regs)
5058{
5059 signed char *i_regmap=i_regs->regmap;
5060 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5061 address_generation(i+1,i_regs,regs[i].regmap_entry);
5062 #ifdef REG_PREFETCH
5063 int temp=get_reg(branch_regs[i].regmap,PTEMP);
5064 if(rt1[i]==31&&temp>=0)
5065 {
5066 int return_address=start+i*4+8;
5067 if(get_reg(branch_regs[i].regmap,31)>0)
5068 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5069 }
5070 #endif
5071 ds_assemble(i+1,i_regs);
5072 uint64_t bc_unneeded=branch_regs[i].u;
5073 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5074 bc_unneeded|=1|(1LL<<rt1[i]);
5075 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5076 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5077 bc_unneeded,bc_unneeded_upper);
5078 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5079 if(rt1[i]==31) {
5080 int rt;
5081 unsigned int return_address;
5082 assert(rt1[i+1]!=31);
5083 assert(rt2[i+1]!=31);
5084 rt=get_reg(branch_regs[i].regmap,31);
5085 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5086 //assert(rt>=0);
5087 return_address=start+i*4+8;
5088 if(rt>=0) {
5089 #ifdef USE_MINI_HT
5090 if(internal_branch(branch_regs[i].is32,return_address)) {
5091 int temp=rt+1;
5092 if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5093 branch_regs[i].regmap[temp]>=0)
5094 {
5095 temp=get_reg(branch_regs[i].regmap,-1);
5096 }
5097 #ifdef HOST_TEMPREG
5098 if(temp<0) temp=HOST_TEMPREG;
5099 #endif
5100 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5101 else emit_movimm(return_address,rt);
5102 }
5103 else
5104 #endif
5105 {
5106 #ifdef REG_PREFETCH
5107 if(temp>=0)
5108 {
5109 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5110 }
5111 #endif
5112 emit_movimm(return_address,rt); // PC into link register
5113 #ifdef IMM_PREFETCH
5114 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5115 #endif
5116 }
5117 }
5118 }
5119 int cc,adj;
5120 cc=get_reg(branch_regs[i].regmap,CCREG);
5121 assert(cc==HOST_CCREG);
5122 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5123 #ifdef REG_PREFETCH
5124 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5125 #endif
5126 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5127 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5128 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5129 if(internal_branch(branch_regs[i].is32,ba[i]))
5130 assem_debug("branch: internal\n");
5131 else
5132 assem_debug("branch: external\n");
5133 if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5134 ds_assemble_entry(i);
5135 }
5136 else {
5137 add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5138 emit_jmp(0);
5139 }
5140}
5141
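// Assemble a register jump (JR/JALR) together with its delay slot.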
5142void rjump_assemble(int i,struct regstat *i_regs)
5143{
5144 signed char *i_regmap=i_regs->regmap;
5145 int temp;
5146 int rs,cc,adj;
5147 rs=get_reg(branch_regs[i].regmap,rs1[i]);
5148 assert(rs>=0);
5149 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5150 // Delay slot abuse, make a copy of the branch address register
5151 temp=get_reg(branch_regs[i].regmap,RTEMP);
5152 assert(temp>=0);
5153 assert(regs[i].regmap[temp]==RTEMP);
5154 emit_mov(rs,temp);
5155 rs=temp;
5156 }
5157 address_generation(i+1,i_regs,regs[i].regmap_entry);
5158 #ifdef REG_PREFETCH
5159 if(rt1[i]==31)
5160 {
5161 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5162 int return_address=start+i*4+8;
5163 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5164 }
5165 }
5166 #endif
5167 #ifdef USE_MINI_HT
5168 if(rs1[i]==31) {
5169 int rh=get_reg(regs[i].regmap,RHASH);
5170 if(rh>=0) do_preload_rhash(rh);
5171 }
5172 #endif
5173 ds_assemble(i+1,i_regs);
5174 uint64_t bc_unneeded=branch_regs[i].u;
5175 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5176 bc_unneeded|=1|(1LL<<rt1[i]);
5177 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5178 bc_unneeded&=~(1LL<<rs1[i]);
5179 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5180 bc_unneeded,bc_unneeded_upper);
5181 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5182 if(rt1[i]!=0) {
5183 int rt,return_address;
5184 assert(rt1[i+1]!=rt1[i]);
5185 assert(rt2[i+1]!=rt1[i]);
5186 rt=get_reg(branch_regs[i].regmap,rt1[i]);
5187 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5188 assert(rt>=0);
5189 return_address=start+i*4+8;
5190 #ifdef REG_PREFETCH
5191 if(temp>=0)
5192 {
5193 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5194 }
5195 #endif
5196 emit_movimm(return_address,rt); // PC into link register
5197 #ifdef IMM_PREFETCH
5198 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5199 #endif
5200 }
5201 cc=get_reg(branch_regs[i].regmap,CCREG);
5202 assert(cc==HOST_CCREG);
5203 #ifdef USE_MINI_HT
5204 int rh=get_reg(branch_regs[i].regmap,RHASH);
5205 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5206 if(rs1[i]==31) {
5207 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5208 do_preload_rhtbl(ht);
5209 do_rhash(rs,rh);
5210 }
5211 #endif
5212 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5213 #ifdef DESTRUCTIVE_WRITEBACK
5214 if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5215 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5216 emit_loadreg(rs1[i],rs);
5217 }
5218 }
5219 #endif
5220 #ifdef REG_PREFETCH
5221 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5222 #endif
5223 #ifdef USE_MINI_HT
5224 if(rs1[i]==31) {
5225 do_miniht_load(ht,rh);
5226 }
5227 #endif
5228 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5229 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5230 //assert(adj==0);
5231 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5232 add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5233 emit_jns(0);
5234 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5235 #ifdef USE_MINI_HT
5236 if(rs1[i]==31) {
5237 do_miniht_jump(rs,rh,ht);
5238 }
5239 else
5240 #endif
5241 {
5242 //if(rs!=EAX) emit_mov(rs,EAX);
5243 //emit_jmp((int)jump_vaddr_eax);
5244 emit_jmp(jump_vaddr_reg[rs]);
5245 }
5246 /* Check hash table
5247 temp=!rs;
5248 emit_mov(rs,temp);
5249 emit_shrimm(rs,16,rs);
5250 emit_xor(temp,rs,rs);
5251 emit_movzwl_reg(rs,rs);
5252 emit_shlimm(rs,4,rs);
5253 emit_cmpmem_indexed((int)hash_table,rs,temp);
5254 emit_jne((int)out+14);
5255 emit_readword_indexed((int)hash_table+4,rs,rs);
5256 emit_jmpreg(rs);
5257 emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5258 emit_addimm_no_flags(8,rs);
5259 emit_jeq((int)out-17);
5260 // No hit on hash table, call compiler
5261 emit_pushreg(temp);
5262//DEBUG >
5263#ifdef DEBUG_CYCLE_COUNT
5264 emit_readword((int)&last_count,ECX);
5265 emit_add(HOST_CCREG,ECX,HOST_CCREG);
5266 emit_readword((int)&next_interupt,ECX);
5267 emit_writeword(HOST_CCREG,(int)&Count);
5268 emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5269 emit_writeword(ECX,(int)&last_count);
5270#endif
5271//DEBUG <
5272 emit_storereg(CCREG,HOST_CCREG);
5273 emit_call((int)get_addr);
5274 emit_loadreg(CCREG,HOST_CCREG);
5275 emit_addimm(ESP,4,ESP);
5276 emit_jmpreg(EAX);*/
5277 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5278 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5279 #endif
5280}
5281
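// Assemble a conditional branch (BEQ/BNE/BLEZ/BGTZ and their "likely"
// forms), scheduling the delay slot ahead of the compare when possible.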
5282void cjump_assemble(int i,struct regstat *i_regs)
5283{
5284 signed char *i_regmap=i_regs->regmap;
5285 int cc;
5286 int match;
5287 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5288 assem_debug("match=%d\n",match);
5289 int s1h,s1l,s2h,s2l;
5290 int prev_cop1_usable=cop1_usable;
5291 int unconditional=0,nop=0;
5292 int only32=0;
5293 int ooo=1;
5294 int invert=0;
5295 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5296 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5297 if(likely[i]) ooo=0;
5298 if(!match) invert=1;
5299 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5300 if(i>(ba[i]-start)>>2) invert=1;
5301 #endif
5302
5303 if(ooo)
5304 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5305 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5306 {
5307 // Write-after-read dependency prevents out of order execution
5308 // First test branch condition, then execute delay slot, then branch
5309 ooo=0;
5310 }
5311
5312 if(ooo) {
5313 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5314 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5315 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5316 s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5317 }
5318 else {
5319 s1l=get_reg(i_regmap,rs1[i]);
5320 s1h=get_reg(i_regmap,rs1[i]|64);
5321 s2l=get_reg(i_regmap,rs2[i]);
5322 s2h=get_reg(i_regmap,rs2[i]|64);
5323 }
5324 if(rs1[i]==0&&rs2[i]==0)
5325 {
5326 if(opcode[i]&1) nop=1;
5327 else unconditional=1;
5328 //assert(opcode[i]!=5);
5329 //assert(opcode[i]!=7);
5330 //assert(opcode[i]!=0x15);
5331 //assert(opcode[i]!=0x17);
5332 }
5333 else if(rs1[i]==0)
5334 {
5335 s1l=s2l;s1h=s2h;
5336 s2l=s2h=-1;
5337 only32=(regs[i].was32>>rs2[i])&1;
5338 }
5339 else if(rs2[i]==0)
5340 {
5341 s2l=s2h=-1;
5342 only32=(regs[i].was32>>rs1[i])&1;
5343 }
5344 else {
5345 only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5346 }
5347
5348 if(ooo) {
5349 // Out of order execution (delay slot first)
5350 //printf("OOOE\n");
5351 address_generation(i+1,i_regs,regs[i].regmap_entry);
5352 ds_assemble(i+1,i_regs);
5353 int adj;
5354 uint64_t bc_unneeded=branch_regs[i].u;
5355 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5356 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5357 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5358 bc_unneeded|=1;
5359 bc_unneeded_upper|=1;
5360 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5361 bc_unneeded,bc_unneeded_upper);
5362 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5363 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5364 cc=get_reg(branch_regs[i].regmap,CCREG);
5365 assert(cc==HOST_CCREG);
5366 if(unconditional)
5367 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5368 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5369 //assem_debug("cycle count (adj)\n");
5370 if(unconditional) {
5371 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5372 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5373 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5374 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5375 if(internal)
5376 assem_debug("branch: internal\n");
5377 else
5378 assem_debug("branch: external\n");
5379 if(internal&&is_ds[(ba[i]-start)>>2]) {
5380 ds_assemble_entry(i);
5381 }
5382 else {
5383 add_to_linker((int)out,ba[i],internal);
5384 emit_jmp(0);
5385 }
5386 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5387 if(((u_int)out)&7) emit_addnop(0);
5388 #endif
5389 }
5390 }
5391 else if(nop) {
5392 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5393 int jaddr=(int)out;
5394 emit_jns(0);
5395 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5396 }
5397 else {
5398 int taken=0,nottaken=0,nottaken1=0;
5399 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5400 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5401 if(!only32)
5402 {
5403 assert(s1h>=0);
5404 if(opcode[i]==4) // BEQ
5405 {
5406 if(s2h>=0) emit_cmp(s1h,s2h);
5407 else emit_test(s1h,s1h);
5408 nottaken1=(int)out;
5409 emit_jne(1);
5410 }
5411 if(opcode[i]==5) // BNE
5412 {
5413 if(s2h>=0) emit_cmp(s1h,s2h);
5414 else emit_test(s1h,s1h);
5415 if(invert) taken=(int)out;
5416 else add_to_linker((int)out,ba[i],internal);
5417 emit_jne(0);
5418 }
5419 if(opcode[i]==6) // BLEZ
5420 {
5421 emit_test(s1h,s1h);
5422 if(invert) taken=(int)out;
5423 else add_to_linker((int)out,ba[i],internal);
5424 emit_js(0);
5425 nottaken1=(int)out;
5426 emit_jne(1);
5427 }
5428 if(opcode[i]==7) // BGTZ
5429 {
5430 emit_test(s1h,s1h);
5431 nottaken1=(int)out;
5432 emit_js(1);
5433 if(invert) taken=(int)out;
5434 else add_to_linker((int)out,ba[i],internal);
5435 emit_jne(0);
5436 }
5437 } // if(!only32)
5438
5439 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5440 assert(s1l>=0);
5441 if(opcode[i]==4) // BEQ
5442 {
5443 if(s2l>=0) emit_cmp(s1l,s2l);
5444 else emit_test(s1l,s1l);
5445 if(invert){
5446 nottaken=(int)out;
5447 emit_jne(1);
5448 }else{
5449 add_to_linker((int)out,ba[i],internal);
5450 emit_jeq(0);
5451 }
5452 }
5453 if(opcode[i]==5) // BNE
5454 {
5455 if(s2l>=0) emit_cmp(s1l,s2l);
5456 else emit_test(s1l,s1l);
5457 if(invert){
5458 nottaken=(int)out;
5459 emit_jeq(1);
5460 }else{
5461 add_to_linker((int)out,ba[i],internal);
5462 emit_jne(0);
5463 }
5464 }
5465 if(opcode[i]==6) // BLEZ
5466 {
5467 emit_cmpimm(s1l,1);
5468 if(invert){
5469 nottaken=(int)out;
5470 emit_jge(1);
5471 }else{
5472 add_to_linker((int)out,ba[i],internal);
5473 emit_jl(0);
5474 }
5475 }
5476 if(opcode[i]==7) // BGTZ
5477 {
5478 emit_cmpimm(s1l,1);
5479 if(invert){
5480 nottaken=(int)out;
5481 emit_jl(1);
5482 }else{
5483 add_to_linker((int)out,ba[i],internal);
5484 emit_jge(0);
5485 }
5486 }
5487 if(invert) {
5488 if(taken) set_jump_target(taken,(int)out);
5489 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5490 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5491 if(adj) {
5492 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5493 add_to_linker((int)out,ba[i],internal);
5494 }else{
5495 emit_addnop(13);
5496 add_to_linker((int)out,ba[i],internal*2);
5497 }
5498 emit_jmp(0);
5499 }else
5500 #endif
5501 {
5502 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5503 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5504 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5505 if(internal)
5506 assem_debug("branch: internal\n");
5507 else
5508 assem_debug("branch: external\n");
5509 if(internal&&is_ds[(ba[i]-start)>>2]) {
5510 ds_assemble_entry(i);
5511 }
5512 else {
5513 add_to_linker((int)out,ba[i],internal);
5514 emit_jmp(0);
5515 }
5516 }
5517 set_jump_target(nottaken,(int)out);
5518 }
5519
5520 if(nottaken1) set_jump_target(nottaken1,(int)out);
5521 if(adj) {
5522 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5523 }
5524 } // (!unconditional)
5525 } // if(ooo)
5526 else
5527 {
5528 // In-order execution (branch first)
5529 //if(likely[i]) printf("IOL\n");
5530 //else
5531 //printf("IOE\n");
5532 int taken=0,nottaken=0,nottaken1=0;
5533 if(!unconditional&&!nop) {
5534 if(!only32)
5535 {
5536 assert(s1h>=0);
5537 if((opcode[i]&0x2f)==4) // BEQ
5538 {
5539 if(s2h>=0) emit_cmp(s1h,s2h);
5540 else emit_test(s1h,s1h);
5541 nottaken1=(int)out;
5542 emit_jne(2);
5543 }
5544 if((opcode[i]&0x2f)==5) // BNE
5545 {
5546 if(s2h>=0) emit_cmp(s1h,s2h);
5547 else emit_test(s1h,s1h);
5548 taken=(int)out;
5549 emit_jne(1);
5550 }
5551 if((opcode[i]&0x2f)==6) // BLEZ
5552 {
5553 emit_test(s1h,s1h);
5554 taken=(int)out;
5555 emit_js(1);
5556 nottaken1=(int)out;
5557 emit_jne(2);
5558 }
5559 if((opcode[i]&0x2f)==7) // BGTZ
5560 {
5561 emit_test(s1h,s1h);
5562 nottaken1=(int)out;
5563 emit_js(2);
5564 taken=(int)out;
5565 emit_jne(1);
5566 }
5567 } // if(!only32)
5568
5569 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5570 assert(s1l>=0);
5571 if((opcode[i]&0x2f)==4) // BEQ
5572 {
5573 if(s2l>=0) emit_cmp(s1l,s2l);
5574 else emit_test(s1l,s1l);
5575 nottaken=(int)out;
5576 emit_jne(2);
5577 }
5578 if((opcode[i]&0x2f)==5) // BNE
5579 {
5580 if(s2l>=0) emit_cmp(s1l,s2l);
5581 else emit_test(s1l,s1l);
5582 nottaken=(int)out;
5583 emit_jeq(2);
5584 }
5585 if((opcode[i]&0x2f)==6) // BLEZ
5586 {
5587 emit_cmpimm(s1l,1);
5588 nottaken=(int)out;
5589 emit_jge(2);
5590 }
5591 if((opcode[i]&0x2f)==7) // BGTZ
5592 {
5593 emit_cmpimm(s1l,1);
5594 nottaken=(int)out;
5595 emit_jl(2);
5596 }
5597 } // if(!unconditional)
5598 int adj;
5599 uint64_t ds_unneeded=branch_regs[i].u;
5600 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5601 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5602 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5603 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5604 ds_unneeded|=1;
5605 ds_unneeded_upper|=1;
5606 // branch taken
5607 if(!nop) {
5608 if(taken) set_jump_target(taken,(int)out);
5609 assem_debug("1:\n");
5610 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5611 ds_unneeded,ds_unneeded_upper);
5612 // load regs
5613 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5614 address_generation(i+1,&branch_regs[i],0);
5615 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5616 ds_assemble(i+1,&branch_regs[i]);
5617 cc=get_reg(branch_regs[i].regmap,CCREG);
5618 if(cc==-1) {
5619 emit_loadreg(CCREG,cc=HOST_CCREG);
5620 // CHECK: Is the following instruction (fall thru) allocated ok?
5621 }
5622 assert(cc==HOST_CCREG);
5623 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5624 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5625 assem_debug("cycle count (adj)\n");
5626 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5627 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5628 if(internal)
5629 assem_debug("branch: internal\n");
5630 else
5631 assem_debug("branch: external\n");
5632 if(internal&&is_ds[(ba[i]-start)>>2]) {
5633 ds_assemble_entry(i);
5634 }
5635 else {
5636 add_to_linker((int)out,ba[i],internal);
5637 emit_jmp(0);
5638 }
5639 }
5640 // branch not taken
5641 cop1_usable=prev_cop1_usable;
5642 if(!unconditional) {
5643 if(nottaken1) set_jump_target(nottaken1,(int)out);
5644 set_jump_target(nottaken,(int)out);
5645 assem_debug("2:\n");
5646 if(!likely[i]) {
5647 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5648 ds_unneeded,ds_unneeded_upper);
5649 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5650 address_generation(i+1,&branch_regs[i],0);
5651 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5652 ds_assemble(i+1,&branch_regs[i]);
5653 }
5654 cc=get_reg(branch_regs[i].regmap,CCREG);
5655 if(cc==-1&&!likely[i]) {
5656 // Cycle count isn't in a register, temporarily load it then write it out
5657 emit_loadreg(CCREG,HOST_CCREG);
5658 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5659 int jaddr=(int)out;
5660 emit_jns(0);
5661 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5662 emit_storereg(CCREG,HOST_CCREG);
5663 }
5664 else{
5665 cc=get_reg(i_regmap,CCREG);
5666 assert(cc==HOST_CCREG);
5667 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5668 int jaddr=(int)out;
5669 emit_jns(0);
5670 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5671 }
5672 }
5673 }
5674}
5675
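// Assemble a REGIMM branch (BLTZ/BGEZ, their linking AL forms and the
// "likely" variants), scheduling the delay slot first when possible.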
5676void sjump_assemble(int i,struct regstat *i_regs)
5677{
5678 signed char *i_regmap=i_regs->regmap;
5679 int cc;
5680 int match;
5681 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5682 assem_debug("smatch=%d\n",match);
5683 int s1h,s1l;
5684 int prev_cop1_usable=cop1_usable;
5685 int unconditional=0,nevertaken=0;
5686 int only32=0;
5687 int ooo=1;
5688 int invert=0;
5689 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5690 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5691 if(likely[i]) ooo=0;
5692 if(!match) invert=1;
5693 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5694 if(i>(ba[i]-start)>>2) invert=1;
5695 #endif
5696
5697 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5698 //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5699
5700 if(ooo) {
5701 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5702 {
5703 // Write-after-read dependency prevents out of order execution
5704 // First test branch condition, then execute delay slot, then branch
5705 ooo=0;
5706 }
5707 if(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))
5708      // BxxZAL: the delay slot instruction can see the updated $ra, so assemble in order
5709 ooo=0;
5710 }
5711
5712 if(ooo) {
5713 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5714 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5715 }
5716 else {
5717 s1l=get_reg(i_regmap,rs1[i]);
5718 s1h=get_reg(i_regmap,rs1[i]|64);
5719 }
5720 if(rs1[i]==0)
5721 {
5722 if(opcode2[i]&1) unconditional=1;
5723 else nevertaken=1;
5724 // These are never taken (r0 is never less than zero)
5725 //assert(opcode2[i]!=0);
5726 //assert(opcode2[i]!=2);
5727 //assert(opcode2[i]!=0x10);
5728 //assert(opcode2[i]!=0x12);
5729 }
5730 else {
5731 only32=(regs[i].was32>>rs1[i])&1;
5732 }
5733
5734 if(ooo) {
5735 // Out of order execution (delay slot first)
5736 //printf("OOOE\n");
5737 address_generation(i+1,i_regs,regs[i].regmap_entry);
5738 ds_assemble(i+1,i_regs);
5739 int adj;
5740 uint64_t bc_unneeded=branch_regs[i].u;
5741 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5742 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5743 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5744 bc_unneeded|=1;
5745 bc_unneeded_upper|=1;
5746 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5747 bc_unneeded,bc_unneeded_upper);
5748 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5749 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5750 if(rt1[i]==31) {
5751 int rt,return_address;
5752 rt=get_reg(branch_regs[i].regmap,31);
5753 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5754 if(rt>=0) {
5755 // Save the PC even if the branch is not taken
5756 return_address=start+i*4+8;
5757 emit_movimm(return_address,rt); // PC into link register
5758 #ifdef IMM_PREFETCH
5759 if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5760 #endif
5761 }
5762 }
5763 cc=get_reg(branch_regs[i].regmap,CCREG);
5764 assert(cc==HOST_CCREG);
5765 if(unconditional)
5766 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5767 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5768 assem_debug("cycle count (adj)\n");
5769 if(unconditional) {
5770 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5771 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5772 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5773 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5774 if(internal)
5775 assem_debug("branch: internal\n");
5776 else
5777 assem_debug("branch: external\n");
5778 if(internal&&is_ds[(ba[i]-start)>>2]) {
5779 ds_assemble_entry(i);
5780 }
5781 else {
5782 add_to_linker((int)out,ba[i],internal);
5783 emit_jmp(0);
5784 }
5785 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5786 if(((u_int)out)&7) emit_addnop(0);
5787 #endif
5788 }
5789 }
5790 else if(nevertaken) {
5791 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5792 int jaddr=(int)out;
5793 emit_jns(0);
5794 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5795 }
5796 else {
5797 int nottaken=0;
5798 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5799 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5800 if(!only32)
5801 {
5802 assert(s1h>=0);
5803 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5804 {
5805 emit_test(s1h,s1h);
5806 if(invert){
5807 nottaken=(int)out;
5808 emit_jns(1);
5809 }else{
5810 add_to_linker((int)out,ba[i],internal);
5811 emit_js(0);
5812 }
5813 }
5814        if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5815 {
5816 emit_test(s1h,s1h);
5817 if(invert){
5818 nottaken=(int)out;
5819 emit_js(1);
5820 }else{
5821 add_to_linker((int)out,ba[i],internal);
5822 emit_jns(0);
5823 }
5824 }
5825 } // if(!only32)
5826 else
5827 {
5828 assert(s1l>=0);
5829 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5830 {
5831 emit_test(s1l,s1l);
5832 if(invert){
5833 nottaken=(int)out;
5834 emit_jns(1);
5835 }else{
5836 add_to_linker((int)out,ba[i],internal);
5837 emit_js(0);
5838 }
5839 }
5840        if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5841 {
5842 emit_test(s1l,s1l);
5843 if(invert){
5844 nottaken=(int)out;
5845 emit_js(1);
5846 }else{
5847 add_to_linker((int)out,ba[i],internal);
5848 emit_jns(0);
5849 }
5850 }
5851 } // if(!only32)
5852
5853 if(invert) {
5854 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5855 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5856 if(adj) {
5857 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5858 add_to_linker((int)out,ba[i],internal);
5859 }else{
5860 emit_addnop(13);
5861 add_to_linker((int)out,ba[i],internal*2);
5862 }
5863 emit_jmp(0);
5864 }else
5865 #endif
5866 {
5867 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5868 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5869 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5870 if(internal)
5871 assem_debug("branch: internal\n");
5872 else
5873 assem_debug("branch: external\n");
5874 if(internal&&is_ds[(ba[i]-start)>>2]) {
5875 ds_assemble_entry(i);
5876 }
5877 else {
5878 add_to_linker((int)out,ba[i],internal);
5879 emit_jmp(0);
5880 }
5881 }
5882 set_jump_target(nottaken,(int)out);
5883 }
5884
5885 if(adj) {
5886 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5887 }
5888 } // (!unconditional)
5889 } // if(ooo)
5890 else
5891 {
5892 // In-order execution (branch first)
5893 //printf("IOE\n");
5894 int nottaken=0;
5895 if(rt1[i]==31) {
5896 int rt,return_address;
5897 rt=get_reg(branch_regs[i].regmap,31);
5898 if(rt>=0) {
5899 // Save the PC even if the branch is not taken
5900 return_address=start+i*4+8;
5901 emit_movimm(return_address,rt); // PC into link register
5902 #ifdef IMM_PREFETCH
5903 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5904 #endif
5905 }
5906 }
5907 if(!unconditional) {
5908 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5909 if(!only32)
5910 {
5911 assert(s1h>=0);
5912 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5913 {
5914 emit_test(s1h,s1h);
5915 nottaken=(int)out;
5916 emit_jns(1);
5917 }
5918 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5919 {
5920 emit_test(s1h,s1h);
5921 nottaken=(int)out;
5922 emit_js(1);
5923 }
5924 } // if(!only32)
5925 else
5926 {
5927 assert(s1l>=0);
5928 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5929 {
5930 emit_test(s1l,s1l);
5931 nottaken=(int)out;
5932 emit_jns(1);
5933 }
5934 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5935 {
5936 emit_test(s1l,s1l);
5937 nottaken=(int)out;
5938 emit_js(1);
5939 }
5940 }
5941 } // if(!unconditional)
5942 int adj;
5943 uint64_t ds_unneeded=branch_regs[i].u;
5944 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5945 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5946 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5947 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5948 ds_unneeded|=1;
5949 ds_unneeded_upper|=1;
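      // ds_unneeded/ds_unneeded_upper now describe which guest registers are
      // dead across the delay slot: the slot's own sources stay needed, bit 0
      // ($zero) is always unneeded, and the upper-half mask also keeps
      // dep1/dep2 whenever the slot's 64-bit result might still be wanted.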
5950 // branch taken
5951 if(!nevertaken) {
5952 //assem_debug("1:\n");
5953 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5954 ds_unneeded,ds_unneeded_upper);
5955 // load regs
5956 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5957 address_generation(i+1,&branch_regs[i],0);
5958 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5959 ds_assemble(i+1,&branch_regs[i]);
5960 cc=get_reg(branch_regs[i].regmap,CCREG);
5961 if(cc==-1) {
5962 emit_loadreg(CCREG,cc=HOST_CCREG);
5963 // CHECK: Is the following instruction (fall thru) allocated ok?
5964 }
5965 assert(cc==HOST_CCREG);
5966 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5967 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5968 assem_debug("cycle count (adj)\n");
5969 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5970 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5971 if(internal)
5972 assem_debug("branch: internal\n");
5973 else
5974 assem_debug("branch: external\n");
5975 if(internal&&is_ds[(ba[i]-start)>>2]) {
5976 ds_assemble_entry(i);
5977 }
5978 else {
5979 add_to_linker((int)out,ba[i],internal);
5980 emit_jmp(0);
5981 }
5982 }
5983 // branch not taken
5984 cop1_usable=prev_cop1_usable;
5985 if(!unconditional) {
5986 set_jump_target(nottaken,(int)out);
5987 assem_debug("1:\n");
5988 if(!likely[i]) {
5989 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5990 ds_unneeded,ds_unneeded_upper);
5991 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5992 address_generation(i+1,&branch_regs[i],0);
5993 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5994 ds_assemble(i+1,&branch_regs[i]);
5995 }
5996 cc=get_reg(branch_regs[i].regmap,CCREG);
5997 if(cc==-1&&!likely[i]) {
5998 // Cycle count isn't in a register, temporarily load it then write it out
5999 emit_loadreg(CCREG,HOST_CCREG);
6000 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6001 int jaddr=(int)out;
6002 emit_jns(0);
6003 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6004 emit_storereg(CCREG,HOST_CCREG);
6005 }
6006 else{
6007 cc=get_reg(i_regmap,CCREG);
6008 assert(cc==HOST_CCREG);
6009 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6010 int jaddr=(int)out;
6011 emit_jns(0);
6012 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6013 }
6014 }
6015 }
6016}
6017
6018void fjump_assemble(int i,struct regstat *i_regs)
6019{
6020 signed char *i_regmap=i_regs->regmap;
6021 int cc;
6022 int match;
6023 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6024 assem_debug("fmatch=%d\n",match);
6025 int fs,cs;
6026 int eaddr;
6027 int ooo=1;
6028 int invert=0;
6029 int internal=internal_branch(branch_regs[i].is32,ba[i]);
6030 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
6031 if(likely[i]) ooo=0;
6032 if(!match) invert=1;
6033 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6034 if(i>(ba[i]-start)>>2) invert=1;
6035 #endif
6036
6037 if(ooo)
6038 if(itype[i+1]==FCOMP)
6039 {
6040 // Write-after-read dependency prevents out of order execution
6041 // First test branch condition, then execute delay slot, then branch
6042 ooo=0;
6043 }
6044
6045 if(ooo) {
6046 fs=get_reg(branch_regs[i].regmap,FSREG);
6047 address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6048 }
6049 else {
6050 fs=get_reg(i_regmap,FSREG);
6051 }
6052
6053 // Check cop1 unusable
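  // 0x20000000 is the CU1 bit of the Status register cached in CSREG; if it
  // is clear, the FP_STUB path deals with the coprocessor-unusable case, and
  // cop1_usable is set so the rest of the block skips this test.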
6054 if(!cop1_usable) {
6055 cs=get_reg(i_regmap,CSREG);
6056 assert(cs>=0);
6057 emit_testimm(cs,0x20000000);
6058 eaddr=(int)out;
6059 emit_jeq(0);
6060 add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6061 cop1_usable=1;
6062 }
6063
6064 if(ooo) {
6065 // Out of order execution (delay slot first)
6066 //printf("OOOE\n");
6067 ds_assemble(i+1,i_regs);
6068 int adj;
6069 uint64_t bc_unneeded=branch_regs[i].u;
6070 uint64_t bc_unneeded_upper=branch_regs[i].uu;
6071 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6072 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6073 bc_unneeded|=1;
6074 bc_unneeded_upper|=1;
6075 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6076 bc_unneeded,bc_unneeded_upper);
6077 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6078 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6079 cc=get_reg(branch_regs[i].regmap,CCREG);
6080 assert(cc==HOST_CCREG);
6081 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6082 assem_debug("cycle count (adj)\n");
6083 if(1) {
6084 int nottaken=0;
6085 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6086 if(1) {
6087 assert(fs>=0);
6088 emit_testimm(fs,0x800000);
6089 if(source[i]&0x10000) // BC1T
6090 {
6091 if(invert){
6092 nottaken=(int)out;
6093 emit_jeq(1);
6094 }else{
6095 add_to_linker((int)out,ba[i],internal);
6096 emit_jne(0);
6097 }
6098 }
6099 else // BC1F
6100 if(invert){
6101 nottaken=(int)out;
6102 emit_jne(1);
6103 }else{
6104 add_to_linker((int)out,ba[i],internal);
6105 emit_jeq(0);
6106 }
6109 } // if(!only32)
6110
6111 if(invert) {
6112 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6113 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6114 else if(match) emit_addnop(13);
6115 #endif
6116 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6117 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6118 if(internal)
6119 assem_debug("branch: internal\n");
6120 else
6121 assem_debug("branch: external\n");
6122 if(internal&&is_ds[(ba[i]-start)>>2]) {
6123 ds_assemble_entry(i);
6124 }
6125 else {
6126 add_to_linker((int)out,ba[i],internal);
6127 emit_jmp(0);
6128 }
6129 set_jump_target(nottaken,(int)out);
6130 }
6131
6132 if(adj) {
6133 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6134 }
6135 } // (!unconditional)
6136 } // if(ooo)
6137 else
6138 {
6139 // In-order execution (branch first)
6140 //printf("IOE\n");
6141 int nottaken=0;
6142 if(1) {
6143 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6144 if(1) {
6145 assert(fs>=0);
6146 emit_testimm(fs,0x800000);
6147 if(source[i]&0x10000) // BC1T
6148 {
6149 nottaken=(int)out;
6150 emit_jeq(1);
6151 }
6152 else // BC1F
6153 {
6154 nottaken=(int)out;
6155 emit_jne(1);
6156 }
6157 }
6158 } // if(!unconditional)
6159 int adj;
6160 uint64_t ds_unneeded=branch_regs[i].u;
6161 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6162 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6163 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6164 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6165 ds_unneeded|=1;
6166 ds_unneeded_upper|=1;
6167 // branch taken
6168 //assem_debug("1:\n");
6169 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6170 ds_unneeded,ds_unneeded_upper);
6171 // load regs
6172 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6173 address_generation(i+1,&branch_regs[i],0);
6174 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6175 ds_assemble(i+1,&branch_regs[i]);
6176 cc=get_reg(branch_regs[i].regmap,CCREG);
6177 if(cc==-1) {
6178 emit_loadreg(CCREG,cc=HOST_CCREG);
6179 // CHECK: Is the following instruction (fall thru) allocated ok?
6180 }
6181 assert(cc==HOST_CCREG);
6182 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6183 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6184 assem_debug("cycle count (adj)\n");
6185 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6186 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6187 if(internal)
6188 assem_debug("branch: internal\n");
6189 else
6190 assem_debug("branch: external\n");
6191 if(internal&&is_ds[(ba[i]-start)>>2]) {
6192 ds_assemble_entry(i);
6193 }
6194 else {
6195 add_to_linker((int)out,ba[i],internal);
6196 emit_jmp(0);
6197 }
6198
6199 // branch not taken
6200 if(1) { // <- FIXME (don't need this)
6201 set_jump_target(nottaken,(int)out);
6202 assem_debug("1:\n");
6203 if(!likely[i]) {
6204 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6205 ds_unneeded,ds_unneeded_upper);
6206 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6207 address_generation(i+1,&branch_regs[i],0);
6208 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6209 ds_assemble(i+1,&branch_regs[i]);
6210 }
6211 cc=get_reg(branch_regs[i].regmap,CCREG);
6212 if(cc==-1&&!likely[i]) {
6213 // Cycle count isn't in a register, temporarily load it then write it out
6214 emit_loadreg(CCREG,HOST_CCREG);
6215 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6216 int jaddr=(int)out;
6217 emit_jns(0);
6218 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6219 emit_storereg(CCREG,HOST_CCREG);
6220 }
6221 else{
6222 cc=get_reg(i_regmap,CCREG);
6223 assert(cc==HOST_CCREG);
6224 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6225 int jaddr=(int)out;
6226 emit_jns(0);
6227 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6228 }
6229 }
6230 }
6231}
6232
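// pagespan_assemble() handles a branch whose delay slot falls on the next
// virtual page (itype SPAN): the branch target is computed into HOST_BTREG
// and the delay slot itself is emitted separately by pagespan_ds() below,
// which dispatches on the saved target. (Summary inferred from the code
// here, not from any original documentation.)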
6233static void pagespan_assemble(int i,struct regstat *i_regs)
6234{
6235 int s1l=get_reg(i_regs->regmap,rs1[i]);
6236 int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6237 int s2l=get_reg(i_regs->regmap,rs2[i]);
6238 int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6239 void *nt_branch=NULL;
6240 int taken=0;
6241 int nottaken=0;
6242 int unconditional=0;
6243 if(rs1[i]==0)
6244 {
6245 s1l=s2l;s1h=s2h;
6246 s2l=s2h=-1;
6247 }
6248 else if(rs2[i]==0)
6249 {
6250 s2l=s2h=-1;
6251 }
6252 if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6253 s1h=s2h=-1;
6254 }
6255 int hr=0;
6256 int addr,alt,ntaddr;
6257 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6258 else {
6259 while(hr<HOST_REGS)
6260 {
6261 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6262 (i_regs->regmap[hr]&63)!=rs1[i] &&
6263 (i_regs->regmap[hr]&63)!=rs2[i] )
6264 {
6265 addr=hr++;break;
6266 }
6267 hr++;
6268 }
6269 }
6270 while(hr<HOST_REGS)
6271 {
6272 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6273 (i_regs->regmap[hr]&63)!=rs1[i] &&
6274 (i_regs->regmap[hr]&63)!=rs2[i] )
6275 {
6276 alt=hr++;break;
6277 }
6278 hr++;
6279 }
6280 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6281 {
6282 while(hr<HOST_REGS)
6283 {
6284 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6285 (i_regs->regmap[hr]&63)!=rs1[i] &&
6286 (i_regs->regmap[hr]&63)!=rs2[i] )
6287 {
6288 ntaddr=hr;break;
6289 }
6290 hr++;
6291 }
6292 }
6293 assert(hr<HOST_REGS);
6294 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6295 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6296 }
6297 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6298 if(opcode[i]==2) // J
6299 {
6300 unconditional=1;
6301 }
6302 if(opcode[i]==3) // JAL
6303 {
6304 // TODO: mini_ht
6305 int rt=get_reg(i_regs->regmap,31);
6306 emit_movimm(start+i*4+8,rt);
6307 unconditional=1;
6308 }
6309 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6310 {
6311 emit_mov(s1l,addr);
6312 if(opcode2[i]==9) // JALR
6313 {
6314 int rt=get_reg(i_regs->regmap,rt1[i]);
6315 emit_movimm(start+i*4+8,rt);
6316 }
6317 }
6318 if((opcode[i]&0x3f)==4) // BEQ
6319 {
6320 if(rs1[i]==rs2[i])
6321 {
6322 unconditional=1;
6323 }
6324 else
6325 #ifdef HAVE_CMOV_IMM
6326 if(s1h<0) {
6327 if(s2l>=0) emit_cmp(s1l,s2l);
6328 else emit_test(s1l,s1l);
6329 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6330 }
6331 else
6332 #endif
6333 {
6334 assert(s1l>=0);
6335 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6336 if(s1h>=0) {
6337 if(s2h>=0) emit_cmp(s1h,s2h);
6338 else emit_test(s1h,s1h);
6339 emit_cmovne_reg(alt,addr);
6340 }
6341 if(s2l>=0) emit_cmp(s1l,s2l);
6342 else emit_test(s1l,s1l);
6343 emit_cmovne_reg(alt,addr);
6344 }
6345 }
6346 if((opcode[i]&0x3f)==5) // BNE
6347 {
6348 #ifdef HAVE_CMOV_IMM
6349 if(s1h<0) {
6350 if(s2l>=0) emit_cmp(s1l,s2l);
6351 else emit_test(s1l,s1l);
6352 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6353 }
6354 else
6355 #endif
6356 {
6357 assert(s1l>=0);
6358 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6359 if(s1h>=0) {
6360 if(s2h>=0) emit_cmp(s1h,s2h);
6361 else emit_test(s1h,s1h);
6362 emit_cmovne_reg(alt,addr);
6363 }
6364 if(s2l>=0) emit_cmp(s1l,s2l);
6365 else emit_test(s1l,s1l);
6366 emit_cmovne_reg(alt,addr);
6367 }
6368 }
6369 if((opcode[i]&0x3f)==0x14) // BEQL
6370 {
6371 if(s1h>=0) {
6372 if(s2h>=0) emit_cmp(s1h,s2h);
6373 else emit_test(s1h,s1h);
6374 nottaken=(int)out;
6375 emit_jne(0);
6376 }
6377 if(s2l>=0) emit_cmp(s1l,s2l);
6378 else emit_test(s1l,s1l);
6379 if(nottaken) set_jump_target(nottaken,(int)out);
6380 nottaken=(int)out;
6381 emit_jne(0);
6382 }
6383 if((opcode[i]&0x3f)==0x15) // BNEL
6384 {
6385 if(s1h>=0) {
6386 if(s2h>=0) emit_cmp(s1h,s2h);
6387 else emit_test(s1h,s1h);
6388 taken=(int)out;
6389 emit_jne(0);
6390 }
6391 if(s2l>=0) emit_cmp(s1l,s2l);
6392 else emit_test(s1l,s1l);
6393 nottaken=(int)out;
6394 emit_jeq(0);
6395 if(taken) set_jump_target(taken,(int)out);
6396 }
6397 if((opcode[i]&0x3f)==6) // BLEZ
6398 {
6399 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6400 emit_cmpimm(s1l,1);
6401 if(s1h>=0) emit_mov(addr,ntaddr);
6402 emit_cmovl_reg(alt,addr);
6403 if(s1h>=0) {
6404 emit_test(s1h,s1h);
6405 emit_cmovne_reg(ntaddr,addr);
6406 emit_cmovs_reg(alt,addr);
6407 }
6408 }
6409 if((opcode[i]&0x3f)==7) // BGTZ
6410 {
6411 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6412 emit_cmpimm(s1l,1);
6413 if(s1h>=0) emit_mov(addr,alt);
6414 emit_cmovl_reg(ntaddr,addr);
6415 if(s1h>=0) {
6416 emit_test(s1h,s1h);
6417 emit_cmovne_reg(alt,addr);
6418 emit_cmovs_reg(ntaddr,addr);
6419 }
6420 }
6421 if((opcode[i]&0x3f)==0x16) // BLEZL
6422 {
6423 assert((opcode[i]&0x3f)!=0x16);
6424 }
6425 if((opcode[i]&0x3f)==0x17) // BGTZL
6426 {
6427 assert((opcode[i]&0x3f)!=0x17);
6428 }
6429 assert(opcode[i]!=1); // BLTZ/BGEZ
6430
6431 //FIXME: Check CSREG
6432 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6433 if((source[i]&0x30000)==0) // BC1F
6434 {
6435 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6436 emit_testimm(s1l,0x800000);
6437 emit_cmovne_reg(alt,addr);
6438 }
6439 if((source[i]&0x30000)==0x10000) // BC1T
6440 {
6441 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6442 emit_testimm(s1l,0x800000);
6443 emit_cmovne_reg(alt,addr);
6444 }
6445 if((source[i]&0x30000)==0x20000) // BC1FL
6446 {
6447 emit_testimm(s1l,0x800000);
6448 nottaken=(int)out;
6449 emit_jne(0);
6450 }
6451 if((source[i]&0x30000)==0x30000) // BC1TL
6452 {
6453 emit_testimm(s1l,0x800000);
6454 nottaken=(int)out;
6455 emit_jeq(0);
6456 }
6457 }
6458
6459 assert(i_regs->regmap[HOST_CCREG]==CCREG);
6460 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6461 if(likely[i]||unconditional)
6462 {
6463 emit_movimm(ba[i],HOST_BTREG);
6464 }
6465 else if(addr!=HOST_BTREG)
6466 {
6467 emit_mov(addr,HOST_BTREG);
6468 }
6469 void *branch_addr=out;
6470 emit_jmp(0);
6471 int target_addr=start+i*4+5;
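  // start+i*4+4 is the first address of the next page; the extra +1 looks
  // deliberate, matching the vaddr=start+1 convention pagespan_ds() uses when
  // it registers the page-spanning delay-slot entry below.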
6472 void *stub=out;
6473 void *compiled_target_addr=check_addr(target_addr);
6474 emit_extjump_ds((int)branch_addr,target_addr);
6475 if(compiled_target_addr) {
6476 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6477 add_link(target_addr,stub);
6478 }
6479 else set_jump_target((int)branch_addr,(int)stub);
6480 if(likely[i]) {
6481 // Not-taken path
6482 set_jump_target((int)nottaken,(int)out);
6483 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6484 void *branch_addr=out;
6485 emit_jmp(0);
6486 int target_addr=start+i*4+8;
6487 void *stub=out;
6488 void *compiled_target_addr=check_addr(target_addr);
6489 emit_extjump_ds((int)branch_addr,target_addr);
6490 if(compiled_target_addr) {
6491 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6492 add_link(target_addr,stub);
6493 }
6494 else set_jump_target((int)branch_addr,(int)stub);
6495 }
6496}
6497
6498// Assemble the delay slot for the above
6499static void pagespan_ds()
6500{
6501 assem_debug("initial delay slot:\n");
6502 u_int vaddr=start+1;
6503 u_int page=get_page(vaddr);
6504 u_int vpage=get_vpage(vaddr);
6505 ll_add(jump_dirty+vpage,vaddr,(void *)out);
6506 do_dirty_stub_ds();
6507 ll_add(jump_in+page,vaddr,(void *)out);
6508 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6509 if(regs[0].regmap[HOST_CCREG]!=CCREG)
6510 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6511 if(regs[0].regmap[HOST_BTREG]!=BTREG)
6512 emit_writeword(HOST_BTREG,(int)&branch_target);
6513 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6514 address_generation(0,&regs[0],regs[0].regmap_entry);
6515 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6516 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6517 cop1_usable=0;
6518 is_delayslot=0;
6519 switch(itype[0]) {
6520 case ALU:
6521 alu_assemble(0,&regs[0]);break;
6522 case IMM16:
6523 imm16_assemble(0,&regs[0]);break;
6524 case SHIFT:
6525 shift_assemble(0,&regs[0]);break;
6526 case SHIFTIMM:
6527 shiftimm_assemble(0,&regs[0]);break;
6528 case LOAD:
6529 load_assemble(0,&regs[0]);break;
6530 case LOADLR:
6531 loadlr_assemble(0,&regs[0]);break;
6532 case STORE:
6533 store_assemble(0,&regs[0]);break;
6534 case STORELR:
6535 storelr_assemble(0,&regs[0]);break;
6536 case COP0:
6537 cop0_assemble(0,&regs[0]);break;
6538 case COP1:
6539 cop1_assemble(0,&regs[0]);break;
6540 case C1LS:
6541 c1ls_assemble(0,&regs[0]);break;
6542 case COP2:
6543 cop2_assemble(0,&regs[0]);break;
6544 case C2LS:
6545 c2ls_assemble(0,&regs[0]);break;
6546 case C2OP:
6547 c2op_assemble(0,&regs[0]);break;
6548 case FCONV:
6549 fconv_assemble(0,&regs[0]);break;
6550 case FLOAT:
6551 float_assemble(0,&regs[0]);break;
6552 case FCOMP:
6553 fcomp_assemble(0,&regs[0]);break;
6554 case MULTDIV:
6555 multdiv_assemble(0,&regs[0]);break;
6556 case MOV:
6557 mov_assemble(0,&regs[0]);break;
6558 case SYSCALL:
6559 case HLECALL:
6560 case INTCALL:
6561 case SPAN:
6562 case UJUMP:
6563 case RJUMP:
6564 case CJUMP:
6565 case SJUMP:
6566 case FJUMP:
6567 printf("Jump in the delay slot. This is probably a bug.\n");
6568 }
6569 int btaddr=get_reg(regs[0].regmap,BTREG);
6570 if(btaddr<0) {
6571 btaddr=get_reg(regs[0].regmap,-1);
6572 emit_readword((int)&branch_target,btaddr);
6573 }
6574 assert(btaddr!=HOST_CCREG);
6575 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6576#ifdef HOST_IMM8
6577 emit_movimm(start+4,HOST_TEMPREG);
6578 emit_cmp(btaddr,HOST_TEMPREG);
6579#else
6580 emit_cmpimm(btaddr,start+4);
6581#endif
6582 int branch=(int)out;
6583 emit_jeq(0);
6584 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6585 emit_jmp(jump_vaddr_reg[btaddr]);
6586 set_jump_target(branch,(int)out);
6587 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6588 load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6589}
6590
6591// Basic liveness analysis for MIPS registers
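// unneeded_reg[i] has bit r set when the value in MIPS register r is dead
// after instruction i; unneeded_reg_upper[i] is the same for the upper 32
// bits. The pass walks backwards: written registers become unneeded, read
// registers become needed, and bit 0 ($zero) is always set.
//
// A minimal sketch of one backward step (register numbers only illustrate
// the bit positions): for "addu $t0,$a0,$a1" it effectively does
//   u |=  (1LL<<8);             // $t0 is overwritten, its old value is unneeded
//   u &= ~((1LL<<4)|(1LL<<5));  // $a0/$a1 are read, so they are needed
//   u |=  1;                    // $zero never needs to be preserved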
6592void unneeded_registers(int istart,int iend,int r)
6593{
6594 int i;
6595 uint64_t u,uu,b,bu;
6596 uint64_t temp_u,temp_uu;
6597 uint64_t tdep;
6598 if(iend==slen-1) {
6599 u=1;uu=1;
6600 }else{
6601 u=unneeded_reg[iend+1];
6602 uu=unneeded_reg_upper[iend+1];
6603 u=1;uu=1;
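    // (The values loaded just above are immediately overridden: recursive
    //  calls start from the conservative "only $zero unneeded" state.)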
6604 }
6605 for (i=iend;i>=istart;i--)
6606 {
6607 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6608 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6609 {
6610 // If subroutine call, flag return address as a possible branch target
6611 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6612
6613 if(ba[i]<start || ba[i]>=(start+slen*4))
6614 {
6615 // Branch out of this block, flush all regs
6616 u=1;
6617 uu=1;
6618 /* Hexagon hack
6619 if(itype[i]==UJUMP&&rt1[i]==31)
6620 {
6621 uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6622 }
6623 if(itype[i]==RJUMP&&rs1[i]==31)
6624 {
6625 uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6626 }
6627 if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6628 if(itype[i]==UJUMP&&rt1[i]==31)
6629 {
6630 //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6631 uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6632 }
6633 if(itype[i]==RJUMP&&rs1[i]==31)
6634 {
6635 //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6636 uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6637 }
6638 }*/
6639 branch_unneeded_reg[i]=u;
6640 branch_unneeded_reg_upper[i]=uu;
6641 // Merge in delay slot
6642 tdep=(~uu>>rt1[i+1])&1;
6643 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6644 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6645 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6646 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6647 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6648 u|=1;uu|=1;
6649 // If branch is "likely" (and conditional)
6650 // then we skip the delay slot on the fall-thru path
6651 if(likely[i]) {
6652 if(i<slen-1) {
6653 u&=unneeded_reg[i+2];
6654 uu&=unneeded_reg_upper[i+2];
6655 }
6656 else
6657 {
6658 u=1;
6659 uu=1;
6660 }
6661 }
6662 }
6663 else
6664 {
6665 // Internal branch, flag target
6666 bt[(ba[i]-start)>>2]=1;
6667 if(ba[i]<=start+i*4) {
6668 // Backward branch
6669 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6670 {
6671 // Unconditional branch
6672 temp_u=1;temp_uu=1;
6673 } else {
6674 // Conditional branch (not taken case)
6675 temp_u=unneeded_reg[i+2];
6676 temp_uu=unneeded_reg_upper[i+2];
6677 }
6678 // Merge in delay slot
6679 tdep=(~temp_uu>>rt1[i+1])&1;
6680 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6681 temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6682 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6683 temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6684 temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6685 temp_u|=1;temp_uu|=1;
6686 // If branch is "likely" (and conditional)
6687 // then we skip the delay slot on the fall-thru path
6688 if(likely[i]) {
6689 if(i<slen-1) {
6690 temp_u&=unneeded_reg[i+2];
6691 temp_uu&=unneeded_reg_upper[i+2];
6692 }
6693 else
6694 {
6695 temp_u=1;
6696 temp_uu=1;
6697 }
6698 }
6699 tdep=(~temp_uu>>rt1[i])&1;
6700 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6701 temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6702 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6703 temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6704 temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6705 temp_u|=1;temp_uu|=1;
6706 unneeded_reg[i]=temp_u;
6707 unneeded_reg_upper[i]=temp_uu;
6708 // Only go three levels deep. This recursion can take an
6709 // excessive amount of time if there are a lot of nested loops.
6710 if(r<2) {
6711 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6712 }else{
6713 unneeded_reg[(ba[i]-start)>>2]=1;
6714 unneeded_reg_upper[(ba[i]-start)>>2]=1;
6715 }
6716 } /*else*/ if(1) {
6717 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6718 {
6719 // Unconditional branch
6720 u=unneeded_reg[(ba[i]-start)>>2];
6721 uu=unneeded_reg_upper[(ba[i]-start)>>2];
6722 branch_unneeded_reg[i]=u;
6723 branch_unneeded_reg_upper[i]=uu;
6724 //u=1;
6725 //uu=1;
6726 //branch_unneeded_reg[i]=u;
6727 //branch_unneeded_reg_upper[i]=uu;
6728 // Merge in delay slot
6729 tdep=(~uu>>rt1[i+1])&1;
6730 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6731 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6732 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6733 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6734 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6735 u|=1;uu|=1;
6736 } else {
6737 // Conditional branch
6738 b=unneeded_reg[(ba[i]-start)>>2];
6739 bu=unneeded_reg_upper[(ba[i]-start)>>2];
6740 branch_unneeded_reg[i]=b;
6741 branch_unneeded_reg_upper[i]=bu;
6742 //b=1;
6743 //bu=1;
6744 //branch_unneeded_reg[i]=b;
6745 //branch_unneeded_reg_upper[i]=bu;
6746 // Branch delay slot
6747 tdep=(~uu>>rt1[i+1])&1;
6748 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6749 bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6750 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6751 bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6752 bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6753 b|=1;bu|=1;
6754 // If branch is "likely" then we skip the
6755 // delay slot on the fall-thru path
6756 if(likely[i]) {
6757 u=b;
6758 uu=bu;
6759 if(i<slen-1) {
6760 u&=unneeded_reg[i+2];
6761 uu&=unneeded_reg_upper[i+2];
6762 //u=1;
6763 //uu=1;
6764 }
6765 } else {
6766 u&=b;
6767 uu&=bu;
6768 //u=1;
6769 //uu=1;
6770 }
6771 if(i<slen-1) {
6772 branch_unneeded_reg[i]&=unneeded_reg[i+2];
6773 branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6774 //branch_unneeded_reg[i]=1;
6775 //branch_unneeded_reg_upper[i]=1;
6776 } else {
6777 branch_unneeded_reg[i]=1;
6778 branch_unneeded_reg_upper[i]=1;
6779 }
6780 }
6781 }
6782 }
6783 }
6784 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6785 {
6786 // SYSCALL instruction (software interrupt)
6787 u=1;
6788 uu=1;
6789 }
6790 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6791 {
6792 // ERET instruction (return from interrupt)
6793 u=1;
6794 uu=1;
6795 }
6796 //u=uu=1; // DEBUG
6797 tdep=(~uu>>rt1[i])&1;
6798 // Written registers are unneeded
6799 u|=1LL<<rt1[i];
6800 u|=1LL<<rt2[i];
6801 uu|=1LL<<rt1[i];
6802 uu|=1LL<<rt2[i];
6803 // Accessed registers are needed
6804 u&=~(1LL<<rs1[i]);
6805 u&=~(1LL<<rs2[i]);
6806 uu&=~(1LL<<us1[i]);
6807 uu&=~(1LL<<us2[i]);
6808 // Source-target dependencies
6809 uu&=~(tdep<<dep1[i]);
6810 uu&=~(tdep<<dep2[i]);
6811 // R0 is always unneeded
6812 u|=1;uu|=1;
6813 // Save it
6814 unneeded_reg[i]=u;
6815 unneeded_reg_upper[i]=uu;
6816 /*
6817 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6818 printf("U:");
6819 int r;
6820 for(r=1;r<=CCREG;r++) {
6821 if((unneeded_reg[i]>>r)&1) {
6822 if(r==HIREG) printf(" HI");
6823 else if(r==LOREG) printf(" LO");
6824 else printf(" r%d",r);
6825 }
6826 }
6827 printf(" UU:");
6828 for(r=1;r<=CCREG;r++) {
6829 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6830 if(r==HIREG) printf(" HI");
6831 else if(r==LOREG) printf(" LO");
6832 else printf(" r%d",r);
6833 }
6834 }
6835 printf("\n");*/
6836 }
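// For 32-bit-only targets (FORCE32, i.e. the 32-bit PSX build) no upper
// register halves ever matter, so mark them all as unneeded.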
6837#ifdef FORCE32
6838 for (i=iend;i>=istart;i--)
6839 {
6840 unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6841 }
6842#endif
6843}
6844
6845// Identify registers which are likely to contain 32-bit values
6846// This is used to predict whether any branches will jump to a
6847// location with 64-bit values in registers.
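// This is a forward pass over the block keeping a provisional is32 bitmap,
// stored per instruction in p32[]; e.g. in the IMM16 case below, ORI/XORI
// leave the destination 32-bit only if the source register already was.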
6848static void provisional_32bit()
6849{
6850 int i,j;
6851 uint64_t is32=1;
6852 uint64_t lastbranch=1;
6853
6854 for(i=0;i<slen;i++)
6855 {
6856 if(i>0) {
6857 if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6858 if(i>1) is32=lastbranch;
6859 else is32=1;
6860 }
6861 }
6862 if(i>1)
6863 {
6864 if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6865 if(likely[i-2]) {
6866 if(i>2) is32=lastbranch;
6867 else is32=1;
6868 }
6869 }
6870 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6871 {
6872 if(rs1[i-2]==0||rs2[i-2]==0)
6873 {
6874 if(rs1[i-2]) {
6875 is32|=1LL<<rs1[i-2];
6876 }
6877 if(rs2[i-2]) {
6878 is32|=1LL<<rs2[i-2];
6879 }
6880 }
6881 }
6882 }
6883 // If something jumps here with 64-bit values
6884 // then promote those registers to 64 bits
6885 if(bt[i])
6886 {
6887 uint64_t temp_is32=is32;
6888 for(j=i-1;j>=0;j--)
6889 {
6890 if(ba[j]==start+i*4)
6891 //temp_is32&=branch_regs[j].is32;
6892 temp_is32&=p32[j];
6893 }
6894 for(j=i;j<slen;j++)
6895 {
6896 if(ba[j]==start+i*4)
6897 temp_is32=1;
6898 }
6899 is32=temp_is32;
6900 }
6901 int type=itype[i];
6902 int op=opcode[i];
6903 int op2=opcode2[i];
6904 int rt=rt1[i];
6905 int s1=rs1[i];
6906 int s2=rs2[i];
6907 if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6908 // Branches don't write registers, consider the delay slot instead.
6909 type=itype[i+1];
6910 op=opcode[i+1];
6911 op2=opcode2[i+1];
6912 rt=rt1[i+1];
6913 s1=rs1[i+1];
6914 s2=rs2[i+1];
6915 lastbranch=is32;
6916 }
6917 switch(type) {
6918 case LOAD:
6919 if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6920 opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6921 is32&=~(1LL<<rt);
6922 else
6923 is32|=1LL<<rt;
6924 break;
6925 case STORE:
6926 case STORELR:
6927 break;
6928 case LOADLR:
6929 if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6930 if(op==0x22) is32|=1LL<<rt; // LWL
6931 break;
6932 case IMM16:
6933 if (op==0x08||op==0x09|| // ADDI/ADDIU
6934 op==0x0a||op==0x0b|| // SLTI/SLTIU
6935 op==0x0c|| // ANDI
6936 op==0x0f) // LUI
6937 {
6938 is32|=1LL<<rt;
6939 }
6940 if(op==0x18||op==0x19) { // DADDI/DADDIU
6941 is32&=~(1LL<<rt);
6942 //if(imm[i]==0)
6943 // is32|=((is32>>s1)&1LL)<<rt;
6944 }
6945 if(op==0x0d||op==0x0e) { // ORI/XORI
6946 uint64_t sr=((is32>>s1)&1LL);
6947 is32&=~(1LL<<rt);
6948 is32|=sr<<rt;
6949 }
6950 break;
6951 case UJUMP:
6952 break;
6953 case RJUMP:
6954 break;
6955 case CJUMP:
6956 break;
6957 case SJUMP:
6958 break;
6959 case FJUMP:
6960 break;
6961 case ALU:
6962 if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6963 is32|=1LL<<rt;
6964 }
6965 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6966 is32|=1LL<<rt;
6967 }
6968 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6969 uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6970 is32&=~(1LL<<rt);
6971 is32|=sr<<rt;
6972 }
6973 else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6974 if(s1==0&&s2==0) {
6975 is32|=1LL<<rt;
6976 }
6977 else if(s2==0) {
6978 uint64_t sr=((is32>>s1)&1LL);
6979 is32&=~(1LL<<rt);
6980 is32|=sr<<rt;
6981 }
6982 else if(s1==0) {
6983 uint64_t sr=((is32>>s2)&1LL);
6984 is32&=~(1LL<<rt);
6985 is32|=sr<<rt;
6986 }
6987 else {
6988 is32&=~(1LL<<rt);
6989 }
6990 }
6991 else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6992 if(s1==0&&s2==0) {
6993 is32|=1LL<<rt;
6994 }
6995 else if(s2==0) {
6996 uint64_t sr=((is32>>s1)&1LL);
6997 is32&=~(1LL<<rt);
6998 is32|=sr<<rt;
6999 }
7000 else {
7001 is32&=~(1LL<<rt);
7002 }
7003 }
7004 break;
7005 case MULTDIV:
7006 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7007 is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7008 }
7009 else {
7010 is32|=(1LL<<HIREG)|(1LL<<LOREG);
7011 }
7012 break;
7013 case MOV:
7014 {
7015 uint64_t sr=((is32>>s1)&1LL);
7016 is32&=~(1LL<<rt);
7017 is32|=sr<<rt;
7018 }
7019 break;
7020 case SHIFT:
7021 if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7022 else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7023 break;
7024 case SHIFTIMM:
7025 is32|=1LL<<rt;
7026 // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7027 if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7028 break;
7029 case COP0:
7030 if(op2==0) is32|=1LL<<rt; // MFC0
7031 break;
7032 case COP1:
7033 case COP2:
7034 if(op2==0) is32|=1LL<<rt; // MFC1
7035 if(op2==1) is32&=~(1LL<<rt); // DMFC1
7036 if(op2==2) is32|=1LL<<rt; // CFC1
7037 break;
7038 case C1LS:
7039 case C2LS:
7040 break;
7041 case FLOAT:
7042 case FCONV:
7043 break;
7044 case FCOMP:
7045 break;
7046 case C2OP:
7047 case SYSCALL:
7048 case HLECALL:
7049 break;
7050 default:
7051 break;
7052 }
7053 is32|=1;
7054 p32[i]=is32;
7055
7056 if(i>0)
7057 {
7058 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7059 {
7060 if(rt1[i-1]==31) // JAL/JALR
7061 {
7062 // Subroutine call will return here, don't alloc any registers
7063 is32=1;
7064 }
7065 else if(i+1<slen)
7066 {
7067 // Internal branch will jump here, match registers to caller
7068 is32=0x3FFFFFFFFLL;
7069 }
7070 }
7071 }
7072 }
7073}
7074
7075// Identify registers which may be assumed to contain 32-bit values
7076// and where optimizations will rely on this.
7077// This is used to determine whether backward branches can safely
7078// jump to a location with 64-bit values in registers.
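// Backward-pass companion to provisional_32bit(): pr32[i] collects which
// registers are required to hold 32-bit values at instruction i, merging in
// branch targets, delay-slot sources and dirty 32-bit register mappings.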
7079static void provisional_r32()
7080{
7081 u_int r32=0;
7082 int i;
7083
7084 for (i=slen-1;i>=0;i--)
7085 {
7086 int hr;
7087 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7088 {
7089 if(ba[i]<start || ba[i]>=(start+slen*4))
7090 {
7091 // Branch out of this block, don't need anything
7092 r32=0;
7093 }
7094 else
7095 {
7096 // Internal branch
7097 // Need whatever matches the target
7098 // (and doesn't get overwritten by the delay slot instruction)
7099 r32=0;
7100 int t=(ba[i]-start)>>2;
7101 if(ba[i]>start+i*4) {
7102 // Forward branch
7103 //if(!(requires_32bit[t]&~regs[i].was32))
7104 // r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7105 if(!(pr32[t]&~regs[i].was32))
7106 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7107 }else{
7108 // Backward branch
7109 if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7110 r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7111 }
7112 }
7113 // Conditional branch may need registers for following instructions
7114 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7115 {
7116 if(i<slen-2) {
7117 //r32|=requires_32bit[i+2];
7118 r32|=pr32[i+2];
7119 r32&=regs[i].was32;
7120 // Mark this address as a branch target since it may be called
7121 // upon return from interrupt
7122 //bt[i+2]=1;
7123 }
7124 }
7125 // Merge in delay slot
7126 if(!likely[i]) {
7127 // These are overwritten unless the branch is "likely"
7128 // and the delay slot is nullified if not taken
7129 r32&=~(1LL<<rt1[i+1]);
7130 r32&=~(1LL<<rt2[i+1]);
7131 }
7132 // Assume these are needed (delay slot)
7133 if(us1[i+1]>0)
7134 {
7135 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7136 }
7137 if(us2[i+1]>0)
7138 {
7139 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7140 }
7141 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7142 {
7143 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7144 }
7145 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7146 {
7147 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7148 }
7149 }
7150 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7151 {
7152 // SYSCALL instruction (software interrupt)
7153 r32=0;
7154 }
7155 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7156 {
7157 // ERET instruction (return from interrupt)
7158 r32=0;
7159 }
7160 // Check 32 bits
7161 r32&=~(1LL<<rt1[i]);
7162 r32&=~(1LL<<rt2[i]);
7163 if(us1[i]>0)
7164 {
7165 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7166 }
7167 if(us2[i]>0)
7168 {
7169 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7170 }
7171 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7172 {
7173 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7174 }
7175 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7176 {
7177 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7178 }
7179 //requires_32bit[i]=r32;
7180 pr32[i]=r32;
7181
7182 // Dirty registers which are 32-bit, require 32-bit input
7183 // as they will be written as 32-bit values
7184 for(hr=0;hr<HOST_REGS;hr++)
7185 {
7186 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7187 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7188 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7189 pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7190 //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7191 }
7192 }
7193 }
7194 }
7195}
7196
7197// Write back dirty registers as soon as we will no longer modify them,
7198// so that we don't end up with lots of writes at the branches.
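// will_dirty[i]/wont_dirty[i] are bitmaps over host registers, computed
// backwards from the end of the block: roughly, will_dirty marks host regs
// that are written again further on (so writing them back now is pointless),
// while wont_dirty marks regs that are not touched again and can be written
// back here.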
7199void clean_registers(int istart,int iend,int wr)
7200{
7201 int i;
7202 int r;
7203 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7204 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7205 if(iend==slen-1) {
7206 will_dirty_i=will_dirty_next=0;
7207 wont_dirty_i=wont_dirty_next=0;
7208 }else{
7209 will_dirty_i=will_dirty_next=will_dirty[iend+1];
7210 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7211 }
7212 for (i=iend;i>=istart;i--)
7213 {
7214 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7215 {
7216 if(ba[i]<start || ba[i]>=(start+slen*4))
7217 {
7218 // Branch out of this block, flush all regs
7219 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7220 {
7221 // Unconditional branch
7222 will_dirty_i=0;
7223 wont_dirty_i=0;
7224 // Merge in delay slot (will dirty)
7225 for(r=0;r<HOST_REGS;r++) {
7226 if(r!=EXCLUDE_REG) {
7227 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7228 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7229 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7230 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7231 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7232 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7233 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7234 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7235 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7236 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7237 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7238 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7239 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7240 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7241 }
7242 }
7243 }
7244 else
7245 {
7246 // Conditional branch
7247 will_dirty_i=0;
7248 wont_dirty_i=wont_dirty_next;
7249 // Merge in delay slot (will dirty)
7250 for(r=0;r<HOST_REGS;r++) {
7251 if(r!=EXCLUDE_REG) {
7252 if(!likely[i]) {
7253 // Might not dirty if likely branch is not taken
7254 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7255 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7256 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7257 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7258 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7259 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7260 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7261 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7262 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7263 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7264 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7265 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7266 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7267 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7268 }
7269 }
7270 }
7271 }
7272 // Merge in delay slot (wont dirty)
7273 for(r=0;r<HOST_REGS;r++) {
7274 if(r!=EXCLUDE_REG) {
7275 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7276 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7277 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7278 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7279 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7280 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7281 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7282 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7283 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7284 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7285 }
7286 }
7287 if(wr) {
7288 #ifndef DESTRUCTIVE_WRITEBACK
7289 branch_regs[i].dirty&=wont_dirty_i;
7290 #endif
7291 branch_regs[i].dirty|=will_dirty_i;
7292 }
7293 }
7294 else
7295 {
7296 // Internal branch
7297 if(ba[i]<=start+i*4) {
7298 // Backward branch
7299 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7300 {
7301 // Unconditional branch
7302 temp_will_dirty=0;
7303 temp_wont_dirty=0;
7304 // Merge in delay slot (will dirty)
7305 for(r=0;r<HOST_REGS;r++) {
7306 if(r!=EXCLUDE_REG) {
7307 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7308 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7309 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7310 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7311 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7312 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7313 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7314 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7315 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7316 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7317 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7318 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7319 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7320 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7321 }
7322 }
7323 } else {
7324 // Conditional branch (not taken case)
7325 temp_will_dirty=will_dirty_next;
7326 temp_wont_dirty=wont_dirty_next;
7327 // Merge in delay slot (will dirty)
7328 for(r=0;r<HOST_REGS;r++) {
7329 if(r!=EXCLUDE_REG) {
7330 if(!likely[i]) {
7331 // Will not dirty if likely branch is not taken
7332 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7333 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7334 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7335 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7336 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7337 if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7338 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7339 //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7340 //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7341 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7342 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7343 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7344 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7345 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7346 }
7347 }
7348 }
7349 }
7350 // Merge in delay slot (wont dirty)
7351 for(r=0;r<HOST_REGS;r++) {
7352 if(r!=EXCLUDE_REG) {
7353 if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7354 if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7355 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7356 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7357 if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7358 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7359 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7360 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7361 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7362 if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7363 }
7364 }
7365 // Deal with changed mappings
7366 if(i<iend) {
7367 for(r=0;r<HOST_REGS;r++) {
7368 if(r!=EXCLUDE_REG) {
7369 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7370 temp_will_dirty&=~(1<<r);
7371 temp_wont_dirty&=~(1<<r);
7372 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7373 temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7374 temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7375 } else {
7376 temp_will_dirty|=1<<r;
7377 temp_wont_dirty|=1<<r;
7378 }
7379 }
7380 }
7381 }
7382 }
7383 if(wr) {
7384 will_dirty[i]=temp_will_dirty;
7385 wont_dirty[i]=temp_wont_dirty;
7386 clean_registers((ba[i]-start)>>2,i-1,0);
7387 }else{
7388 // Limit recursion. It can take an excessive amount
7389 // of time if there are a lot of nested loops.
7390 will_dirty[(ba[i]-start)>>2]=0;
7391 wont_dirty[(ba[i]-start)>>2]=-1;
7392 }
7393 }
7394 /*else*/ if(1)
7395 {
7396 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7397 {
7398 // Unconditional branch
7399 will_dirty_i=0;
7400 wont_dirty_i=0;
7401 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7402 for(r=0;r<HOST_REGS;r++) {
7403 if(r!=EXCLUDE_REG) {
7404 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7405 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7406 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7407 }
7408 }
7409 }
7410 //}
7411 // Merge in delay slot
7412 for(r=0;r<HOST_REGS;r++) {
7413 if(r!=EXCLUDE_REG) {
7414 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7415 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7416 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7417 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7418 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7419 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7420 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7421 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7422 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7423 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7424 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7425 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7426 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7427 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7428 }
7429 }
7430 } else {
7431 // Conditional branch
7432 will_dirty_i=will_dirty_next;
7433 wont_dirty_i=wont_dirty_next;
7434 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7435 for(r=0;r<HOST_REGS;r++) {
7436 if(r!=EXCLUDE_REG) {
7437 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7438 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7439 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7440 }
7441 else
7442 {
7443 will_dirty_i&=~(1<<r);
7444 }
7445 // Treat delay slot as part of branch too
7446 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7447 will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7448 wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7449 }
7450 else
7451 {
7452 will_dirty[i+1]&=~(1<<r);
7453 }*/
7454 }
7455 }
7456 //}
7457 // Merge in delay slot
7458 for(r=0;r<HOST_REGS;r++) {
7459 if(r!=EXCLUDE_REG) {
7460 if(!likely[i]) {
7461 // Might not dirty if likely branch is not taken
7462 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7463 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7464 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7465 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7466 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7467 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7468 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7469 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7470 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7471 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7472 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7473 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7474 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7475 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7476 }
7477 }
7478 }
7479 }
7480 // Merge in delay slot
7481 for(r=0;r<HOST_REGS;r++) {
7482 if(r!=EXCLUDE_REG) {
7483 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7484 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7485 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7486 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7487 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7488 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7489 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7490 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7491 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7492 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7493 }
7494 }
7495 if(wr) {
7496 #ifndef DESTRUCTIVE_WRITEBACK
7497 branch_regs[i].dirty&=wont_dirty_i;
7498 #endif
7499 branch_regs[i].dirty|=will_dirty_i;
7500 }
7501 }
7502 }
7503 }
7504 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7505 {
7506 // SYSCALL instruction (software interrupt)
7507 will_dirty_i=0;
7508 wont_dirty_i=0;
7509 }
7510 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7511 {
7512 // ERET instruction (return from interrupt)
7513 will_dirty_i=0;
7514 wont_dirty_i=0;
7515 }
7516 will_dirty_next=will_dirty_i;
7517 wont_dirty_next=wont_dirty_i;
7518 for(r=0;r<HOST_REGS;r++) {
7519 if(r!=EXCLUDE_REG) {
7520 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7521 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7522 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7523 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7524 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7525 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7526 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7527 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7528 if(i>istart) {
7529 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7530 {
7531 // Don't store a register immediately after writing it,
7532 // may prevent dual-issue.
7533 if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7534 if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7535 }
7536 }
7537 }
7538 }
7539 // Save it
7540 will_dirty[i]=will_dirty_i;
7541 wont_dirty[i]=wont_dirty_i;
7542 // Mark registers that won't be dirtied as not dirty
7543 if(wr) {
7544 /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7545 for(r=0;r<HOST_REGS;r++) {
7546 if((will_dirty_i>>r)&1) {
7547 printf(" r%d",r);
7548 }
7549 }
7550 printf("\n");*/
7551
7552 //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7553 regs[i].dirty|=will_dirty_i;
7554 #ifndef DESTRUCTIVE_WRITEBACK
7555 regs[i].dirty&=wont_dirty_i;
7556 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7557 {
7558 if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7559 for(r=0;r<HOST_REGS;r++) {
7560 if(r!=EXCLUDE_REG) {
7561 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7562 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7563 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);/*assert(!((wont_dirty_i>>r)&1));*/}
7564 }
7565 }
7566 }
7567 }
7568 else
7569 {
7570 if(i<iend) {
7571 for(r=0;r<HOST_REGS;r++) {
7572 if(r!=EXCLUDE_REG) {
7573 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7574 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7575 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);/*assert(!((wont_dirty_i>>r)&1));*/}
7576 }
7577 }
7578 }
7579 }
7580 #endif
7581 //}
7582 }
7583 // Deal with changed mappings
7584 temp_will_dirty=will_dirty_i;
7585 temp_wont_dirty=wont_dirty_i;
7586 for(r=0;r<HOST_REGS;r++) {
7587 if(r!=EXCLUDE_REG) {
7588 int nr;
7589 if(regs[i].regmap[r]==regmap_pre[i][r]) {
7590 if(wr) {
7591 #ifndef DESTRUCTIVE_WRITEBACK
7592 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7593 #endif
7594 regs[i].wasdirty|=will_dirty_i&(1<<r);
7595 }
7596 }
7597 else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7598 // Register moved to a different register
7599 will_dirty_i&=~(1<<r);
7600 wont_dirty_i&=~(1<<r);
7601 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7602 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7603 if(wr) {
7604 #ifndef DESTRUCTIVE_WRITEBACK
7605 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7606 #endif
7607 regs[i].wasdirty|=will_dirty_i&(1<<r);
7608 }
7609 }
7610 else {
7611 will_dirty_i&=~(1<<r);
7612 wont_dirty_i&=~(1<<r);
7613 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7614 will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7615 wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7616 } else {
7617 wont_dirty_i|=1<<r;
7618 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);/*assert(!((will_dirty>>r)&1));*/
7619 }
7620 }
7621 }
7622 }
7623 }
7624}
7625
7626 /* disassembly */
7627void disassemble_inst(int i)
7628{
7629 if (bt[i]) printf("*"); else printf(" ");
7630 switch(itype[i]) {
7631 case UJUMP:
7632 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7633 case CJUMP:
7634 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7635 case SJUMP:
7636 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7637 case FJUMP:
7638 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7639 case RJUMP:
7640 if (opcode[i]==0x9&&rt1[i]!=31)
7641 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7642 else
7643 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7644 break;
7645 case SPAN:
7646 printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7647 case IMM16:
7648 if(opcode[i]==0xf) //LUI
7649 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7650 else
7651 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7652 break;
7653 case LOAD:
7654 case LOADLR:
7655 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7656 break;
7657 case STORE:
7658 case STORELR:
7659 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7660 break;
7661 case ALU:
7662 case SHIFT:
7663 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7664 break;
7665 case MULTDIV:
7666 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7667 break;
7668 case SHIFTIMM:
7669 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7670 break;
7671 case MOV:
7672 if((opcode2[i]&0x1d)==0x10)
7673 printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7674 else if((opcode2[i]&0x1d)==0x11)
7675 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7676 else
7677 printf (" %x: %s\n",start+i*4,insn[i]);
7678 break;
7679 case COP0:
7680 if(opcode2[i]==0)
7681 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7682 else if(opcode2[i]==4)
7683 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7684 else printf (" %x: %s\n",start+i*4,insn[i]);
7685 break;
7686 case COP1:
7687 if(opcode2[i]<3)
7688 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7689 else if(opcode2[i]>3)
7690 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7691 else printf (" %x: %s\n",start+i*4,insn[i]);
7692 break;
7693 case COP2:
7694 if(opcode2[i]<3)
7695 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7696 else if(opcode2[i]>3)
7697 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7698 else printf (" %x: %s\n",start+i*4,insn[i]);
7699 break;
7700 case C1LS:
7701 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7702 break;
7703 case C2LS:
7704 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7705 break;
7706 case INTCALL:
7707 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7708 break;
7709 default:
7710 //printf (" %s %8x\n",insn[i],source[i]);
7711 printf (" %x: %s\n",start+i*4,insn[i]);
7712 }
7713}
7714
7715void new_dynarec_init()
7716{
7717 printf("Init new dynarec\n");
7718 out=(u_char *)BASE_ADDR;
7719 if (mmap (out, 1<<TARGET_SIZE_2,
7720 PROT_READ | PROT_WRITE | PROT_EXEC,
7721 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7722 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
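  // The translation cache is a single fixed-address read/write/execute
  // mapping of 1<<TARGET_SIZE_2 bytes at BASE_ADDR; generated code is written
  // through 'out' inside this region and old blocks are recycled via the
  // 'expirep' pointer set up below.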
7723#ifdef MUPEN64
7724 rdword=&readmem_dword;
7725 fake_pc.f.r.rs=&readmem_dword;
7726 fake_pc.f.r.rt=&readmem_dword;
7727 fake_pc.f.r.rd=&readmem_dword;
7728#endif
7729 int n;
7730 for(n=0x80000;n<0x80800;n++)
7731 invalid_code[n]=1;
7732 for(n=0;n<65536;n++)
7733 hash_table[n][0]=hash_table[n][2]=-1;
7734 memset(mini_ht,-1,sizeof(mini_ht));
7735 memset(restore_candidate,0,sizeof(restore_candidate));
7736 copy=shadow;
7737 expirep=16384; // Expiry pointer, +2 blocks
7738 pending_exception=0;
7739 literalcount=0;
7740#ifdef HOST_IMM8
7741 // Copy this into local area so we don't have to put it in every literal pool
7742 invc_ptr=invalid_code;
7743#endif
7744 stop_after_jal=0;
7745 // TLB
7746 using_tlb=0;
7747 for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7748 memory_map[n]=-1;
7749 for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7750 memory_map[n]=((u_int)rdram-0x80000000)>>2;
7751 for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7752 memory_map[n]=-1;
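  /* Worked example of the mapping built above: memory_map[] is indexed by
     vaddr>>12, and an entry holds an offset (in words) such that
     vaddr + (memory_map[vaddr>>12]<<2) is the corresponding host address.
     For a fetch at 0x80001234 the entry is ((u_int)rdram-0x80000000)>>2, so
     the host address becomes rdram + 0x1234; -1 marks an unmapped page. */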
7753#ifdef MUPEN64
7754 for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7755 writemem[n] = write_nomem_new;
7756 writememb[n] = write_nomemb_new;
7757 writememh[n] = write_nomemh_new;
7758#ifndef FORCE32
7759 writememd[n] = write_nomemd_new;
7760#endif
7761 readmem[n] = read_nomem_new;
7762 readmemb[n] = read_nomemb_new;
7763 readmemh[n] = read_nomemh_new;
7764#ifndef FORCE32
7765 readmemd[n] = read_nomemd_new;
7766#endif
7767 }
7768 for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7769 writemem[n] = write_rdram_new;
7770 writememb[n] = write_rdramb_new;
7771 writememh[n] = write_rdramh_new;
7772#ifndef FORCE32
7773 writememd[n] = write_rdramd_new;
7774#endif
7775 }
7776 for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7777 writemem[n] = write_nomem_new;
7778 writememb[n] = write_nomemb_new;
7779 writememh[n] = write_nomemh_new;
7780#ifndef FORCE32
7781 writememd[n] = write_nomemd_new;
7782#endif
7783 readmem[n] = read_nomem_new;
7784 readmemb[n] = read_nomemb_new;
7785 readmemh[n] = read_nomemh_new;
7786#ifndef FORCE32
7787 readmemd[n] = read_nomemd_new;
7788#endif
7789 }
7790#endif
7791 tlb_hacks();
7792 arch_init();
7793}
7794
7795void new_dynarec_cleanup()
7796{
7797 int n;
7798 if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7799 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7800 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7801 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7802 #ifdef ROM_COPY
7803 if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7804 #endif
7805}
7806
7807int new_recompile_block(int addr)
7808{
7809/*
7810 if(addr==0x800cd050) {
7811 int block;
7812 for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7813 int n;
7814 for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7815 }
7816*/
7817 //if(Count==365117028) tracedebug=1;
7818 assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7819 //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7820 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7821 //if(debug)
7822 //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7823 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7824 /*if(Count>=312978186) {
7825 rlist();
7826 }*/
7827 //rlist();
7828 start = (u_int)addr&~3;
7829 //assert(((u_int)addr&1)==0);
7830#ifdef PCSX
7831 if (Config.HLE && start == 0x80001000) // hlecall
7832 {
7833 // XXX: is this enough? Maybe check hleSoftCall?
7834 u_int beginning=(u_int)out;
7835 u_int page=get_page(start);
7836 invalid_code[start>>12]=0;
7837 emit_movimm(start,0);
7838 emit_writeword(0,(int)&pcaddr);
7839 emit_jmp((int)new_dyna_leave);
7840#ifdef __arm__
7841 __clear_cache((void *)beginning,out);
7842#endif
7843 ll_add(jump_in+page,start,(void *)beginning);
7844 return 0;
7845 }
7846 else if ((u_int)addr < 0x00200000 ||
7847 (0xa0000000 <= addr && addr < 0xa0200000)) {
7848 // used for BIOS calls mostly?
7849 source = (u_int *)((u_int)rdram+(start&0x1fffff));
7850 pagelimit = (addr&0xa0000000)|0x00200000;
7851 }
7852 else if (!Config.HLE && (
7853/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7854 (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7855 // BIOS
7856 source = (u_int *)((u_int)psxR+(start&0x7ffff));
7857 pagelimit = (addr&0xfff00000)|0x80000;
7858 }
7859 else
7860#endif
7861#ifdef MUPEN64
7862 if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7863 source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7864 pagelimit = 0xa4001000;
7865 }
7866 else
7867#endif
7868 if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
7869 source = (u_int *)((u_int)rdram+start-0x80000000);
7870 pagelimit = 0x80000000+RAM_SIZE;
7871 }
7872#ifndef DISABLE_TLB
7873 else if ((signed int)addr >= (signed int)0xC0000000) {
7874 //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7875 //if(tlb_LUT_r[start>>12])
7876 //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7877 if((signed int)memory_map[start>>12]>=0) {
7878 source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7879 pagelimit=(start+4096)&0xFFFFF000;
7880 int map=memory_map[start>>12];
7881 int i;
7882 for(i=0;i<5;i++) {
7883 //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7884 if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7885 }
7886 assem_debug("pagelimit=%x\n",pagelimit);
7887 assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7888 }
7889 else {
7890 assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7891 //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
7892 return -1; // Caller will invoke exception handler
7893 }
7894 //printf("source= %x\n",(int)source);
7895 }
7896#endif
7897 else {
7898 printf("Compile at bogus memory address: %x \n", (int)addr);
7899 exit(1);
7900 }
7901
7902 /* Pass 1: disassemble */
7903 /* Pass 2: register dependencies, branch targets */
7904 /* Pass 3: register allocation */
7905 /* Pass 4: branch dependencies */
7906 /* Pass 5: pre-alloc */
7907 /* Pass 6: optimize clean/dirty state */
7908 /* Pass 7: flag 32-bit registers */
7909 /* Pass 8: assembly */
7910 /* Pass 9: linker */
7911 /* Pass 10: garbage collection / free memory */
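/* Roughly how the passes feed each other: pass 1 fills the per-instruction
   tables (itype, opcode, rs1/rs2, rt1/rt2, imm, ba) for up to MAXBLOCK
   instructions; passes 2-7 build unneeded_reg[], regs[] and branch_regs[]
   (register maps plus dirty/32-bit state); pass 8 assembles host code into
   the out buffer; pass 9 resolves the recorded link_addr[] branch patches;
   pass 10 expires old blocks to reclaim translation-cache space. */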
7912
7913 int i,j;
7914 int done=0;
7915 unsigned int type,op,op2;
7916
7917 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7918
7919 /* Pass 1 disassembly */
7920
7921 for(i=0;!done;i++) {
7922 bt[i]=0;likely[i]=0;op2=0;
7923 opcode[i]=op=source[i]>>26;
7924 switch(op)
7925 {
7926 case 0x00: strcpy(insn[i],"special"); type=NI;
7927 op2=source[i]&0x3f;
7928 switch(op2)
7929 {
7930 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7931 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7932 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7933 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7934 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7935 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7936 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7937 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7938 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7939 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7940 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7941 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7942 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7943 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7944 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7945 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7946 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7947 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7948 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7949 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7950 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7951 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7952 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7953 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7954 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7955 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7956 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7957 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7958 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7959 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7960 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7961 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7962 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7963 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7964 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7965 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7966 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7967 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7968 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7969 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7970 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7971 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7972 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7973 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7974 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7975 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7976 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7977 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7978 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7979 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7980 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7981 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7982 }
7983 break;
7984 case 0x01: strcpy(insn[i],"regimm"); type=NI;
7985 op2=(source[i]>>16)&0x1f;
7986 switch(op2)
7987 {
7988 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7989 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7990 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7991 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7992 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7993 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7994 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7995 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7996 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7997 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7998 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7999 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8000 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8001 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8002 }
8003 break;
8004 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8005 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8006 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8007 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8008 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8009 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8010 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8011 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8012 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8013 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8014 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8015 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8016 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8017 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8018 case 0x10: strcpy(insn[i],"cop0"); type=NI;
8019 op2=(source[i]>>21)&0x1f;
8020 switch(op2)
8021 {
8022 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8023 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8024 case 0x10: strcpy(insn[i],"tlb"); type=NI;
8025 switch(source[i]&0x3f)
8026 {
8027 case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8028 case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8029 case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8030 case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
8031#ifdef PCSX
8032 case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8033#else
8034 case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
8035#endif
8036 }
8037 }
8038 break;
8039 case 0x11: strcpy(insn[i],"cop1"); type=NI;
8040 op2=(source[i]>>21)&0x1f;
8041 switch(op2)
8042 {
8043 case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8044 case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8045 case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8046 case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8047 case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8048 case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8049 case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8050 switch((source[i]>>16)&0x3)
8051 {
8052 case 0x00: strcpy(insn[i],"BC1F"); break;
8053 case 0x01: strcpy(insn[i],"BC1T"); break;
8054 case 0x02: strcpy(insn[i],"BC1FL"); break;
8055 case 0x03: strcpy(insn[i],"BC1TL"); break;
8056 }
8057 break;
8058 case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8059 switch(source[i]&0x3f)
8060 {
8061 case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8062 case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8063 case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8064 case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8065 case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8066 case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8067 case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8068 case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8069 case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8070 case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8071 case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8072 case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8073 case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8074 case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8075 case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8076 case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8077 case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8078 case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8079 case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8080 case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8081 case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8082 case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8083 case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8084 case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8085 case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8086 case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8087 case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8088 case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8089 case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8090 case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8091 case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8092 case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8093 case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8094 case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8095 case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8096 }
8097 break;
8098 case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8099 switch(source[i]&0x3f)
8100 {
8101 case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8102 case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8103 case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8104 case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8105 case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8106 case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8107 case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8108 case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8109 case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8110 case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8111 case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8112 case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8113 case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8114 case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8115 case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8116 case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8117 case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8118 case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8119 case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8120 case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8121 case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8122 case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8123 case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8124 case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8125 case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8126 case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8127 case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8128 case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8129 case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8130 case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8131 case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8132 case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8133 case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8134 case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8135 case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8136 }
8137 break;
8138 case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8139 switch(source[i]&0x3f)
8140 {
8141 case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8142 case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8143 }
8144 break;
8145 case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8146 switch(source[i]&0x3f)
8147 {
8148 case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8149 case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8150 }
8151 break;
8152 }
8153 break;
8154#ifndef FORCE32
8155 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8156 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8157 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8158 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8159 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8160 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8161 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8162 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8163#endif
8164 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8165 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8166 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8167 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8168 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8169 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8170 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8171 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8172 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8173 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8174 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8175 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8176#ifndef FORCE32
8177 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8178 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8179#endif
8180 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8181 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8182 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8183 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8184#ifndef FORCE32
8185 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8186 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8187 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8188#endif
8189 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8190 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8191#ifndef FORCE32
8192 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8193 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8194 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8195#endif
8196#ifdef PCSX
8197 case 0x12: strcpy(insn[i],"COP2"); type=NI;
8198 // note: COP MIPS-1 encoding differs from MIPS32
8199 op2=(source[i]>>21)&0x1f;
8200 if (source[i]&0x3f) {
8201 if (gte_handlers[source[i]&0x3f]!=NULL) {
8202 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8203 type=C2OP;
8204 }
8205 }
8206 else switch(op2)
8207 {
8208 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8209 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8210 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8211 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8212 }
8213 break;
8214 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8215 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8216 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8217#endif
8218 default: strcpy(insn[i],"???"); type=NI;
8219 printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8220 break;
8221 }
8222#ifdef PCSX
8223 /* detect branch in delay slot early */
8224 if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8225 opcode[i+1]=source[i+1]>>26;
8226 opcode2[i+1]=source[i+1]&0x3f;
8227 if((0<opcode[i+1]&&opcode[i+1]<8)||(opcode[i+1]==0&&(opcode2[i+1]==8||opcode2[i+1]==9))) {
8228 printf("branch in delay slot @%08x (%08x)\n", addr + i*4+4, addr);
8229         // don't compile the first branch; fall back to the interpreter when it is hit
8230 type=INTCALL;
8231 }
8232 }
8233#endif
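  /* Example of the case caught above (hypothetical sequence):
       beq r2,r3,L1
       j   L2          <- a branch sitting in the first branch's delay slot
     Nested delay slots are not modelled, so the first branch is demoted to
     INTCALL and the interpreter executes it instead. */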
8234 itype[i]=type;
8235 opcode2[i]=op2;
8236 /* Get registers/immediates */
8237 lt1[i]=0;
8238 us1[i]=0;
8239 us2[i]=0;
8240 dep1[i]=0;
8241 dep2[i]=0;
8242 switch(type) {
8243 case LOAD:
8244 rs1[i]=(source[i]>>21)&0x1f;
8245 rs2[i]=0;
8246 rt1[i]=(source[i]>>16)&0x1f;
8247 rt2[i]=0;
8248 imm[i]=(short)source[i];
8249 break;
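      /* Field decode example (standard MIPS I-type): LW r8,16(r29) is the
         word 0x8fa80010, giving rs1=(0x8fa80010>>21)&0x1f=29, rt1=8 and
         imm=(short)0x0010=16. */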
8250 case STORE:
8251 case STORELR:
8252 rs1[i]=(source[i]>>21)&0x1f;
8253 rs2[i]=(source[i]>>16)&0x1f;
8254 rt1[i]=0;
8255 rt2[i]=0;
8256 imm[i]=(short)source[i];
8257 if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8258 break;
8259 case LOADLR:
8260 // LWL/LWR only load part of the register,
8261 // therefore the target register must be treated as a source too
8262 rs1[i]=(source[i]>>21)&0x1f;
8263 rs2[i]=(source[i]>>16)&0x1f;
8264 rt1[i]=(source[i]>>16)&0x1f;
8265 rt2[i]=0;
8266 imm[i]=(short)source[i];
8267 if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8268 if(op==0x26) dep1[i]=rt1[i]; // LWR
8269 break;
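      /* e.g. LWR r5,1(r3) replaces only some bytes of r5 (how many depends
         on the address alignment), so the old value of r5 is itself an
         input; hence rt1 is recorded here and, for LWR, listed in dep1. */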
8270 case IMM16:
8271 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8272 else rs1[i]=(source[i]>>21)&0x1f;
8273 rs2[i]=0;
8274 rt1[i]=(source[i]>>16)&0x1f;
8275 rt2[i]=0;
8276 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8277 imm[i]=(unsigned short)source[i];
8278 }else{
8279 imm[i]=(short)source[i];
8280 }
8281 if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8282 if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8283 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8284 break;
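      /* Immediate example: ANDI/ORI/XORI zero-extend while the rest
         sign-extend, so ORI r2,r0,0x8000 yields 0x00008000 but
         ADDIU r2,r0,0x8000 yields 0xffff8000. */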
8285 case UJUMP:
8286 rs1[i]=0;
8287 rs2[i]=0;
8288 rt1[i]=0;
8289 rt2[i]=0;
8290 // The JAL instruction writes to r31.
8291 if (op&1) {
8292 rt1[i]=31;
8293 }
8294 rs2[i]=CCREG;
8295 break;
8296 case RJUMP:
8297 rs1[i]=(source[i]>>21)&0x1f;
8298 rs2[i]=0;
8299 rt1[i]=0;
8300 rt2[i]=0;
8301 // The JALR instruction writes to rd.
8302 if (op2&1) {
8303 rt1[i]=(source[i]>>11)&0x1f;
8304 }
8305 rs2[i]=CCREG;
8306 break;
8307 case CJUMP:
8308 rs1[i]=(source[i]>>21)&0x1f;
8309 rs2[i]=(source[i]>>16)&0x1f;
8310 rt1[i]=0;
8311 rt2[i]=0;
8312 if(op&2) { // BGTZ/BLEZ
8313 rs2[i]=0;
8314 }
8315 us1[i]=rs1[i];
8316 us2[i]=rs2[i];
8317 likely[i]=op>>4;
8318 break;
8319 case SJUMP:
8320 rs1[i]=(source[i]>>21)&0x1f;
8321 rs2[i]=CCREG;
8322 rt1[i]=0;
8323 rt2[i]=0;
8324 us1[i]=rs1[i];
8325 if(op2&0x10) { // BxxAL
8326 rt1[i]=31;
8327 // NOTE: If the branch is not taken, r31 is still overwritten
8328 }
8329 likely[i]=(op2&2)>>1;
8330 break;
8331 case FJUMP:
8332 rs1[i]=FSREG;
8333 rs2[i]=CSREG;
8334 rt1[i]=0;
8335 rt2[i]=0;
8336 likely[i]=((source[i])>>17)&1;
8337 break;
8338 case ALU:
8339 rs1[i]=(source[i]>>21)&0x1f; // source
8340       rs2[i]=(source[i]>>16)&0x1f; // second source (the subtrahend for SUB/SUBU)
8341 rt1[i]=(source[i]>>11)&0x1f; // destination
8342 rt2[i]=0;
8343 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8344 us1[i]=rs1[i];us2[i]=rs2[i];
8345 }
8346 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8347 dep1[i]=rs1[i];dep2[i]=rs2[i];
8348 }
8349 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8350 dep1[i]=rs1[i];dep2[i]=rs2[i];
8351 }
8352 break;
8353 case MULTDIV:
8354 rs1[i]=(source[i]>>21)&0x1f; // source
8355       rs2[i]=(source[i]>>16)&0x1f; // second operand (multiplier or divisor)
8356 rt1[i]=HIREG;
8357 rt2[i]=LOREG;
8358 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8359 us1[i]=rs1[i];us2[i]=rs2[i];
8360 }
8361 break;
8362 case MOV:
8363 rs1[i]=0;
8364 rs2[i]=0;
8365 rt1[i]=0;
8366 rt2[i]=0;
8367 if(op2==0x10) rs1[i]=HIREG; // MFHI
8368 if(op2==0x11) rt1[i]=HIREG; // MTHI
8369 if(op2==0x12) rs1[i]=LOREG; // MFLO
8370 if(op2==0x13) rt1[i]=LOREG; // MTLO
8371 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8372 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8373 dep1[i]=rs1[i];
8374 break;
8375 case SHIFT:
8376 rs1[i]=(source[i]>>16)&0x1f; // target of shift
8377 rs2[i]=(source[i]>>21)&0x1f; // shift amount
8378 rt1[i]=(source[i]>>11)&0x1f; // destination
8379 rt2[i]=0;
8380 // DSLLV/DSRLV/DSRAV are 64-bit
8381 if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8382 break;
8383 case SHIFTIMM:
8384 rs1[i]=(source[i]>>16)&0x1f;
8385 rs2[i]=0;
8386 rt1[i]=(source[i]>>11)&0x1f;
8387 rt2[i]=0;
8388 imm[i]=(source[i]>>6)&0x1f;
8389 // DSxx32 instructions
8390 if(op2>=0x3c) imm[i]|=0x20;
8391 // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8392 if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8393 break;
8394 case COP0:
8395 rs1[i]=0;
8396 rs2[i]=0;
8397 rt1[i]=0;
8398 rt2[i]=0;
8399 if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8400 if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8401 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8402 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8403 break;
8404 case COP1:
8405 case COP2:
8406 rs1[i]=0;
8407 rs2[i]=0;
8408 rt1[i]=0;
8409 rt2[i]=0;
8410 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8411 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8412 if(op2==5) us1[i]=rs1[i]; // DMTC1
8413 rs2[i]=CSREG;
8414 break;
8415 case C1LS:
8416 rs1[i]=(source[i]>>21)&0x1F;
8417 rs2[i]=CSREG;
8418 rt1[i]=0;
8419 rt2[i]=0;
8420 imm[i]=(short)source[i];
8421 break;
8422 case C2LS:
8423 rs1[i]=(source[i]>>21)&0x1F;
8424 rs2[i]=0;
8425 rt1[i]=0;
8426 rt2[i]=0;
8427 imm[i]=(short)source[i];
8428 break;
8429 case FLOAT:
8430 case FCONV:
8431 rs1[i]=0;
8432 rs2[i]=CSREG;
8433 rt1[i]=0;
8434 rt2[i]=0;
8435 break;
8436 case FCOMP:
8437 rs1[i]=FSREG;
8438 rs2[i]=CSREG;
8439 rt1[i]=FSREG;
8440 rt2[i]=0;
8441 break;
8442 case SYSCALL:
8443 case HLECALL:
8444 case INTCALL:
8445 rs1[i]=CCREG;
8446 rs2[i]=0;
8447 rt1[i]=0;
8448 rt2[i]=0;
8449 break;
8450 default:
8451 rs1[i]=0;
8452 rs2[i]=0;
8453 rt1[i]=0;
8454 rt2[i]=0;
8455 }
8456 /* Calculate branch target addresses */
8457 if(type==UJUMP)
8458 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8459 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8460 ba[i]=start+i*4+8; // Ignore never taken branch
8461 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8462 ba[i]=start+i*4+8; // Ignore never taken branch
8463 else if(type==CJUMP||type==SJUMP||type==FJUMP)
8464 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8465 else ba[i]=-1;
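    /* The shifts above are scaled field extractions.  For J/JAL,
       ((unsigned)source<<6)>>4 equals (source&0x03ffffff)<<2; e.g. the word
       0x08004000 at address 0x80004000 targets
       ((0x80004004)&0xf0000000)|0x00010000 = 0x80010000.  For conditional
       branches, ((signed)((unsigned)source<<16)>>14) is the 16-bit offset
       sign-extended and scaled by 4. */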
8466 /* Is this the end of the block? */
8467 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8468#ifdef PCSX
8469 // check for link register access in delay slot
8470 int rt1_=rt1[i-1];
8471 if(rt1_!=0&&(rs1[i]==rt1_||rs2[i]==rt1_||rt1[i]==rt1_||rt2[i]==rt1_)) {
8472 printf("link access in delay slot @%08x (%08x)\n", addr + i*4, addr);
8473 ba[i-1]=-1;
8474 itype[i-1]=INTCALL;
8475 done=2;
8476 }
8477 else
8478#endif
8479 if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8480 done=2;
8481 }
8482 else {
8483 if(stop_after_jal) done=1;
8484 // Stop on BREAK
8485 if((source[i+1]&0xfc00003f)==0x0d) done=1;
8486 }
8487 // Don't recompile stuff that's already compiled
8488 if(check_addr(start+i*4+4)) done=1;
8489 // Don't get too close to the limit
8490 if(i>MAXBLOCK/2) done=1;
8491 }
8492 if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8493 if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8494 if(done==2) {
8495 // Does the block continue due to a branch?
8496 for(j=i-1;j>=0;j--)
8497 {
8498 if(ba[j]==start+i*4+4) done=j=0;
8499 if(ba[j]==start+i*4+8) done=j=0;
8500 }
8501 }
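    /* done==1 ends the block after this instruction; done==2 is tentative:
       after an unconditional jump (J/JR, or "beq r0,r0", which is what
       (source>>16)==0x1000 matches) the loop above keeps the block open if
       an earlier branch targets the code just past the delay slot. */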
8502 //assert(i<MAXBLOCK-1);
8503 if(start+i*4==pagelimit-4) done=1;
8504 assert(start+i*4<pagelimit);
8505 if (i==MAXBLOCK-1) done=1;
8506 // Stop if we're compiling junk
8507 if(itype[i]==NI&&opcode[i]==0x11) {
8508 done=stop_after_jal=1;
8509 printf("Disabled speculative precompilation\n");
8510 }
8511 }
8512 slen=i;
8513 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8514 if(start+i*4==pagelimit) {
8515 itype[i-1]=SPAN;
8516 }
8517 }
8518 assert(slen>0);
8519
8520 /* Pass 2 - Register dependencies and branch targets */
8521
8522 unneeded_registers(0,slen-1,0);
8523
8524 /* Pass 3 - Register allocation */
8525
8526 struct regstat current; // Current register allocations/status
8527 current.is32=1;
8528 current.dirty=0;
8529 current.u=unneeded_reg[0];
8530 current.uu=unneeded_reg_upper[0];
8531 clear_all_regs(current.regmap);
8532 alloc_reg(&current,0,CCREG);
8533 dirty_reg(&current,CCREG);
8534 current.isconst=0;
8535 current.wasconst=0;
8536 int ds=0;
8537 int cc=0;
8538 int hr;
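  /* "current" is the allocator state swept forward over the block: regmap[]
     maps each host register to the MIPS register it holds, dirty marks
     values not yet written back to the register file, and is32 marks MIPS
     registers whose upper half is just the sign extension of the low 32
     bits. */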
8539
8540#ifndef FORCE32
8541 provisional_32bit();
8542#endif
8543 if((u_int)addr&1) {
8544 // First instruction is delay slot
8545 cc=-1;
8546 bt[1]=1;
8547 ds=1;
8548 unneeded_reg[0]=1;
8549 unneeded_reg_upper[0]=1;
8550 current.regmap[HOST_BTREG]=BTREG;
8551 }
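  /* Bit 0 of addr appears to serve as an "entry point is a delay slot" flag:
     such blocks start with cc=-1 and with BTREG mapped so the stored branch
     target is available once the first (delay slot) instruction finishes. */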
8552
8553 for(i=0;i<slen;i++)
8554 {
8555 if(bt[i])
8556 {
8557 int hr;
8558 for(hr=0;hr<HOST_REGS;hr++)
8559 {
8560 // Is this really necessary?
8561 if(current.regmap[hr]==0) current.regmap[hr]=-1;
8562 }
8563 current.isconst=0;
8564 }
8565 if(i>1)
8566 {
8567 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8568 {
8569 if(rs1[i-2]==0||rs2[i-2]==0)
8570 {
8571 if(rs1[i-2]) {
8572 current.is32|=1LL<<rs1[i-2];
8573 int hr=get_reg(current.regmap,rs1[i-2]|64);
8574 if(hr>=0) current.regmap[hr]=-1;
8575 }
8576 if(rs2[i-2]) {
8577 current.is32|=1LL<<rs2[i-2];
8578 int hr=get_reg(current.regmap,rs2[i-2]|64);
8579 if(hr>=0) current.regmap[hr]=-1;
8580 }
8581 }
8582 }
8583 }
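  /* The check above exploits fall-through knowledge: if the branch two
     instructions back was BNE/BNEL against r0 and we got here by falling
     through, the other operand must have been zero, so that register can be
     marked 32-bit and its stale upper-half (reg|64) mapping dropped. */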
8584#ifndef FORCE32
8585 // If something jumps here with 64-bit values
8586 // then promote those registers to 64 bits
8587 if(bt[i])
8588 {
8589 uint64_t temp_is32=current.is32;
8590 for(j=i-1;j>=0;j--)
8591 {
8592 if(ba[j]==start+i*4)
8593 temp_is32&=branch_regs[j].is32;
8594 }
8595 for(j=i;j<slen;j++)
8596 {
8597 if(ba[j]==start+i*4)
8598 //temp_is32=1;
8599 temp_is32&=p32[j];
8600 }
8601 if(temp_is32!=current.is32) {
8602 //printf("dumping 32-bit regs (%x)\n",start+i*4);
8603 #ifdef DESTRUCTIVE_WRITEBACK
8604 for(hr=0;hr<HOST_REGS;hr++)
8605 {
8606 int r=current.regmap[hr];
8607 if(r>0&&r<64)
8608 {
8609 if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8610 temp_is32|=1LL<<r;
8611 //printf("restore %d\n",r);
8612 }
8613 }
8614 }
8615 #endif
8616 current.is32=temp_is32;
8617 }
8618 }
8619#else
8620 current.is32=-1LL;
8621#endif
8622
8623 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8624 regs[i].wasconst=current.isconst;
8625 regs[i].was32=current.is32;
8626 regs[i].wasdirty=current.dirty;
8627 #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
8628 // To change a dirty register from 32 to 64 bits, we must write
8629 // it out during the previous cycle (for branches, 2 cycles)
8630 if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8631 {
8632 uint64_t temp_is32=current.is32;
8633 for(j=i-1;j>=0;j--)
8634 {
8635 if(ba[j]==start+i*4+4)
8636 temp_is32&=branch_regs[j].is32;
8637 }
8638 for(j=i;j<slen;j++)
8639 {
8640 if(ba[j]==start+i*4+4)
8641 //temp_is32=1;
8642 temp_is32&=p32[j];
8643 }
8644 if(temp_is32!=current.is32) {
8645 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8646 for(hr=0;hr<HOST_REGS;hr++)
8647 {
8648 int r=current.regmap[hr];
8649 if(r>0)
8650 {
8651 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8652 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8653 {
8654 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8655 {
8656 //printf("dump %d/r%d\n",hr,r);
8657 current.regmap[hr]=-1;
8658 if(get_reg(current.regmap,r|64)>=0)
8659 current.regmap[get_reg(current.regmap,r|64)]=-1;
8660 }
8661 }
8662 }
8663 }
8664 }
8665 }
8666 }
8667 else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8668 {
8669 uint64_t temp_is32=current.is32;
8670 for(j=i-1;j>=0;j--)
8671 {
8672 if(ba[j]==start+i*4+8)
8673 temp_is32&=branch_regs[j].is32;
8674 }
8675 for(j=i;j<slen;j++)
8676 {
8677 if(ba[j]==start+i*4+8)
8678 //temp_is32=1;
8679 temp_is32&=p32[j];
8680 }
8681 if(temp_is32!=current.is32) {
8682 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8683 for(hr=0;hr<HOST_REGS;hr++)
8684 {
8685 int r=current.regmap[hr];
8686 if(r>0)
8687 {
8688 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8689 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8690 {
8691 //printf("dump %d/r%d\n",hr,r);
8692 current.regmap[hr]=-1;
8693 if(get_reg(current.regmap,r|64)>=0)
8694 current.regmap[get_reg(current.regmap,r|64)]=-1;
8695 }
8696 }
8697 }
8698 }
8699 }
8700 }
8701 #endif
8702 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8703 if(i+1<slen) {
8704 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8705 current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8706 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8707 current.u|=1;
8708 current.uu|=1;
8709 } else {
8710 current.u=1;
8711 current.uu=1;
8712 }
8713 } else {
8714 if(i+1<slen) {
8715 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8716 current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8717 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8718 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8719 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8720 current.u|=1;
8721 current.uu|=1;
8722 } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8723 }
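  /* current.u / current.uu are "dead past this point" masks derived from
     unneeded_reg[]: bit r set means MIPS register r (resp. its upper half)
     need not be preserved.  The current instruction's own sources are always
     cleared from the mask, and bit 0 stays set because r0 is never kept. */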
8724 is_ds[i]=ds;
8725 if(ds) {
8726 ds=0; // Skip delay slot, already allocated as part of branch
8727 // ...but we need to alloc it in case something jumps here
8728 if(i+1<slen) {
8729 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8730 current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8731 }else{
8732 current.u=branch_unneeded_reg[i-1];
8733 current.uu=branch_unneeded_reg_upper[i-1];
8734 }
8735 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8736 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8737 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8738 current.u|=1;
8739 current.uu|=1;
8740 struct regstat temp;
8741 memcpy(&temp,&current,sizeof(current));
8742 temp.wasdirty=temp.dirty;
8743 temp.was32=temp.is32;
8744 // TODO: Take into account unconditional branches, as below
8745 delayslot_alloc(&temp,i);
8746 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8747 regs[i].wasdirty=temp.wasdirty;
8748 regs[i].was32=temp.was32;
8749 regs[i].dirty=temp.dirty;
8750 regs[i].is32=temp.is32;
8751 regs[i].isconst=0;
8752 regs[i].wasconst=0;
8753 current.isconst=0;
8754 // Create entry (branch target) regmap
8755 for(hr=0;hr<HOST_REGS;hr++)
8756 {
8757 int r=temp.regmap[hr];
8758 if(r>=0) {
8759 if(r!=regmap_pre[i][hr]) {
8760 regs[i].regmap_entry[hr]=-1;
8761 }
8762 else
8763 {
8764 if(r<64){
8765 if((current.u>>r)&1) {
8766 regs[i].regmap_entry[hr]=-1;
8767 regs[i].regmap[hr]=-1;
8768 //Don't clear regs in the delay slot as the branch might need them
8769 //current.regmap[hr]=-1;
8770 }else
8771 regs[i].regmap_entry[hr]=r;
8772 }
8773 else {
8774 if((current.uu>>(r&63))&1) {
8775 regs[i].regmap_entry[hr]=-1;
8776 regs[i].regmap[hr]=-1;
8777 //Don't clear regs in the delay slot as the branch might need them
8778 //current.regmap[hr]=-1;
8779 }else
8780 regs[i].regmap_entry[hr]=r;
8781 }
8782 }
8783 } else {
8784 // First instruction expects CCREG to be allocated
8785 if(i==0&&hr==HOST_CCREG)
8786 regs[i].regmap_entry[hr]=CCREG;
8787 else
8788 regs[i].regmap_entry[hr]=-1;
8789 }
8790 }
8791 }
8792 else { // Not delay slot
8793 switch(itype[i]) {
8794 case UJUMP:
8795 //current.isconst=0; // DEBUG
8796 //current.wasconst=0; // DEBUG
8797 //regs[i].wasconst=0; // DEBUG
8798 clear_const(&current,rt1[i]);
8799 alloc_cc(&current,i);
8800 dirty_reg(&current,CCREG);
8801 if (rt1[i]==31) {
8802 alloc_reg(&current,i,31);
8803 dirty_reg(&current,31);
8804 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8805 assert(rt1[i+1]!=rt1[i]);
8806 #ifdef REG_PREFETCH
8807 alloc_reg(&current,i,PTEMP);
8808 #endif
8809 //current.is32|=1LL<<rt1[i];
8810 }
8811 delayslot_alloc(&current,i+1);
8812 //current.isconst=0; // DEBUG
8813 ds=1;
8814 //printf("i=%d, isconst=%x\n",i,current.isconst);
8815 break;
8816 case RJUMP:
8817 //current.isconst=0;
8818 //current.wasconst=0;
8819 //regs[i].wasconst=0;
8820 clear_const(&current,rs1[i]);
8821 clear_const(&current,rt1[i]);
8822 alloc_cc(&current,i);
8823 dirty_reg(&current,CCREG);
8824 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8825 alloc_reg(&current,i,rs1[i]);
8826 if (rt1[i]!=0) {
8827 alloc_reg(&current,i,rt1[i]);
8828 dirty_reg(&current,rt1[i]);
8829 assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
8830 assert(rt1[i+1]!=rt1[i]);
8831 #ifdef REG_PREFETCH
8832 alloc_reg(&current,i,PTEMP);
8833 #endif
8834 }
8835 #ifdef USE_MINI_HT
8836 if(rs1[i]==31) { // JALR
8837 alloc_reg(&current,i,RHASH);
8838 #ifndef HOST_IMM_ADDR32
8839 alloc_reg(&current,i,RHTBL);
8840 #endif
8841 }
8842 #endif
8843 delayslot_alloc(&current,i+1);
8844 } else {
8845 // The delay slot overwrites our source register,
8846 // allocate a temporary register to hold the old value.
8847 current.isconst=0;
8848 current.wasconst=0;
8849 regs[i].wasconst=0;
8850 delayslot_alloc(&current,i+1);
8851 current.isconst=0;
8852 alloc_reg(&current,i,RTEMP);
8853 }
8854 //current.isconst=0; // DEBUG
8855 ds=1;
8856 break;
8857 case CJUMP:
8858 //current.isconst=0;
8859 //current.wasconst=0;
8860 //regs[i].wasconst=0;
8861 clear_const(&current,rs1[i]);
8862 clear_const(&current,rs2[i]);
8863 if((opcode[i]&0x3E)==4) // BEQ/BNE
8864 {
8865 alloc_cc(&current,i);
8866 dirty_reg(&current,CCREG);
8867 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8868 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8869 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8870 {
8871 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8872 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8873 }
8874 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8875 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8876 // The delay slot overwrites one of our conditions.
8877 // Allocate the branch condition registers instead.
8878 // Note that such a sequence of instructions could
8879 // be considered a bug since the branch can not be
8880 // re-executed if an exception occurs.
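              /* Hypothetical example of such a sequence:
                   beq  r2,r3,target
                   addu r2,r2,r4    <- slot clobbers a branch operand
                 so the branch operands are kept allocated here and the slot
                 is not allocated through delayslot_alloc(). */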
8881 current.isconst=0;
8882 current.wasconst=0;
8883 regs[i].wasconst=0;
8884 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8885 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8886 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8887 {
8888 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8889 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8890 }
8891 }
8892 else delayslot_alloc(&current,i+1);
8893 }
8894 else
8895 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8896 {
8897 alloc_cc(&current,i);
8898 dirty_reg(&current,CCREG);
8899 alloc_reg(&current,i,rs1[i]);
8900 if(!(current.is32>>rs1[i]&1))
8901 {
8902 alloc_reg64(&current,i,rs1[i]);
8903 }
8904 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8905 // The delay slot overwrites one of our conditions.
8906 // Allocate the branch condition registers instead.
8907 // Note that such a sequence of instructions could
8908 // be considered a bug since the branch can not be
8909 // re-executed if an exception occurs.
8910 current.isconst=0;
8911 current.wasconst=0;
8912 regs[i].wasconst=0;
8913 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8914 if(!((current.is32>>rs1[i])&1))
8915 {
8916 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8917 }
8918 }
8919 else delayslot_alloc(&current,i+1);
8920 }
8921 else
8922 // Don't alloc the delay slot yet because we might not execute it
8923 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8924 {
8925 current.isconst=0;
8926 current.wasconst=0;
8927 regs[i].wasconst=0;
8928 alloc_cc(&current,i);
8929 dirty_reg(&current,CCREG);
8930 alloc_reg(&current,i,rs1[i]);
8931 alloc_reg(&current,i,rs2[i]);
8932 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8933 {
8934 alloc_reg64(&current,i,rs1[i]);
8935 alloc_reg64(&current,i,rs2[i]);
8936 }
8937 }
8938 else
8939 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8940 {
8941 current.isconst=0;
8942 current.wasconst=0;
8943 regs[i].wasconst=0;
8944 alloc_cc(&current,i);
8945 dirty_reg(&current,CCREG);
8946 alloc_reg(&current,i,rs1[i]);
8947 if(!(current.is32>>rs1[i]&1))
8948 {
8949 alloc_reg64(&current,i,rs1[i]);
8950 }
8951 }
8952 ds=1;
8953 //current.isconst=0;
8954 break;
8955 case SJUMP:
8956 //current.isconst=0;
8957 //current.wasconst=0;
8958 //regs[i].wasconst=0;
8959 clear_const(&current,rs1[i]);
8960 clear_const(&current,rt1[i]);
8961 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8962 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8963 {
8964 alloc_cc(&current,i);
8965 dirty_reg(&current,CCREG);
8966 alloc_reg(&current,i,rs1[i]);
8967 if(!(current.is32>>rs1[i]&1))
8968 {
8969 alloc_reg64(&current,i,rs1[i]);
8970 }
8971 if (rt1[i]==31) { // BLTZAL/BGEZAL
8972 alloc_reg(&current,i,31);
8973 dirty_reg(&current,31);
8974 //#ifdef REG_PREFETCH
8975 //alloc_reg(&current,i,PTEMP);
8976 //#endif
8977 //current.is32|=1LL<<rt1[i];
8978 }
8979 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8980 // The delay slot overwrites the branch condition.
8981 // Allocate the branch condition registers instead.
8982 // Note that such a sequence of instructions could
8983 // be considered a bug since the branch can not be
8984 // re-executed if an exception occurs.
8985 current.isconst=0;
8986 current.wasconst=0;
8987 regs[i].wasconst=0;
8988 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8989 if(!((current.is32>>rs1[i])&1))
8990 {
8991 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8992 }
8993 }
8994 else delayslot_alloc(&current,i+1);
8995 }
8996 else
8997 // Don't alloc the delay slot yet because we might not execute it
8998 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8999 {
9000 current.isconst=0;
9001 current.wasconst=0;
9002 regs[i].wasconst=0;
9003 alloc_cc(&current,i);
9004 dirty_reg(&current,CCREG);
9005 alloc_reg(&current,i,rs1[i]);
9006 if(!(current.is32>>rs1[i]&1))
9007 {
9008 alloc_reg64(&current,i,rs1[i]);
9009 }
9010 }
9011 ds=1;
9012 //current.isconst=0;
9013 break;
9014 case FJUMP:
9015 current.isconst=0;
9016 current.wasconst=0;
9017 regs[i].wasconst=0;
9018 if(likely[i]==0) // BC1F/BC1T
9019 {
9020 // TODO: Theoretically we can run out of registers here on x86.
9021 // The delay slot can allocate up to six, and we need to check
9022 // CSREG before executing the delay slot. Possibly we can drop
9023 // the cycle count and then reload it after checking that the
9024 // FPU is in a usable state, or don't do out-of-order execution.
9025 alloc_cc(&current,i);
9026 dirty_reg(&current,CCREG);
9027 alloc_reg(&current,i,FSREG);
9028 alloc_reg(&current,i,CSREG);
9029 if(itype[i+1]==FCOMP) {
9030 // The delay slot overwrites the branch condition.
9031 // Allocate the branch condition registers instead.
9032 // Note that such a sequence of instructions could
9033 // be considered a bug since the branch can not be
9034 // re-executed if an exception occurs.
9035 alloc_cc(&current,i);
9036 dirty_reg(&current,CCREG);
9037 alloc_reg(&current,i,CSREG);
9038 alloc_reg(&current,i,FSREG);
9039 }
9040 else {
9041 delayslot_alloc(&current,i+1);
9042 alloc_reg(&current,i+1,CSREG);
9043 }
9044 }
9045 else
9046 // Don't alloc the delay slot yet because we might not execute it
9047 if(likely[i]) // BC1FL/BC1TL
9048 {
9049 alloc_cc(&current,i);
9050 dirty_reg(&current,CCREG);
9051 alloc_reg(&current,i,CSREG);
9052 alloc_reg(&current,i,FSREG);
9053 }
9054 ds=1;
9055 current.isconst=0;
9056 break;
9057 case IMM16:
9058 imm16_alloc(&current,i);
9059 break;
9060 case LOAD:
9061 case LOADLR:
9062 load_alloc(&current,i);
9063 break;
9064 case STORE:
9065 case STORELR:
9066 store_alloc(&current,i);
9067 break;
9068 case ALU:
9069 alu_alloc(&current,i);
9070 break;
9071 case SHIFT:
9072 shift_alloc(&current,i);
9073 break;
9074 case MULTDIV:
9075 multdiv_alloc(&current,i);
9076 break;
9077 case SHIFTIMM:
9078 shiftimm_alloc(&current,i);
9079 break;
9080 case MOV:
9081 mov_alloc(&current,i);
9082 break;
9083 case COP0:
9084 cop0_alloc(&current,i);
9085 break;
9086 case COP1:
9087 case COP2:
9088 cop1_alloc(&current,i);
9089 break;
9090 case C1LS:
9091 c1ls_alloc(&current,i);
9092 break;
9093 case C2LS:
9094 c2ls_alloc(&current,i);
9095 break;
9096 case C2OP:
9097 c2op_alloc(&current,i);
9098 break;
9099 case FCONV:
9100 fconv_alloc(&current,i);
9101 break;
9102 case FLOAT:
9103 float_alloc(&current,i);
9104 break;
9105 case FCOMP:
9106 fcomp_alloc(&current,i);
9107 break;
9108 case SYSCALL:
9109 case HLECALL:
9110 case INTCALL:
9111 syscall_alloc(&current,i);
9112 break;
9113 case SPAN:
9114 pagespan_alloc(&current,i);
9115 break;
9116 }
9117
9118 // Drop the upper half of registers that have become 32-bit
9119 current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9120 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9121 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9122 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9123 current.uu|=1;
9124 } else {
9125 current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9126 current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9127 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9128 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9129 current.uu|=1;
9130 }
9131
9132 // Create entry (branch target) regmap
9133 for(hr=0;hr<HOST_REGS;hr++)
9134 {
9135 int r,or,er;
9136 r=current.regmap[hr];
9137 if(r>=0) {
9138 if(r!=regmap_pre[i][hr]) {
9139 // TODO: delay slot (?)
9140 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9141 if(or<0||(r&63)>=TEMPREG){
9142 regs[i].regmap_entry[hr]=-1;
9143 }
9144 else
9145 {
9146 // Just move it to a different register
9147 regs[i].regmap_entry[hr]=r;
9148 // If it was dirty before, it's still dirty
9149 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9150 }
9151 }
9152 else
9153 {
9154 // Unneeded
9155 if(r==0){
9156 regs[i].regmap_entry[hr]=0;
9157 }
9158 else
9159 if(r<64){
9160 if((current.u>>r)&1) {
9161 regs[i].regmap_entry[hr]=-1;
9162 //regs[i].regmap[hr]=-1;
9163 current.regmap[hr]=-1;
9164 }else
9165 regs[i].regmap_entry[hr]=r;
9166 }
9167 else {
9168 if((current.uu>>(r&63))&1) {
9169 regs[i].regmap_entry[hr]=-1;
9170 //regs[i].regmap[hr]=-1;
9171 current.regmap[hr]=-1;
9172 }else
9173 regs[i].regmap_entry[hr]=r;
9174 }
9175 }
9176 } else {
9177 // Branches expect CCREG to be allocated at the target
9178 if(regmap_pre[i][hr]==CCREG)
9179 regs[i].regmap_entry[hr]=CCREG;
9180 else
9181 regs[i].regmap_entry[hr]=-1;
9182 }
9183 }
9184 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9185 }
9186 /* Branch post-alloc */
9187 if(i>0)
9188 {
9189 current.was32=current.is32;
9190 current.wasdirty=current.dirty;
9191 switch(itype[i-1]) {
9192 case UJUMP:
9193 memcpy(&branch_regs[i-1],&current,sizeof(current));
9194 branch_regs[i-1].isconst=0;
9195 branch_regs[i-1].wasconst=0;
9196 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9197 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9198 alloc_cc(&branch_regs[i-1],i-1);
9199 dirty_reg(&branch_regs[i-1],CCREG);
9200 if(rt1[i-1]==31) { // JAL
9201 alloc_reg(&branch_regs[i-1],i-1,31);
9202 dirty_reg(&branch_regs[i-1],31);
9203 branch_regs[i-1].is32|=1LL<<31;
9204 }
9205 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9206 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9207 break;
9208 case RJUMP:
9209 memcpy(&branch_regs[i-1],&current,sizeof(current));
9210 branch_regs[i-1].isconst=0;
9211 branch_regs[i-1].wasconst=0;
9212 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9213 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9214 alloc_cc(&branch_regs[i-1],i-1);
9215 dirty_reg(&branch_regs[i-1],CCREG);
9216 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9217 if(rt1[i-1]!=0) { // JALR
9218 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9219 dirty_reg(&branch_regs[i-1],rt1[i-1]);
9220 branch_regs[i-1].is32|=1LL<<rt1[i-1];
9221 }
9222 #ifdef USE_MINI_HT
9223 if(rs1[i-1]==31) { // JALR
9224 alloc_reg(&branch_regs[i-1],i-1,RHASH);
9225 #ifndef HOST_IMM_ADDR32
9226 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9227 #endif
9228 }
9229 #endif
9230 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9231 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9232 break;
9233 case CJUMP:
9234 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9235 {
9236 alloc_cc(&current,i-1);
9237 dirty_reg(&current,CCREG);
9238 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9239 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9240 // The delay slot overwrote one of our conditions
9241 // Delay slot goes after the test (in order)
9242 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9243 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9244 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9245 current.u|=1;
9246 current.uu|=1;
9247 delayslot_alloc(&current,i);
9248 current.isconst=0;
9249 }
9250 else
9251 {
9252 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9253 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9254 // Alloc the branch condition registers
9255 if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9256 if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9257 if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9258 {
9259 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9260 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9261 }
9262 }
9263 memcpy(&branch_regs[i-1],&current,sizeof(current));
9264 branch_regs[i-1].isconst=0;
9265 branch_regs[i-1].wasconst=0;
9266 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9267 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9268 }
9269 else
9270 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9271 {
9272 alloc_cc(&current,i-1);
9273 dirty_reg(&current,CCREG);
9274 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9275 // The delay slot overwrote the branch condition
9276 // Delay slot goes after the test (in order)
9277 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9278 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9279 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9280 current.u|=1;
9281 current.uu|=1;
9282 delayslot_alloc(&current,i);
9283 current.isconst=0;
9284 }
9285 else
9286 {
9287 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9288 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9289 // Alloc the branch condition register
9290 alloc_reg(&current,i-1,rs1[i-1]);
9291 if(!(current.is32>>rs1[i-1]&1))
9292 {
9293 alloc_reg64(&current,i-1,rs1[i-1]);
9294 }
9295 }
9296 memcpy(&branch_regs[i-1],&current,sizeof(current));
9297 branch_regs[i-1].isconst=0;
9298 branch_regs[i-1].wasconst=0;
9299 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9300 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9301 }
9302 else
9303 // Alloc the delay slot in case the branch is taken
9304 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9305 {
9306 memcpy(&branch_regs[i-1],&current,sizeof(current));
9307 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9308 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9309 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9310 alloc_cc(&branch_regs[i-1],i);
9311 dirty_reg(&branch_regs[i-1],CCREG);
9312 delayslot_alloc(&branch_regs[i-1],i);
9313 branch_regs[i-1].isconst=0;
9314 alloc_reg(&current,i,CCREG); // Not taken path
9315 dirty_reg(&current,CCREG);
9316 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9317 }
9318 else
9319 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9320 {
9321 memcpy(&branch_regs[i-1],&current,sizeof(current));
9322 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9323 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9324 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9325 alloc_cc(&branch_regs[i-1],i);
9326 dirty_reg(&branch_regs[i-1],CCREG);
9327 delayslot_alloc(&branch_regs[i-1],i);
9328 branch_regs[i-1].isconst=0;
9329 alloc_reg(&current,i,CCREG); // Not taken path
9330 dirty_reg(&current,CCREG);
9331 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9332 }
9333 break;
9334 case SJUMP:
9335 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9336 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9337 {
9338 alloc_cc(&current,i-1);
9339 dirty_reg(&current,CCREG);
9340 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9341 // The delay slot overwrote the branch condition
9342 // Delay slot goes after the test (in order)
9343 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9344 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9345 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9346 current.u|=1;
9347 current.uu|=1;
9348 delayslot_alloc(&current,i);
9349 current.isconst=0;
9350 }
9351 else
9352 {
9353 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9354 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9355 // Alloc the branch condition register
9356 alloc_reg(&current,i-1,rs1[i-1]);
9357 if(!(current.is32>>rs1[i-1]&1))
9358 {
9359 alloc_reg64(&current,i-1,rs1[i-1]);
9360 }
9361 }
9362 memcpy(&branch_regs[i-1],&current,sizeof(current));
9363 branch_regs[i-1].isconst=0;
9364 branch_regs[i-1].wasconst=0;
9365 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9366 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9367 }
9368 else
9369 // Alloc the delay slot in case the branch is taken
9370 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9371 {
9372 memcpy(&branch_regs[i-1],&current,sizeof(current));
9373 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9374 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9375 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9376 alloc_cc(&branch_regs[i-1],i);
9377 dirty_reg(&branch_regs[i-1],CCREG);
9378 delayslot_alloc(&branch_regs[i-1],i);
9379 branch_regs[i-1].isconst=0;
9380 alloc_reg(&current,i,CCREG); // Not taken path
9381 dirty_reg(&current,CCREG);
9382 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9383 }
9384 // FIXME: BLTZAL/BGEZAL
9385 if(opcode2[i-1]&0x10) { // BxxZAL
9386 alloc_reg(&branch_regs[i-1],i-1,31);
9387 dirty_reg(&branch_regs[i-1],31);
9388 branch_regs[i-1].is32|=1LL<<31;
9389 }
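          /* BLTZAL/BGEZAL write the return address to r31 even when the
             branch is not taken, so r31 is allocated and dirtied here
             regardless of which branch variant was handled above. */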
9390 break;
9391 case FJUMP:
9392 if(likely[i-1]==0) // BC1F/BC1T
9393 {
9394 alloc_cc(&current,i-1);
9395 dirty_reg(&current,CCREG);
9396 if(itype[i]==FCOMP) {
9397 // The delay slot overwrote the branch condition
9398 // Delay slot goes after the test (in order)
9399 delayslot_alloc(&current,i);
9400 current.isconst=0;
9401 }
9402 else
9403 {
9404 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9405 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9406 // Alloc the branch condition register
9407 alloc_reg(&current,i-1,FSREG);
9408 }
9409 memcpy(&branch_regs[i-1],&current,sizeof(current));
9410 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9411 }
9412 else // BC1FL/BC1TL
9413 {
9414 // Alloc the delay slot in case the branch is taken
9415 memcpy(&branch_regs[i-1],&current,sizeof(current));
9416 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9417 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9418 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9419 alloc_cc(&branch_regs[i-1],i);
9420 dirty_reg(&branch_regs[i-1],CCREG);
9421 delayslot_alloc(&branch_regs[i-1],i);
9422 branch_regs[i-1].isconst=0;
9423 alloc_reg(&current,i,CCREG); // Not taken path
9424 dirty_reg(&current,CCREG);
9425 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9426 }
9427 break;
9428 }
9429
9430 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9431 {
9432 if(rt1[i-1]==31) // JAL/JALR
9433 {
9434 // Subroutine call will return here, don't alloc any registers
9435 current.is32=1;
9436 current.dirty=0;
9437 clear_all_regs(current.regmap);
9438 alloc_reg(&current,i,CCREG);
9439 dirty_reg(&current,CCREG);
9440 }
9441 else if(i+1<slen)
9442 {
9443 // Internal branch will jump here, match registers to caller
9444 current.is32=0x3FFFFFFFFLL;
9445 current.dirty=0;
9446 clear_all_regs(current.regmap);
9447 alloc_reg(&current,i,CCREG);
9448 dirty_reg(&current,CCREG);
9449 for(j=i-1;j>=0;j--)
9450 {
9451 if(ba[j]==start+i*4+4) {
9452 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9453 current.is32=branch_regs[j].is32;
9454 current.dirty=branch_regs[j].dirty;
9455 break;
9456 }
9457 }
9458 while(j>=0) {
9459 if(ba[j]==start+i*4+4) {
9460 for(hr=0;hr<HOST_REGS;hr++) {
9461 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9462 current.regmap[hr]=-1;
9463 }
9464 current.is32&=branch_regs[j].is32;
9465 current.dirty&=branch_regs[j].dirty;
9466 }
9467 }
9468 j--;
9469 }
9470 }
9471 }
9472 }
9473
9474 // Count cycles in between branches
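      // (ccadj[i] is the cycle count accumulated since the last branch or
      //  syscall; cc resets after every jump, and Pass 8 charges the whole
      //  run at once with a single CLOCK_DIVIDER*(ccadj+1) adjustment)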
9475 ccadj[i]=cc;
9476 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9477 {
9478 cc=0;
9479 }
9480 else
9481 {
9482 cc++;
9483 }
9484
9485 flush_dirty_uppers(&current);
9486 if(!is_ds[i]) {
9487 regs[i].is32=current.is32;
9488 regs[i].dirty=current.dirty;
9489 regs[i].isconst=current.isconst;
9490 memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9491 }
9492 for(hr=0;hr<HOST_REGS;hr++) {
9493 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9494 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9495 regs[i].wasconst&=~(1<<hr);
9496 }
9497 }
9498 }
9499 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9500 }
9501
9502 /* Pass 4 - Cull unused host registers */
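  // The block is walked backwards keeping, per instruction, a bitmask nr of
  // host registers whose contents are still needed - by the instruction
  // itself, its delay slot, or an internal branch target. Host registers
  // that drop out of the mask are released below by resetting their
  // regmap/regmap_entry slots to -1.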
9503
9504 uint64_t nr=0;
9505
9506 for (i=slen-1;i>=0;i--)
9507 {
9508 int hr;
9509 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9510 {
9511 if(ba[i]<start || ba[i]>=(start+slen*4))
9512 {
9513 // Branch out of this block, don't need anything
9514 nr=0;
9515 }
9516 else
9517 {
9518 // Internal branch
9519 // Need whatever matches the target
9520 nr=0;
9521 int t=(ba[i]-start)>>2;
9522 for(hr=0;hr<HOST_REGS;hr++)
9523 {
9524 if(regs[i].regmap_entry[hr]>=0) {
9525 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9526 }
9527 }
9528 }
9529 // Conditional branch may need registers for following instructions
9530 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9531 {
9532 if(i<slen-2) {
9533 nr|=needed_reg[i+2];
9534 for(hr=0;hr<HOST_REGS;hr++)
9535 {
9536 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9537 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9538 }
9539 }
9540 }
9541 // Don't need stuff which is overwritten
9542 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9543 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9544 // Merge in delay slot
9545 for(hr=0;hr<HOST_REGS;hr++)
9546 {
9547 if(!likely[i]) {
9548 // These are overwritten unless the branch is "likely"
9549 // and the delay slot is nullified if not taken
9550 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9551 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9552 }
9553 if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9554 if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9555 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9556 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9557 if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9558 if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9559 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9560 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9561 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9562 if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9563 if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9564 }
9565 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9566 if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9567 if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9568 }
9569 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9570 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9571 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9572 }
9573 }
9574 }
9575 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
9576 {
9577 // SYSCALL instruction (software interrupt)
9578 nr=0;
9579 }
9580 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9581 {
9582 // ERET instruction (return from interrupt)
9583 nr=0;
9584 }
9585 else // Non-branch
9586 {
9587 if(i<slen-1) {
9588 for(hr=0;hr<HOST_REGS;hr++) {
9589 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9590 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9591 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9592 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9593 }
9594 }
9595 }
9596 for(hr=0;hr<HOST_REGS;hr++)
9597 {
9598 // Overwritten registers are not needed
9599 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9600 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9601 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9602 // Source registers are needed
9603 if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9604 if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9605 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9606 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9607 if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9608 if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9609 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9610 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9611 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9612 if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9613 if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9614 }
9615 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9616 if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9617 if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9618 }
9619 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9620 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9621 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9622 }
 9623 // Don't store a register immediately after writing it,
 9624 // as that may prevent dual-issue.
 9625 // But do so if this is a branch target; otherwise we
 9626 // might have to load the register before the branch.
9627 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9628 if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9629 (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9630 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9631 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9632 }
9633 if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9634 (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9635 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9636 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9637 }
9638 }
9639 }
9640 // Cycle count is needed at branches. Assume it is needed at the target too.
9641 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9642 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9643 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9644 }
9645 // Save it
9646 needed_reg[i]=nr;
9647
9648 // Deallocate unneeded registers
9649 for(hr=0;hr<HOST_REGS;hr++)
9650 {
9651 if(!((nr>>hr)&1)) {
9652 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9653 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9654 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9655 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9656 {
9657 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9658 {
9659 if(likely[i]) {
9660 regs[i].regmap[hr]=-1;
9661 regs[i].isconst&=~(1<<hr);
9662 if(i<slen-2) regmap_pre[i+2][hr]=-1;
9663 }
9664 }
9665 }
9666 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9667 {
9668 int d1=0,d2=0,map=0,temp=0;
9669 if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9670 {
9671 d1=dep1[i+1];
9672 d2=dep2[i+1];
9673 }
9674 if(using_tlb) {
9675 if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9676 itype[i+1]==STORE || itype[i+1]==STORELR ||
9677 itype[i+1]==C1LS || itype[i+1]==C2LS)
9678 map=TLREG;
9679 } else
9680 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9681 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9682 map=INVCP;
9683 }
9684 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9685 itype[i+1]==C1LS || itype[i+1]==C2LS)
9686 temp=FTEMP;
9687 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9688 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9689 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9690 (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9691 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9692 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9693 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9694 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9695 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9696 regs[i].regmap[hr]!=map )
9697 {
9698 regs[i].regmap[hr]=-1;
9699 regs[i].isconst&=~(1<<hr);
9700 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9701 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9702 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9703 (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9704 (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9705 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9706 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9707 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9708 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9709 branch_regs[i].regmap[hr]!=map)
9710 {
9711 branch_regs[i].regmap[hr]=-1;
9712 branch_regs[i].regmap_entry[hr]=-1;
9713 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9714 {
9715 if(!likely[i]&&i<slen-2) {
9716 regmap_pre[i+2][hr]=-1;
9717 }
9718 }
9719 }
9720 }
9721 }
9722 else
9723 {
9724 // Non-branch
9725 if(i>0)
9726 {
9727 int d1=0,d2=0,map=-1,temp=-1;
9728 if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9729 {
9730 d1=dep1[i];
9731 d2=dep2[i];
9732 }
9733 if(using_tlb) {
9734 if(itype[i]==LOAD || itype[i]==LOADLR ||
9735 itype[i]==STORE || itype[i]==STORELR ||
9736 itype[i]==C1LS || itype[i]==C2LS)
9737 map=TLREG;
9738 } else if(itype[i]==STORE || itype[i]==STORELR ||
9739 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9740 map=INVCP;
9741 }
9742 if(itype[i]==LOADLR || itype[i]==STORELR ||
9743 itype[i]==C1LS || itype[i]==C2LS)
9744 temp=FTEMP;
9745 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9746 (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9747 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9748 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9749 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9750 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9751 {
9752 if(i<slen-1&&!is_ds[i]) {
9753 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9754 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9755 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9756 {
9757 printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9758 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9759 }
9760 regmap_pre[i+1][hr]=-1;
9761 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9762 }
9763 regs[i].regmap[hr]=-1;
9764 regs[i].isconst&=~(1<<hr);
9765 }
9766 }
9767 }
9768 }
9769 }
9770 }
9771
9772 /* Pass 5 - Pre-allocate registers */
9773
9774 // If a register is allocated during a loop, try to allocate it for the
9775 // entire loop, if possible. This avoids loading/storing registers
9776 // inside of the loop.
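  // f_regmap holds the candidate host-register mapping for the loop: when a
  // backwards branch is reached and the mapping stays valid over the loop
  // body, the same assignment is written into regmap_entry/regmap/regmap_pre
  // from the branch target up to the branch, so the value never has to be
  // reloaded inside the loop.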
9777
9778 signed char f_regmap[HOST_REGS];
9779 clear_all_regs(f_regmap);
9780 for(i=0;i<slen-1;i++)
9781 {
9782 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9783 {
9784 if(ba[i]>=start && ba[i]<(start+i*4))
9785 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9786 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9787 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9788 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9789 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9790 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9791 {
9792 int t=(ba[i]-start)>>2;
9793 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9794 if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9795 for(hr=0;hr<HOST_REGS;hr++)
9796 {
9797 if(regs[i].regmap[hr]>64) {
9798 if(!((regs[i].dirty>>hr)&1))
9799 f_regmap[hr]=regs[i].regmap[hr];
9800 else f_regmap[hr]=-1;
9801 }
9802 else if(regs[i].regmap[hr]>=0) {
9803 if(f_regmap[hr]!=regs[i].regmap[hr]) {
9804 // dealloc old register
9805 int n;
9806 for(n=0;n<HOST_REGS;n++)
9807 {
9808 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9809 }
9810 // and alloc new one
9811 f_regmap[hr]=regs[i].regmap[hr];
9812 }
9813 }
9814 if(branch_regs[i].regmap[hr]>64) {
9815 if(!((branch_regs[i].dirty>>hr)&1))
9816 f_regmap[hr]=branch_regs[i].regmap[hr];
9817 else f_regmap[hr]=-1;
9818 }
9819 else if(branch_regs[i].regmap[hr]>=0) {
9820 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9821 // dealloc old register
9822 int n;
9823 for(n=0;n<HOST_REGS;n++)
9824 {
9825 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9826 }
9827 // and alloc new one
9828 f_regmap[hr]=branch_regs[i].regmap[hr];
9829 }
9830 }
9831 if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9832 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9833 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9834 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9835 {
 9836 // Test both in case the delay slot is assembled out of order (ooo);
 9837 // this could be done better...
9838 if(count_free_regs(branch_regs[i].regmap)<2
9839 ||count_free_regs(regs[i].regmap)<2)
9840 f_regmap[hr]=branch_regs[i].regmap[hr];
9841 }
9842 // Avoid dirty->clean transition
9843 // #ifdef DESTRUCTIVE_WRITEBACK here?
9844 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9845 if(f_regmap[hr]>0) {
9846 if(regs[t].regmap_entry[hr]<0) {
9847 int r=f_regmap[hr];
9848 for(j=t;j<=i;j++)
9849 {
9850 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9851 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9852 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9853 if(r>63) {
9854 // NB This can exclude the case where the upper-half
9855 // register is lower numbered than the lower-half
9856 // register. Not sure if it's worth fixing...
9857 if(get_reg(regs[j].regmap,r&63)<0) break;
9858 if(regs[j].is32&(1LL<<(r&63))) break;
9859 }
9860 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9861 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9862 int k;
9863 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9864 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9865 if(r>63) {
9866 if(get_reg(regs[i].regmap,r&63)<0) break;
9867 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9868 }
9869 k=i;
9870 while(k>1&&regs[k-1].regmap[hr]==-1) {
9871 if(itype[k-1]==STORE||itype[k-1]==STORELR
9872 ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
9873 ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9874 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9875 if(count_free_regs(regs[k-1].regmap)<2) {
9876 //printf("no free regs for store %x\n",start+(k-1)*4);
9877 break;
9878 }
9879 }
9880 else
9881 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9882 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9883 //printf("no-match due to different register\n");
9884 break;
9885 }
9886 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9887 //printf("no-match due to branch\n");
9888 break;
9889 }
9890 // call/ret fast path assumes no registers allocated
9891 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9892 break;
9893 }
9894 if(r>63) {
9895 // NB This can exclude the case where the upper-half
9896 // register is lower numbered than the lower-half
9897 // register. Not sure if it's worth fixing...
9898 if(get_reg(regs[k-1].regmap,r&63)<0) break;
9899 if(regs[k-1].is32&(1LL<<(r&63))) break;
9900 }
9901 k--;
9902 }
9903 if(i<slen-1) {
9904 if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9905 (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9906 //printf("bad match after branch\n");
9907 break;
9908 }
9909 }
9910 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9911 //printf("Extend r%d, %x ->\n",hr,start+k*4);
9912 while(k<i) {
9913 regs[k].regmap_entry[hr]=f_regmap[hr];
9914 regs[k].regmap[hr]=f_regmap[hr];
9915 regmap_pre[k+1][hr]=f_regmap[hr];
9916 regs[k].wasdirty&=~(1<<hr);
9917 regs[k].dirty&=~(1<<hr);
9918 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9919 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9920 regs[k].wasconst&=~(1<<hr);
9921 regs[k].isconst&=~(1<<hr);
9922 k++;
9923 }
9924 }
9925 else {
9926 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9927 break;
9928 }
9929 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9930 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9931 //printf("OK fill %x (r%d)\n",start+i*4,hr);
9932 regs[i].regmap_entry[hr]=f_regmap[hr];
9933 regs[i].regmap[hr]=f_regmap[hr];
9934 regs[i].wasdirty&=~(1<<hr);
9935 regs[i].dirty&=~(1<<hr);
9936 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9937 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9938 regs[i].wasconst&=~(1<<hr);
9939 regs[i].isconst&=~(1<<hr);
9940 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9941 branch_regs[i].wasdirty&=~(1<<hr);
9942 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9943 branch_regs[i].regmap[hr]=f_regmap[hr];
9944 branch_regs[i].dirty&=~(1<<hr);
9945 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9946 branch_regs[i].wasconst&=~(1<<hr);
9947 branch_regs[i].isconst&=~(1<<hr);
9948 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9949 regmap_pre[i+2][hr]=f_regmap[hr];
9950 regs[i+2].wasdirty&=~(1<<hr);
9951 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9952 assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9953 (regs[i+2].was32&(1LL<<f_regmap[hr])));
9954 }
9955 }
9956 }
9957 for(k=t;k<j;k++) {
9958 regs[k].regmap_entry[hr]=f_regmap[hr];
9959 regs[k].regmap[hr]=f_regmap[hr];
9960 regmap_pre[k+1][hr]=f_regmap[hr];
9961 regs[k+1].wasdirty&=~(1<<hr);
9962 regs[k].dirty&=~(1<<hr);
9963 regs[k].wasconst&=~(1<<hr);
9964 regs[k].isconst&=~(1<<hr);
9965 }
9966 if(regs[j].regmap[hr]==f_regmap[hr])
9967 regs[j].regmap_entry[hr]=f_regmap[hr];
9968 break;
9969 }
9970 if(j==i) break;
9971 if(regs[j].regmap[hr]>=0)
9972 break;
9973 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9974 //printf("no-match due to different register\n");
9975 break;
9976 }
9977 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9978 //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9979 break;
9980 }
9981 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9982 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9983 ||itype[j]==FCOMP||itype[j]==FCONV
9984 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9985 if(count_free_regs(regs[j].regmap)<2) {
9986 //printf("No free regs for store %x\n",start+j*4);
9987 break;
9988 }
9989 }
9990 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9991 if(f_regmap[hr]>=64) {
9992 if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9993 break;
9994 }
9995 else
9996 {
9997 if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9998 break;
9999 }
10000 }
10001 }
10002 }
10003 }
10004 }
10005 }
10006 }
10007 }else{
10008 int count=0;
10009 for(hr=0;hr<HOST_REGS;hr++)
10010 {
10011 if(hr!=EXCLUDE_REG) {
10012 if(regs[i].regmap[hr]>64) {
10013 if(!((regs[i].dirty>>hr)&1))
10014 f_regmap[hr]=regs[i].regmap[hr];
10015 }
10016 else if(regs[i].regmap[hr]>=0) {
10017 if(f_regmap[hr]!=regs[i].regmap[hr]) {
10018 // dealloc old register
10019 int n;
10020 for(n=0;n<HOST_REGS;n++)
10021 {
10022 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10023 }
10024 // and alloc new one
10025 f_regmap[hr]=regs[i].regmap[hr];
10026 }
10027 }
10028 else if(regs[i].regmap[hr]<0) count++;
10029 }
10030 }
10031 // Try to restore cycle count at branch targets
10032 if(bt[i]) {
10033 for(j=i;j<slen-1;j++) {
10034 if(regs[j].regmap[HOST_CCREG]!=-1) break;
10035 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
10036 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
10037 ||itype[j]==FCOMP||itype[j]==FCONV
10038 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
10039 if(count_free_regs(regs[j].regmap)<2) {
10040 //printf("no free regs for store %x\n",start+j*4);
10041 break;
10042 }
10043 }
10044 else
10045 if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
10046 }
10047 if(regs[j].regmap[HOST_CCREG]==CCREG) {
10048 int k=i;
10049 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10050 while(k<j) {
10051 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10052 regs[k].regmap[HOST_CCREG]=CCREG;
10053 regmap_pre[k+1][HOST_CCREG]=CCREG;
10054 regs[k+1].wasdirty|=1<<HOST_CCREG;
10055 regs[k].dirty|=1<<HOST_CCREG;
10056 regs[k].wasconst&=~(1<<HOST_CCREG);
10057 regs[k].isconst&=~(1<<HOST_CCREG);
10058 k++;
10059 }
10060 regs[j].regmap_entry[HOST_CCREG]=CCREG;
10061 }
10062 // Work backwards from the branch target
10063 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10064 {
10065 //printf("Extend backwards\n");
10066 int k;
10067 k=i;
10068 while(regs[k-1].regmap[HOST_CCREG]==-1) {
10069 if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
10070 ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
10071 ||itype[k-1]==FCONV||itype[k-1]==FCOMP
10072 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
10073 if(count_free_regs(regs[k-1].regmap)<2) {
10074 //printf("no free regs for store %x\n",start+(k-1)*4);
10075 break;
10076 }
10077 }
10078 else
10079 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
10080 k--;
10081 }
10082 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10083 //printf("Extend CC, %x ->\n",start+k*4);
10084 while(k<=i) {
10085 regs[k].regmap_entry[HOST_CCREG]=CCREG;
10086 regs[k].regmap[HOST_CCREG]=CCREG;
10087 regmap_pre[k+1][HOST_CCREG]=CCREG;
10088 regs[k+1].wasdirty|=1<<HOST_CCREG;
10089 regs[k].dirty|=1<<HOST_CCREG;
10090 regs[k].wasconst&=~(1<<HOST_CCREG);
10091 regs[k].isconst&=~(1<<HOST_CCREG);
10092 k++;
10093 }
10094 }
10095 else {
10096 //printf("Fail Extend CC, %x ->\n",start+k*4);
10097 }
10098 }
10099 }
10100 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10101 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10102 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10103 itype[i]!=FCONV&&itype[i]!=FCOMP&&
10104 itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
10105 {
10106 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10107 }
10108 }
10109 }
10110
10111 // This allocates registers (if possible) one instruction prior
10112 // to use, which can avoid a load-use penalty on certain CPUs.
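  // i.e. if instruction i+1 reads a guest register that has a host register
  // at i+1 but not at i, the mapping is pulled back one slot so the load
  // issued at i has a cycle to complete before the value is consumed.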
10113 for(i=0;i<slen-1;i++)
10114 {
10115 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10116 {
10117 if(!bt[i+1])
10118 {
10119 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10120 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10121 {
10122 if(rs1[i+1]) {
10123 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10124 {
10125 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10126 {
10127 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10128 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10129 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10130 regs[i].isconst&=~(1<<hr);
10131 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10132 constmap[i][hr]=constmap[i+1][hr];
10133 regs[i+1].wasdirty&=~(1<<hr);
10134 regs[i].dirty&=~(1<<hr);
10135 }
10136 }
10137 }
10138 if(rs2[i+1]) {
10139 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10140 {
10141 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10142 {
10143 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10144 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10145 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10146 regs[i].isconst&=~(1<<hr);
10147 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10148 constmap[i][hr]=constmap[i+1][hr];
10149 regs[i+1].wasdirty&=~(1<<hr);
10150 regs[i].dirty&=~(1<<hr);
10151 }
10152 }
10153 }
10154 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10155 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10156 {
10157 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10158 {
10159 regs[i].regmap[hr]=rs1[i+1];
10160 regmap_pre[i+1][hr]=rs1[i+1];
10161 regs[i+1].regmap_entry[hr]=rs1[i+1];
10162 regs[i].isconst&=~(1<<hr);
10163 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10164 constmap[i][hr]=constmap[i+1][hr];
10165 regs[i+1].wasdirty&=~(1<<hr);
10166 regs[i].dirty&=~(1<<hr);
10167 }
10168 }
10169 }
10170 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10171 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10172 {
10173 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10174 {
10175 regs[i].regmap[hr]=rs1[i+1];
10176 regmap_pre[i+1][hr]=rs1[i+1];
10177 regs[i+1].regmap_entry[hr]=rs1[i+1];
10178 regs[i].isconst&=~(1<<hr);
10179 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10180 constmap[i][hr]=constmap[i+1][hr];
10181 regs[i+1].wasdirty&=~(1<<hr);
10182 regs[i].dirty&=~(1<<hr);
10183 }
10184 }
10185 }
10186 #ifndef HOST_IMM_ADDR32
10187 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10188 hr=get_reg(regs[i+1].regmap,TLREG);
10189 if(hr>=0) {
10190 int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10191 if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10192 int nr;
10193 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10194 {
10195 regs[i].regmap[hr]=MGEN1+((i+1)&1);
10196 regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10197 regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10198 regs[i].isconst&=~(1<<hr);
10199 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10200 constmap[i][hr]=constmap[i+1][hr];
10201 regs[i+1].wasdirty&=~(1<<hr);
10202 regs[i].dirty&=~(1<<hr);
10203 }
10204 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10205 {
10206 // move it to another register
10207 regs[i+1].regmap[hr]=-1;
10208 regmap_pre[i+2][hr]=-1;
10209 regs[i+1].regmap[nr]=TLREG;
10210 regmap_pre[i+2][nr]=TLREG;
10211 regs[i].regmap[nr]=MGEN1+((i+1)&1);
10212 regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10213 regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10214 regs[i].isconst&=~(1<<nr);
10215 regs[i+1].isconst&=~(1<<nr);
10216 regs[i].dirty&=~(1<<nr);
10217 regs[i+1].wasdirty&=~(1<<nr);
10218 regs[i+1].dirty&=~(1<<nr);
10219 regs[i+2].wasdirty&=~(1<<nr);
10220 }
10221 }
10222 }
10223 }
10224 #endif
10225 if(itype[i+1]==STORE||itype[i+1]==STORELR
10226 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10227 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10228 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10229 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10230 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10231 assert(hr>=0);
10232 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10233 {
10234 regs[i].regmap[hr]=rs1[i+1];
10235 regmap_pre[i+1][hr]=rs1[i+1];
10236 regs[i+1].regmap_entry[hr]=rs1[i+1];
10237 regs[i].isconst&=~(1<<hr);
10238 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10239 constmap[i][hr]=constmap[i+1][hr];
10240 regs[i+1].wasdirty&=~(1<<hr);
10241 regs[i].dirty&=~(1<<hr);
10242 }
10243 }
10244 }
10245 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10246 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10247 int nr;
10248 hr=get_reg(regs[i+1].regmap,FTEMP);
10249 assert(hr>=0);
10250 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10251 {
10252 regs[i].regmap[hr]=rs1[i+1];
10253 regmap_pre[i+1][hr]=rs1[i+1];
10254 regs[i+1].regmap_entry[hr]=rs1[i+1];
10255 regs[i].isconst&=~(1<<hr);
10256 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10257 constmap[i][hr]=constmap[i+1][hr];
10258 regs[i+1].wasdirty&=~(1<<hr);
10259 regs[i].dirty&=~(1<<hr);
10260 }
10261 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10262 {
10263 // move it to another register
10264 regs[i+1].regmap[hr]=-1;
10265 regmap_pre[i+2][hr]=-1;
10266 regs[i+1].regmap[nr]=FTEMP;
10267 regmap_pre[i+2][nr]=FTEMP;
10268 regs[i].regmap[nr]=rs1[i+1];
10269 regmap_pre[i+1][nr]=rs1[i+1];
10270 regs[i+1].regmap_entry[nr]=rs1[i+1];
10271 regs[i].isconst&=~(1<<nr);
10272 regs[i+1].isconst&=~(1<<nr);
10273 regs[i].dirty&=~(1<<nr);
10274 regs[i+1].wasdirty&=~(1<<nr);
10275 regs[i+1].dirty&=~(1<<nr);
10276 regs[i+2].wasdirty&=~(1<<nr);
10277 }
10278 }
10279 }
 10280 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10281 if(itype[i+1]==LOAD)
10282 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10283 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10284 hr=get_reg(regs[i+1].regmap,FTEMP);
10285 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10286 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10287 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10288 }
10289 if(hr>=0&&regs[i].regmap[hr]<0) {
10290 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10291 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10292 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10293 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10294 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10295 regs[i].isconst&=~(1<<hr);
10296 regs[i+1].wasdirty&=~(1<<hr);
10297 regs[i].dirty&=~(1<<hr);
10298 }
10299 }
10300 }
10301 }
10302 }
10303 }
10304 }
10305
10306 /* Pass 6 - Optimize clean/dirty state */
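  // clean_registers() settles, over the whole block, which dirty guest
  // values can stay cached in host registers and which must be written back,
  // so stores are only emitted where a dirty value actually has to reach
  // memory.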
10307 clean_registers(0,slen-1,1);
10308
10309 /* Pass 7 - Identify 32-bit registers */
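  // (64-bit paths only; skipped under FORCE32.) requires_32bit[i] is built
  // by a backwards scan: a guest register is flagged when the code generated
  // from this point on relies on it holding a sign-extended 32-bit value,
  // merging in branch targets and delay slots. Entry points compiled under
  // these assumptions are registered via the restricted ll_add_32() path
  // further below.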
10310#ifndef FORCE32
10311 provisional_r32();
10312
10313 u_int r32=0;
10314
10315 for (i=slen-1;i>=0;i--)
10316 {
10317 int hr;
10318 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10319 {
10320 if(ba[i]<start || ba[i]>=(start+slen*4))
10321 {
10322 // Branch out of this block, don't need anything
10323 r32=0;
10324 }
10325 else
10326 {
10327 // Internal branch
10328 // Need whatever matches the target
10329 // (and doesn't get overwritten by the delay slot instruction)
10330 r32=0;
10331 int t=(ba[i]-start)>>2;
10332 if(ba[i]>start+i*4) {
10333 // Forward branch
10334 if(!(requires_32bit[t]&~regs[i].was32))
10335 r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10336 }else{
10337 // Backward branch
10338 //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10339 // r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10340 if(!(pr32[t]&~regs[i].was32))
10341 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10342 }
10343 }
10344 // Conditional branch may need registers for following instructions
10345 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10346 {
10347 if(i<slen-2) {
10348 r32|=requires_32bit[i+2];
10349 r32&=regs[i].was32;
10350 // Mark this address as a branch target since it may be called
10351 // upon return from interrupt
10352 bt[i+2]=1;
10353 }
10354 }
10355 // Merge in delay slot
10356 if(!likely[i]) {
10357 // These are overwritten unless the branch is "likely"
10358 // and the delay slot is nullified if not taken
10359 r32&=~(1LL<<rt1[i+1]);
10360 r32&=~(1LL<<rt2[i+1]);
10361 }
10362 // Assume these are needed (delay slot)
10363 if(us1[i+1]>0)
10364 {
10365 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10366 }
10367 if(us2[i+1]>0)
10368 {
10369 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10370 }
10371 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10372 {
10373 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10374 }
10375 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10376 {
10377 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10378 }
10379 }
10380 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10381 {
10382 // SYSCALL instruction (software interrupt)
10383 r32=0;
10384 }
10385 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10386 {
10387 // ERET instruction (return from interrupt)
10388 r32=0;
10389 }
10390 // Check 32 bits
10391 r32&=~(1LL<<rt1[i]);
10392 r32&=~(1LL<<rt2[i]);
10393 if(us1[i]>0)
10394 {
10395 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10396 }
10397 if(us2[i]>0)
10398 {
10399 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10400 }
10401 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10402 {
10403 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10404 }
10405 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10406 {
10407 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10408 }
10409 requires_32bit[i]=r32;
10410
 10411 // Dirty registers which are 32-bit require 32-bit input,
 10412 // as they will be written back as 32-bit values
10413 for(hr=0;hr<HOST_REGS;hr++)
10414 {
10415 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10416 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10417 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10418 requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10419 }
10420 }
10421 }
10422 //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10423 }
10424#endif
10425
10426 if(itype[slen-1]==SPAN) {
10427 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10428 }
10429
10430 /* Debug/disassembly */
10431 if((void*)assem_debug==(void*)printf)
10432 for(i=0;i<slen;i++)
10433 {
10434 printf("U:");
10435 int r;
10436 for(r=1;r<=CCREG;r++) {
10437 if((unneeded_reg[i]>>r)&1) {
10438 if(r==HIREG) printf(" HI");
10439 else if(r==LOREG) printf(" LO");
10440 else printf(" r%d",r);
10441 }
10442 }
10443#ifndef FORCE32
10444 printf(" UU:");
10445 for(r=1;r<=CCREG;r++) {
10446 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10447 if(r==HIREG) printf(" HI");
10448 else if(r==LOREG) printf(" LO");
10449 else printf(" r%d",r);
10450 }
10451 }
10452 printf(" 32:");
10453 for(r=0;r<=CCREG;r++) {
10454 //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10455 if((regs[i].was32>>r)&1) {
10456 if(r==CCREG) printf(" CC");
10457 else if(r==HIREG) printf(" HI");
10458 else if(r==LOREG) printf(" LO");
10459 else printf(" r%d",r);
10460 }
10461 }
10462#endif
10463 printf("\n");
10464 #if defined(__i386__) || defined(__x86_64__)
10465 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10466 #endif
10467 #ifdef __arm__
10468 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10469 #endif
10470 printf("needs: ");
10471 if(needed_reg[i]&1) printf("eax ");
10472 if((needed_reg[i]>>1)&1) printf("ecx ");
10473 if((needed_reg[i]>>2)&1) printf("edx ");
10474 if((needed_reg[i]>>3)&1) printf("ebx ");
10475 if((needed_reg[i]>>5)&1) printf("ebp ");
10476 if((needed_reg[i]>>6)&1) printf("esi ");
10477 if((needed_reg[i]>>7)&1) printf("edi ");
10478 printf("r:");
10479 for(r=0;r<=CCREG;r++) {
10480 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10481 if((requires_32bit[i]>>r)&1) {
10482 if(r==CCREG) printf(" CC");
10483 else if(r==HIREG) printf(" HI");
10484 else if(r==LOREG) printf(" LO");
10485 else printf(" r%d",r);
10486 }
10487 }
10488 printf("\n");
10489 /*printf("pr:");
10490 for(r=0;r<=CCREG;r++) {
10491 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10492 if((pr32[i]>>r)&1) {
10493 if(r==CCREG) printf(" CC");
10494 else if(r==HIREG) printf(" HI");
10495 else if(r==LOREG) printf(" LO");
10496 else printf(" r%d",r);
10497 }
10498 }
10499 if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10500 printf("\n");*/
10501 #if defined(__i386__) || defined(__x86_64__)
10502 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10503 printf("dirty: ");
10504 if(regs[i].wasdirty&1) printf("eax ");
10505 if((regs[i].wasdirty>>1)&1) printf("ecx ");
10506 if((regs[i].wasdirty>>2)&1) printf("edx ");
10507 if((regs[i].wasdirty>>3)&1) printf("ebx ");
10508 if((regs[i].wasdirty>>5)&1) printf("ebp ");
10509 if((regs[i].wasdirty>>6)&1) printf("esi ");
10510 if((regs[i].wasdirty>>7)&1) printf("edi ");
10511 #endif
10512 #ifdef __arm__
10513 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10514 printf("dirty: ");
10515 if(regs[i].wasdirty&1) printf("r0 ");
10516 if((regs[i].wasdirty>>1)&1) printf("r1 ");
10517 if((regs[i].wasdirty>>2)&1) printf("r2 ");
10518 if((regs[i].wasdirty>>3)&1) printf("r3 ");
10519 if((regs[i].wasdirty>>4)&1) printf("r4 ");
10520 if((regs[i].wasdirty>>5)&1) printf("r5 ");
10521 if((regs[i].wasdirty>>6)&1) printf("r6 ");
10522 if((regs[i].wasdirty>>7)&1) printf("r7 ");
10523 if((regs[i].wasdirty>>8)&1) printf("r8 ");
10524 if((regs[i].wasdirty>>9)&1) printf("r9 ");
10525 if((regs[i].wasdirty>>10)&1) printf("r10 ");
10526 if((regs[i].wasdirty>>12)&1) printf("r12 ");
10527 #endif
10528 printf("\n");
10529 disassemble_inst(i);
10530 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10531 #if defined(__i386__) || defined(__x86_64__)
10532 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10533 if(regs[i].dirty&1) printf("eax ");
10534 if((regs[i].dirty>>1)&1) printf("ecx ");
10535 if((regs[i].dirty>>2)&1) printf("edx ");
10536 if((regs[i].dirty>>3)&1) printf("ebx ");
10537 if((regs[i].dirty>>5)&1) printf("ebp ");
10538 if((regs[i].dirty>>6)&1) printf("esi ");
10539 if((regs[i].dirty>>7)&1) printf("edi ");
10540 #endif
10541 #ifdef __arm__
10542 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10543 if(regs[i].dirty&1) printf("r0 ");
10544 if((regs[i].dirty>>1)&1) printf("r1 ");
10545 if((regs[i].dirty>>2)&1) printf("r2 ");
10546 if((regs[i].dirty>>3)&1) printf("r3 ");
10547 if((regs[i].dirty>>4)&1) printf("r4 ");
10548 if((regs[i].dirty>>5)&1) printf("r5 ");
10549 if((regs[i].dirty>>6)&1) printf("r6 ");
10550 if((regs[i].dirty>>7)&1) printf("r7 ");
10551 if((regs[i].dirty>>8)&1) printf("r8 ");
10552 if((regs[i].dirty>>9)&1) printf("r9 ");
10553 if((regs[i].dirty>>10)&1) printf("r10 ");
10554 if((regs[i].dirty>>12)&1) printf("r12 ");
10555 #endif
10556 printf("\n");
10557 if(regs[i].isconst) {
10558 printf("constants: ");
10559 #if defined(__i386__) || defined(__x86_64__)
10560 if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10561 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10562 if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10563 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10564 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10565 if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10566 if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10567 #endif
10568 #ifdef __arm__
10569 if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10570 if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10571 if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10572 if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10573 if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10574 if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10575 if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10576 if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10577 if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10578 if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10579 if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10580 if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10581 #endif
10582 printf("\n");
10583 }
10584#ifndef FORCE32
10585 printf(" 32:");
10586 for(r=0;r<=CCREG;r++) {
10587 if((regs[i].is32>>r)&1) {
10588 if(r==CCREG) printf(" CC");
10589 else if(r==HIREG) printf(" HI");
10590 else if(r==LOREG) printf(" LO");
10591 else printf(" r%d",r);
10592 }
10593 }
10594 printf("\n");
10595#endif
10596 /*printf(" p32:");
10597 for(r=0;r<=CCREG;r++) {
10598 if((p32[i]>>r)&1) {
10599 if(r==CCREG) printf(" CC");
10600 else if(r==HIREG) printf(" HI");
10601 else if(r==LOREG) printf(" LO");
10602 else printf(" r%d",r);
10603 }
10604 }
10605 if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10606 else printf("\n");*/
10607 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10608 #if defined(__i386__) || defined(__x86_64__)
10609 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10610 if(branch_regs[i].dirty&1) printf("eax ");
10611 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10612 if((branch_regs[i].dirty>>2)&1) printf("edx ");
10613 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10614 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10615 if((branch_regs[i].dirty>>6)&1) printf("esi ");
10616 if((branch_regs[i].dirty>>7)&1) printf("edi ");
10617 #endif
10618 #ifdef __arm__
10619 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10620 if(branch_regs[i].dirty&1) printf("r0 ");
10621 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10622 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10623 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10624 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10625 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10626 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10627 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10628 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10629 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10630 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10631 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10632 #endif
10633#ifndef FORCE32
10634 printf(" 32:");
10635 for(r=0;r<=CCREG;r++) {
10636 if((branch_regs[i].is32>>r)&1) {
10637 if(r==CCREG) printf(" CC");
10638 else if(r==HIREG) printf(" HI");
10639 else if(r==LOREG) printf(" LO");
10640 else printf(" r%d",r);
10641 }
10642 }
10643 printf("\n");
10644#endif
10645 }
10646 }
10647
10648 /* Pass 8 - Assembly */
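  // Per-instruction code generation: write back or invalidate host registers
  // that no longer match the incoming map, record the entry point in
  // instr_addr[], load the registers needed by the instruction and its delay
  // slot, then dispatch to the per-type *_assemble() routines.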
10649 linkcount=0;stubcount=0;
10650 ds=0;is_delayslot=0;
10651 cop1_usable=0;
10652 uint64_t is32_pre=0;
10653 u_int dirty_pre=0;
10654 u_int beginning=(u_int)out;
10655 if((u_int)addr&1) {
10656 ds=1;
10657 pagespan_ds();
10658 }
10659 u_int instr_addr0_override=0;
10660
10661#ifdef PCSX
10662 if (start == 0x80030000) {
10663 // nasty hack for fastbios thing
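      // The code emitted below stores this block's start address in pcaddr
      // and exits to new_dyna_leave if pcaddr held something else, giving
      // the emulator a chance to intercept execution at this BIOS address
      // when the fast boot path is used.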
10664 instr_addr0_override=(u_int)out;
10665 emit_movimm(start,0);
10666 emit_readword((int)&pcaddr,1);
10667 emit_writeword(0,(int)&pcaddr);
10668 emit_cmp(0,1);
10669 emit_jne((int)new_dyna_leave);
10670 }
10671#endif
10672 for(i=0;i<slen;i++)
10673 {
10674 //if(ds) printf("ds: ");
10675 if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10676 if(ds) {
10677 ds=0; // Skip delay slot
10678 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10679 instr_addr[i]=0;
10680 } else {
10681 #ifndef DESTRUCTIVE_WRITEBACK
10682 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10683 {
10684 wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10685 unneeded_reg[i],unneeded_reg_upper[i]);
10686 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10687 unneeded_reg[i],unneeded_reg_upper[i]);
10688 }
10689 is32_pre=regs[i].is32;
10690 dirty_pre=regs[i].dirty;
10691 #endif
10692 // write back
10693 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10694 {
10695 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10696 unneeded_reg[i],unneeded_reg_upper[i]);
10697 loop_preload(regmap_pre[i],regs[i].regmap_entry);
10698 }
10699 // branch target entry point
10700 instr_addr[i]=(u_int)out;
10701 assem_debug("<->\n");
10702 // load regs
10703 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10704 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10705 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10706 address_generation(i,&regs[i],regs[i].regmap_entry);
10707 load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10708 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10709 {
10710 // Load the delay slot registers if necessary
10711 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10712 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10713 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10714 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10715 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10716 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10717 }
10718 else if(i+1<slen)
10719 {
10720 // Preload registers for following instruction
10721 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10722 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10723 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10724 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10725 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10726 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10727 }
10728 // TODO: if(is_ooo(i)) address_generation(i+1);
10729 if(itype[i]==CJUMP||itype[i]==FJUMP)
10730 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10731 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10732 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10733 if(bt[i]) cop1_usable=0;
10734 // assemble
10735 switch(itype[i]) {
10736 case ALU:
10737 alu_assemble(i,&regs[i]);break;
10738 case IMM16:
10739 imm16_assemble(i,&regs[i]);break;
10740 case SHIFT:
10741 shift_assemble(i,&regs[i]);break;
10742 case SHIFTIMM:
10743 shiftimm_assemble(i,&regs[i]);break;
10744 case LOAD:
10745 load_assemble(i,&regs[i]);break;
10746 case LOADLR:
10747 loadlr_assemble(i,&regs[i]);break;
10748 case STORE:
10749 store_assemble(i,&regs[i]);break;
10750 case STORELR:
10751 storelr_assemble(i,&regs[i]);break;
10752 case COP0:
10753 cop0_assemble(i,&regs[i]);break;
10754 case COP1:
10755 cop1_assemble(i,&regs[i]);break;
10756 case C1LS:
10757 c1ls_assemble(i,&regs[i]);break;
10758 case COP2:
10759 cop2_assemble(i,&regs[i]);break;
10760 case C2LS:
10761 c2ls_assemble(i,&regs[i]);break;
10762 case C2OP:
10763 c2op_assemble(i,&regs[i]);break;
10764 case FCONV:
10765 fconv_assemble(i,&regs[i]);break;
10766 case FLOAT:
10767 float_assemble(i,&regs[i]);break;
10768 case FCOMP:
10769 fcomp_assemble(i,&regs[i]);break;
10770 case MULTDIV:
10771 multdiv_assemble(i,&regs[i]);break;
10772 case MOV:
10773 mov_assemble(i,&regs[i]);break;
10774 case SYSCALL:
10775 syscall_assemble(i,&regs[i]);break;
10776 case HLECALL:
10777 hlecall_assemble(i,&regs[i]);break;
10778 case INTCALL:
10779 intcall_assemble(i,&regs[i]);break;
10780 case UJUMP:
10781 ujump_assemble(i,&regs[i]);ds=1;break;
10782 case RJUMP:
10783 rjump_assemble(i,&regs[i]);ds=1;break;
10784 case CJUMP:
10785 cjump_assemble(i,&regs[i]);ds=1;break;
10786 case SJUMP:
10787 sjump_assemble(i,&regs[i]);ds=1;break;
10788 case FJUMP:
10789 fjump_assemble(i,&regs[i]);ds=1;break;
10790 case SPAN:
10791 pagespan_assemble(i,&regs[i]);break;
10792 }
10793 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10794 literal_pool(1024);
10795 else
10796 literal_pool_jumpover(256);
10797 }
10798 }
10799 //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10800 // If the block did not end with an unconditional branch,
10801 // add a jump to the next instruction.
10802 if(i>1) {
10803 if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10804 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10805 assert(i==slen);
10806 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10807 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10808 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10809 emit_loadreg(CCREG,HOST_CCREG);
10810 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10811 }
10812 else if(!likely[i-2])
10813 {
10814 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10815 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10816 }
10817 else
10818 {
10819 store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10820 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10821 }
10822 add_to_linker((int)out,start+i*4,0);
10823 emit_jmp(0);
10824 }
10825 }
10826 else
10827 {
10828 assert(i>0);
10829 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10830 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10831 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10832 emit_loadreg(CCREG,HOST_CCREG);
10833 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10834 add_to_linker((int)out,start+i*4,0);
10835 emit_jmp(0);
10836 }
10837
10838 // TODO: delay slot stubs?
10839 // Stubs
10840 for(i=0;i<stubcount;i++)
10841 {
10842 switch(stubs[i][0])
10843 {
10844 case LOADB_STUB:
10845 case LOADH_STUB:
10846 case LOADW_STUB:
10847 case LOADD_STUB:
10848 case LOADBU_STUB:
10849 case LOADHU_STUB:
10850 do_readstub(i);break;
10851 case STOREB_STUB:
10852 case STOREH_STUB:
10853 case STOREW_STUB:
10854 case STORED_STUB:
10855 do_writestub(i);break;
10856 case CC_STUB:
10857 do_ccstub(i);break;
10858 case INVCODE_STUB:
10859 do_invstub(i);break;
10860 case FP_STUB:
10861 do_cop1stub(i);break;
10862 case STORELR_STUB:
10863 do_unalignedwritestub(i);break;
10864 }
10865 }
10866
10867 if (instr_addr0_override)
10868 instr_addr[0] = instr_addr0_override;
10869
10870 /* Pass 9 - Linker */
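  // Patch the branches recorded in link_addr[]: internal branches are bound
  // directly to the assembled target, while external branches get an extjump
  // stub; if check_addr() already finds compiled code for the target, the
  // branch is pointed straight at it and the stub is kept for relinking via
  // add_link().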
10871 for(i=0;i<linkcount;i++)
10872 {
10873 assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10874 literal_pool(64);
10875 if(!link_addr[i][2])
10876 {
10877 void *stub=out;
10878 void *addr=check_addr(link_addr[i][1]);
10879 emit_extjump(link_addr[i][0],link_addr[i][1]);
10880 if(addr) {
10881 set_jump_target(link_addr[i][0],(int)addr);
10882 add_link(link_addr[i][1],stub);
10883 }
10884 else set_jump_target(link_addr[i][0],(int)stub);
10885 }
10886 else
10887 {
10888 // Internal branch
10889 int target=(link_addr[i][1]-start)>>2;
10890 assert(target>=0&&target<slen);
10891 assert(instr_addr[target]);
10892 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10893 //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10894 //#else
10895 set_jump_target(link_addr[i][0],instr_addr[target]);
10896 //#endif
10897 }
10898 }
10899 // External Branch Targets (jump_in)
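  // Register an entry point for the block start and every branch target:
  // the dirty-check stub goes on jump_dirty, the verified entry point on
  // jump_in, and any matching hash table slots are updated to the new code.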
10900 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10901 for(i=0;i<slen;i++)
10902 {
10903 if(bt[i]||i==0)
10904 {
10905 if(instr_addr[i]) // TODO - delay slots (=null)
10906 {
10907 u_int vaddr=start+i*4;
10908 u_int page=get_page(vaddr);
10909 u_int vpage=get_vpage(vaddr);
10910 literal_pool(256);
10911 //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10912#ifndef FORCE32
10913 if(!requires_32bit[i])
10914#else
10915 if(1)
10916#endif
10917 {
10918 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10919 assem_debug("jump_in: %x\n",start+i*4);
10920 ll_add(jump_dirty+vpage,vaddr,(void *)out);
10921 int entry_point=do_dirty_stub(i);
10922 ll_add(jump_in+page,vaddr,(void *)entry_point);
10923 // If there was an existing entry in the hash table,
10924 // replace it with the new address.
10925 // Don't add new entries. We'll insert the
10926 // ones that actually get used in check_addr().
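        /* Each hash_table bucket caches two (vaddr, code pointer) pairs,
         * roughly:
         *   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
         *   if(ht_bin[0]==vaddr) entry=ht_bin[1];
         *   else if(ht_bin[2]==vaddr) entry=ht_bin[3];
         * ("entry" is illustrative.) Here the pairs are only refreshed in
         * place when the address already has an entry. */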
10927 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10928 if(ht_bin[0]==vaddr) {
10929 ht_bin[1]=entry_point;
10930 }
10931 if(ht_bin[2]==vaddr) {
10932 ht_bin[3]=entry_point;
10933 }
10934 }
10935 else
10936 {
10937 u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10938 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10939 assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10940 //int entry_point=(int)out;
10941 ////assem_debug("entry_point: %x\n",entry_point);
10942 //load_regs_entry(i);
10943 //if(entry_point==(int)out)
10944 // entry_point=instr_addr[i];
10945 //else
10946 // emit_jmp(instr_addr[i]);
10947 //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10948 ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10949 int entry_point=do_dirty_stub(i);
10950 ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10951 }
10952 }
10953 }
10954 }
10955 // Write out the literal pool if necessary
10956 literal_pool(0);
10957 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10958 // Align code
10959 if(((u_int)out)&7) emit_addnop(13);
10960 #endif
10961 assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10962 //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10963 memcpy(copy,source,slen*4);
10964 copy+=slen*4;
10965
10966 #ifdef __arm__
10967 __clear_cache((void *)beginning,out);
10968 #endif
10969
10970 // If we're within 256K of the end of the buffer,
10971 // start over from the beginning. (Is 256K enough?)
10972 if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10973
10974 // Trap writes to any of the pages we compiled
10975 for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10976 invalid_code[i]=0;
10977#ifndef DISABLE_TLB
10978 memory_map[i]|=0x40000000;
10979 if((signed int)start>=(signed int)0xC0000000) {
10980 assert(using_tlb);
10981 j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
10982 invalid_code[j]=0;
10983 memory_map[j]|=0x40000000;
10984 //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
10985 }
10986#endif
10987 }
10988
10989 /* Pass 10 - Free memory by expiring oldest blocks */
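 // The translation cache is reclaimed incrementally: expirep sweeps ahead of
 // the current output pointer in 65536 steps, and each step clears one
 // 2048-entry slice of the lookup structures (jump_in/jump_dirty, jump_out
 // pointers, the hash table, jump_out itself) for the eighth of the cache
 // that is about to be reused.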
10990
10991 int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10992 while(expirep!=end)
10993 {
10994 int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10995 int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10996 inv_debug("EXP: Phase %d\n",expirep);
10997 switch((expirep>>11)&3)
10998 {
10999 case 0:
11000 // Clear jump_in and jump_dirty
11001 ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11002 ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11003 ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11004 ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11005 break;
11006 case 1:
11007 // Clear pointers
11008 ll_kill_pointers(jump_out[expirep&2047],base,shift);
11009 ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11010 break;
11011 case 2:
11012 // Clear hash table
11013 for(i=0;i<32;i++) {
11014 int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11015 if((ht_bin[3]>>shift)==(base>>shift) ||
11016 ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11017 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11018 ht_bin[2]=ht_bin[3]=-1;
11019 }
11020 if((ht_bin[1]>>shift)==(base>>shift) ||
11021 ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11022 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11023 ht_bin[0]=ht_bin[2];
11024 ht_bin[1]=ht_bin[3];
11025 ht_bin[2]=ht_bin[3]=-1;
11026 }
11027 }
11028 break;
11029 case 3:
11030 // Clear jump_out
11031 #ifdef __arm__
11032 if((expirep&2047)==0)
11033 do_clear_cache();
11034 #endif
11035 ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11036 ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11037 break;
11038 }
11039 expirep=(expirep+1)&65535;
11040 }
11041 return 0;
11042}
11043
11044// vim:shiftwidth=2:expandtab