drc: implemented STL/STR stubs (at least I think I did)
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2010 Ari64 *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
24
25#include "emu_if.h" //emulator interface
26
27#include <sys/mman.h>
28
29#ifdef __i386__
30#include "assem_x86.h"
31#endif
32#ifdef __x86_64__
33#include "assem_x64.h"
34#endif
35#ifdef __arm__
36#include "assem_arm.h"
37#endif
38
39#define MAXBLOCK 4096
40#define MAX_OUTPUT_BLOCK_SIZE 262144
41#define CLOCK_DIVIDER 2
42
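// Per-instruction register allocation state.  regmap[] maps each host
// register to the guest register it currently holds (-1 = free), with
// regmap_entry[] giving the mapping expected on entry; the was*/is* and
// wasdirty/dirty pairs describe state before and after the instruction.
// dirty, isconst and constmap[] are per host register (pending writeback,
// known constant value); is32, u and uu are per guest register bitmaps:
// values known to be 32-bit (sign-extended), and registers whose lower /
// upper halves are not needed afterwards.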
43struct regstat
44{
45 signed char regmap_entry[HOST_REGS];
46 signed char regmap[HOST_REGS];
47 uint64_t was32;
48 uint64_t is32;
49 uint64_t wasdirty;
50 uint64_t dirty;
51 uint64_t u;
52 uint64_t uu;
53 u_int wasconst;
54 u_int isconst;
55 uint64_t constmap[HOST_REGS];
56};
57
58struct ll_entry
59{
60 u_int vaddr;
61 u_int reg32;
62 void *addr;
63 struct ll_entry *next;
64};
65
66 u_int start;
67 u_int *source;
68 u_int pagelimit;
69 char insn[MAXBLOCK][10];
70 u_char itype[MAXBLOCK];
71 u_char opcode[MAXBLOCK];
72 u_char opcode2[MAXBLOCK];
73 u_char bt[MAXBLOCK];
74 u_char rs1[MAXBLOCK];
75 u_char rs2[MAXBLOCK];
76 u_char rt1[MAXBLOCK];
77 u_char rt2[MAXBLOCK];
78 u_char us1[MAXBLOCK];
79 u_char us2[MAXBLOCK];
80 u_char dep1[MAXBLOCK];
81 u_char dep2[MAXBLOCK];
82 u_char lt1[MAXBLOCK];
83 int imm[MAXBLOCK];
84 u_int ba[MAXBLOCK];
85 char likely[MAXBLOCK];
86 char is_ds[MAXBLOCK];
87 uint64_t unneeded_reg[MAXBLOCK];
88 uint64_t unneeded_reg_upper[MAXBLOCK];
89 uint64_t branch_unneeded_reg[MAXBLOCK];
90 uint64_t branch_unneeded_reg_upper[MAXBLOCK];
91 uint64_t p32[MAXBLOCK];
92 uint64_t pr32[MAXBLOCK];
93 signed char regmap_pre[MAXBLOCK][HOST_REGS];
94 signed char regmap[MAXBLOCK][HOST_REGS];
95 signed char regmap_entry[MAXBLOCK][HOST_REGS];
96 uint64_t constmap[MAXBLOCK][HOST_REGS];
97 uint64_t known_value[HOST_REGS];
98 u_int known_reg;
99 struct regstat regs[MAXBLOCK];
100 struct regstat branch_regs[MAXBLOCK];
101 u_int needed_reg[MAXBLOCK];
102 uint64_t requires_32bit[MAXBLOCK];
103 u_int wont_dirty[MAXBLOCK];
104 u_int will_dirty[MAXBLOCK];
105 int ccadj[MAXBLOCK];
106 int slen;
107 u_int instr_addr[MAXBLOCK];
108 u_int link_addr[MAXBLOCK][3];
109 int linkcount;
110 u_int stubs[MAXBLOCK*3][8];
111 int stubcount;
112 u_int literals[1024][2];
113 int literalcount;
114 int is_delayslot;
115 int cop1_usable;
116 u_char *out;
117 struct ll_entry *jump_in[4096];
118 struct ll_entry *jump_out[4096];
119 struct ll_entry *jump_dirty[4096];
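 // Each hash_table bin, indexed by ((vaddr>>16)^vaddr)&0xFFFF, caches two
 // {vaddr, compiled address} pairs; lookups check [0]/[1] first, then
 // [2]/[3], and misses fall back to the jump_in lists above
 // (see get_addr_ht/get_addr below).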
120 u_int hash_table[65536][4] __attribute__((aligned(16)));
121 char shadow[1048576] __attribute__((aligned(16)));
122 void *copy;
123 int expirep;
124 u_int using_tlb;
125 u_int stop_after_jal;
126 extern u_char restore_candidate[512];
127 extern int cycle_count;
128
129 /* registers that may be allocated */
130 /* 1-31 gpr */
131#define HIREG 32 // hi
132#define LOREG 33 // lo
133#define FSREG 34 // FPU status (FCSR)
134#define CSREG 35 // Coprocessor status
135#define CCREG 36 // Cycle count
136#define INVCP 37 // Pointer to invalid_code
137#define TEMPREG 38
138#define FTEMP 38 // FPU/LDL/LDR temporary register
139#define PTEMP 39 // Prefetch temporary register
140#define TLREG 40 // TLB mapping offset
141#define RHASH 41 // Return address hash
142#define RHTBL 42 // Return address hash table address
143#define RTEMP 43 // JR/JALR address register
144#define MAXREG 43
145#define AGEN1 44 // Address generation temporary register
146#define AGEN2 45 // Address generation temporary register
147#define MGEN1 46 // Maptable address generation temporary register
148#define MGEN2 47 // Maptable address generation temporary register
149#define BTREG 48 // Branch target temporary register
150
151 /* instruction types */
152#define NOP 0 // No operation
153#define LOAD 1 // Load
154#define STORE 2 // Store
155#define LOADLR 3 // Unaligned load
156#define STORELR 4 // Unaligned store
157#define MOV 5 // Move
158#define ALU 6 // Arithmetic/logic
159#define MULTDIV 7 // Multiply/divide
160#define SHIFT 8 // Shift by register
 161#define SHIFTIMM 9 // Shift by immediate
162#define IMM16 10 // 16-bit immediate
163#define RJUMP 11 // Unconditional jump to register
164#define UJUMP 12 // Unconditional jump
165#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
166#define SJUMP 14 // Conditional branch (regimm format)
167#define COP0 15 // Coprocessor 0
168#define COP1 16 // Coprocessor 1
169#define C1LS 17 // Coprocessor 1 load/store
170#define FJUMP 18 // Conditional branch (floating point)
171#define FLOAT 19 // Floating point unit
172#define FCONV 20 // Convert integer to float
173#define FCOMP 21 // Floating point compare (sets FSREG)
 174#define SYSCALL 22 // SYSCALL
175#define OTHER 23 // Other
176#define SPAN 24 // Branch/delay slot spans 2 pages
177#define NI 25 // Not implemented
 178#define HLECALL 26 // PCSX fake opcodes for HLE
179#define COP2 27 // Coprocessor 2 move
180#define C2LS 28 // Coprocessor 2 load/store
181#define C2OP 29 // Coprocessor 2 operation
182
183 /* stubs */
184#define CC_STUB 1
185#define FP_STUB 2
186#define LOADB_STUB 3
187#define LOADH_STUB 4
188#define LOADW_STUB 5
189#define LOADD_STUB 6
190#define LOADBU_STUB 7
191#define LOADHU_STUB 8
192#define STOREB_STUB 9
193#define STOREH_STUB 10
194#define STOREW_STUB 11
195#define STORED_STUB 12
196#define STORELR_STUB 13
197#define INVCODE_STUB 14
198
199 /* branch codes */
200#define TAKEN 1
201#define NOTTAKEN 2
202#define NULLDS 3
203
204// asm linkage
205int new_recompile_block(int addr);
206void *get_addr_ht(u_int vaddr);
207void invalidate_block(u_int block);
208void invalidate_addr(u_int addr);
209void remove_hash(int vaddr);
210void jump_vaddr();
211void dyna_linker();
212void dyna_linker_ds();
213void verify_code();
214void verify_code_vm();
215void verify_code_ds();
216void cc_interrupt();
217void fp_exception();
218void fp_exception_ds();
219void jump_syscall();
220void jump_syscall_hle();
221void jump_eret();
222void jump_hlecall();
223void new_dyna_leave();
224
225// TLB
226void TLBWI_new();
227void TLBWR_new();
228void read_nomem_new();
229void read_nomemb_new();
230void read_nomemh_new();
231void read_nomemd_new();
232void write_nomem_new();
233void write_nomemb_new();
234void write_nomemh_new();
235void write_nomemd_new();
236void write_rdram_new();
237void write_rdramb_new();
238void write_rdramh_new();
239void write_rdramd_new();
240extern u_int memory_map[1048576];
241
242// Needed by assembler
243void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
244void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
245void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
246void load_all_regs(signed char i_regmap[]);
247void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
248void load_regs_entry(int t);
249void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
250
251int tracedebug=0;
252
253//#define DEBUG_CYCLE_COUNT 1
254
255void nullf() {}
256//#define assem_debug printf
257//#define inv_debug printf
258#define assem_debug nullf
259#define inv_debug nullf
260
261static void tlb_hacks()
262{
263#ifndef DISABLE_TLB
264 // Goldeneye hack
265 if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
266 {
267 u_int addr;
268 int n;
269 switch (ROM_HEADER->Country_code&0xFF)
270 {
271 case 0x45: // U
272 addr=0x34b30;
273 break;
274 case 0x4A: // J
275 addr=0x34b70;
276 break;
277 case 0x50: // E
278 addr=0x329f0;
279 break;
280 default:
281 // Unknown country code
282 addr=0;
283 break;
284 }
285 u_int rom_addr=(u_int)rom;
286 #ifdef ROM_COPY
287 // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
288 // in the lower 4G of memory to use this hack. Copy it if necessary.
289 if((void *)rom>(void *)0xffffffff) {
290 munmap(ROM_COPY, 67108864);
291 if(mmap(ROM_COPY, 12582912,
292 PROT_READ | PROT_WRITE,
293 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 294 -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
295 memcpy(ROM_COPY,rom,12582912);
296 rom_addr=(u_int)ROM_COPY;
297 }
298 #endif
299 if(addr) {
300 for(n=0x7F000;n<0x80000;n++) {
301 memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
302 }
303 }
304 }
305#endif
306}
307
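// get_page/get_vpage map a virtual address to an index into the block lists.
// KSEG0 addresses (0x80000000-based) map directly to pages 0-2047 (the first
// 8MB of RAM); anything beyond that is folded into pages 2048-4095 so the
// 4096-entry jump_in/jump_out/jump_dirty tables always suffice.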
308static u_int get_page(u_int vaddr)
309{
310 u_int page=(vaddr^0x80000000)>>12;
311#ifndef DISABLE_TLB
312 if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
313#endif
314 if(page>2048) page=2048+(page&2047);
315 return page;
316}
317
318static u_int get_vpage(u_int vaddr)
319{
320 u_int vpage=(vaddr^0x80000000)>>12;
321#ifndef DISABLE_TLB
322 if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
323#endif
324 if(vpage>2048) vpage=2048+(vpage&2047);
325 return vpage;
326}
327
328// Get address from virtual address
329// This is called from the recompiled JR/JALR instructions
330void *get_addr(u_int vaddr)
331{
332 u_int page=get_page(vaddr);
333 u_int vpage=get_vpage(vaddr);
334 struct ll_entry *head;
335 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
336 head=jump_in[page];
337 while(head!=NULL) {
338 if(head->vaddr==vaddr&&head->reg32==0) {
339 //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
340 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
341 ht_bin[3]=ht_bin[1];
342 ht_bin[2]=ht_bin[0];
343 ht_bin[1]=(int)head->addr;
344 ht_bin[0]=vaddr;
345 return head->addr;
346 }
347 head=head->next;
348 }
349 head=jump_dirty[vpage];
350 while(head!=NULL) {
351 if(head->vaddr==vaddr&&head->reg32==0) {
352 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
353 // Don't restore blocks which are about to expire from the cache
354 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
355 if(verify_dirty(head->addr)) {
356 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
357 invalid_code[vaddr>>12]=0;
358 memory_map[vaddr>>12]|=0x40000000;
359 if(vpage<2048) {
360#ifndef DISABLE_TLB
361 if(tlb_LUT_r[vaddr>>12]) {
362 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
363 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
364 }
365#endif
366 restore_candidate[vpage>>3]|=1<<(vpage&7);
367 }
368 else restore_candidate[page>>3]|=1<<(page&7);
369 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
370 if(ht_bin[0]==vaddr) {
371 ht_bin[1]=(int)head->addr; // Replace existing entry
372 }
373 else
374 {
375 ht_bin[3]=ht_bin[1];
376 ht_bin[2]=ht_bin[0];
377 ht_bin[1]=(int)head->addr;
378 ht_bin[0]=vaddr;
379 }
380 return head->addr;
381 }
382 }
383 head=head->next;
384 }
385 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
386 int r=new_recompile_block(vaddr);
387 if(r==0) return get_addr(vaddr);
 388 // Execute in unmapped page, generate pagefault exception
389 Status|=2;
390 Cause=(vaddr<<31)|0x8;
391 EPC=(vaddr&1)?vaddr-5:vaddr;
392 BadVAddr=(vaddr&~1);
393 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
394 EntryHi=BadVAddr&0xFFFFE000;
395 return get_addr_ht(0x80000000);
396}
397// Look up address in hash table first
398void *get_addr_ht(u_int vaddr)
399{
400 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
401 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
402 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
403 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
404 return get_addr(vaddr);
405}
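// Typical dispatch path: recompiled jumps call get_addr_ht(); on a hash miss
// get_addr() walks jump_in[page], refills the hash bin on a hit, tries to
// revive a matching entry from jump_dirty[], and only falls back to
// new_recompile_block() when nothing usable exists.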
406
407void *get_addr_32(u_int vaddr,u_int flags)
408{
409#ifdef FORCE32
410 return get_addr(vaddr);
411#endif
412 //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
413 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
414 if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
415 if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
416 u_int page=get_page(vaddr);
417 u_int vpage=get_vpage(vaddr);
418 struct ll_entry *head;
419 head=jump_in[page];
420 while(head!=NULL) {
421 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
422 //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
423 if(head->reg32==0) {
424 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
425 if(ht_bin[0]==-1) {
426 ht_bin[1]=(int)head->addr;
427 ht_bin[0]=vaddr;
428 }else if(ht_bin[2]==-1) {
429 ht_bin[3]=(int)head->addr;
430 ht_bin[2]=vaddr;
431 }
432 //ht_bin[3]=ht_bin[1];
433 //ht_bin[2]=ht_bin[0];
434 //ht_bin[1]=(int)head->addr;
435 //ht_bin[0]=vaddr;
436 }
437 return head->addr;
438 }
439 head=head->next;
440 }
441 head=jump_dirty[vpage];
442 while(head!=NULL) {
443 if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
444 //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
445 // Don't restore blocks which are about to expire from the cache
446 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
447 if(verify_dirty(head->addr)) {
448 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
449 invalid_code[vaddr>>12]=0;
450 memory_map[vaddr>>12]|=0x40000000;
451 if(vpage<2048) {
452#ifndef DISABLE_TLB
453 if(tlb_LUT_r[vaddr>>12]) {
454 invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
455 memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
456 }
457#endif
458 restore_candidate[vpage>>3]|=1<<(vpage&7);
459 }
460 else restore_candidate[page>>3]|=1<<(page&7);
461 if(head->reg32==0) {
462 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
463 if(ht_bin[0]==-1) {
464 ht_bin[1]=(int)head->addr;
465 ht_bin[0]=vaddr;
466 }else if(ht_bin[2]==-1) {
467 ht_bin[3]=(int)head->addr;
468 ht_bin[2]=vaddr;
469 }
470 //ht_bin[3]=ht_bin[1];
471 //ht_bin[2]=ht_bin[0];
472 //ht_bin[1]=(int)head->addr;
473 //ht_bin[0]=vaddr;
474 }
475 return head->addr;
476 }
477 }
478 head=head->next;
479 }
480 //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
481 int r=new_recompile_block(vaddr);
482 if(r==0) return get_addr(vaddr);
 484 // Execute in unmapped page, generate pagefault exception
484 Status|=2;
485 Cause=(vaddr<<31)|0x8;
486 EPC=(vaddr&1)?vaddr-5:vaddr;
487 BadVAddr=(vaddr&~1);
488 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
489 EntryHi=BadVAddr&0xFFFFE000;
490 return get_addr_ht(0x80000000);
491}
492
493void clear_all_regs(signed char regmap[])
494{
495 int hr;
496 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
497}
498
499signed char get_reg(signed char regmap[],int r)
500{
501 int hr;
502 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
503 return -1;
504}
505
506// Find a register that is available for two consecutive cycles
507signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
508{
509 int hr;
510 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
511 return -1;
512}
513
514int count_free_regs(signed char regmap[])
515{
516 int count=0;
517 int hr;
518 for(hr=0;hr<HOST_REGS;hr++)
519 {
520 if(hr!=EXCLUDE_REG) {
521 if(regmap[hr]<0) count++;
522 }
523 }
524 return count;
525}
526
527void dirty_reg(struct regstat *cur,signed char reg)
528{
529 int hr;
530 if(!reg) return;
531 for (hr=0;hr<HOST_REGS;hr++) {
532 if((cur->regmap[hr]&63)==reg) {
533 cur->dirty|=1<<hr;
534 }
535 }
536}
537
538// If we dirty the lower half of a 64 bit register which is now being
539// sign-extended, we need to dump the upper half.
540// Note: Do this only after completion of the instruction, because
541// some instructions may need to read the full 64-bit value even if
542// overwriting it (eg SLTI, DSRA32).
543static void flush_dirty_uppers(struct regstat *cur)
544{
545 int hr,reg;
546 for (hr=0;hr<HOST_REGS;hr++) {
547 if((cur->dirty>>hr)&1) {
548 reg=cur->regmap[hr];
549 if(reg>=64)
550 if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
551 }
552 }
553}
554
555void set_const(struct regstat *cur,signed char reg,uint64_t value)
556{
557 int hr;
558 if(!reg) return;
559 for (hr=0;hr<HOST_REGS;hr++) {
560 if(cur->regmap[hr]==reg) {
561 cur->isconst|=1<<hr;
562 cur->constmap[hr]=value;
563 }
564 else if((cur->regmap[hr]^64)==reg) {
565 cur->isconst|=1<<hr;
566 cur->constmap[hr]=value>>32;
567 }
568 }
569}
570
571void clear_const(struct regstat *cur,signed char reg)
572{
573 int hr;
574 if(!reg) return;
575 for (hr=0;hr<HOST_REGS;hr++) {
576 if((cur->regmap[hr]&63)==reg) {
577 cur->isconst&=~(1<<hr);
578 }
579 }
580}
581
582int is_const(struct regstat *cur,signed char reg)
583{
584 int hr;
585 if(!reg) return 1;
586 for (hr=0;hr<HOST_REGS;hr++) {
587 if((cur->regmap[hr]&63)==reg) {
588 return (cur->isconst>>hr)&1;
589 }
590 }
591 return 0;
592}
593uint64_t get_const(struct regstat *cur,signed char reg)
594{
595 int hr;
596 if(!reg) return 0;
597 for (hr=0;hr<HOST_REGS;hr++) {
598 if(cur->regmap[hr]==reg) {
599 return cur->constmap[hr];
600 }
601 }
602 printf("Unknown constant in r%d\n",reg);
603 exit(1);
604}
605
606// Least soon needed registers
607// Look at the next ten instructions and see which registers
608// will be used. Try not to reallocate these.
609void lsn(u_char hsn[], int i, int *preferred_reg)
610{
611 int j;
612 int b=-1;
613 for(j=0;j<9;j++)
614 {
615 if(i+j>=slen) {
616 j=slen-i-1;
617 break;
618 }
619 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
620 {
 621 // Don't go past an unconditional jump
622 j++;
623 break;
624 }
625 }
626 for(;j>=0;j--)
627 {
628 if(rs1[i+j]) hsn[rs1[i+j]]=j;
629 if(rs2[i+j]) hsn[rs2[i+j]]=j;
630 if(rt1[i+j]) hsn[rt1[i+j]]=j;
631 if(rt2[i+j]) hsn[rt2[i+j]]=j;
632 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
633 // Stores can allocate zero
634 hsn[rs1[i+j]]=j;
635 hsn[rs2[i+j]]=j;
636 }
637 // On some architectures stores need invc_ptr
638 #if defined(HOST_IMM8)
639 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
640 hsn[INVCP]=j;
641 }
642 #endif
643 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
644 {
645 hsn[CCREG]=j;
646 b=j;
647 }
648 }
649 if(b>=0)
650 {
651 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
652 {
653 // Follow first branch
654 int t=(ba[i+b]-start)>>2;
655 j=7-b;if(t+j>=slen) j=slen-t-1;
656 for(;j>=0;j--)
657 {
658 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
659 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
660 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
661 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
662 }
663 }
664 // TODO: preferred register based on backward branch
665 }
666 // Delay slot should preferably not overwrite branch conditions or cycle count
667 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
668 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
669 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
670 hsn[CCREG]=1;
671 // ...or hash tables
672 hsn[RHASH]=1;
673 hsn[RHTBL]=1;
674 }
675 // Coprocessor load/store needs FTEMP, even if not declared
676 if(itype[i]==C1LS||itype[i]==C2LS) {
677 hsn[FTEMP]=0;
678 }
679 // Load L/R also uses FTEMP as a temporary register
680 if(itype[i]==LOADLR) {
681 hsn[FTEMP]=0;
682 }
683 // Also SWL/SWR/SDL/SDR
684 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
685 hsn[FTEMP]=0;
686 }
687 // Don't remove the TLB registers either
688 if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
689 hsn[TLREG]=0;
690 }
691 // Don't remove the miniht registers
692 if(itype[i]==UJUMP||itype[i]==RJUMP)
693 {
694 hsn[RHASH]=0;
695 hsn[RHTBL]=0;
696 }
697}
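// hsn[] records how soon each guest register is next used (smaller = sooner);
// needed_again() and the allocators below compare against it when deciding
// which registers are worth keeping in host registers.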
698
699// We only want to allocate registers if we're going to use them again soon
700int needed_again(int r, int i)
701{
702 int j;
703 int b=-1;
704 int rn=10;
705 int hr;
706 u_char hsn[MAXREG+1];
707 int preferred_reg;
708
709 memset(hsn,10,sizeof(hsn));
710 lsn(hsn,i,&preferred_reg);
711
712 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
713 {
714 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
715 return 0; // Don't need any registers if exiting the block
716 }
717 for(j=0;j<9;j++)
718 {
719 if(i+j>=slen) {
720 j=slen-i-1;
721 break;
722 }
723 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
724 {
 725 // Don't go past an unconditional jump
726 j++;
727 break;
728 }
729 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||((source[i+j]&0xfc00003f)==0x0d))
730 {
731 break;
732 }
733 }
734 for(;j>=1;j--)
735 {
736 if(rs1[i+j]==r) rn=j;
737 if(rs2[i+j]==r) rn=j;
738 if((unneeded_reg[i+j]>>r)&1) rn=10;
739 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
740 {
741 b=j;
742 }
743 }
744 /*
745 if(b>=0)
746 {
747 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
748 {
749 // Follow first branch
750 int o=rn;
751 int t=(ba[i+b]-start)>>2;
752 j=7-b;if(t+j>=slen) j=slen-t-1;
753 for(;j>=0;j--)
754 {
755 if(!((unneeded_reg[t+j]>>r)&1)) {
756 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
757 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
758 }
759 else rn=o;
760 }
761 }
762 }*/
763 for(hr=0;hr<HOST_REGS;hr++) {
764 if(hr!=EXCLUDE_REG) {
765 if(rn<hsn[hr]) return 1;
766 }
767 }
768 return 0;
769}
770
771// Try to match register allocations at the end of a loop with those
772// at the beginning
773int loop_reg(int i, int r, int hr)
774{
775 int j,k;
776 for(j=0;j<9;j++)
777 {
778 if(i+j>=slen) {
779 j=slen-i-1;
780 break;
781 }
782 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
783 {
 784 // Don't go past an unconditional jump
785 j++;
786 break;
787 }
788 }
789 k=0;
790 if(i>0){
791 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
792 k--;
793 }
794 for(;k<j;k++)
795 {
796 if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
797 if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
798 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
799 {
800 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
801 {
802 int t=(ba[i+k]-start)>>2;
803 int reg=get_reg(regs[t].regmap_entry,r);
804 if(reg>=0) return reg;
805 //reg=get_reg(regs[t+1].regmap_entry,r);
806 //if(reg>=0) return reg;
807 }
808 }
809 }
810 return hr;
811}
812
813
814// Allocate every register, preserving source/target regs
815void alloc_all(struct regstat *cur,int i)
816{
817 int hr;
818
819 for(hr=0;hr<HOST_REGS;hr++) {
820 if(hr!=EXCLUDE_REG) {
821 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
822 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
823 {
824 cur->regmap[hr]=-1;
825 cur->dirty&=~(1<<hr);
826 }
827 // Don't need zeros
828 if((cur->regmap[hr]&63)==0)
829 {
830 cur->regmap[hr]=-1;
831 cur->dirty&=~(1<<hr);
832 }
833 }
834 }
835}
836
837
838void div64(int64_t dividend,int64_t divisor)
839{
840 lo=dividend/divisor;
841 hi=dividend%divisor;
842 //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
843 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
844}
845void divu64(uint64_t dividend,uint64_t divisor)
846{
847 lo=dividend/divisor;
848 hi=dividend%divisor;
849 //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
850 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
851}
852
 853void mult64(int64_t m1,int64_t m2) // operands are signed; multu64 below handles the unsigned case
854{
855 unsigned long long int op1, op2, op3, op4;
856 unsigned long long int result1, result2, result3, result4;
857 unsigned long long int temp1, temp2, temp3, temp4;
858 int sign = 0;
859
860 if (m1 < 0)
861 {
862 op2 = -m1;
863 sign = 1 - sign;
864 }
865 else op2 = m1;
866 if (m2 < 0)
867 {
868 op4 = -m2;
869 sign = 1 - sign;
870 }
871 else op4 = m2;
872
873 op1 = op2 & 0xFFFFFFFF;
874 op2 = (op2 >> 32) & 0xFFFFFFFF;
875 op3 = op4 & 0xFFFFFFFF;
876 op4 = (op4 >> 32) & 0xFFFFFFFF;
877
878 temp1 = op1 * op3;
879 temp2 = (temp1 >> 32) + op1 * op4;
880 temp3 = op2 * op3;
881 temp4 = (temp3 >> 32) + op2 * op4;
882
883 result1 = temp1 & 0xFFFFFFFF;
884 result2 = temp2 + (temp3 & 0xFFFFFFFF);
885 result3 = (result2 >> 32) + temp4;
886 result4 = (result3 >> 32);
887
888 lo = result1 | (result2 << 32);
889 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
890 if (sign)
891 {
892 hi = ~hi;
893 if (!lo) hi++;
894 else lo = ~lo + 1;
895 }
896}
897
898void multu64(uint64_t m1,uint64_t m2)
899{
900 unsigned long long int op1, op2, op3, op4;
901 unsigned long long int result1, result2, result3, result4;
902 unsigned long long int temp1, temp2, temp3, temp4;
903
904 op1 = m1 & 0xFFFFFFFF;
905 op2 = (m1 >> 32) & 0xFFFFFFFF;
906 op3 = m2 & 0xFFFFFFFF;
907 op4 = (m2 >> 32) & 0xFFFFFFFF;
908
909 temp1 = op1 * op3;
910 temp2 = (temp1 >> 32) + op1 * op4;
911 temp3 = op2 * op3;
912 temp4 = (temp3 >> 32) + op2 * op4;
913
914 result1 = temp1 & 0xFFFFFFFF;
915 result2 = temp2 + (temp3 & 0xFFFFFFFF);
916 result3 = (result2 >> 32) + temp4;
917 result4 = (result3 >> 32);
918
919 lo = result1 | (result2 << 32);
920 hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
921
922 //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
923 // ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
924}
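// mult64/multu64 use the schoolbook method: each operand is split into 32-bit
// halves (op1/op2, op3/op4), the four 32x32 partial products are accumulated
// through temp1-temp4/result1-result4, and mult64 fixes up the sign at the
// end by negating the 128-bit {hi,lo} result.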
925
926uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
927{
928 if(bits) {
929 original<<=64-bits;
930 original>>=64-bits;
931 loaded<<=bits;
932 original|=loaded;
933 }
934 else original=loaded;
935 return original;
936}
937uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
938{
939 if(bits^56) {
940 original>>=64-(bits^56);
941 original<<=64-(bits^56);
942 loaded>>=bits^56;
943 original|=loaded;
944 }
945 else original=loaded;
946 return original;
947}
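// Worked example: ldl_merge keeps the low 'bits' bits of the old register
// value and ORs in the loaded doubleword shifted up, e.g.
//   ldl_merge(0x1122334455667788, 0xAABBCCDDEEFF0011, 16) = 0xCCDDEEFF00117788
// ldr_merge is the mirror image, keyed on bits^56.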
948
949#ifdef __i386__
950#include "assem_x86.c"
951#endif
952#ifdef __x86_64__
953#include "assem_x64.c"
954#endif
955#ifdef __arm__
956#include "assem_arm.c"
957#endif
958
959// Add virtual address mapping to linked list
960void ll_add(struct ll_entry **head,int vaddr,void *addr)
961{
962 struct ll_entry *new_entry;
963 new_entry=malloc(sizeof(struct ll_entry));
964 assert(new_entry!=NULL);
965 new_entry->vaddr=vaddr;
966 new_entry->reg32=0;
967 new_entry->addr=addr;
968 new_entry->next=*head;
969 *head=new_entry;
970}
971
972// Add virtual address mapping for 32-bit compiled block
973void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
974{
975 ll_add(head,vaddr,addr);
976#ifndef FORCE32
977 (*head)->reg32=reg32;
978#endif
979}
980
981// Check if an address is already compiled
982// but don't return addresses which are about to expire from the cache
983void *check_addr(u_int vaddr)
984{
985 u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
986 if(ht_bin[0]==vaddr) {
987 if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
988 if(isclean(ht_bin[1])) return (void *)ht_bin[1];
989 }
990 if(ht_bin[2]==vaddr) {
991 if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
992 if(isclean(ht_bin[3])) return (void *)ht_bin[3];
993 }
994 u_int page=get_page(vaddr);
995 struct ll_entry *head;
996 head=jump_in[page];
997 while(head!=NULL) {
998 if(head->vaddr==vaddr&&head->reg32==0) {
999 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1000 // Update existing entry with current address
1001 if(ht_bin[0]==vaddr) {
1002 ht_bin[1]=(int)head->addr;
1003 return head->addr;
1004 }
1005 if(ht_bin[2]==vaddr) {
1006 ht_bin[3]=(int)head->addr;
1007 return head->addr;
1008 }
1009 // Insert into hash table with low priority.
1010 // Don't evict existing entries, as they are probably
1011 // addresses that are being accessed frequently.
1012 if(ht_bin[0]==-1) {
1013 ht_bin[1]=(int)head->addr;
1014 ht_bin[0]=vaddr;
1015 }else if(ht_bin[2]==-1) {
1016 ht_bin[3]=(int)head->addr;
1017 ht_bin[2]=vaddr;
1018 }
1019 return head->addr;
1020 }
1021 }
1022 head=head->next;
1023 }
1024 return 0;
1025}
1026
1027void remove_hash(int vaddr)
1028{
1029 //printf("remove hash: %x\n",vaddr);
1030 int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1031 if(ht_bin[2]==vaddr) {
1032 ht_bin[2]=ht_bin[3]=-1;
1033 }
1034 if(ht_bin[0]==vaddr) {
1035 ht_bin[0]=ht_bin[2];
1036 ht_bin[1]=ht_bin[3];
1037 ht_bin[2]=ht_bin[3]=-1;
1038 }
1039}
1040
1041void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1042{
1043 struct ll_entry *next;
1044 while(*head) {
1045 if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1046 ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1047 {
1048 inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1049 remove_hash((*head)->vaddr);
1050 next=(*head)->next;
1051 free(*head);
1052 *head=next;
1053 }
1054 else
1055 {
1056 head=&((*head)->next);
1057 }
1058 }
1059}
1060
1061// Remove all entries from linked list
1062void ll_clear(struct ll_entry **head)
1063{
1064 struct ll_entry *cur;
1065 struct ll_entry *next;
 1066 if((cur=*head)) {
1067 *head=0;
1068 while(cur) {
1069 next=cur->next;
1070 free(cur);
1071 cur=next;
1072 }
1073 }
1074}
1075
1076// Dereference the pointers and remove if it matches
1077void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1078{
1079 u_int old_host_addr=0;
1080 while(head) {
1081 int ptr=get_pointer(head->addr);
1082 inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1083 if(((ptr>>shift)==(addr>>shift)) ||
1084 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1085 {
1086 printf("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1087 u_int host_addr=(u_int)kill_pointer(head->addr);
1088
1089 if((host_addr>>12)!=(old_host_addr>>12)) {
1090 #ifdef __arm__
1091 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1092 #endif
1093 old_host_addr=host_addr;
1094 }
1095 }
1096 head=head->next;
1097 }
1098 #ifdef __arm__
1099 if (old_host_addr)
1100 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1101 #endif
1102}
1103
1104// This is called when we write to a compiled block (see do_invstub)
1105void invalidate_page(u_int page)
1106{
1107 struct ll_entry *head;
1108 struct ll_entry *next;
1109 u_int old_host_addr=0;
1110 head=jump_in[page];
1111 jump_in[page]=0;
1112 while(head!=NULL) {
1113 inv_debug("INVALIDATE: %x\n",head->vaddr);
1114 remove_hash(head->vaddr);
1115 next=head->next;
1116 free(head);
1117 head=next;
1118 }
1119 head=jump_out[page];
1120 jump_out[page]=0;
1121 while(head!=NULL) {
1122 inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1123 u_int host_addr=(u_int)kill_pointer(head->addr);
1124
1125 if((host_addr>>12)!=(old_host_addr>>12)) {
1126 #ifdef __arm__
1127 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1128 #endif
1129 old_host_addr=host_addr;
1130 }
1131 next=head->next;
1132 free(head);
1133 head=next;
1134 }
1135 #ifdef __arm__
1136 if (old_host_addr)
1137 __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
1138 #endif
1139}
1140void invalidate_block(u_int block)
1141{
1142 u_int page=get_page(block<<12);
1143 u_int vpage=get_vpage(block<<12);
1144 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1145 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1146 u_int first,last;
1147 first=last=page;
1148 struct ll_entry *head;
1149 head=jump_dirty[vpage];
1150 //printf("page=%d vpage=%d\n",page,vpage);
1151 while(head!=NULL) {
1152 u_int start,end;
1153 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1154 get_bounds((int)head->addr,&start,&end);
1155 //printf("start: %x end: %x\n",start,end);
1156 if(page<2048&&start>=0x80000000&&end<0x80800000) {
1157 if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1158 if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1159 if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1160 }
1161 }
1162#ifndef DISABLE_TLB
1163 if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1164 if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1165 if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1166 if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1167 }
1168 }
1169#endif
1170 }
1171 head=head->next;
1172 }
1173 //printf("first=%d last=%d\n",first,last);
1174 invalidate_page(page);
1175 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1176 assert(last<page+5);
1177 // Invalidate the adjacent pages if a block crosses a 4K boundary
1178 while(first<page) {
1179 invalidate_page(first);
1180 first++;
1181 }
1182 for(first=page+1;first<last;first++) {
1183 invalidate_page(first);
1184 }
1185
1186 // Don't trap writes
1187 invalid_code[block]=1;
1188#ifndef DISABLE_TLB
1189 // If there is a valid TLB entry for this page, remove write protect
1190 if(tlb_LUT_w[block]) {
1191 assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1192 // CHECK: Is this right?
1193 memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1194 u_int real_block=tlb_LUT_w[block]>>12;
1195 invalid_code[real_block]=1;
1196 if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1197 }
1198 else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1199#endif
1200
1201 #ifdef USE_MINI_HT
1202 memset(mini_ht,-1,sizeof(mini_ht));
1203 #endif
1204}
1205void invalidate_addr(u_int addr)
1206{
1207 invalidate_block(addr>>12);
1208}
1209void invalidate_all_pages()
1210{
1211 u_int page,n;
1212 for(page=0;page<4096;page++)
1213 invalidate_page(page);
1214 for(page=0;page<1048576;page++)
1215 if(!invalid_code[page]) {
1216 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1217 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1218 }
1219 #ifdef __arm__
1220 __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1221 #endif
1222 #ifdef USE_MINI_HT
1223 memset(mini_ht,-1,sizeof(mini_ht));
1224 #endif
1225 #ifndef DISABLE_TLB
1226 // TLB
1227 for(page=0;page<0x100000;page++) {
1228 if(tlb_LUT_r[page]) {
1229 memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1230 if(!tlb_LUT_w[page]||!invalid_code[page])
1231 memory_map[page]|=0x40000000; // Write protect
1232 }
1233 else memory_map[page]=-1;
1234 if(page==0x80000) page=0xC0000;
1235 }
1236 tlb_hacks();
1237 #endif
1238}
1239
1240// Add an entry to jump_out after making a link
1241void add_link(u_int vaddr,void *src)
1242{
1243 u_int page=get_page(vaddr);
1244 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1245 ll_add(jump_out+page,vaddr,src);
1246 //int ptr=get_pointer(src);
1247 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1248}
1249
1250// If a code block was found to be unmodified (bit was set in
1251// restore_candidate) and it remains unmodified (bit is clear
1252// in invalid_code) then move the entries for that 4K page from
1253// the dirty list to the clean list.
1254void clean_blocks(u_int page)
1255{
1256 struct ll_entry *head;
1257 inv_debug("INV: clean_blocks page=%d\n",page);
1258 head=jump_dirty[page];
1259 while(head!=NULL) {
1260 if(!invalid_code[head->vaddr>>12]) {
1261 // Don't restore blocks which are about to expire from the cache
1262 if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1263 u_int start,end;
1264 if(verify_dirty((int)head->addr)) {
1265 //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1266 u_int i;
1267 u_int inv=0;
1268 get_bounds((int)head->addr,&start,&end);
1269 if(start-(u_int)rdram<0x800000) {
1270 for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1271 inv|=invalid_code[i];
1272 }
1273 }
1274 if((signed int)head->vaddr>=(signed int)0xC0000000) {
1275 u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1276 //printf("addr=%x start=%x end=%x\n",addr,start,end);
1277 if(addr<start||addr>=end) inv=1;
1278 }
1279 else if((signed int)head->vaddr>=(signed int)0x80800000) {
1280 inv=1;
1281 }
1282 if(!inv) {
1283 void * clean_addr=(void *)get_clean_addr((int)head->addr);
1284 if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1285 u_int ppage=page;
1286#ifndef DISABLE_TLB
1287 if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1288#endif
1289 inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1290 //printf("page=%x, addr=%x\n",page,head->vaddr);
1291 //assert(head->vaddr>>12==(page|0x80000));
1292 ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1293 int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1294 if(!head->reg32) {
1295 if(ht_bin[0]==head->vaddr) {
1296 ht_bin[1]=(int)clean_addr; // Replace existing entry
1297 }
1298 if(ht_bin[2]==head->vaddr) {
1299 ht_bin[3]=(int)clean_addr; // Replace existing entry
1300 }
1301 }
1302 }
1303 }
1304 }
1305 }
1306 }
1307 head=head->next;
1308 }
1309}
1310
1311
1312void mov_alloc(struct regstat *current,int i)
1313{
1314 // Note: Don't need to actually alloc the source registers
1315 if((~current->is32>>rs1[i])&1) {
1316 //alloc_reg64(current,i,rs1[i]);
1317 alloc_reg64(current,i,rt1[i]);
1318 current->is32&=~(1LL<<rt1[i]);
1319 } else {
1320 //alloc_reg(current,i,rs1[i]);
1321 alloc_reg(current,i,rt1[i]);
1322 current->is32|=(1LL<<rt1[i]);
1323 }
1324 clear_const(current,rs1[i]);
1325 clear_const(current,rt1[i]);
1326 dirty_reg(current,rt1[i]);
1327}
1328
1329void shiftimm_alloc(struct regstat *current,int i)
1330{
1331 clear_const(current,rs1[i]);
1332 clear_const(current,rt1[i]);
1333 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1334 {
1335 if(rt1[i]) {
1336 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1337 else lt1[i]=rs1[i];
1338 alloc_reg(current,i,rt1[i]);
1339 current->is32|=1LL<<rt1[i];
1340 dirty_reg(current,rt1[i]);
1341 }
1342 }
1343 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1344 {
1345 if(rt1[i]) {
1346 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1347 alloc_reg64(current,i,rt1[i]);
1348 current->is32&=~(1LL<<rt1[i]);
1349 dirty_reg(current,rt1[i]);
1350 }
1351 }
1352 if(opcode2[i]==0x3c) // DSLL32
1353 {
1354 if(rt1[i]) {
1355 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1356 alloc_reg64(current,i,rt1[i]);
1357 current->is32&=~(1LL<<rt1[i]);
1358 dirty_reg(current,rt1[i]);
1359 }
1360 }
1361 if(opcode2[i]==0x3e) // DSRL32
1362 {
1363 if(rt1[i]) {
1364 alloc_reg64(current,i,rs1[i]);
1365 if(imm[i]==32) {
1366 alloc_reg64(current,i,rt1[i]);
1367 current->is32&=~(1LL<<rt1[i]);
1368 } else {
1369 alloc_reg(current,i,rt1[i]);
1370 current->is32|=1LL<<rt1[i];
1371 }
1372 dirty_reg(current,rt1[i]);
1373 }
1374 }
1375 if(opcode2[i]==0x3f) // DSRA32
1376 {
1377 if(rt1[i]) {
1378 alloc_reg64(current,i,rs1[i]);
1379 alloc_reg(current,i,rt1[i]);
1380 current->is32|=1LL<<rt1[i];
1381 dirty_reg(current,rt1[i]);
1382 }
1383 }
1384}
1385
1386void shift_alloc(struct regstat *current,int i)
1387{
1388 if(rt1[i]) {
1389 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1390 {
1391 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1392 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1393 alloc_reg(current,i,rt1[i]);
1394 if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
1395 current->is32|=1LL<<rt1[i];
1396 } else { // DSLLV/DSRLV/DSRAV
1397 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1398 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1399 alloc_reg64(current,i,rt1[i]);
1400 current->is32&=~(1LL<<rt1[i]);
1401 if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1402 alloc_reg_temp(current,i,-1);
1403 }
1404 clear_const(current,rs1[i]);
1405 clear_const(current,rs2[i]);
1406 clear_const(current,rt1[i]);
1407 dirty_reg(current,rt1[i]);
1408 }
1409}
1410
1411void alu_alloc(struct regstat *current,int i)
1412{
1413 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1414 if(rt1[i]) {
1415 if(rs1[i]&&rs2[i]) {
1416 alloc_reg(current,i,rs1[i]);
1417 alloc_reg(current,i,rs2[i]);
1418 }
1419 else {
1420 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1421 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1422 }
1423 alloc_reg(current,i,rt1[i]);
1424 }
1425 current->is32|=1LL<<rt1[i];
1426 }
1427 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1428 if(rt1[i]) {
1429 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1430 {
1431 alloc_reg64(current,i,rs1[i]);
1432 alloc_reg64(current,i,rs2[i]);
1433 alloc_reg(current,i,rt1[i]);
1434 } else {
1435 alloc_reg(current,i,rs1[i]);
1436 alloc_reg(current,i,rs2[i]);
1437 alloc_reg(current,i,rt1[i]);
1438 }
1439 }
1440 current->is32|=1LL<<rt1[i];
1441 }
1442 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1443 if(rt1[i]) {
1444 if(rs1[i]&&rs2[i]) {
1445 alloc_reg(current,i,rs1[i]);
1446 alloc_reg(current,i,rs2[i]);
1447 }
1448 else
1449 {
1450 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1451 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1452 }
1453 alloc_reg(current,i,rt1[i]);
1454 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1455 {
1456 if(!((current->uu>>rt1[i])&1)) {
1457 alloc_reg64(current,i,rt1[i]);
1458 }
1459 if(get_reg(current->regmap,rt1[i]|64)>=0) {
1460 if(rs1[i]&&rs2[i]) {
1461 alloc_reg64(current,i,rs1[i]);
1462 alloc_reg64(current,i,rs2[i]);
1463 }
1464 else
1465 {
 1466 // Is it really worth it to keep 64-bit values in registers?
1467 #ifdef NATIVE_64BIT
1468 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1469 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1470 #endif
1471 }
1472 }
1473 current->is32&=~(1LL<<rt1[i]);
1474 } else {
1475 current->is32|=1LL<<rt1[i];
1476 }
1477 }
1478 }
1479 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1480 if(rt1[i]) {
1481 if(rs1[i]&&rs2[i]) {
1482 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1483 alloc_reg64(current,i,rs1[i]);
1484 alloc_reg64(current,i,rs2[i]);
1485 alloc_reg64(current,i,rt1[i]);
1486 } else {
1487 alloc_reg(current,i,rs1[i]);
1488 alloc_reg(current,i,rs2[i]);
1489 alloc_reg(current,i,rt1[i]);
1490 }
1491 }
1492 else {
1493 alloc_reg(current,i,rt1[i]);
1494 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1495 // DADD used as move, or zeroing
1496 // If we have a 64-bit source, then make the target 64 bits too
1497 if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1498 if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1499 alloc_reg64(current,i,rt1[i]);
1500 } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1501 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1502 alloc_reg64(current,i,rt1[i]);
1503 }
1504 if(opcode2[i]>=0x2e&&rs2[i]) {
1505 // DSUB used as negation - 64-bit result
1506 // If we have a 32-bit register, extend it to 64 bits
1507 if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1508 alloc_reg64(current,i,rt1[i]);
1509 }
1510 }
1511 }
1512 if(rs1[i]&&rs2[i]) {
1513 current->is32&=~(1LL<<rt1[i]);
1514 } else if(rs1[i]) {
1515 current->is32&=~(1LL<<rt1[i]);
1516 if((current->is32>>rs1[i])&1)
1517 current->is32|=1LL<<rt1[i];
1518 } else if(rs2[i]) {
1519 current->is32&=~(1LL<<rt1[i]);
1520 if((current->is32>>rs2[i])&1)
1521 current->is32|=1LL<<rt1[i];
1522 } else {
1523 current->is32|=1LL<<rt1[i];
1524 }
1525 }
1526 }
1527 clear_const(current,rs1[i]);
1528 clear_const(current,rs2[i]);
1529 clear_const(current,rt1[i]);
1530 dirty_reg(current,rt1[i]);
1531}
1532
1533void imm16_alloc(struct regstat *current,int i)
1534{
1535 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1536 else lt1[i]=rs1[i];
1537 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1538 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1539 current->is32&=~(1LL<<rt1[i]);
1540 if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1541 // TODO: Could preserve the 32-bit flag if the immediate is zero
1542 alloc_reg64(current,i,rt1[i]);
1543 alloc_reg64(current,i,rs1[i]);
1544 }
1545 clear_const(current,rs1[i]);
1546 clear_const(current,rt1[i]);
1547 }
1548 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1549 if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1550 current->is32|=1LL<<rt1[i];
1551 clear_const(current,rs1[i]);
1552 clear_const(current,rt1[i]);
1553 }
1554 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1555 if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1556 if(rs1[i]!=rt1[i]) {
1557 if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1558 alloc_reg64(current,i,rt1[i]);
1559 current->is32&=~(1LL<<rt1[i]);
1560 }
1561 }
1562 else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1563 if(is_const(current,rs1[i])) {
1564 int v=get_const(current,rs1[i]);
1565 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1566 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1567 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1568 }
1569 else clear_const(current,rt1[i]);
1570 }
1571 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1572 if(is_const(current,rs1[i])) {
1573 int v=get_const(current,rs1[i]);
1574 set_const(current,rt1[i],v+imm[i]);
1575 }
1576 else clear_const(current,rt1[i]);
1577 current->is32|=1LL<<rt1[i];
1578 }
1579 else {
1580 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1581 current->is32|=1LL<<rt1[i];
1582 }
1583 dirty_reg(current,rt1[i]);
1584}
1585
1586void load_alloc(struct regstat *current,int i)
1587{
1588 clear_const(current,rt1[i]);
1589 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1590 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1591 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1592 if(rt1[i]) {
1593 alloc_reg(current,i,rt1[i]);
1594 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1595 {
1596 current->is32&=~(1LL<<rt1[i]);
1597 alloc_reg64(current,i,rt1[i]);
1598 }
1599 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1600 {
1601 current->is32&=~(1LL<<rt1[i]);
1602 alloc_reg64(current,i,rt1[i]);
1603 alloc_all(current,i);
1604 alloc_reg64(current,i,FTEMP);
1605 }
1606 else current->is32|=1LL<<rt1[i];
1607 dirty_reg(current,rt1[i]);
1608 // If using TLB, need a register for pointer to the mapping table
1609 if(using_tlb) alloc_reg(current,i,TLREG);
1610 // LWL/LWR need a temporary register for the old value
1611 if(opcode[i]==0x22||opcode[i]==0x26)
1612 {
1613 alloc_reg(current,i,FTEMP);
1614 alloc_reg_temp(current,i,-1);
1615 }
1616 }
1617 else
1618 {
1619 // Load to r0 (dummy load)
1620 // but we still need a register to calculate the address
1621 alloc_reg_temp(current,i,-1);
1622 }
1623}
1624
1625void store_alloc(struct regstat *current,int i)
1626{
1627 clear_const(current,rs2[i]);
1628 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1629 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1630 alloc_reg(current,i,rs2[i]);
1631 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1632 alloc_reg64(current,i,rs2[i]);
1633 if(rs2[i]) alloc_reg(current,i,FTEMP);
1634 }
1635 // If using TLB, need a register for pointer to the mapping table
1636 if(using_tlb) alloc_reg(current,i,TLREG);
1637 #if defined(HOST_IMM8)
1638 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1639 else alloc_reg(current,i,INVCP);
1640 #endif
 1641 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1642 alloc_reg(current,i,FTEMP);
1643 }
1644 // We need a temporary register for address generation
1645 alloc_reg_temp(current,i,-1);
1646}
1647
1648void c1ls_alloc(struct regstat *current,int i)
1649{
1650 //clear_const(current,rs1[i]); // FIXME
1651 clear_const(current,rt1[i]);
1652 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1653 alloc_reg(current,i,CSREG); // Status
1654 alloc_reg(current,i,FTEMP);
1655 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1656 alloc_reg64(current,i,FTEMP);
1657 }
1658 // If using TLB, need a register for pointer to the mapping table
1659 if(using_tlb) alloc_reg(current,i,TLREG);
1660 #if defined(HOST_IMM8)
1661 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1662 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1663 alloc_reg(current,i,INVCP);
1664 #endif
1665 // We need a temporary register for address generation
1666 alloc_reg_temp(current,i,-1);
1667}
1668
1669void c2ls_alloc(struct regstat *current,int i)
1670{
1671 clear_const(current,rt1[i]);
1672 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1673 alloc_reg(current,i,FTEMP);
1674 // If using TLB, need a register for pointer to the mapping table
1675 if(using_tlb) alloc_reg(current,i,TLREG);
1676 #if defined(HOST_IMM8)
1677 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1678 else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1679 alloc_reg(current,i,INVCP);
1680 #endif
1681 // We need a temporary register for address generation
1682 alloc_reg_temp(current,i,-1);
1683}
1684
1685#ifndef multdiv_alloc
1686void multdiv_alloc(struct regstat *current,int i)
1687{
1688 // case 0x18: MULT
1689 // case 0x19: MULTU
1690 // case 0x1A: DIV
1691 // case 0x1B: DIVU
1692 // case 0x1C: DMULT
1693 // case 0x1D: DMULTU
1694 // case 0x1E: DDIV
1695 // case 0x1F: DDIVU
1696 clear_const(current,rs1[i]);
1697 clear_const(current,rs2[i]);
1698 if(rs1[i]&&rs2[i])
1699 {
1700 if((opcode2[i]&4)==0) // 32-bit
1701 {
1702 current->u&=~(1LL<<HIREG);
1703 current->u&=~(1LL<<LOREG);
1704 alloc_reg(current,i,HIREG);
1705 alloc_reg(current,i,LOREG);
1706 alloc_reg(current,i,rs1[i]);
1707 alloc_reg(current,i,rs2[i]);
1708 current->is32|=1LL<<HIREG;
1709 current->is32|=1LL<<LOREG;
1710 dirty_reg(current,HIREG);
1711 dirty_reg(current,LOREG);
1712 }
1713 else // 64-bit
1714 {
1715 current->u&=~(1LL<<HIREG);
1716 current->u&=~(1LL<<LOREG);
1717 current->uu&=~(1LL<<HIREG);
1718 current->uu&=~(1LL<<LOREG);
1719 alloc_reg64(current,i,HIREG);
1720 //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1721 alloc_reg64(current,i,rs1[i]);
1722 alloc_reg64(current,i,rs2[i]);
1723 alloc_all(current,i);
1724 current->is32&=~(1LL<<HIREG);
1725 current->is32&=~(1LL<<LOREG);
1726 dirty_reg(current,HIREG);
1727 dirty_reg(current,LOREG);
1728 }
1729 }
1730 else
1731 {
1732 // Multiply by zero is zero.
1733 // MIPS does not have a divide by zero exception.
 1734 // The result is undefined, so we return zero.
1735 alloc_reg(current,i,HIREG);
1736 alloc_reg(current,i,LOREG);
1737 current->is32|=1LL<<HIREG;
1738 current->is32|=1LL<<LOREG;
1739 dirty_reg(current,HIREG);
1740 dirty_reg(current,LOREG);
1741 }
1742}
1743#endif
1744
1745void cop0_alloc(struct regstat *current,int i)
1746{
1747 if(opcode2[i]==0) // MFC0
1748 {
1749 if(rt1[i]) {
1750 clear_const(current,rt1[i]);
1751 alloc_all(current,i);
1752 alloc_reg(current,i,rt1[i]);
1753 current->is32|=1LL<<rt1[i];
1754 dirty_reg(current,rt1[i]);
1755 }
1756 }
1757 else if(opcode2[i]==4) // MTC0
1758 {
1759 if(rs1[i]){
1760 clear_const(current,rs1[i]);
1761 alloc_reg(current,i,rs1[i]);
1762 alloc_all(current,i);
1763 }
1764 else {
1765 alloc_all(current,i); // FIXME: Keep r0
1766 current->u&=~1LL;
1767 alloc_reg(current,i,0);
1768 }
1769 }
1770 else
1771 {
1772 // TLBR/TLBWI/TLBWR/TLBP/ERET
1773 assert(opcode2[i]==0x10);
1774 alloc_all(current,i);
1775 }
1776}
1777
1778void cop1_alloc(struct regstat *current,int i)
1779{
1780 alloc_reg(current,i,CSREG); // Load status
1781 if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1782 {
1783 assert(rt1[i]);
1784 clear_const(current,rt1[i]);
1785 if(opcode2[i]==1) {
1786 alloc_reg64(current,i,rt1[i]); // DMFC1
1787 current->is32&=~(1LL<<rt1[i]);
1788 }else{
1789 alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1790 current->is32|=1LL<<rt1[i];
1791 }
1792 dirty_reg(current,rt1[i]);
1793 alloc_reg_temp(current,i,-1);
1794 }
1795 else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1796 {
1797 if(rs1[i]){
1798 clear_const(current,rs1[i]);
1799 if(opcode2[i]==5)
1800 alloc_reg64(current,i,rs1[i]); // DMTC1
1801 else
1802 alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1803 alloc_reg_temp(current,i,-1);
1804 }
1805 else {
1806 current->u&=~1LL;
1807 alloc_reg(current,i,0);
1808 alloc_reg_temp(current,i,-1);
1809 }
1810 }
1811}
1812void fconv_alloc(struct regstat *current,int i)
1813{
1814 alloc_reg(current,i,CSREG); // Load status
1815 alloc_reg_temp(current,i,-1);
1816}
1817void float_alloc(struct regstat *current,int i)
1818{
1819 alloc_reg(current,i,CSREG); // Load status
1820 alloc_reg_temp(current,i,-1);
1821}
1822void c2op_alloc(struct regstat *current,int i)
1823{
1824 alloc_reg_temp(current,i,-1);
1825}
1826void fcomp_alloc(struct regstat *current,int i)
1827{
1828 alloc_reg(current,i,CSREG); // Load status
1829 alloc_reg(current,i,FSREG); // Load flags
1830 dirty_reg(current,FSREG); // Flag will be modified
1831 alloc_reg_temp(current,i,-1);
1832}
1833
1834void syscall_alloc(struct regstat *current,int i)
1835{
1836 alloc_cc(current,i);
1837 dirty_reg(current,CCREG);
1838 alloc_all(current,i);
1839 current->isconst=0;
1840}
1841
1842void delayslot_alloc(struct regstat *current,int i)
1843{
1844 switch(itype[i]) {
1845 case UJUMP:
1846 case CJUMP:
1847 case SJUMP:
1848 case RJUMP:
1849 case FJUMP:
1850 case SYSCALL:
1851 case HLECALL:
1852 case SPAN:
1853 assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
1854 printf("Disabled speculative precompilation\n");
1855 stop_after_jal=1;
1856 break;
1857 case IMM16:
1858 imm16_alloc(current,i);
1859 break;
1860 case LOAD:
1861 case LOADLR:
1862 load_alloc(current,i);
1863 break;
1864 case STORE:
1865 case STORELR:
1866 store_alloc(current,i);
1867 break;
1868 case ALU:
1869 alu_alloc(current,i);
1870 break;
1871 case SHIFT:
1872 shift_alloc(current,i);
1873 break;
1874 case MULTDIV:
1875 multdiv_alloc(current,i);
1876 break;
1877 case SHIFTIMM:
1878 shiftimm_alloc(current,i);
1879 break;
1880 case MOV:
1881 mov_alloc(current,i);
1882 break;
1883 case COP0:
1884 cop0_alloc(current,i);
1885 break;
1886 case COP1:
1887 case COP2:
1888 cop1_alloc(current,i);
1889 break;
1890 case C1LS:
1891 c1ls_alloc(current,i);
1892 break;
1893 case C2LS:
1894 c2ls_alloc(current,i);
1895 break;
1896 case FCONV:
1897 fconv_alloc(current,i);
1898 break;
1899 case FLOAT:
1900 float_alloc(current,i);
1901 break;
1902 case FCOMP:
1903 fcomp_alloc(current,i);
1904 break;
1905 case C2OP:
1906 c2op_alloc(current,i);
1907 break;
1908 }
1909}
1910
1911// Special case where a branch and delay slot span two pages in virtual memory
1912static void pagespan_alloc(struct regstat *current,int i)
1913{
1914 current->isconst=0;
1915 current->wasconst=0;
1916 regs[i].wasconst=0;
1917 alloc_all(current,i);
1918 alloc_cc(current,i);
1919 dirty_reg(current,CCREG);
1920 if(opcode[i]==3) // JAL
1921 {
1922 alloc_reg(current,i,31);
1923 dirty_reg(current,31);
1924 }
1925 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1926 {
1927 alloc_reg(current,i,rs1[i]);
1928 if (rt1[i]!=0) {
1929 alloc_reg(current,i,rt1[i]);
1930 dirty_reg(current,rt1[i]);
1931 }
1932 }
1933 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1934 {
1935 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1936 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1937 if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1938 {
1939 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1940 if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1941 }
1942 }
1943 else
1944 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1945 {
1946 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1947 if(!((current->is32>>rs1[i])&1))
1948 {
1949 if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1950 }
1951 }
1952 else
1953 if(opcode[i]==0x11) // BC1
1954 {
1955 alloc_reg(current,i,FSREG);
1956 alloc_reg(current,i,CSREG);
1957 }
1958 //else ...
1959}
1960
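// Queue a stub to be emitted after the main block.  Each entry records the
// stub type, the code address of the jump to patch (addr), the address to
// return to in the compiled block (retaddr), and up to five type-specific
// arguments in slots 3..7.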
1961void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1962{
1963 stubs[stubcount][0]=type;
1964 stubs[stubcount][1]=addr;
1965 stubs[stubcount][2]=retaddr;
1966 stubs[stubcount][3]=a;
1967 stubs[stubcount][4]=b;
1968 stubs[stubcount][5]=c;
1969 stubs[stubcount][6]=d;
1970 stubs[stubcount][7]=e;
1971 stubcount++;
1972}
1973
1974// Write out a single register
1975void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1976{
1977 int hr;
1978 for(hr=0;hr<HOST_REGS;hr++) {
1979 if(hr!=EXCLUDE_REG) {
1980 if((regmap[hr]&63)==r) {
1981 if((dirty>>hr)&1) {
1982 if(regmap[hr]<64) {
1983 emit_storereg(r,hr);
1984#ifndef FORCE32
1985 if((is32>>regmap[hr])&1) {
1986 emit_sarimm(hr,31,hr);
1987 emit_storereg(r|64,hr);
1988 }
1989#endif
1990 }else{
1991 emit_storereg(r|64,hr);
1992 }
1993 }
1994 }
1995 }
1996 }
1997}
1998
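// Debug helpers for the TRACE output below: mchecksum() folds the contents
// of rdram[] into a rotate-and-XOR checksum, rchecksum() XORs the register
// file, and rlist() dumps the GPR (and, unless COP1 is disabled, FPR) values.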
1999int mchecksum()
2000{
2001 //if(!tracedebug) return 0;
2002 int i;
2003 int sum=0;
2004 for(i=0;i<2097152;i++) {
2005 unsigned int temp=sum;
2006 sum<<=1;
2007 sum|=(~temp)>>31;
2008 sum^=((u_int *)rdram)[i];
2009 }
2010 return sum;
2011}
2012int rchecksum()
2013{
2014 int i;
2015 int sum=0;
2016 for(i=0;i<64;i++)
2017 sum^=((u_int *)reg)[i];
2018 return sum;
2019}
2020void rlist()
2021{
2022 int i;
2023 printf("TRACE: ");
2024 for(i=0;i<32;i++)
2025 printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2026 printf("\n");
2027#ifndef DISABLE_COP1
2028 printf("TRACE: ");
2029 for(i=0;i<32;i++)
2030 printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2031 printf("\n");
2032#endif
2033}
2034
2035void enabletrace()
2036{
2037 tracedebug=1;
2038}
2039
2040void memdebug(int i)
2041{
2042 //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2043 //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2044 //rlist();
2045 //if(tracedebug) {
2046 //if(Count>=-2084597794) {
2047 if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2048 //if(0) {
2049 printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2050 //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2051 //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2052 rlist();
2053 #ifdef __i386__
2054 printf("TRACE: %x\n",(&i)[-1]);
2055 #endif
2056 #ifdef __arm__
2057 int j;
2058 printf("TRACE: %x \n",(&j)[10]);
2059 printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2060 #endif
2061 //fflush(stdout);
2062 }
2063 //printf("TRACE: %x\n",(&i)[-1]);
2064}
2065
2066void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2067{
2068 printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2069}
2070
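// Emit native code for register-register ALU operations, selected by
// opcode2: ADD/ADDU/SUB/SUBU, the 64-bit DADD/DADDU/DSUB/DSUBU (split into
// low/high host registers), SLT/SLTU, and AND/OR/XOR/NOR.  Operands that
// are r0 or not currently mapped to a host register get special handling.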
2071void alu_assemble(int i,struct regstat *i_regs)
2072{
2073 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2074 if(rt1[i]) {
2075 signed char s1,s2,t;
2076 t=get_reg(i_regs->regmap,rt1[i]);
2077 if(t>=0) {
2078 s1=get_reg(i_regs->regmap,rs1[i]);
2079 s2=get_reg(i_regs->regmap,rs2[i]);
2080 if(rs1[i]&&rs2[i]) {
2081 assert(s1>=0);
2082 assert(s2>=0);
2083 if(opcode2[i]&2) emit_sub(s1,s2,t);
2084 else emit_add(s1,s2,t);
2085 }
2086 else if(rs1[i]) {
2087 if(s1>=0) emit_mov(s1,t);
2088 else emit_loadreg(rs1[i],t);
2089 }
2090 else if(rs2[i]) {
2091 if(s2>=0) {
2092 if(opcode2[i]&2) emit_neg(s2,t);
2093 else emit_mov(s2,t);
2094 }
2095 else {
2096 emit_loadreg(rs2[i],t);
2097 if(opcode2[i]&2) emit_neg(t,t);
2098 }
2099 }
2100 else emit_zeroreg(t);
2101 }
2102 }
2103 }
2104 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2105 if(rt1[i]) {
2106 signed char s1l,s2l,s1h,s2h,tl,th;
2107 tl=get_reg(i_regs->regmap,rt1[i]);
2108 th=get_reg(i_regs->regmap,rt1[i]|64);
2109 if(tl>=0) {
2110 s1l=get_reg(i_regs->regmap,rs1[i]);
2111 s2l=get_reg(i_regs->regmap,rs2[i]);
2112 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2113 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2114 if(rs1[i]&&rs2[i]) {
2115 assert(s1l>=0);
2116 assert(s2l>=0);
2117 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2118 else emit_adds(s1l,s2l,tl);
2119 if(th>=0) {
2120 #ifdef INVERTED_CARRY
2121 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2122 #else
2123 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2124 #endif
2125 else emit_add(s1h,s2h,th);
2126 }
2127 }
2128 else if(rs1[i]) {
2129 if(s1l>=0) emit_mov(s1l,tl);
2130 else emit_loadreg(rs1[i],tl);
2131 if(th>=0) {
2132 if(s1h>=0) emit_mov(s1h,th);
2133 else emit_loadreg(rs1[i]|64,th);
2134 }
2135 }
2136 else if(rs2[i]) {
2137 if(s2l>=0) {
2138 if(opcode2[i]&2) emit_negs(s2l,tl);
2139 else emit_mov(s2l,tl);
2140 }
2141 else {
2142 emit_loadreg(rs2[i],tl);
2143 if(opcode2[i]&2) emit_negs(tl,tl);
2144 }
2145 if(th>=0) {
2146 #ifdef INVERTED_CARRY
2147 if(s2h>=0) emit_mov(s2h,th);
2148 else emit_loadreg(rs2[i]|64,th);
2149 if(opcode2[i]&2) {
2150 emit_adcimm(-1,th); // x86 has inverted carry flag
2151 emit_not(th,th);
2152 }
2153 #else
2154 if(opcode2[i]&2) {
2155 if(s2h>=0) emit_rscimm(s2h,0,th);
2156 else {
2157 emit_loadreg(rs2[i]|64,th);
2158 emit_rscimm(th,0,th);
2159 }
2160 }else{
2161 if(s2h>=0) emit_mov(s2h,th);
2162 else emit_loadreg(rs2[i]|64,th);
2163 }
2164 #endif
2165 }
2166 }
2167 else {
2168 emit_zeroreg(tl);
2169 if(th>=0) emit_zeroreg(th);
2170 }
2171 }
2172 }
2173 }
2174 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2175 if(rt1[i]) {
2176 signed char s1l,s1h,s2l,s2h,t;
2177 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2178 {
2179 t=get_reg(i_regs->regmap,rt1[i]);
2180 //assert(t>=0);
2181 if(t>=0) {
2182 s1l=get_reg(i_regs->regmap,rs1[i]);
2183 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2184 s2l=get_reg(i_regs->regmap,rs2[i]);
2185 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2186 if(rs2[i]==0) // rx<r0
2187 {
2188 assert(s1h>=0);
2189 if(opcode2[i]==0x2a) // SLT
2190 emit_shrimm(s1h,31,t);
2191 else // SLTU (unsigned can not be less than zero)
2192 emit_zeroreg(t);
2193 }
2194 else if(rs1[i]==0) // r0<rx
2195 {
2196 assert(s2h>=0);
2197 if(opcode2[i]==0x2a) // SLT
2198 emit_set_gz64_32(s2h,s2l,t);
2199 else // SLTU (set if not zero)
2200 emit_set_nz64_32(s2h,s2l,t);
2201 }
2202 else {
2203 assert(s1l>=0);assert(s1h>=0);
2204 assert(s2l>=0);assert(s2h>=0);
2205 if(opcode2[i]==0x2a) // SLT
2206 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2207 else // SLTU
2208 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2209 }
2210 }
2211 } else {
2212 t=get_reg(i_regs->regmap,rt1[i]);
2213 //assert(t>=0);
2214 if(t>=0) {
2215 s1l=get_reg(i_regs->regmap,rs1[i]);
2216 s2l=get_reg(i_regs->regmap,rs2[i]);
2217 if(rs2[i]==0) // rx<r0
2218 {
2219 assert(s1l>=0);
2220 if(opcode2[i]==0x2a) // SLT
2221 emit_shrimm(s1l,31,t);
2222 else // SLTU (unsigned can not be less than zero)
2223 emit_zeroreg(t);
2224 }
2225 else if(rs1[i]==0) // r0<rx
2226 {
2227 assert(s2l>=0);
2228 if(opcode2[i]==0x2a) // SLT
2229 emit_set_gz32(s2l,t);
2230 else // SLTU (set if not zero)
2231 emit_set_nz32(s2l,t);
2232 }
2233 else{
2234 assert(s1l>=0);assert(s2l>=0);
2235 if(opcode2[i]==0x2a) // SLT
2236 emit_set_if_less32(s1l,s2l,t);
2237 else // SLTU
2238 emit_set_if_carry32(s1l,s2l,t);
2239 }
2240 }
2241 }
2242 }
2243 }
2244 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2245 if(rt1[i]) {
2246 signed char s1l,s1h,s2l,s2h,th,tl;
2247 tl=get_reg(i_regs->regmap,rt1[i]);
2248 th=get_reg(i_regs->regmap,rt1[i]|64);
2249 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2250 {
2251 assert(tl>=0);
2252 if(tl>=0) {
2253 s1l=get_reg(i_regs->regmap,rs1[i]);
2254 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2255 s2l=get_reg(i_regs->regmap,rs2[i]);
2256 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2257 if(rs1[i]&&rs2[i]) {
2258 assert(s1l>=0);assert(s1h>=0);
2259 assert(s2l>=0);assert(s2h>=0);
2260 if(opcode2[i]==0x24) { // AND
2261 emit_and(s1l,s2l,tl);
2262 emit_and(s1h,s2h,th);
2263 } else
2264 if(opcode2[i]==0x25) { // OR
2265 emit_or(s1l,s2l,tl);
2266 emit_or(s1h,s2h,th);
2267 } else
2268 if(opcode2[i]==0x26) { // XOR
2269 emit_xor(s1l,s2l,tl);
2270 emit_xor(s1h,s2h,th);
2271 } else
2272 if(opcode2[i]==0x27) { // NOR
2273 emit_or(s1l,s2l,tl);
2274 emit_or(s1h,s2h,th);
2275 emit_not(tl,tl);
2276 emit_not(th,th);
2277 }
2278 }
2279 else
2280 {
2281 if(opcode2[i]==0x24) { // AND
2282 emit_zeroreg(tl);
2283 emit_zeroreg(th);
2284 } else
2285 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2286 if(rs1[i]){
2287 if(s1l>=0) emit_mov(s1l,tl);
2288 else emit_loadreg(rs1[i],tl);
2289 if(s1h>=0) emit_mov(s1h,th);
2290 else emit_loadreg(rs1[i]|64,th);
2291 }
2292 else
2293 if(rs2[i]){
2294 if(s2l>=0) emit_mov(s2l,tl);
2295 else emit_loadreg(rs2[i],tl);
2296 if(s2h>=0) emit_mov(s2h,th);
2297 else emit_loadreg(rs2[i]|64,th);
2298 }
2299 else{
2300 emit_zeroreg(tl);
2301 emit_zeroreg(th);
2302 }
2303 } else
2304 if(opcode2[i]==0x27) { // NOR
2305 if(rs1[i]){
2306 if(s1l>=0) emit_not(s1l,tl);
2307 else{
2308 emit_loadreg(rs1[i],tl);
2309 emit_not(tl,tl);
2310 }
2311 if(s1h>=0) emit_not(s1h,th);
2312 else{
2313 emit_loadreg(rs1[i]|64,th);
2314 emit_not(th,th);
2315 }
2316 }
2317 else
2318 if(rs2[i]){
2319 if(s2l>=0) emit_not(s2l,tl);
2320 else{
2321 emit_loadreg(rs2[i],tl);
2322 emit_not(tl,tl);
2323 }
2324 if(s2h>=0) emit_not(s2h,th);
2325 else{
2326 emit_loadreg(rs2[i]|64,th);
2327 emit_not(th,th);
2328 }
2329 }
2330 else {
2331 emit_movimm(-1,tl);
2332 emit_movimm(-1,th);
2333 }
2334 }
2335 }
2336 }
2337 }
2338 else
2339 {
2340 // 32 bit
2341 if(tl>=0) {
2342 s1l=get_reg(i_regs->regmap,rs1[i]);
2343 s2l=get_reg(i_regs->regmap,rs2[i]);
2344 if(rs1[i]&&rs2[i]) {
2345 assert(s1l>=0);
2346 assert(s2l>=0);
2347 if(opcode2[i]==0x24) { // AND
2348 emit_and(s1l,s2l,tl);
2349 } else
2350 if(opcode2[i]==0x25) { // OR
2351 emit_or(s1l,s2l,tl);
2352 } else
2353 if(opcode2[i]==0x26) { // XOR
2354 emit_xor(s1l,s2l,tl);
2355 } else
2356 if(opcode2[i]==0x27) { // NOR
2357 emit_or(s1l,s2l,tl);
2358 emit_not(tl,tl);
2359 }
2360 }
2361 else
2362 {
2363 if(opcode2[i]==0x24) { // AND
2364 emit_zeroreg(tl);
2365 } else
2366 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2367 if(rs1[i]){
2368 if(s1l>=0) emit_mov(s1l,tl);
2369 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2370 }
2371 else
2372 if(rs2[i]){
2373 if(s2l>=0) emit_mov(s2l,tl);
2374 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2375 }
2376 else emit_zeroreg(tl);
2377 } else
2378 if(opcode2[i]==0x27) { // NOR
2379 if(rs1[i]){
2380 if(s1l>=0) emit_not(s1l,tl);
2381 else {
2382 emit_loadreg(rs1[i],tl);
2383 emit_not(tl,tl);
2384 }
2385 }
2386 else
2387 if(rs2[i]){
2388 if(s2l>=0) emit_not(s2l,tl);
2389 else {
2390 emit_loadreg(rs2[i],tl);
2391 emit_not(tl,tl);
2392 }
2393 }
2394 else emit_movimm(-1,tl);
2395 }
2396 }
2397 }
2398 }
2399 }
2400 }
2401}
2402
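// Emit code for the immediate formats: LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU and ANDI/ORI/XORI.  When the source register holds a known
// constant (wasconst/constmap), the result is computed at compile time and
// loaded with a single emit_movimm.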
2403void imm16_assemble(int i,struct regstat *i_regs)
2404{
2405 if (opcode[i]==0x0f) { // LUI
2406 if(rt1[i]) {
2407 signed char t;
2408 t=get_reg(i_regs->regmap,rt1[i]);
2409 //assert(t>=0);
2410 if(t>=0) {
2411 if(!((i_regs->isconst>>t)&1))
2412 emit_movimm(imm[i]<<16,t);
2413 }
2414 }
2415 }
2416 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2417 if(rt1[i]) {
2418 signed char s,t;
2419 t=get_reg(i_regs->regmap,rt1[i]);
2420 s=get_reg(i_regs->regmap,rs1[i]);
2421 if(rs1[i]) {
2422 //assert(t>=0);
2423 //assert(s>=0);
2424 if(t>=0) {
2425 if(!((i_regs->isconst>>t)&1)) {
2426 if(s<0) {
2427 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2428 emit_addimm(t,imm[i],t);
2429 }else{
2430 if(!((i_regs->wasconst>>s)&1))
2431 emit_addimm(s,imm[i],t);
2432 else
2433 emit_movimm(constmap[i][s]+imm[i],t);
2434 }
2435 }
2436 }
2437 } else {
2438 if(t>=0) {
2439 if(!((i_regs->isconst>>t)&1))
2440 emit_movimm(imm[i],t);
2441 }
2442 }
2443 }
2444 }
2445 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2446 if(rt1[i]) {
2447 signed char sh,sl,th,tl;
2448 th=get_reg(i_regs->regmap,rt1[i]|64);
2449 tl=get_reg(i_regs->regmap,rt1[i]);
2450 sh=get_reg(i_regs->regmap,rs1[i]|64);
2451 sl=get_reg(i_regs->regmap,rs1[i]);
2452 if(tl>=0) {
2453 if(rs1[i]) {
2454 assert(sh>=0);
2455 assert(sl>=0);
2456 if(th>=0) {
2457 emit_addimm64_32(sh,sl,imm[i],th,tl);
2458 }
2459 else {
2460 emit_addimm(sl,imm[i],tl);
2461 }
2462 } else {
2463 emit_movimm(imm[i],tl);
2464 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2465 }
2466 }
2467 }
2468 }
2469 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2470 if(rt1[i]) {
2471 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2472 signed char sh,sl,t;
2473 t=get_reg(i_regs->regmap,rt1[i]);
2474 sh=get_reg(i_regs->regmap,rs1[i]|64);
2475 sl=get_reg(i_regs->regmap,rs1[i]);
2476 //assert(t>=0);
2477 if(t>=0) {
2478 if(rs1[i]>0) {
2479 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2480 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2481 if(opcode[i]==0x0a) { // SLTI
2482 if(sl<0) {
2483 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2484 emit_slti32(t,imm[i],t);
2485 }else{
2486 emit_slti32(sl,imm[i],t);
2487 }
2488 }
2489 else { // SLTIU
2490 if(sl<0) {
2491 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2492 emit_sltiu32(t,imm[i],t);
2493 }else{
2494 emit_sltiu32(sl,imm[i],t);
2495 }
2496 }
2497 }else{ // 64-bit
2498 assert(sl>=0);
2499 if(opcode[i]==0x0a) // SLTI
2500 emit_slti64_32(sh,sl,imm[i],t);
2501 else // SLTIU
2502 emit_sltiu64_32(sh,sl,imm[i],t);
2503 }
2504 }else{
2505 // SLTI(U) with r0 is just stupid,
2506 // nonetheless examples can be found
2507 if(opcode[i]==0x0a) // SLTI
2508 if(0<imm[i]) emit_movimm(1,t);
2509 else emit_zeroreg(t);
2510 else // SLTIU
2511 {
2512 if(imm[i]) emit_movimm(1,t);
2513 else emit_zeroreg(t);
2514 }
2515 }
2516 }
2517 }
2518 }
2519 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2520 if(rt1[i]) {
2521 signed char sh,sl,th,tl;
2522 th=get_reg(i_regs->regmap,rt1[i]|64);
2523 tl=get_reg(i_regs->regmap,rt1[i]);
2524 sh=get_reg(i_regs->regmap,rs1[i]|64);
2525 sl=get_reg(i_regs->regmap,rs1[i]);
2526 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2527 if(opcode[i]==0x0c) //ANDI
2528 {
2529 if(rs1[i]) {
2530 if(sl<0) {
2531 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2532 emit_andimm(tl,imm[i],tl);
2533 }else{
2534 if(!((i_regs->wasconst>>sl)&1))
2535 emit_andimm(sl,imm[i],tl);
2536 else
2537 emit_movimm(constmap[i][sl]&imm[i],tl);
2538 }
2539 }
2540 else
2541 emit_zeroreg(tl);
2542 if(th>=0) emit_zeroreg(th);
2543 }
2544 else
2545 {
2546 if(rs1[i]) {
2547 if(sl<0) {
2548 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2549 }
2550 if(th>=0) {
2551 if(sh<0) {
2552 emit_loadreg(rs1[i]|64,th);
2553 }else{
2554 emit_mov(sh,th);
2555 }
2556 }
2557 if(opcode[i]==0x0d) //ORI
2558 if(sl<0) {
2559 emit_orimm(tl,imm[i],tl);
2560 }else{
2561 if(!((i_regs->wasconst>>sl)&1))
2562 emit_orimm(sl,imm[i],tl);
2563 else
2564 emit_movimm(constmap[i][sl]|imm[i],tl);
2565 }
2566 if(opcode[i]==0x0e) //XORI
2567 if(sl<0) {
2568 emit_xorimm(tl,imm[i],tl);
2569 }else{
2570 if(!((i_regs->wasconst>>sl)&1))
2571 emit_xorimm(sl,imm[i],tl);
2572 else
2573 emit_movimm(constmap[i][sl]^imm[i],tl);
2574 }
2575 }
2576 else {
2577 emit_movimm(imm[i],tl);
2578 if(th>=0) emit_zeroreg(th);
2579 }
2580 }
2581 }
2582 }
2583 }
2584}
2585
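// Emit code for shift-by-immediate instructions: SLL/SRL/SRA plus the
// 64-bit DSLL/DSRL/DSRA variants, including the DSxx32 forms whose shift
// amount is 32 or more.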
2586void shiftimm_assemble(int i,struct regstat *i_regs)
2587{
2588 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2589 {
2590 if(rt1[i]) {
2591 signed char s,t;
2592 t=get_reg(i_regs->regmap,rt1[i]);
2593 s=get_reg(i_regs->regmap,rs1[i]);
2594 //assert(t>=0);
2595 if(t>=0){
2596 if(rs1[i]==0)
2597 {
2598 emit_zeroreg(t);
2599 }
2600 else
2601 {
2602 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2603 if(imm[i]) {
2604 if(opcode2[i]==0) // SLL
2605 {
2606 emit_shlimm(s<0?t:s,imm[i],t);
2607 }
2608 if(opcode2[i]==2) // SRL
2609 {
2610 emit_shrimm(s<0?t:s,imm[i],t);
2611 }
2612 if(opcode2[i]==3) // SRA
2613 {
2614 emit_sarimm(s<0?t:s,imm[i],t);
2615 }
2616 }else{
2617 // Shift by zero
2618 if(s>=0 && s!=t) emit_mov(s,t);
2619 }
2620 }
2621 }
2622 //emit_storereg(rt1[i],t); //DEBUG
2623 }
2624 }
2625 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2626 {
2627 if(rt1[i]) {
2628 signed char sh,sl,th,tl;
2629 th=get_reg(i_regs->regmap,rt1[i]|64);
2630 tl=get_reg(i_regs->regmap,rt1[i]);
2631 sh=get_reg(i_regs->regmap,rs1[i]|64);
2632 sl=get_reg(i_regs->regmap,rs1[i]);
2633 if(tl>=0) {
2634 if(rs1[i]==0)
2635 {
2636 emit_zeroreg(tl);
2637 if(th>=0) emit_zeroreg(th);
2638 }
2639 else
2640 {
2641 assert(sl>=0);
2642 assert(sh>=0);
2643 if(imm[i]) {
2644 if(opcode2[i]==0x38) // DSLL
2645 {
2646 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2647 emit_shlimm(sl,imm[i],tl);
2648 }
2649 if(opcode2[i]==0x3a) // DSRL
2650 {
2651 emit_shrdimm(sl,sh,imm[i],tl);
2652 if(th>=0) emit_shrimm(sh,imm[i],th);
2653 }
2654 if(opcode2[i]==0x3b) // DSRA
2655 {
2656 emit_shrdimm(sl,sh,imm[i],tl);
2657 if(th>=0) emit_sarimm(sh,imm[i],th);
2658 }
2659 }else{
2660 // Shift by zero
2661 if(sl!=tl) emit_mov(sl,tl);
2662 if(th>=0&&sh!=th) emit_mov(sh,th);
2663 }
2664 }
2665 }
2666 }
2667 }
2668 if(opcode2[i]==0x3c) // DSLL32
2669 {
2670 if(rt1[i]) {
2671 signed char sl,tl,th;
2672 tl=get_reg(i_regs->regmap,rt1[i]);
2673 th=get_reg(i_regs->regmap,rt1[i]|64);
2674 sl=get_reg(i_regs->regmap,rs1[i]);
2675 if(th>=0||tl>=0){
2676 assert(tl>=0);
2677 assert(th>=0);
2678 assert(sl>=0);
2679 emit_mov(sl,th);
2680 emit_zeroreg(tl);
2681 if(imm[i]>32)
2682 {
2683 emit_shlimm(th,imm[i]&31,th);
2684 }
2685 }
2686 }
2687 }
2688 if(opcode2[i]==0x3e) // DSRL32
2689 {
2690 if(rt1[i]) {
2691 signed char sh,tl,th;
2692 tl=get_reg(i_regs->regmap,rt1[i]);
2693 th=get_reg(i_regs->regmap,rt1[i]|64);
2694 sh=get_reg(i_regs->regmap,rs1[i]|64);
2695 if(tl>=0){
2696 assert(sh>=0);
2697 emit_mov(sh,tl);
2698 if(th>=0) emit_zeroreg(th);
2699 if(imm[i]>32)
2700 {
2701 emit_shrimm(tl,imm[i]&31,tl);
2702 }
2703 }
2704 }
2705 }
2706 if(opcode2[i]==0x3f) // DSRA32
2707 {
2708 if(rt1[i]) {
2709 signed char sh,tl;
2710 tl=get_reg(i_regs->regmap,rt1[i]);
2711 sh=get_reg(i_regs->regmap,rs1[i]|64);
2712 if(tl>=0){
2713 assert(sh>=0);
2714 emit_mov(sh,tl);
2715 if(imm[i]>32)
2716 {
2717 emit_sarimm(tl,imm[i]&31,tl);
2718 }
2719 }
2720 }
2721 }
2722}
2723
2724#ifndef shift_assemble
2725void shift_assemble(int i,struct regstat *i_regs)
2726{
2727 printf("Need shift_assemble for this architecture.\n");
2728 exit(1);
2729}
2730#endif
2731
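// Emit code for aligned loads (LB/LBU/LH/LHU/LW/LWU/LD).  The effective
// address is range-checked against the RAM area (or run through the TLB
// map), with a stub queued for the slow path; loads from a known constant
// address outside RAM are replaced by an inline read stub.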
2732void load_assemble(int i,struct regstat *i_regs)
2733{
2734 int s,th,tl,addr,map=-1;
2735 int offset;
2736 int jaddr=0;
2737 int memtarget=0,c=0;
2738 u_int hr,reglist=0;
2739 th=get_reg(i_regs->regmap,rt1[i]|64);
2740 tl=get_reg(i_regs->regmap,rt1[i]);
2741 s=get_reg(i_regs->regmap,rs1[i]);
2742 offset=imm[i];
2743 for(hr=0;hr<HOST_REGS;hr++) {
2744 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2745 }
2746 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2747 if(s>=0) {
2748 c=(i_regs->wasconst>>s)&1;
2749 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80800000;
2750 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2751 }
2752 //printf("load_assemble: c=%d\n",c);
2753 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2754 // FIXME: Even if the load is a NOP, we should check for pagefaults...
2755#ifdef PCSX
2756 if(tl<0) {
2757 if(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80) {
2758 // could be FIFO, must perform the read
2759 assem_debug("(forced read)\n");
2760 tl=get_reg(i_regs->regmap,-1);
2761 assert(tl>=0);
2762 }
2763 }
2764 if(offset||s<0||c) addr=tl;
2765 else addr=s;
2766#endif
2767 if(tl>=0) {
2768 //assert(tl>=0);
2769 //assert(rt1[i]);
2770 reglist&=~(1<<tl);
2771 if(th>=0) reglist&=~(1<<th);
2772 if(!using_tlb) {
2773 if(!c) {
2774//#define R29_HACK 1
2775 #ifdef R29_HACK
2776 // Strmnnrmn's speed hack
2777 if(rs1[i]!=29||start<0x80001000||start>=0x80800000)
2778 #endif
2779 {
2780 emit_cmpimm(addr,0x800000);
2781 jaddr=(int)out;
2782 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2783 // Hint to branch predictor that the branch is unlikely to be taken
2784 if(rs1[i]>=28)
2785 emit_jno_unlikely(0);
2786 else
2787 #endif
2788 emit_jno(0);
2789 }
2790 }
2791 }else{ // using tlb
2792 int x=0;
2793 if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2794 if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2795 map=get_reg(i_regs->regmap,TLREG);
2796 assert(map>=0);
2797 map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2798 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2799 }
2800 if (opcode[i]==0x20) { // LB
2801 if(!c||memtarget) {
2802 #ifdef HOST_IMM_ADDR32
2803 if(c)
2804 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2805 else
2806 #endif
2807 {
2808 //emit_xorimm(addr,3,tl);
2809 //gen_tlb_addr_r(tl,map);
2810 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2811 int x=0;
2812#ifdef BIG_ENDIAN_MIPS
2813 if(!c) emit_xorimm(addr,3,tl);
2814 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2815#else
2816 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2817 else if (tl!=addr) emit_mov(addr,tl);
2818#endif
2819 emit_movsbl_indexed_tlb(x,tl,map,tl);
2820 }
2821 if(jaddr)
2822 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2823 }
2824 else
2825 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2826 }
2827 if (opcode[i]==0x21) { // LH
2828 if(!c||memtarget) {
2829 #ifdef HOST_IMM_ADDR32
2830 if(c)
2831 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2832 else
2833 #endif
2834 {
2835 int x=0;
2836#ifdef BIG_ENDIAN_MIPS
2837 if(!c) emit_xorimm(addr,2,tl);
2838 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2839#else
2840 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2841 else if (tl!=addr) emit_mov(addr,tl);
2842#endif
2843 //#ifdef
2844 //emit_movswl_indexed_tlb(x,tl,map,tl);
2845 //else
2846 if(map>=0) {
2847 gen_tlb_addr_r(tl,map);
2848 emit_movswl_indexed(x,tl,tl);
2849 }else
2850 emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
2851 }
2852 if(jaddr)
2853 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2854 }
2855 else
2856 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2857 }
2858 if (opcode[i]==0x23) { // LW
2859 if(!c||memtarget) {
2860 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2861 #ifdef HOST_IMM_ADDR32
2862 if(c)
2863 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2864 else
2865 #endif
2866 emit_readword_indexed_tlb(0,addr,map,tl);
2867 if(jaddr)
2868 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2869 }
2870 else
2871 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2872 }
2873 if (opcode[i]==0x24) { // LBU
2874 if(!c||memtarget) {
2875 #ifdef HOST_IMM_ADDR32
2876 if(c)
2877 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2878 else
2879 #endif
2880 {
2881 //emit_xorimm(addr,3,tl);
2882 //gen_tlb_addr_r(tl,map);
2883 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2884 int x=0;
2885#ifdef BIG_ENDIAN_MIPS
2886 if(!c) emit_xorimm(addr,3,tl);
2887 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2888#else
2889 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2890 else if (tl!=addr) emit_mov(addr,tl);
2891#endif
2892 emit_movzbl_indexed_tlb(x,tl,map,tl);
2893 }
2894 if(jaddr)
2895 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2896 }
2897 else
2898 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2899 }
2900 if (opcode[i]==0x25) { // LHU
2901 if(!c||memtarget) {
2902 #ifdef HOST_IMM_ADDR32
2903 if(c)
2904 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2905 else
2906 #endif
2907 {
2908 int x=0;
2909#ifdef BIG_ENDIAN_MIPS
2910 if(!c) emit_xorimm(addr,2,tl);
2911 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2912#else
2913 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
2914 else if (tl!=addr) emit_mov(addr,tl);
2915#endif
2916 //#ifdef
2917 //emit_movzwl_indexed_tlb(x,tl,map,tl);
2918 //#else
2919 if(map>=0) {
2920 gen_tlb_addr_r(tl,map);
2921 emit_movzwl_indexed(x,tl,tl);
2922 }else
2923 emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
2924 if(jaddr)
2925 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2926 }
2927 }
2928 else
2929 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2930 }
2931 if (opcode[i]==0x27) { // LWU
2932 assert(th>=0);
2933 if(!c||memtarget) {
2934 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2935 #ifdef HOST_IMM_ADDR32
2936 if(c)
2937 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2938 else
2939 #endif
2940 emit_readword_indexed_tlb(0,addr,map,tl);
2941 if(jaddr)
2942 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2943 }
2944 else {
2945 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2946 }
2947 emit_zeroreg(th);
2948 }
2949 if (opcode[i]==0x37) { // LD
2950 if(!c||memtarget) {
2951 //gen_tlb_addr_r(tl,map);
2952 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2953 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2954 #ifdef HOST_IMM_ADDR32
2955 if(c)
2956 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2957 else
2958 #endif
2959 emit_readdword_indexed_tlb(0,addr,map,th,tl);
2960 if(jaddr)
2961 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2962 }
2963 else
2964 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2965 }
2966 //emit_storereg(rt1[i],tl); // DEBUG
2967 }
2968 //if(opcode[i]==0x23)
2969 //if(opcode[i]==0x24)
2970 //if(opcode[i]==0x23||opcode[i]==0x24)
2971 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2972 {
2973 //emit_pusha();
2974 save_regs(0x100f);
2975 emit_readword((int)&last_count,ECX);
2976 #ifdef __i386__
2977 if(get_reg(i_regs->regmap,CCREG)<0)
2978 emit_loadreg(CCREG,HOST_CCREG);
2979 emit_add(HOST_CCREG,ECX,HOST_CCREG);
2980 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2981 emit_writeword(HOST_CCREG,(int)&Count);
2982 #endif
2983 #ifdef __arm__
2984 if(get_reg(i_regs->regmap,CCREG)<0)
2985 emit_loadreg(CCREG,0);
2986 else
2987 emit_mov(HOST_CCREG,0);
2988 emit_add(0,ECX,0);
2989 emit_addimm(0,2*ccadj[i],0);
2990 emit_writeword(0,(int)&Count);
2991 #endif
2992 emit_call((int)memdebug);
2993 //emit_popa();
2994 restore_regs(0x100f);
2995 }/**/
2996}
2997
2998#ifndef loadlr_assemble
2999void loadlr_assemble(int i,struct regstat *i_regs)
3000{
3001 printf("Need loadlr_assemble for this architecture.\n");
3002 exit(1);
3003}
3004#endif
3005
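// Emit code for aligned stores (SB/SH/SW/SD).  As with loads, the address
// is range-checked or translated and a stub covers the slow path; on the
// fast path the invalid_code table is then consulted (INVCODE_STUB) so that
// writes hitting already-compiled code can be detected.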
3006void store_assemble(int i,struct regstat *i_regs)
3007{
3008 int s,th,tl,map=-1;
3009 int addr,temp;
3010 int offset;
3011 int jaddr=0,jaddr2,type;
3012 int memtarget=0,c=0;
3013 int agr=AGEN1+(i&1);
3014 u_int hr,reglist=0;
3015 th=get_reg(i_regs->regmap,rs2[i]|64);
3016 tl=get_reg(i_regs->regmap,rs2[i]);
3017 s=get_reg(i_regs->regmap,rs1[i]);
3018 temp=get_reg(i_regs->regmap,agr);
3019 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3020 offset=imm[i];
3021 if(s>=0) {
3022 c=(i_regs->wasconst>>s)&1;
3023 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80800000;
3024 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3025 }
3026 assert(tl>=0);
3027 assert(temp>=0);
3028 for(hr=0;hr<HOST_REGS;hr++) {
3029 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3030 }
3031 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3032 if(offset||s<0||c) addr=temp;
3033 else addr=s;
3034 if(!using_tlb) {
3035 if(!c) {
3036 #ifdef R29_HACK
3037 // Strmnnrmn's speed hack
3038 memtarget=1;
3039 if(rs1[i]!=29||start<0x80001000||start>=0x80800000)
3040 #endif
3041 emit_cmpimm(addr,0x800000);
3042 #ifdef DESTRUCTIVE_SHIFT
3043 if(s==addr) emit_mov(s,temp);
3044 #endif
3045 #ifdef R29_HACK
3046 if(rs1[i]!=29||start<0x80001000||start>=0x80800000)
3047 #endif
3048 {
3049 jaddr=(int)out;
3050 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3051 // Hint to branch predictor that the branch is unlikely to be taken
3052 if(rs1[i]>=28)
3053 emit_jno_unlikely(0);
3054 else
3055 #endif
3056 emit_jno(0);
3057 }
3058 }
3059 }else{ // using tlb
3060 int x=0;
3061 if (opcode[i]==0x28) x=3; // SB
3062 if (opcode[i]==0x29) x=2; // SH
3063 map=get_reg(i_regs->regmap,TLREG);
3064 assert(map>=0);
3065 map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3066 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3067 }
3068
3069 if (opcode[i]==0x28) { // SB
3070 if(!c||memtarget) {
3071 int x=0;
3072#ifdef BIG_ENDIAN_MIPS
3073 if(!c) emit_xorimm(addr,3,temp);
3074 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3075#else
3076 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3077 else if (addr!=temp) emit_mov(addr,temp);
3078#endif
3079 //gen_tlb_addr_w(temp,map);
3080 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3081 emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
3082 }
3083 type=STOREB_STUB;
3084 }
3085 if (opcode[i]==0x29) { // SH
3086 if(!c||memtarget) {
3087 int x=0;
3088#ifdef BIG_ENDIAN_MIPS
3089 if(!c) emit_xorimm(addr,2,temp);
3090 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3091#else
3092 if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
3093 else if (addr!=temp) emit_mov(addr,temp);
3094#endif
3095 //#ifdef
3096 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3097 //#else
3098 if(map>=0) {
3099 gen_tlb_addr_w(temp,map);
3100 emit_writehword_indexed(tl,x,temp);
3101 }else
3102 emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
3103 }
3104 type=STOREH_STUB;
3105 }
3106 if (opcode[i]==0x2B) { // SW
3107 if(!c||memtarget)
3108 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3109 emit_writeword_indexed_tlb(tl,0,addr,map,temp);
3110 type=STOREW_STUB;
3111 }
3112 if (opcode[i]==0x3F) { // SD
3113 if(!c||memtarget) {
3114 if(rs2[i]) {
3115 assert(th>=0);
3116 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3117 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3118 emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
3119 }else{
3120 // Store zero
3121 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3122 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3123 emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
3124 }
3125 }
3126 type=STORED_STUB;
3127 }
3128 if(!using_tlb&&(!c||memtarget))
3129 // addr could be a temp, make sure it survives STORE*_STUB
3130 reglist|=1<<addr;
3131 if(jaddr) {
3132 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3133 } else if(!memtarget) {
3134 inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3135 }
3136 if(!using_tlb) {
3137 if(!c||memtarget) {
3138 #ifdef DESTRUCTIVE_SHIFT
3139 // The x86 shift operation is 'destructive'; it overwrites the
3140 // source register, so we need to make a copy first and use that.
3141 addr=temp;
3142 #endif
3143 #if defined(HOST_IMM8)
3144 int ir=get_reg(i_regs->regmap,INVCP);
3145 assert(ir>=0);
3146 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3147 #else
3148 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3149 #endif
3150 jaddr2=(int)out;
3151 emit_jne(0);
3152 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3153 }
3154 }
3155 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3156 //if(opcode[i]==0x2B || opcode[i]==0x28)
3157 //if(opcode[i]==0x2B || opcode[i]==0x29)
3158 //if(opcode[i]==0x2B)
3159 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3160 {
3161 //emit_pusha();
3162 save_regs(0x100f);
3163 emit_readword((int)&last_count,ECX);
3164 #ifdef __i386__
3165 if(get_reg(i_regs->regmap,CCREG)<0)
3166 emit_loadreg(CCREG,HOST_CCREG);
3167 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3168 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3169 emit_writeword(HOST_CCREG,(int)&Count);
3170 #endif
3171 #ifdef __arm__
3172 if(get_reg(i_regs->regmap,CCREG)<0)
3173 emit_loadreg(CCREG,0);
3174 else
3175 emit_mov(HOST_CCREG,0);
3176 emit_add(0,ECX,0);
3177 emit_addimm(0,2*ccadj[i],0);
3178 emit_writeword(0,(int)&Count);
3179 #endif
3180 emit_call((int)memdebug);
3181 //emit_popa();
3182 restore_regs(0x100f);
3183 }/**/
3184}
3185
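// Emit code for the unaligned stores SWL/SWR/SDL/SDR.  The low two address
// bits are tested at run time and one of four alignment cases is taken;
// SDL/SDR additionally test bit 2 and write the other word of the
// doubleword when needed.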
3186void storelr_assemble(int i,struct regstat *i_regs)
3187{
3188 int s,th,tl;
3189 int temp;
3190 int temp2;
3191 int offset;
3192 int jaddr=0,jaddr2;
3193 int case1,case2,case3;
3194 int done0,done1,done2;
3195  int memtarget=0,c=0;
3196 u_int hr,reglist=0;
3197 th=get_reg(i_regs->regmap,rs2[i]|64);
3198 tl=get_reg(i_regs->regmap,rs2[i]);
3199 s=get_reg(i_regs->regmap,rs1[i]);
3200 temp=get_reg(i_regs->regmap,-1);
3201 offset=imm[i];
3202 if(s>=0) {
3203 c=(i_regs->isconst>>s)&1;
3204 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80800000;
3205 if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3206 }
3207 assert(tl>=0);
3208 for(hr=0;hr<HOST_REGS;hr++) {
3209 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3210 }
3211 if(tl>=0) {
3212 assert(temp>=0);
3213 if(!using_tlb) {
3214 if(!c) {
3215 emit_cmpimm(s<0||offset?temp:s,0x800000);
3216 if(!offset&&s!=temp) emit_mov(s,temp);
3217 jaddr=(int)out;
3218 emit_jno(0);
3219 }
3220 else
3221 {
3222 if(!memtarget||!rs1[i]) {
3223 jaddr=(int)out;
3224 emit_jmp(0);
3225 }
3226 }
3227 if((u_int)rdram!=0x80000000)
3228 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3229 }else{ // using tlb
3230 int map=get_reg(i_regs->regmap,TLREG);
3231 assert(map>=0);
3232 map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3233 if(!c&&!offset&&s>=0) emit_mov(s,temp);
3234 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3235 if(!jaddr&&!memtarget) {
3236 jaddr=(int)out;
3237 emit_jmp(0);
3238 }
3239 gen_tlb_addr_w(temp,map);
3240 }
3241
3242 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3243 temp2=get_reg(i_regs->regmap,FTEMP);
3244 if(!rs2[i]) temp2=th=tl;
3245 }
3246
3247#ifndef BIG_ENDIAN_MIPS
3248 emit_xorimm(temp,3,temp);
3249#endif
3250 emit_testimm(temp,2);
3251 case2=(int)out;
3252 emit_jne(0);
3253 emit_testimm(temp,1);
3254 case1=(int)out;
3255 emit_jne(0);
3256 // 0
3257 if (opcode[i]==0x2A) { // SWL
3258 emit_writeword_indexed(tl,0,temp);
3259 }
3260 if (opcode[i]==0x2E) { // SWR
3261 emit_writebyte_indexed(tl,3,temp);
3262 }
3263 if (opcode[i]==0x2C) { // SDL
3264 emit_writeword_indexed(th,0,temp);
3265 if(rs2[i]) emit_mov(tl,temp2);
3266 }
3267 if (opcode[i]==0x2D) { // SDR
3268 emit_writebyte_indexed(tl,3,temp);
3269 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3270 }
3271 done0=(int)out;
3272 emit_jmp(0);
3273 // 1
3274 set_jump_target(case1,(int)out);
3275 if (opcode[i]==0x2A) { // SWL
3276 // Write 3 msb into three least significant bytes
3277 if(rs2[i]) emit_rorimm(tl,8,tl);
3278 emit_writehword_indexed(tl,-1,temp);
3279 if(rs2[i]) emit_rorimm(tl,16,tl);
3280 emit_writebyte_indexed(tl,1,temp);
3281 if(rs2[i]) emit_rorimm(tl,8,tl);
3282 }
3283 if (opcode[i]==0x2E) { // SWR
3284 // Write two lsb into two most significant bytes
3285 emit_writehword_indexed(tl,1,temp);
3286 }
3287 if (opcode[i]==0x2C) { // SDL
3288 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3289 // Write 3 msb into three least significant bytes
3290 if(rs2[i]) emit_rorimm(th,8,th);
3291 emit_writehword_indexed(th,-1,temp);
3292 if(rs2[i]) emit_rorimm(th,16,th);
3293 emit_writebyte_indexed(th,1,temp);
3294 if(rs2[i]) emit_rorimm(th,8,th);
3295 }
3296 if (opcode[i]==0x2D) { // SDR
3297 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3298 // Write two lsb into two most significant bytes
3299 emit_writehword_indexed(tl,1,temp);
3300 }
3301 done1=(int)out;
3302 emit_jmp(0);
3303 // 2
3304 set_jump_target(case2,(int)out);
3305 emit_testimm(temp,1);
3306 case3=(int)out;
3307 emit_jne(0);
3308 if (opcode[i]==0x2A) { // SWL
3309 // Write two msb into two least significant bytes
3310 if(rs2[i]) emit_rorimm(tl,16,tl);
3311 emit_writehword_indexed(tl,-2,temp);
3312 if(rs2[i]) emit_rorimm(tl,16,tl);
3313 }
3314 if (opcode[i]==0x2E) { // SWR
3315 // Write 3 lsb into three most significant bytes
3316 emit_writebyte_indexed(tl,-1,temp);
3317 if(rs2[i]) emit_rorimm(tl,8,tl);
3318 emit_writehword_indexed(tl,0,temp);
3319 if(rs2[i]) emit_rorimm(tl,24,tl);
3320 }
3321 if (opcode[i]==0x2C) { // SDL
3322 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3323 // Write two msb into two least significant bytes
3324 if(rs2[i]) emit_rorimm(th,16,th);
3325 emit_writehword_indexed(th,-2,temp);
3326 if(rs2[i]) emit_rorimm(th,16,th);
3327 }
3328 if (opcode[i]==0x2D) { // SDR
3329 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3330 // Write 3 lsb into three most significant bytes
3331 emit_writebyte_indexed(tl,-1,temp);
3332 if(rs2[i]) emit_rorimm(tl,8,tl);
3333 emit_writehword_indexed(tl,0,temp);
3334 if(rs2[i]) emit_rorimm(tl,24,tl);
3335 }
3336 done2=(int)out;
3337 emit_jmp(0);
3338 // 3
3339 set_jump_target(case3,(int)out);
3340 if (opcode[i]==0x2A) { // SWL
3341 // Write msb into least significant byte
3342 if(rs2[i]) emit_rorimm(tl,24,tl);
3343 emit_writebyte_indexed(tl,-3,temp);
3344 if(rs2[i]) emit_rorimm(tl,8,tl);
3345 }
3346 if (opcode[i]==0x2E) { // SWR
3347 // Write entire word
3348 emit_writeword_indexed(tl,-3,temp);
3349 }
3350 if (opcode[i]==0x2C) { // SDL
3351 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3352 // Write msb into least significant byte
3353 if(rs2[i]) emit_rorimm(th,24,th);
3354 emit_writebyte_indexed(th,-3,temp);
3355 if(rs2[i]) emit_rorimm(th,8,th);
3356 }
3357 if (opcode[i]==0x2D) { // SDR
3358 if(rs2[i]) emit_mov(th,temp2);
3359 // Write entire word
3360 emit_writeword_indexed(tl,-3,temp);
3361 }
3362 set_jump_target(done0,(int)out);
3363 set_jump_target(done1,(int)out);
3364 set_jump_target(done2,(int)out);
3365 if (opcode[i]==0x2C) { // SDL
3366 emit_testimm(temp,4);
3367 done0=(int)out;
3368 emit_jne(0);
3369 emit_andimm(temp,~3,temp);
3370 emit_writeword_indexed(temp2,4,temp);
3371 set_jump_target(done0,(int)out);
3372 }
3373 if (opcode[i]==0x2D) { // SDR
3374 emit_testimm(temp,4);
3375 done0=(int)out;
3376 emit_jeq(0);
3377 emit_andimm(temp,~3,temp);
3378 emit_writeword_indexed(temp2,-4,temp);
3379 set_jump_target(done0,(int)out);
3380 }
3381 if(!c||!memtarget)
3382 add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3383 }
3384 if(!using_tlb) {
3385 emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3386 #if defined(HOST_IMM8)
3387 int ir=get_reg(i_regs->regmap,INVCP);
3388 assert(ir>=0);
3389 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3390 #else
3391 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3392 #endif
3393 jaddr2=(int)out;
3394 emit_jne(0);
3395 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3396 }
3397 /*
3398 emit_pusha();
3399 //save_regs(0x100f);
3400 emit_readword((int)&last_count,ECX);
3401 if(get_reg(i_regs->regmap,CCREG)<0)
3402 emit_loadreg(CCREG,HOST_CCREG);
3403 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3404 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3405 emit_writeword(HOST_CCREG,(int)&Count);
3406 emit_call((int)memdebug);
3407 emit_popa();
3408 //restore_regs(0x100f);
3409 /**/
3410}
3411
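// Emit code for the COP1 loads/stores LWC1/LDC1/SWC1/SDC1.  The CU1 bit in
// the Status register is tested first (FP_STUB if the FPU is unusable),
// the float/double register address is fetched from reg_cop1_simple/
// reg_cop1_double, and the memory access goes through the usual range
// check or TLB translation.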
3412void c1ls_assemble(int i,struct regstat *i_regs)
3413{
3414#ifndef DISABLE_COP1
3415 int s,th,tl;
3416 int temp,ar;
3417 int map=-1;
3418 int offset;
3419 int c=0;
3420 int jaddr,jaddr2=0,jaddr3,type;
3421 int agr=AGEN1+(i&1);
3422 u_int hr,reglist=0;
3423 th=get_reg(i_regs->regmap,FTEMP|64);
3424 tl=get_reg(i_regs->regmap,FTEMP);
3425 s=get_reg(i_regs->regmap,rs1[i]);
3426 temp=get_reg(i_regs->regmap,agr);
3427 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3428 offset=imm[i];
3429 assert(tl>=0);
3430 assert(rs1[i]>0);
3431 assert(temp>=0);
3432 for(hr=0;hr<HOST_REGS;hr++) {
3433 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3434 }
3435 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3436 if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3437 {
3438 // Loads use a temporary register which we need to save
3439 reglist|=1<<temp;
3440 }
3441 if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3442 ar=temp;
3443 else // LWC1/LDC1
3444 ar=tl;
3445 //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3446 //else c=(i_regs->wasconst>>s)&1;
3447 if(s>=0) c=(i_regs->wasconst>>s)&1;
3448 // Check cop1 unusable
3449 if(!cop1_usable) {
3450 signed char rs=get_reg(i_regs->regmap,CSREG);
3451 assert(rs>=0);
3452 emit_testimm(rs,0x20000000);
3453 jaddr=(int)out;
3454 emit_jeq(0);
3455 add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3456 cop1_usable=1;
3457 }
3458 if (opcode[i]==0x39) { // SWC1 (get float address)
3459 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3460 }
3461 if (opcode[i]==0x3D) { // SDC1 (get double address)
3462 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3463 }
3464 // Generate address + offset
3465 if(!using_tlb) {
3466 if(!c)
3467 emit_cmpimm(offset||c||s<0?ar:s,0x800000);
3468 }
3469 else
3470 {
3471 map=get_reg(i_regs->regmap,TLREG);
3472 assert(map>=0);
3473 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3474 map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3475 }
3476 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3477 map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3478 }
3479 }
3480 if (opcode[i]==0x39) { // SWC1 (read float)
3481 emit_readword_indexed(0,tl,tl);
3482 }
3483 if (opcode[i]==0x3D) { // SDC1 (read double)
3484 emit_readword_indexed(4,tl,th);
3485 emit_readword_indexed(0,tl,tl);
3486 }
3487 if (opcode[i]==0x31) { // LWC1 (get target address)
3488 emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3489 }
3490 if (opcode[i]==0x35) { // LDC1 (get target address)
3491 emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3492 }
3493 if(!using_tlb) {
3494 if(!c) {
3495 jaddr2=(int)out;
3496 emit_jno(0);
3497 }
3498 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80800000) {
3499 jaddr2=(int)out;
3500 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3501 }
3502 #ifdef DESTRUCTIVE_SHIFT
3503 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3504 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3505 }
3506 #endif
3507 }else{
3508 if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3509 do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3510 }
3511 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3512 do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3513 }
3514 }
3515 if (opcode[i]==0x31) { // LWC1
3516 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3517 //gen_tlb_addr_r(ar,map);
3518 //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3519 #ifdef HOST_IMM_ADDR32
3520 if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3521 else
3522 #endif
3523 emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3524 type=LOADW_STUB;
3525 }
3526 if (opcode[i]==0x35) { // LDC1
3527 assert(th>=0);
3528 //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3529 //gen_tlb_addr_r(ar,map);
3530 //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3531 //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3532 #ifdef HOST_IMM_ADDR32
3533 if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3534 else
3535 #endif
3536 emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3537 type=LOADD_STUB;
3538 }
3539 if (opcode[i]==0x39) { // SWC1
3540 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3541 emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3542 type=STOREW_STUB;
3543 }
3544 if (opcode[i]==0x3D) { // SDC1
3545 assert(th>=0);
3546 //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3547 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3548 emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3549 type=STORED_STUB;
3550 }
3551 if(!using_tlb) {
3552 if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3553 #ifndef DESTRUCTIVE_SHIFT
3554 temp=offset||c||s<0?ar:s;
3555 #endif
3556 #if defined(HOST_IMM8)
3557 int ir=get_reg(i_regs->regmap,INVCP);
3558 assert(ir>=0);
3559 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3560 #else
3561 emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3562 #endif
3563 jaddr3=(int)out;
3564 emit_jne(0);
3565 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3566 }
3567 }
3568 if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3569 if (opcode[i]==0x31) { // LWC1 (write float)
3570 emit_writeword_indexed(tl,0,temp);
3571 }
3572 if (opcode[i]==0x35) { // LDC1 (write double)
3573 emit_writeword_indexed(th,4,temp);
3574 emit_writeword_indexed(tl,0,temp);
3575 }
3576 //if(opcode[i]==0x39)
3577 /*if(opcode[i]==0x39||opcode[i]==0x31)
3578 {
3579 emit_pusha();
3580 emit_readword((int)&last_count,ECX);
3581 if(get_reg(i_regs->regmap,CCREG)<0)
3582 emit_loadreg(CCREG,HOST_CCREG);
3583 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3584 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3585 emit_writeword(HOST_CCREG,(int)&Count);
3586 emit_call((int)memdebug);
3587 emit_popa();
3588 }/**/
3589#else
3590 cop1_unusable(i, i_regs);
3591#endif
3592}
3593
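// Emit code for the GTE loads/stores LWC2/SWC2.  The COP2 data register is
// transferred via cop2_get_dreg/cop2_put_dreg and the memory access reuses
// the ordinary range-check and stub machinery; the TLB path is never taken
// here (assert(!using_tlb)).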
3594void c2ls_assemble(int i,struct regstat *i_regs)
3595{
3596 int s,tl;
3597 int ar;
3598 int offset;
3599 int c=0;
3600  int jaddr2=0,jaddr3,type;
3601 int agr=AGEN1+(i&1);
3602 u_int hr,reglist=0;
3603 u_int copr=(source[i]>>16)&0x1f;
3604 s=get_reg(i_regs->regmap,rs1[i]);
3605 tl=get_reg(i_regs->regmap,FTEMP);
3606 offset=imm[i];
3607 assert(rs1[i]>0);
3608 assert(tl>=0);
3609 assert(!using_tlb);
3610
3611 for(hr=0;hr<HOST_REGS;hr++) {
3612 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3613 }
3614 if(i_regs->regmap[HOST_CCREG]==CCREG)
3615 reglist&=~(1<<HOST_CCREG);
3616
3617 // get the address
3618 if (opcode[i]==0x3a) { // SWC2
3619 ar=get_reg(i_regs->regmap,agr);
3620 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3621 reglist|=1<<ar;
3622 } else { // LWC2
3623 ar=tl;
3624 }
3625 if (!offset&&!c&&s>=0) ar=s;
3626 assert(ar>=0);
3627
3628 if (opcode[i]==0x3a) { // SWC2
3629 cop2_get_dreg(copr,tl,HOST_TEMPREG);
3630 }
3631 if(s>=0) c=(i_regs->wasconst>>s)&1;
3632 if(!c) {
3633 emit_cmpimm(offset||c||s<0?ar:s,0x800000);
3634 jaddr2=(int)out;
3635 emit_jno(0);
3636 }
3637 else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80800000) {
3638 jaddr2=(int)out;
3639 emit_jmp(0); // inline_readstub/inline_writestub? Very rare case
3640 }
3641 if (opcode[i]==0x32) { // LWC2
3642 #ifdef HOST_IMM_ADDR32
3643 if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3644 else
3645 #endif
3646 emit_readword_indexed(0,ar,tl);
3647 type=LOADW_STUB;
3648 }
3649 if (opcode[i]==0x3a) { // SWC2
3650#ifdef DESTRUCTIVE_SHIFT
3651 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3652#endif
3653 emit_writeword_indexed(tl,0,ar);
3654 type=STOREW_STUB;
3655 }
3656 if(jaddr2)
3657 add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3658 if (opcode[i]==0x3a) { // SWC2
3659#if defined(HOST_IMM8)
3660 int ir=get_reg(i_regs->regmap,INVCP);
3661 assert(ir>=0);
3662 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3663#else
3664 emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3665#endif
3666 jaddr3=(int)out;
3667 emit_jne(0);
3668 add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3669 }
3670 if (opcode[i]==0x32) { // LWC2
3671 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3672 }
3673}
3674
3675#ifndef multdiv_assemble
3676void multdiv_assemble(int i,struct regstat *i_regs)
3677{
3678 printf("Need multdiv_assemble for this architecture.\n");
3679 exit(1);
3680}
3681#endif
3682
3683void mov_assemble(int i,struct regstat *i_regs)
3684{
3685 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3686 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3687 assert(rt1[i]>0);
3688 if(rt1[i]) {
3689 signed char sh,sl,th,tl;
3690 th=get_reg(i_regs->regmap,rt1[i]|64);
3691 tl=get_reg(i_regs->regmap,rt1[i]);
3692 //assert(tl>=0);
3693 if(tl>=0) {
3694 sh=get_reg(i_regs->regmap,rs1[i]|64);
3695 sl=get_reg(i_regs->regmap,rs1[i]);
3696 if(sl>=0) emit_mov(sl,tl);
3697 else emit_loadreg(rs1[i],tl);
3698 if(th>=0) {
3699 if(sh>=0) emit_mov(sh,th);
3700 else emit_loadreg(rs1[i]|64,th);
3701 }
3702 }
3703 }
3704}
3705
3706#ifndef fconv_assemble
3707void fconv_assemble(int i,struct regstat *i_regs)
3708{
3709 printf("Need fconv_assemble for this architecture.\n");
3710 exit(1);
3711}
3712#endif
3713
3714#if 0
3715void float_assemble(int i,struct regstat *i_regs)
3716{
3717 printf("Need float_assemble for this architecture.\n");
3718 exit(1);
3719}
3720#endif
3721
3722void syscall_assemble(int i,struct regstat *i_regs)
3723{
3724 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3725 assert(ccreg==HOST_CCREG);
3726 assert(!is_delayslot);
3727 emit_movimm(start+i*4,EAX); // Get PC
3728 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
3729 emit_jmp((int)jump_syscall_hle); // XXX
3730}
3731
3732void hlecall_assemble(int i,struct regstat *i_regs)
3733{
3734 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3735 assert(ccreg==HOST_CCREG);
3736 assert(!is_delayslot);
3737 emit_movimm(start+i*4+4,0); // Get PC
3738 emit_movimm((int)psxHLEt[source[i]&7],1);
3739 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3740 emit_jmp((int)jump_hlecall);
3741}
3742
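// Assemble the instruction in a branch delay slot.  is_delayslot is set
// around the dispatch so the per-type assemblers can account for it (e.g.
// the FP_STUB argument in c1ls_assemble and the asserts in syscall/hlecall).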
3743void ds_assemble(int i,struct regstat *i_regs)
3744{
3745 is_delayslot=1;
3746 switch(itype[i]) {
3747 case ALU:
3748 alu_assemble(i,i_regs);break;
3749 case IMM16:
3750 imm16_assemble(i,i_regs);break;
3751 case SHIFT:
3752 shift_assemble(i,i_regs);break;
3753 case SHIFTIMM:
3754 shiftimm_assemble(i,i_regs);break;
3755 case LOAD:
3756 load_assemble(i,i_regs);break;
3757 case LOADLR:
3758 loadlr_assemble(i,i_regs);break;
3759 case STORE:
3760 store_assemble(i,i_regs);break;
3761 case STORELR:
3762 storelr_assemble(i,i_regs);break;
3763 case COP0:
3764 cop0_assemble(i,i_regs);break;
3765 case COP1:
3766 cop1_assemble(i,i_regs);break;
3767 case C1LS:
3768 c1ls_assemble(i,i_regs);break;
3769 case COP2:
3770 cop2_assemble(i,i_regs);break;
3771 case C2LS:
3772 c2ls_assemble(i,i_regs);break;
3773 case C2OP:
3774 c2op_assemble(i,i_regs);break;
3775 case FCONV:
3776 fconv_assemble(i,i_regs);break;
3777 case FLOAT:
3778 float_assemble(i,i_regs);break;
3779 case FCOMP:
3780 fcomp_assemble(i,i_regs);break;
3781 case MULTDIV:
3782 multdiv_assemble(i,i_regs);break;
3783 case MOV:
3784 mov_assemble(i,i_regs);break;
3785 case SYSCALL:
3786 case HLECALL:
3787 case SPAN:
3788 case UJUMP:
3789 case RJUMP:
3790 case CJUMP:
3791 case SJUMP:
3792 case FJUMP:
3793 printf("Jump in the delay slot. This is probably a bug.\n");
3794 }
3795 is_delayslot=0;
3796}
3797
3798// Is the branch target a valid internal jump?
3799int internal_branch(uint64_t i_is32,int addr)
3800{
3801 if(addr&1) return 0; // Indirect (register) jump
3802 if(addr>=start && addr<start+slen*4-4)
3803 {
3804 int t=(addr-start)>>2;
3805 // Delay slots are not valid branch targets
3806 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3807 // 64 -> 32 bit transition requires a recompile
3808 /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3809 {
3810 if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3811 else printf("optimizable: yes\n");
3812 }*/
3813 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3814 if(requires_32bit[t]&~i_is32) return 0;
3815 else return 1;
3816 }
3817 return 0;
3818}
3819
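// Write back dirty values for guest registers that drop out of the entry
// register map (unless they are unneeded), then move values that remain
// mapped but migrate to a different host register.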
3820#ifndef wb_invalidate
3821void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3822 uint64_t u,uint64_t uu)
3823{
3824 int hr;
3825 for(hr=0;hr<HOST_REGS;hr++) {
3826 if(hr!=EXCLUDE_REG) {
3827 if(pre[hr]!=entry[hr]) {
3828 if(pre[hr]>=0) {
3829 if((dirty>>hr)&1) {
3830 if(get_reg(entry,pre[hr])<0) {
3831 if(pre[hr]<64) {
3832 if(!((u>>pre[hr])&1)) {
3833 emit_storereg(pre[hr],hr);
3834 if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3835 emit_sarimm(hr,31,hr);
3836 emit_storereg(pre[hr]|64,hr);
3837 }
3838 }
3839 }else{
3840 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3841 emit_storereg(pre[hr],hr);
3842 }
3843 }
3844 }
3845 }
3846 }
3847 }
3848 }
3849 }
3850 // Move from one register to another (no writeback)
3851 for(hr=0;hr<HOST_REGS;hr++) {
3852 if(hr!=EXCLUDE_REG) {
3853 if(pre[hr]!=entry[hr]) {
3854 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3855 int nr;
3856 if((nr=get_reg(entry,pre[hr]))>=0) {
3857 emit_mov(hr,nr);
3858 }
3859 }
3860 }
3861 }
3862 }
3863}
3864#endif
3865
3866// Load the specified registers
3867// This only loads the registers given as arguments because
3868// we don't want to load things that will be overwritten
3869void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3870{
3871 int hr;
3872 // Load 32-bit regs
3873 for(hr=0;hr<HOST_REGS;hr++) {
3874 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3875 if(entry[hr]!=regmap[hr]) {
3876 if(regmap[hr]==rs1||regmap[hr]==rs2)
3877 {
3878 if(regmap[hr]==0) {
3879 emit_zeroreg(hr);
3880 }
3881 else
3882 {
3883 emit_loadreg(regmap[hr],hr);
3884 }
3885 }
3886 }
3887 }
3888 }
3889 //Load 64-bit regs
3890 for(hr=0;hr<HOST_REGS;hr++) {
3891 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3892 if(entry[hr]!=regmap[hr]) {
3893 if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3894 {
3895 assert(regmap[hr]!=64);
3896 if((is32>>(regmap[hr]&63))&1) {
3897 int lr=get_reg(regmap,regmap[hr]-64);
3898 if(lr>=0)
3899 emit_sarimm(lr,31,hr);
3900 else
3901 emit_loadreg(regmap[hr],hr);
3902 }
3903 else
3904 {
3905 emit_loadreg(regmap[hr],hr);
3906 }
3907 }
3908 }
3909 }
3910 }
3911}
3912
3913// Load registers prior to the start of a loop
3914// so that they are not loaded within the loop
3915static void loop_preload(signed char pre[],signed char entry[])
3916{
3917 int hr;
3918 for(hr=0;hr<HOST_REGS;hr++) {
3919 if(hr!=EXCLUDE_REG) {
3920 if(pre[hr]!=entry[hr]) {
3921 if(entry[hr]>=0) {
3922 if(get_reg(pre,entry[hr])<0) {
3923 assem_debug("loop preload:\n");
3924 //printf("loop preload: %d\n",hr);
3925 if(entry[hr]==0) {
3926 emit_zeroreg(hr);
3927 }
3928 else if(entry[hr]<TEMPREG)
3929 {
3930 emit_loadreg(entry[hr],hr);
3931 }
3932 else if(entry[hr]-64<TEMPREG)
3933 {
3934 emit_loadreg(entry[hr],hr);
3935 }
3936 }
3937 }
3938 }
3939 }
3940 }
3941}
3942
3943// Generate address for load/store instruction
3944// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
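// The address for the current instruction is formed here (base register
// plus imm offset, with known-constant bases folded), and the constants
// needed by the following load/store are preloaded so they are already in
// place when that instruction is assembled.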
3945void address_generation(int i,struct regstat *i_regs,signed char entry[])
3946{
3947 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3948 int ra;
3949 int agr=AGEN1+(i&1);
3950 int mgr=MGEN1+(i&1);
3951 if(itype[i]==LOAD) {
3952 ra=get_reg(i_regs->regmap,rt1[i]);
3953 //if(rt1[i]) assert(ra>=0);
3954 }
3955 if(itype[i]==LOADLR) {
3956 ra=get_reg(i_regs->regmap,FTEMP);
3957 }
3958 if(itype[i]==STORE||itype[i]==STORELR) {
3959 ra=get_reg(i_regs->regmap,agr);
3960 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3961 }
3962 if(itype[i]==C1LS||itype[i]==C2LS) {
3963 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3964 ra=get_reg(i_regs->regmap,FTEMP);
3965 else { // SWC1/SDC1
3966 ra=get_reg(i_regs->regmap,agr);
3967 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3968 }
3969 }
3970 int rs=get_reg(i_regs->regmap,rs1[i]);
3971 int rm=get_reg(i_regs->regmap,TLREG);
3972 if(ra>=0) {
3973 int offset=imm[i];
3974 int c=(i_regs->wasconst>>rs)&1;
3975 if(rs1[i]==0) {
3976 // Using r0 as a base address
3977 /*if(rm>=0) {
3978 if(!entry||entry[rm]!=mgr) {
3979 generate_map_const(offset,rm);
3980 } // else did it in the previous cycle
3981 }*/
3982 if(!entry||entry[ra]!=agr) {
3983 if (opcode[i]==0x22||opcode[i]==0x26) {
3984 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3985 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3986 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3987 }else{
3988 emit_movimm(offset,ra);
3989 }
3990 } // else did it in the previous cycle
3991 }
3992 else if(rs<0) {
3993 if(!entry||entry[ra]!=rs1[i])
3994 emit_loadreg(rs1[i],ra);
3995 //if(!entry||entry[ra]!=rs1[i])
3996 // printf("poor load scheduling!\n");
3997 }
3998 else if(c) {
3999 if(rm>=0) {
4000 if(!entry||entry[rm]!=mgr) {
4001 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4002 // Stores to memory go thru the mapper to detect self-modifying
4003 // code, loads don't.
4004 if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4005 (unsigned int)(constmap[i][rs]+offset)<0x80800000 )
4006 generate_map_const(constmap[i][rs]+offset,rm);
4007 }else{
4008 if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4009 generate_map_const(constmap[i][rs]+offset,rm);
4010 }
4011 }
4012 }
4013 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4014 if(!entry||entry[ra]!=agr) {
4015 if (opcode[i]==0x22||opcode[i]==0x26) {
4016 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4017 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4018 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4019 }else{
4020 #ifdef HOST_IMM_ADDR32
4021 if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4022 (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4023 #endif
4024 emit_movimm(constmap[i][rs]+offset,ra);
4025 }
4026 } // else did it in the previous cycle
4027 } // else load_consts already did it
4028 }
4029 if(offset&&!c&&rs1[i]) {
4030 if(rs>=0) {
4031 emit_addimm(rs,offset,ra);
4032 }else{
4033 emit_addimm(ra,offset,ra);
4034 }
4035 }
4036 }
4037 }
4038 // Preload constants for next instruction
4039 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4040 int agr,ra;
4041 #ifndef HOST_IMM_ADDR32
4042 // Mapper entry
4043 agr=MGEN1+((i+1)&1);
4044 ra=get_reg(i_regs->regmap,agr);
4045 if(ra>=0) {
4046 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4047 int offset=imm[i+1];
4048 int c=(regs[i+1].wasconst>>rs)&1;
4049 if(c) {
4050 if(itype[i+1]==STORE||itype[i+1]==STORELR
4051 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4052 // Stores to memory go thru the mapper to detect self-modifying
4053 // code, loads don't.
4054 if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4055 (unsigned int)(constmap[i+1][rs]+offset)<0x80800000 )
4056 generate_map_const(constmap[i+1][rs]+offset,ra);
4057 }else{
4058 if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4059 generate_map_const(constmap[i+1][rs]+offset,ra);
4060 }
4061 }
4062 /*else if(rs1[i]==0) {
4063 generate_map_const(offset,ra);
4064 }*/
4065 }
4066 #endif
4067 // Actual address
4068 agr=AGEN1+((i+1)&1);
4069 ra=get_reg(i_regs->regmap,agr);
4070 if(ra>=0) {
4071 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4072 int offset=imm[i+1];
4073 int c=(regs[i+1].wasconst>>rs)&1;
4074 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4075 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4076 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4077 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4078 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4079 }else{
4080 #ifdef HOST_IMM_ADDR32
4081 if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4082 (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4083 #endif
4084 emit_movimm(constmap[i+1][rs]+offset,ra);
4085 }
4086 }
4087 else if(rs1[i+1]==0) {
4088 // Using r0 as a base address
4089 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4090 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4091 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4092 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4093 }else{
4094 emit_movimm(offset,ra);
4095 }
4096 }
4097 }
4098 }
4099}
4100
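// Determine the value that the constant assigned to host register hr should
// finally hold at instruction i: scan forward while the mapping and constness
// are unchanged and no branch target intervenes, and fold a following load's
// immediate offset into the value where that lets the load address be
// precomputed.  Returns 1 and stores the result in *value, or 0 when emitting
// the constant here can be skipped.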
4101int get_final_value(int hr, int i, int *value)
4102{
4103 int reg=regs[i].regmap[hr];
4104 while(i<slen-1) {
4105 if(regs[i+1].regmap[hr]!=reg) break;
4106 if(!((regs[i+1].isconst>>hr)&1)) break;
4107 if(bt[i+1]) break;
4108 i++;
4109 }
4110 if(i<slen-1) {
4111 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4112 *value=constmap[i][hr];
4113 return 1;
4114 }
4115 if(!bt[i+1]) {
4116 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4117 // Load in delay slot, out-of-order execution
4118 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4119 {
4120 #ifdef HOST_IMM_ADDR32
4121 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4122 #endif
4123 // Precompute load address
4124 *value=constmap[i][hr]+imm[i+2];
4125 return 1;
4126 }
4127 }
4128 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4129 {
4130 #ifdef HOST_IMM_ADDR32
4131 if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4132 #endif
4133 // Precompute load address
4134 *value=constmap[i][hr]+imm[i+1];
4135 //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4136 return 1;
4137 }
4138 }
4139 }
4140 *value=constmap[i][hr];
4141 //printf("c=%x\n",(int)constmap[i][hr]);
4142 if(i==slen-1) return 1;
4143 if(reg<64) {
4144 return !((unneeded_reg[i+1]>>reg)&1);
4145 }else{
4146 return !((unneeded_reg_upper[i+1]>>reg)&1);
4147 }
4148}
4149
4150// Load registers with known constants
4151void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4152{
4153 int hr;
4154 // Load 32-bit regs
4155 for(hr=0;hr<HOST_REGS;hr++) {
4156 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4157 //if(entry[hr]!=regmap[hr]) {
4158 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4159 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4160 int value;
4161 if(get_final_value(hr,i,&value)) {
4162 if(value==0) {
4163 emit_zeroreg(hr);
4164 }
4165 else {
4166 emit_movimm(value,hr);
4167 }
4168 }
4169 }
4170 }
4171 }
4172 }
4173 // Load 64-bit regs
4174 for(hr=0;hr<HOST_REGS;hr++) {
4175 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4176 //if(entry[hr]!=regmap[hr]) {
4177 if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4178 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4179 if((is32>>(regmap[hr]&63))&1) {
4180 int lr=get_reg(regmap,regmap[hr]-64);
4181 assert(lr>=0);
4182 emit_sarimm(lr,31,hr);
4183 }
4184 else
4185 {
4186 int value;
4187 if(get_final_value(hr,i,&value)) {
4188 if(value==0) {
4189 emit_zeroreg(hr);
4190 }
4191 else {
4192 emit_movimm(value,hr);
4193 }
4194 }
4195 }
4196 }
4197 }
4198 }
4199 }
4200}
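// Like load_consts, but loads the constant for every dirty mapped register
// directly from constmap[i], without the forward scan done by get_final_value.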
4201void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4202{
4203 int hr;
4204 // Load 32-bit regs
4205 for(hr=0;hr<HOST_REGS;hr++) {
4206 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4207 if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4208 int value=constmap[i][hr];
4209 if(value==0) {
4210 emit_zeroreg(hr);
4211 }
4212 else {
4213 emit_movimm(value,hr);
4214 }
4215 }
4216 }
4217 }
4218 // Load 64-bit regs
4219 for(hr=0;hr<HOST_REGS;hr++) {
4220 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4221 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4222 if((is32>>(regmap[hr]&63))&1) {
4223 int lr=get_reg(regmap,regmap[hr]-64);
4224 assert(lr>=0);
4225 emit_sarimm(lr,31,hr);
4226 }
4227 else
4228 {
4229 int value=constmap[i][hr];
4230 if(value==0) {
4231 emit_zeroreg(hr);
4232 }
4233 else {
4234 emit_movimm(value,hr);
4235 }
4236 }
4237 }
4238 }
4239 }
4240}
4241
4242// Write out all dirty registers (except cycle count)
4243void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4244{
4245 int hr;
4246 for(hr=0;hr<HOST_REGS;hr++) {
4247 if(hr!=EXCLUDE_REG) {
4248 if(i_regmap[hr]>0) {
4249 if(i_regmap[hr]!=CCREG) {
4250 if((i_dirty>>hr)&1) {
4251 if(i_regmap[hr]<64) {
4252 emit_storereg(i_regmap[hr],hr);
4253#ifndef FORCE32
4254 if( ((i_is32>>i_regmap[hr])&1) ) {
4255 #ifdef DESTRUCTIVE_WRITEBACK
4256 emit_sarimm(hr,31,hr);
4257 emit_storereg(i_regmap[hr]|64,hr);
4258 #else
4259 emit_sarimm(hr,31,HOST_TEMPREG);
4260 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4261 #endif
4262 }
4263#endif
4264 }else{
4265 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4266 emit_storereg(i_regmap[hr],hr);
4267 }
4268 }
4269 }
4270 }
4271 }
4272 }
4273 }
4274}
4275// Write out dirty registers that we need to reload (pair with load_needed_regs)
4276// This writes the registers not written by store_regs_bt
4277void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4278{
4279 int hr;
4280 int t=(addr-start)>>2;
4281 for(hr=0;hr<HOST_REGS;hr++) {
4282 if(hr!=EXCLUDE_REG) {
4283 if(i_regmap[hr]>0) {
4284 if(i_regmap[hr]!=CCREG) {
4285 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4286 if((i_dirty>>hr)&1) {
4287 if(i_regmap[hr]<64) {
4288 emit_storereg(i_regmap[hr],hr);
4289#ifndef FORCE32
4290 if( ((i_is32>>i_regmap[hr])&1) ) {
4291 #ifdef DESTRUCTIVE_WRITEBACK
4292 emit_sarimm(hr,31,hr);
4293 emit_storereg(i_regmap[hr]|64,hr);
4294 #else
4295 emit_sarimm(hr,31,HOST_TEMPREG);
4296 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4297 #endif
4298 }
4299#endif
4300 }else{
4301 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4302 emit_storereg(i_regmap[hr],hr);
4303 }
4304 }
4305 }
4306 }
4307 }
4308 }
4309 }
4310 }
4311}
4312
4313// Load all registers (except cycle count)
4314void load_all_regs(signed char i_regmap[])
4315{
4316 int hr;
4317 for(hr=0;hr<HOST_REGS;hr++) {
4318 if(hr!=EXCLUDE_REG) {
4319 if(i_regmap[hr]==0) {
4320 emit_zeroreg(hr);
4321 }
4322 else
4323 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4324 {
4325 emit_loadreg(i_regmap[hr],hr);
4326 }
4327 }
4328 }
4329}
4330
4331// Load all current registers also needed by next instruction
4332void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4333{
4334 int hr;
4335 for(hr=0;hr<HOST_REGS;hr++) {
4336 if(hr!=EXCLUDE_REG) {
4337 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4338 if(i_regmap[hr]==0) {
4339 emit_zeroreg(hr);
4340 }
4341 else
4342 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
4343 {
4344 emit_loadreg(i_regmap[hr],hr);
4345 }
4346 }
4347 }
4348 }
4349}
4350
4351// Load all regs, storing cycle count if necessary
4352void load_regs_entry(int t)
4353{
4354 int hr;
4355 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4356 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4357 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4358 emit_storereg(CCREG,HOST_CCREG);
4359 }
4360 // Load 32-bit regs
4361 for(hr=0;hr<HOST_REGS;hr++) {
4362 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4363 if(regs[t].regmap_entry[hr]==0) {
4364 emit_zeroreg(hr);
4365 }
4366 else if(regs[t].regmap_entry[hr]!=CCREG)
4367 {
4368 emit_loadreg(regs[t].regmap_entry[hr],hr);
4369 }
4370 }
4371 }
4372 // Load 64-bit regs
4373 for(hr=0;hr<HOST_REGS;hr++) {
4374 if(regs[t].regmap_entry[hr]>=64) {
4375 assert(regs[t].regmap_entry[hr]!=64);
4376 if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4377 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4378 if(lr<0) {
4379 emit_loadreg(regs[t].regmap_entry[hr],hr);
4380 }
4381 else
4382 {
4383 emit_sarimm(lr,31,hr);
4384 }
4385 }
4386 else
4387 {
4388 emit_loadreg(regs[t].regmap_entry[hr],hr);
4389 }
4390 }
4391 }
4392}
4393
4394// Store dirty registers prior to branch
4395void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4396{
4397 if(internal_branch(i_is32,addr))
4398 {
4399 int t=(addr-start)>>2;
4400 int hr;
4401 for(hr=0;hr<HOST_REGS;hr++) {
4402 if(hr!=EXCLUDE_REG) {
4403 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4404 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4405 if((i_dirty>>hr)&1) {
4406 if(i_regmap[hr]<64) {
4407 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4408 emit_storereg(i_regmap[hr],hr);
4409 if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4410 #ifdef DESTRUCTIVE_WRITEBACK
4411 emit_sarimm(hr,31,hr);
4412 emit_storereg(i_regmap[hr]|64,hr);
4413 #else
4414 emit_sarimm(hr,31,HOST_TEMPREG);
4415 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4416 #endif
4417 }
4418 }
4419 }else{
4420 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4421 emit_storereg(i_regmap[hr],hr);
4422 }
4423 }
4424 }
4425 }
4426 }
4427 }
4428 }
4429 }
4430 else
4431 {
4432 // Branch out of this block, write out all dirty regs
4433 wb_dirtys(i_regmap,i_is32,i_dirty);
4434 }
4435}
4436
4437// Load all needed registers for branch target
4438void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4439{
4440 //if(addr>=start && addr<(start+slen*4))
4441 if(internal_branch(i_is32,addr))
4442 {
4443 int t=(addr-start)>>2;
4444 int hr;
4445 // Store the cycle count before loading something else
4446 if(i_regmap[HOST_CCREG]!=CCREG) {
4447 assert(i_regmap[HOST_CCREG]==-1);
4448 }
4449 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4450 emit_storereg(CCREG,HOST_CCREG);
4451 }
4452 // Load 32-bit regs
4453 for(hr=0;hr<HOST_REGS;hr++) {
4454 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
4455 #ifdef DESTRUCTIVE_WRITEBACK
4456 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4457 #else
4458 if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4459 #endif
4460 if(regs[t].regmap_entry[hr]==0) {
4461 emit_zeroreg(hr);
4462 }
4463 else if(regs[t].regmap_entry[hr]!=CCREG)
4464 {
4465 emit_loadreg(regs[t].regmap_entry[hr],hr);
4466 }
4467 }
4468 }
4469 }
4470 // Load 64-bit regs
4471 for(hr=0;hr<HOST_REGS;hr++) {
4472 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
4473 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4474 assert(regs[t].regmap_entry[hr]!=64);
4475 if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4476 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4477 if(lr<0) {
4478 emit_loadreg(regs[t].regmap_entry[hr],hr);
4479 }
4480 else
4481 {
4482 emit_sarimm(lr,31,hr);
4483 }
4484 }
4485 else
4486 {
4487 emit_loadreg(regs[t].regmap_entry[hr],hr);
4488 }
4489 }
4490 else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4491 int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4492 assert(lr>=0);
4493 emit_sarimm(lr,31,hr);
4494 }
4495 }
4496 }
4497 }
4498}
4499
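// Decide whether the register state described by i_regmap/i_is32/i_dirty is
// compatible with the expected entry state at the branch target, so the
// branch can be linked directly with no extra writeback or reloads.
// Returns 1 on a match.  Targets outside the current block only match when
// nothing except the cycle count is dirty, and delay slots never match.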
4500int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4501{
4502 if(addr>=start && addr<start+slen*4-4)
4503 {
4504 int t=(addr-start)>>2;
4505 int hr;
4506 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4507 for(hr=0;hr<HOST_REGS;hr++)
4508 {
4509 if(hr!=EXCLUDE_REG)
4510 {
4511 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4512 {
4513 if(regs[t].regmap_entry[hr]!=-1)
4514 {
4515 return 0;
4516 }
4517 else
4518 if((i_dirty>>hr)&1)
4519 {
4520 if(i_regmap[hr]<64)
4521 {
4522 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4523 return 0;
4524 }
4525 else
4526 {
4527 if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4528 return 0;
4529 }
4530 }
4531 }
4532 else // Same register but is it 32-bit or dirty?
4533 if(i_regmap[hr]>=0)
4534 {
4535 if(!((regs[t].dirty>>hr)&1))
4536 {
4537 if((i_dirty>>hr)&1)
4538 {
4539 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4540 {
4541 //printf("%x: dirty no match\n",addr);
4542 return 0;
4543 }
4544 }
4545 }
4546 if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4547 {
4548 //printf("%x: is32 no match\n",addr);
4549 return 0;
4550 }
4551 }
4552 }
4553 }
4554 //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4555 if(requires_32bit[t]&~i_is32) return 0;
4556 // Delay slots are not valid branch targets
4557 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4558 // Delay slots require additional processing, so do not match
4559 if(is_ds[t]) return 0;
4560 }
4561 else
4562 {
4563 int hr;
4564 for(hr=0;hr<HOST_REGS;hr++)
4565 {
4566 if(hr!=EXCLUDE_REG)
4567 {
4568 if(i_regmap[hr]>=0)
4569 {
4570 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4571 {
4572 if((i_dirty>>hr)&1)
4573 {
4574 return 0;
4575 }
4576 }
4577 }
4578 }
4579 }
4580 }
4581 return 1;
4582}
4583
4584// Used when a branch jumps into the delay slot of another branch
4585void ds_assemble_entry(int i)
4586{
4587 int t=(ba[i]-start)>>2;
4588 if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4589 assem_debug("Assemble delay slot at %x\n",ba[i]);
4590 assem_debug("<->\n");
4591 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4592 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4593 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4594 address_generation(t,&regs[t],regs[t].regmap_entry);
4595 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4596 load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4597 cop1_usable=0;
4598 is_delayslot=0;
4599 switch(itype[t]) {
4600 case ALU:
4601 alu_assemble(t,&regs[t]);break;
4602 case IMM16:
4603 imm16_assemble(t,&regs[t]);break;
4604 case SHIFT:
4605 shift_assemble(t,&regs[t]);break;
4606 case SHIFTIMM:
4607 shiftimm_assemble(t,&regs[t]);break;
4608 case LOAD:
4609 load_assemble(t,&regs[t]);break;
4610 case LOADLR:
4611 loadlr_assemble(t,&regs[t]);break;
4612 case STORE:
4613 store_assemble(t,&regs[t]);break;
4614 case STORELR:
4615 storelr_assemble(t,&regs[t]);break;
4616 case COP0:
4617 cop0_assemble(t,&regs[t]);break;
4618 case COP1:
4619 cop1_assemble(t,&regs[t]);break;
4620 case C1LS:
4621 c1ls_assemble(t,&regs[t]);break;
4622 case COP2:
4623 cop2_assemble(t,&regs[t]);break;
4624 case C2LS:
4625 c2ls_assemble(t,&regs[t]);break;
4626 case C2OP:
4627 c2op_assemble(t,&regs[t]);break;
4628 case FCONV:
4629 fconv_assemble(t,&regs[t]);break;
4630 case FLOAT:
4631 float_assemble(t,&regs[t]);break;
4632 case FCOMP:
4633 fcomp_assemble(t,&regs[t]);break;
4634 case MULTDIV:
4635 multdiv_assemble(t,&regs[t]);break;
4636 case MOV:
4637 mov_assemble(t,&regs[t]);break;
4638 case SYSCALL:
4639 case HLECALL:
4640 case SPAN:
4641 case UJUMP:
4642 case RJUMP:
4643 case CJUMP:
4644 case SJUMP:
4645 case FJUMP:
4646 printf("Jump in the delay slot. This is probably a bug.\n");
4647 }
4648 store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4649 load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4650 if(internal_branch(regs[t].is32,ba[i]+4))
4651 assem_debug("branch: internal\n");
4652 else
4653 assem_debug("branch: external\n");
4654 assert(internal_branch(regs[t].is32,ba[i]+4));
4655 add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4656 emit_jmp(0);
4657}
4658
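// Emit the cycle-count check for a branch: add the cycles accumulated for
// this segment to HOST_CCREG and branch out to a CC_STUB when the counter
// reaches zero, i.e. an interrupt/event is due.  *adj returns the cycle
// adjustment that an internal branch target will apply itself (0 for
// external targets and register jumps).  A branch-to-self with an empty
// delay slot, e.g.
//   loop: j    loop
//         nop
// is recognized as an idle loop: the cycle counter is masked down and
// control jumps straight to the CC stub, effectively skipping the idle time.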
4659void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4660{
4661 int count;
4662 int jaddr;
4663 int idle=0;
4664 if(itype[i]==RJUMP)
4665 {
4666 *adj=0;
4667 }
4668 //if(ba[i]>=start && ba[i]<(start+slen*4))
4669 if(internal_branch(branch_regs[i].is32,ba[i]))
4670 {
4671 int t=(ba[i]-start)>>2;
4672 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4673 else *adj=ccadj[t];
4674 }
4675 else
4676 {
4677 *adj=0;
4678 }
4679 count=ccadj[i];
4680 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4681 // Idle loop
4682 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4683 idle=(int)out;
4684 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4685 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4686 jaddr=(int)out;
4687 emit_jmp(0);
4688 }
4689 else if(*adj==0||invert) {
4690 emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4691 jaddr=(int)out;
4692 emit_jns(0);
4693 }
4694 else
4695 {
4696 emit_cmpimm(HOST_CCREG,-2*(count+2));
4697 jaddr=(int)out;
4698 emit_jns(0);
4699 }
4700 add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4701}
4702
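// Out-of-line handler for a CC_STUB: write back dirty registers, store the
// return PC to pcaddr (re-evaluating the branch condition when the outcome
// was not known at compile time), call cc_interrupt, then reload registers
// and jump back into the compiled code.  Stub fields as used here:
// stubs[n][1]=patch site, [2]=return address, [3]=cycle adjustment,
// [4]=instruction index, [5]=return PC or -1, [6]=TAKEN/NOTTAKEN/NULLDS.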
4703void do_ccstub(int n)
4704{
4705 literal_pool(256);
4706 assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4707 set_jump_target(stubs[n][1],(int)out);
4708 int i=stubs[n][4];
4709 if(stubs[n][6]==NULLDS) {
4710 // Delay slot instruction is nullified ("likely" branch)
4711 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4712 }
4713 else if(stubs[n][6]!=TAKEN) {
4714 wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4715 }
4716 else {
4717 if(internal_branch(branch_regs[i].is32,ba[i]))
4718 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4719 }
4720 if(stubs[n][5]!=-1)
4721 {
4722 // Save PC as return address
4723 emit_movimm(stubs[n][5],EAX);
4724 emit_writeword(EAX,(int)&pcaddr);
4725 }
4726 else
4727 {
4728 // Return address depends on which way the branch goes
4729 if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4730 {
4731 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4732 int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4733 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4734 int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4735 if(rs1[i]==0)
4736 {
4737 s1l=s2l;s1h=s2h;
4738 s2l=s2h=-1;
4739 }
4740 else if(rs2[i]==0)
4741 {
4742 s2l=s2h=-1;
4743 }
4744 if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4745 s1h=s2h=-1;
4746 }
4747 assert(s1l>=0);
4748 #ifdef DESTRUCTIVE_WRITEBACK
4749 if(rs1[i]) {
4750 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4751 emit_loadreg(rs1[i],s1l);
4752 }
4753 else {
4754 if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4755 emit_loadreg(rs2[i],s1l);
4756 }
4757 if(s2l>=0)
4758 if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4759 emit_loadreg(rs2[i],s2l);
4760 #endif
4761 int hr=0;
4762 int addr,alt,ntaddr;
4763 while(hr<HOST_REGS)
4764 {
4765 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4766 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4767 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4768 {
4769 addr=hr++;break;
4770 }
4771 hr++;
4772 }
4773 while(hr<HOST_REGS)
4774 {
4775 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4776 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4777 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4778 {
4779 alt=hr++;break;
4780 }
4781 hr++;
4782 }
4783 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4784 {
4785 while(hr<HOST_REGS)
4786 {
4787 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4788 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4789 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4790 {
4791 ntaddr=hr;break;
4792 }
4793 hr++;
4794 }
4795 assert(hr<HOST_REGS);
4796 }
4797 if((opcode[i]&0x2f)==4) // BEQ
4798 {
4799 #ifdef HAVE_CMOV_IMM
4800 if(s1h<0) {
4801 if(s2l>=0) emit_cmp(s1l,s2l);
4802 else emit_test(s1l,s1l);
4803 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4804 }
4805 else
4806 #endif
4807 {
4808 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4809 if(s1h>=0) {
4810 if(s2h>=0) emit_cmp(s1h,s2h);
4811 else emit_test(s1h,s1h);
4812 emit_cmovne_reg(alt,addr);
4813 }
4814 if(s2l>=0) emit_cmp(s1l,s2l);
4815 else emit_test(s1l,s1l);
4816 emit_cmovne_reg(alt,addr);
4817 }
4818 }
4819 if((opcode[i]&0x2f)==5) // BNE
4820 {
4821 #ifdef HAVE_CMOV_IMM
4822 if(s1h<0) {
4823 if(s2l>=0) emit_cmp(s1l,s2l);
4824 else emit_test(s1l,s1l);
4825 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4826 }
4827 else
4828 #endif
4829 {
4830 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4831 if(s1h>=0) {
4832 if(s2h>=0) emit_cmp(s1h,s2h);
4833 else emit_test(s1h,s1h);
4834 emit_cmovne_reg(alt,addr);
4835 }
4836 if(s2l>=0) emit_cmp(s1l,s2l);
4837 else emit_test(s1l,s1l);
4838 emit_cmovne_reg(alt,addr);
4839 }
4840 }
4841 if((opcode[i]&0x2f)==6) // BLEZ
4842 {
4843 //emit_movimm(ba[i],alt);
4844 //emit_movimm(start+i*4+8,addr);
4845 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4846 emit_cmpimm(s1l,1);
4847 if(s1h>=0) emit_mov(addr,ntaddr);
4848 emit_cmovl_reg(alt,addr);
4849 if(s1h>=0) {
4850 emit_test(s1h,s1h);
4851 emit_cmovne_reg(ntaddr,addr);
4852 emit_cmovs_reg(alt,addr);
4853 }
4854 }
4855 if((opcode[i]&0x2f)==7) // BGTZ
4856 {
4857 //emit_movimm(ba[i],addr);
4858 //emit_movimm(start+i*4+8,ntaddr);
4859 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4860 emit_cmpimm(s1l,1);
4861 if(s1h>=0) emit_mov(addr,alt);
4862 emit_cmovl_reg(ntaddr,addr);
4863 if(s1h>=0) {
4864 emit_test(s1h,s1h);
4865 emit_cmovne_reg(alt,addr);
4866 emit_cmovs_reg(ntaddr,addr);
4867 }
4868 }
4869 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4870 {
4871 //emit_movimm(ba[i],alt);
4872 //emit_movimm(start+i*4+8,addr);
4873 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4874 if(s1h>=0) emit_test(s1h,s1h);
4875 else emit_test(s1l,s1l);
4876 emit_cmovs_reg(alt,addr);
4877 }
4878 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4879 {
4880 //emit_movimm(ba[i],addr);
4881 //emit_movimm(start+i*4+8,alt);
4882 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4883 if(s1h>=0) emit_test(s1h,s1h);
4884 else emit_test(s1l,s1l);
4885 emit_cmovs_reg(alt,addr);
4886 }
4887 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4888 if(source[i]&0x10000) // BC1T
4889 {
4890 //emit_movimm(ba[i],alt);
4891 //emit_movimm(start+i*4+8,addr);
4892 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4893 emit_testimm(s1l,0x800000);
4894 emit_cmovne_reg(alt,addr);
4895 }
4896 else // BC1F
4897 {
4898 //emit_movimm(ba[i],addr);
4899 //emit_movimm(start+i*4+8,alt);
4900 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4901 emit_testimm(s1l,0x800000);
4902 emit_cmovne_reg(alt,addr);
4903 }
4904 }
4905 emit_writeword(addr,(int)&pcaddr);
4906 }
4907 else
4908 if(itype[i]==RJUMP)
4909 {
4910 int r=get_reg(branch_regs[i].regmap,rs1[i]);
4911 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4912 r=get_reg(branch_regs[i].regmap,RTEMP);
4913 }
4914 emit_writeword(r,(int)&pcaddr);
4915 }
4916 else {printf("Unknown branch type in do_ccstub\n");exit(1);}
4917 }
4918 // Update cycle count
4919 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4920 if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4921 emit_call((int)cc_interrupt);
4922 if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
4923 if(stubs[n][6]==TAKEN) {
4924 if(internal_branch(branch_regs[i].is32,ba[i]))
4925 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4926 else if(itype[i]==RJUMP) {
4927 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4928 emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4929 else
4930 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4931 }
4932 }else if(stubs[n][6]==NOTTAKEN) {
4933 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4934 else load_all_regs(branch_regs[i].regmap);
4935 }else if(stubs[n][6]==NULLDS) {
4936 // Delay slot instruction is nullified ("likely" branch)
4937 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4938 else load_all_regs(regs[i].regmap);
4939 }else{
4940 load_all_regs(branch_regs[i].regmap);
4941 }
4942 emit_jmp(stubs[n][2]); // return address
4943
4944 /* This works but uses a lot of memory...
4945 emit_readword((int)&last_count,ECX);
4946 emit_add(HOST_CCREG,ECX,EAX);
4947 emit_writeword(EAX,(int)&Count);
4948 emit_call((int)gen_interupt);
4949 emit_readword((int)&Count,HOST_CCREG);
4950 emit_readword((int)&next_interupt,EAX);
4951 emit_readword((int)&pending_exception,EBX);
4952 emit_writeword(EAX,(int)&last_count);
4953 emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4954 emit_test(EBX,EBX);
4955 int jne_instr=(int)out;
4956 emit_jne(0);
4957 if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4958 load_all_regs(branch_regs[i].regmap);
4959 emit_jmp(stubs[n][2]); // return address
4960 set_jump_target(jne_instr,(int)out);
4961 emit_readword((int)&pcaddr,EAX);
4962 // Call get_addr_ht instead of doing the hash table here.
4963 // This code is executed infrequently and takes up a lot of space
4964 // so smaller is better.
4965 emit_storereg(CCREG,HOST_CCREG);
4966 emit_pushreg(EAX);
4967 emit_call((int)get_addr_ht);
4968 emit_loadreg(CCREG,HOST_CCREG);
4969 emit_addimm(ESP,4,ESP);
4970 emit_jmpreg(EAX);*/
4971}
4972
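// Record a jump to be patched by the linker later: addr is the location of
// the emitted jump in the output buffer, target is the destination virtual
// address, and ext is the branch classification supplied by the caller
// (here, the result of internal_branch).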
4973void add_to_linker(int addr,int target,int ext)
4974{
4975 link_addr[linkcount][0]=addr;
4976 link_addr[linkcount][1]=target;
4977 link_addr[linkcount][2]=ext;
4978 linkcount++;
4979}
4980
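// Assemble an unconditional jump (J/JAL).  The delay slot is emitted first;
// for JAL the return address is written to r31 (and, with USE_MINI_HT,
// inserted into the mini hash table), the cycle count is checked, and the
// jump is either assembled inline for an in-block target or routed through
// the linker.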
4981void ujump_assemble(int i,struct regstat *i_regs)
4982{
4983 signed char *i_regmap=i_regs->regmap;
4984 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4985 address_generation(i+1,i_regs,regs[i].regmap_entry);
4986 #ifdef REG_PREFETCH
4987 int temp=get_reg(branch_regs[i].regmap,PTEMP);
4988 if(rt1[i]==31&&temp>=0)
4989 {
4990 int return_address=start+i*4+8;
4991 if(get_reg(branch_regs[i].regmap,31)>0)
4992 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4993 }
4994 #endif
4995 ds_assemble(i+1,i_regs);
4996 uint64_t bc_unneeded=branch_regs[i].u;
4997 uint64_t bc_unneeded_upper=branch_regs[i].uu;
4998 bc_unneeded|=1|(1LL<<rt1[i]);
4999 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5000 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5001 bc_unneeded,bc_unneeded_upper);
5002 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5003 if(rt1[i]==31) {
5004 int rt;
5005 unsigned int return_address;
5006 assert(rt1[i+1]!=31);
5007 assert(rt2[i+1]!=31);
5008 rt=get_reg(branch_regs[i].regmap,31);
5009 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5010 //assert(rt>=0);
5011 return_address=start+i*4+8;
5012 if(rt>=0) {
5013 #ifdef USE_MINI_HT
5014 if(internal_branch(branch_regs[i].is32,return_address)) {
5015 int temp=rt+1;
5016 if(temp==EXCLUDE_REG||temp>=HOST_REGS||
5017 branch_regs[i].regmap[temp]>=0)
5018 {
5019 temp=get_reg(branch_regs[i].regmap,-1);
5020 }
5021 #ifdef HOST_TEMPREG
5022 if(temp<0) temp=HOST_TEMPREG;
5023 #endif
5024 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5025 else emit_movimm(return_address,rt);
5026 }
5027 else
5028 #endif
5029 {
5030 #ifdef REG_PREFETCH
5031 if(temp>=0)
5032 {
5033 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5034 }
5035 #endif
5036 emit_movimm(return_address,rt); // PC into link register
5037 #ifdef IMM_PREFETCH
5038 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5039 #endif
5040 }
5041 }
5042 }
5043 int cc,adj;
5044 cc=get_reg(branch_regs[i].regmap,CCREG);
5045 assert(cc==HOST_CCREG);
5046 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5047 #ifdef REG_PREFETCH
5048 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5049 #endif
5050 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5051 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5052 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5053 if(internal_branch(branch_regs[i].is32,ba[i]))
5054 assem_debug("branch: internal\n");
5055 else
5056 assem_debug("branch: external\n");
5057 if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5058 ds_assemble_entry(i);
5059 }
5060 else {
5061 add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5062 emit_jmp(0);
5063 }
5064}
5065
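// Assemble a register-indirect jump (JR/JALR).  If the delay slot overwrites
// the branch address register, the address is first copied to RTEMP.  The
// link register is written for JALR, and the target is dispatched at run
// time through jump_vaddr_reg (or the mini hash table when jumping via r31
// with USE_MINI_HT).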
5066void rjump_assemble(int i,struct regstat *i_regs)
5067{
5068 signed char *i_regmap=i_regs->regmap;
5069 int temp;
5070 int rs,cc,adj;
5071 rs=get_reg(branch_regs[i].regmap,rs1[i]);
5072 assert(rs>=0);
5073 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5074 // Delay slot abuse, make a copy of the branch address register
5075 temp=get_reg(branch_regs[i].regmap,RTEMP);
5076 assert(temp>=0);
5077 assert(regs[i].regmap[temp]==RTEMP);
5078 emit_mov(rs,temp);
5079 rs=temp;
5080 }
5081 address_generation(i+1,i_regs,regs[i].regmap_entry);
5082 #ifdef REG_PREFETCH
5083 if(rt1[i]==31)
5084 {
5085 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5086 int return_address=start+i*4+8;
5087 if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5088 }
5089 }
5090 #endif
5091 #ifdef USE_MINI_HT
5092 if(rs1[i]==31) {
5093 int rh=get_reg(regs[i].regmap,RHASH);
5094 if(rh>=0) do_preload_rhash(rh);
5095 }
5096 #endif
5097 ds_assemble(i+1,i_regs);
5098 uint64_t bc_unneeded=branch_regs[i].u;
5099 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5100 bc_unneeded|=1|(1LL<<rt1[i]);
5101 bc_unneeded_upper|=1|(1LL<<rt1[i]);
5102 bc_unneeded&=~(1LL<<rs1[i]);
5103 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5104 bc_unneeded,bc_unneeded_upper);
5105 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5106 if(rt1[i]!=0) {
5107 int rt,return_address;
5108 assert(rt1[i+1]!=rt1[i]);
5109 assert(rt2[i+1]!=rt1[i]);
5110 rt=get_reg(branch_regs[i].regmap,rt1[i]);
5111 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5112 assert(rt>=0);
5113 return_address=start+i*4+8;
5114 #ifdef REG_PREFETCH
5115 if(temp>=0)
5116 {
5117 if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5118 }
5119 #endif
5120 emit_movimm(return_address,rt); // PC into link register
5121 #ifdef IMM_PREFETCH
5122 emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5123 #endif
5124 }
5125 cc=get_reg(branch_regs[i].regmap,CCREG);
5126 assert(cc==HOST_CCREG);
5127 #ifdef USE_MINI_HT
5128 int rh=get_reg(branch_regs[i].regmap,RHASH);
5129 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5130 if(rs1[i]==31) {
5131 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5132 do_preload_rhtbl(ht);
5133 do_rhash(rs,rh);
5134 }
5135 #endif
5136 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5137 #ifdef DESTRUCTIVE_WRITEBACK
5138 if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5139 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5140 emit_loadreg(rs1[i],rs);
5141 }
5142 }
5143 #endif
5144 #ifdef REG_PREFETCH
5145 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5146 #endif
5147 #ifdef USE_MINI_HT
5148 if(rs1[i]==31) {
5149 do_miniht_load(ht,rh);
5150 }
5151 #endif
5152 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5153 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5154 //assert(adj==0);
5155 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5156 add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5157 emit_jns(0);
5158 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5159 #ifdef USE_MINI_HT
5160 if(rs1[i]==31) {
5161 do_miniht_jump(rs,rh,ht);
5162 }
5163 else
5164 #endif
5165 {
5166 //if(rs!=EAX) emit_mov(rs,EAX);
5167 //emit_jmp((int)jump_vaddr_eax);
5168 emit_jmp(jump_vaddr_reg[rs]);
5169 }
5170 /* Check hash table
5171 temp=!rs;
5172 emit_mov(rs,temp);
5173 emit_shrimm(rs,16,rs);
5174 emit_xor(temp,rs,rs);
5175 emit_movzwl_reg(rs,rs);
5176 emit_shlimm(rs,4,rs);
5177 emit_cmpmem_indexed((int)hash_table,rs,temp);
5178 emit_jne((int)out+14);
5179 emit_readword_indexed((int)hash_table+4,rs,rs);
5180 emit_jmpreg(rs);
5181 emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5182 emit_addimm_no_flags(8,rs);
5183 emit_jeq((int)out-17);
5184 // No hit on hash table, call compiler
5185 emit_pushreg(temp);
5186//DEBUG >
5187#ifdef DEBUG_CYCLE_COUNT
5188 emit_readword((int)&last_count,ECX);
5189 emit_add(HOST_CCREG,ECX,HOST_CCREG);
5190 emit_readword((int)&next_interupt,ECX);
5191 emit_writeword(HOST_CCREG,(int)&Count);
5192 emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5193 emit_writeword(ECX,(int)&last_count);
5194#endif
5195//DEBUG <
5196 emit_storereg(CCREG,HOST_CCREG);
5197 emit_call((int)get_addr);
5198 emit_loadreg(CCREG,HOST_CCREG);
5199 emit_addimm(ESP,4,ESP);
5200 emit_jmpreg(EAX);*/
5201 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5202 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5203 #endif
5204}
5205
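// Assemble the conditional branches BEQ/BNE/BLEZ/BGTZ (and their "likely"
// forms).  When there is no write-after-read conflict with the delay slot,
// the slot is executed first and the condition tested afterwards (out of
// order); otherwise the condition is tested before the delay slot
// (in order), which also lets likely branches nullify the slot on the
// not-taken path.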
5206void cjump_assemble(int i,struct regstat *i_regs)
5207{
5208 signed char *i_regmap=i_regs->regmap;
5209 int cc;
5210 int match;
5211 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5212 assem_debug("match=%d\n",match);
5213 int s1h,s1l,s2h,s2l;
5214 int prev_cop1_usable=cop1_usable;
5215 int unconditional=0,nop=0;
5216 int only32=0;
5217 int ooo=1;
5218 int invert=0;
5219 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5220 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5221 if(likely[i]) ooo=0;
5222 if(!match) invert=1;
5223 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5224 if(i>(ba[i]-start)>>2) invert=1;
5225 #endif
5226
5227 if(ooo)
5228 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
5229 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
5230 {
5231 // Write-after-read dependency prevents out of order execution
5232 // First test branch condition, then execute delay slot, then branch
5233 ooo=0;
5234 }
5235
5236 if(ooo) {
5237 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5238 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5239 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5240 s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5241 }
5242 else {
5243 s1l=get_reg(i_regmap,rs1[i]);
5244 s1h=get_reg(i_regmap,rs1[i]|64);
5245 s2l=get_reg(i_regmap,rs2[i]);
5246 s2h=get_reg(i_regmap,rs2[i]|64);
5247 }
5248 if(rs1[i]==0&&rs2[i]==0)
5249 {
5250 if(opcode[i]&1) nop=1;
5251 else unconditional=1;
5252 //assert(opcode[i]!=5);
5253 //assert(opcode[i]!=7);
5254 //assert(opcode[i]!=0x15);
5255 //assert(opcode[i]!=0x17);
5256 }
5257 else if(rs1[i]==0)
5258 {
5259 s1l=s2l;s1h=s2h;
5260 s2l=s2h=-1;
5261 only32=(regs[i].was32>>rs2[i])&1;
5262 }
5263 else if(rs2[i]==0)
5264 {
5265 s2l=s2h=-1;
5266 only32=(regs[i].was32>>rs1[i])&1;
5267 }
5268 else {
5269 only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5270 }
5271
5272 if(ooo) {
5273 // Out of order execution (delay slot first)
5274 //printf("OOOE\n");
5275 address_generation(i+1,i_regs,regs[i].regmap_entry);
5276 ds_assemble(i+1,i_regs);
5277 int adj;
5278 uint64_t bc_unneeded=branch_regs[i].u;
5279 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5280 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5281 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5282 bc_unneeded|=1;
5283 bc_unneeded_upper|=1;
5284 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5285 bc_unneeded,bc_unneeded_upper);
5286 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5287 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5288 cc=get_reg(branch_regs[i].regmap,CCREG);
5289 assert(cc==HOST_CCREG);
5290 if(unconditional)
5291 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5292 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5293 //assem_debug("cycle count (adj)\n");
5294 if(unconditional) {
5295 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5296 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5297 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5298 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5299 if(internal)
5300 assem_debug("branch: internal\n");
5301 else
5302 assem_debug("branch: external\n");
5303 if(internal&&is_ds[(ba[i]-start)>>2]) {
5304 ds_assemble_entry(i);
5305 }
5306 else {
5307 add_to_linker((int)out,ba[i],internal);
5308 emit_jmp(0);
5309 }
5310 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5311 if(((u_int)out)&7) emit_addnop(0);
5312 #endif
5313 }
5314 }
5315 else if(nop) {
5316 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5317 int jaddr=(int)out;
5318 emit_jns(0);
5319 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5320 }
5321 else {
5322 int taken=0,nottaken=0,nottaken1=0;
5323 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5324 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5325 if(!only32)
5326 {
5327 assert(s1h>=0);
5328 if(opcode[i]==4) // BEQ
5329 {
5330 if(s2h>=0) emit_cmp(s1h,s2h);
5331 else emit_test(s1h,s1h);
5332 nottaken1=(int)out;
5333 emit_jne(1);
5334 }
5335 if(opcode[i]==5) // BNE
5336 {
5337 if(s2h>=0) emit_cmp(s1h,s2h);
5338 else emit_test(s1h,s1h);
5339 if(invert) taken=(int)out;
5340 else add_to_linker((int)out,ba[i],internal);
5341 emit_jne(0);
5342 }
5343 if(opcode[i]==6) // BLEZ
5344 {
5345 emit_test(s1h,s1h);
5346 if(invert) taken=(int)out;
5347 else add_to_linker((int)out,ba[i],internal);
5348 emit_js(0);
5349 nottaken1=(int)out;
5350 emit_jne(1);
5351 }
5352 if(opcode[i]==7) // BGTZ
5353 {
5354 emit_test(s1h,s1h);
5355 nottaken1=(int)out;
5356 emit_js(1);
5357 if(invert) taken=(int)out;
5358 else add_to_linker((int)out,ba[i],internal);
5359 emit_jne(0);
5360 }
5361 } // if(!only32)
5362
5363 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5364 assert(s1l>=0);
5365 if(opcode[i]==4) // BEQ
5366 {
5367 if(s2l>=0) emit_cmp(s1l,s2l);
5368 else emit_test(s1l,s1l);
5369 if(invert){
5370 nottaken=(int)out;
5371 emit_jne(1);
5372 }else{
5373 add_to_linker((int)out,ba[i],internal);
5374 emit_jeq(0);
5375 }
5376 }
5377 if(opcode[i]==5) // BNE
5378 {
5379 if(s2l>=0) emit_cmp(s1l,s2l);
5380 else emit_test(s1l,s1l);
5381 if(invert){
5382 nottaken=(int)out;
5383 emit_jeq(1);
5384 }else{
5385 add_to_linker((int)out,ba[i],internal);
5386 emit_jne(0);
5387 }
5388 }
5389 if(opcode[i]==6) // BLEZ
5390 {
5391 emit_cmpimm(s1l,1);
5392 if(invert){
5393 nottaken=(int)out;
5394 emit_jge(1);
5395 }else{
5396 add_to_linker((int)out,ba[i],internal);
5397 emit_jl(0);
5398 }
5399 }
5400 if(opcode[i]==7) // BGTZ
5401 {
5402 emit_cmpimm(s1l,1);
5403 if(invert){
5404 nottaken=(int)out;
5405 emit_jl(1);
5406 }else{
5407 add_to_linker((int)out,ba[i],internal);
5408 emit_jge(0);
5409 }
5410 }
5411 if(invert) {
5412 if(taken) set_jump_target(taken,(int)out);
5413 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5414 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5415 if(adj) {
5416 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5417 add_to_linker((int)out,ba[i],internal);
5418 }else{
5419 emit_addnop(13);
5420 add_to_linker((int)out,ba[i],internal*2);
5421 }
5422 emit_jmp(0);
5423 }else
5424 #endif
5425 {
5426 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5427 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5428 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5429 if(internal)
5430 assem_debug("branch: internal\n");
5431 else
5432 assem_debug("branch: external\n");
5433 if(internal&&is_ds[(ba[i]-start)>>2]) {
5434 ds_assemble_entry(i);
5435 }
5436 else {
5437 add_to_linker((int)out,ba[i],internal);
5438 emit_jmp(0);
5439 }
5440 }
5441 set_jump_target(nottaken,(int)out);
5442 }
5443
5444 if(nottaken1) set_jump_target(nottaken1,(int)out);
5445 if(adj) {
5446 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5447 }
5448 } // (!unconditional)
5449 } // if(ooo)
5450 else
5451 {
5452 // In-order execution (branch first)
5453 //if(likely[i]) printf("IOL\n");
5454 //else
5455 //printf("IOE\n");
5456 int taken=0,nottaken=0,nottaken1=0;
5457 if(!unconditional&&!nop) {
5458 if(!only32)
5459 {
5460 assert(s1h>=0);
5461 if((opcode[i]&0x2f)==4) // BEQ
5462 {
5463 if(s2h>=0) emit_cmp(s1h,s2h);
5464 else emit_test(s1h,s1h);
5465 nottaken1=(int)out;
5466 emit_jne(2);
5467 }
5468 if((opcode[i]&0x2f)==5) // BNE
5469 {
5470 if(s2h>=0) emit_cmp(s1h,s2h);
5471 else emit_test(s1h,s1h);
5472 taken=(int)out;
5473 emit_jne(1);
5474 }
5475 if((opcode[i]&0x2f)==6) // BLEZ
5476 {
5477 emit_test(s1h,s1h);
5478 taken=(int)out;
5479 emit_js(1);
5480 nottaken1=(int)out;
5481 emit_jne(2);
5482 }
5483 if((opcode[i]&0x2f)==7) // BGTZ
5484 {
5485 emit_test(s1h,s1h);
5486 nottaken1=(int)out;
5487 emit_js(2);
5488 taken=(int)out;
5489 emit_jne(1);
5490 }
5491 } // if(!only32)
5492
5493 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5494 assert(s1l>=0);
5495 if((opcode[i]&0x2f)==4) // BEQ
5496 {
5497 if(s2l>=0) emit_cmp(s1l,s2l);
5498 else emit_test(s1l,s1l);
5499 nottaken=(int)out;
5500 emit_jne(2);
5501 }
5502 if((opcode[i]&0x2f)==5) // BNE
5503 {
5504 if(s2l>=0) emit_cmp(s1l,s2l);
5505 else emit_test(s1l,s1l);
5506 nottaken=(int)out;
5507 emit_jeq(2);
5508 }
5509 if((opcode[i]&0x2f)==6) // BLEZ
5510 {
5511 emit_cmpimm(s1l,1);
5512 nottaken=(int)out;
5513 emit_jge(2);
5514 }
5515 if((opcode[i]&0x2f)==7) // BGTZ
5516 {
5517 emit_cmpimm(s1l,1);
5518 nottaken=(int)out;
5519 emit_jl(2);
5520 }
5521 } // if(!unconditional)
5522 int adj;
5523 uint64_t ds_unneeded=branch_regs[i].u;
5524 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5525 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5526 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5527 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5528 ds_unneeded|=1;
5529 ds_unneeded_upper|=1;
5530 // branch taken
5531 if(!nop) {
5532 if(taken) set_jump_target(taken,(int)out);
5533 assem_debug("1:\n");
5534 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5535 ds_unneeded,ds_unneeded_upper);
5536 // load regs
5537 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5538 address_generation(i+1,&branch_regs[i],0);
5539 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5540 ds_assemble(i+1,&branch_regs[i]);
5541 cc=get_reg(branch_regs[i].regmap,CCREG);
5542 if(cc==-1) {
5543 emit_loadreg(CCREG,cc=HOST_CCREG);
5544 // CHECK: Is the following instruction (fall thru) allocated ok?
5545 }
5546 assert(cc==HOST_CCREG);
5547 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5548 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5549 assem_debug("cycle count (adj)\n");
5550 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5551 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5552 if(internal)
5553 assem_debug("branch: internal\n");
5554 else
5555 assem_debug("branch: external\n");
5556 if(internal&&is_ds[(ba[i]-start)>>2]) {
5557 ds_assemble_entry(i);
5558 }
5559 else {
5560 add_to_linker((int)out,ba[i],internal);
5561 emit_jmp(0);
5562 }
5563 }
5564 // branch not taken
5565 cop1_usable=prev_cop1_usable;
5566 if(!unconditional) {
5567 if(nottaken1) set_jump_target(nottaken1,(int)out);
5568 set_jump_target(nottaken,(int)out);
5569 assem_debug("2:\n");
5570 if(!likely[i]) {
5571 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5572 ds_unneeded,ds_unneeded_upper);
5573 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5574 address_generation(i+1,&branch_regs[i],0);
5575 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5576 ds_assemble(i+1,&branch_regs[i]);
5577 }
5578 cc=get_reg(branch_regs[i].regmap,CCREG);
5579 if(cc==-1&&!likely[i]) {
5580 // Cycle count isn't in a register; temporarily load it, then write it out
5581 emit_loadreg(CCREG,HOST_CCREG);
5582 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5583 int jaddr=(int)out;
5584 emit_jns(0);
5585 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5586 emit_storereg(CCREG,HOST_CCREG);
5587 }
5588 else{
5589 cc=get_reg(i_regmap,CCREG);
5590 assert(cc==HOST_CCREG);
5591 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5592 int jaddr=(int)out;
5593 emit_jns(0);
5594 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5595 }
5596 }
5597 }
5598}
5599
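// Assemble the REGIMM branches (BLTZ/BGEZ and their AL/likely variants).
// The structure mirrors cjump_assemble; for the link forms (handled here
// only when rs1==0, per the FIXME assert below) the return address is
// written to r31 even when the branch is not taken.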
5600void sjump_assemble(int i,struct regstat *i_regs)
5601{
5602 signed char *i_regmap=i_regs->regmap;
5603 int cc;
5604 int match;
5605 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5606 assem_debug("smatch=%d\n",match);
5607 int s1h,s1l;
5608 int prev_cop1_usable=cop1_usable;
5609 int unconditional=0,nevertaken=0;
5610 int only32=0;
5611 int ooo=1;
5612 int invert=0;
5613 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5614 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5615 if(likely[i]) ooo=0;
5616 if(!match) invert=1;
5617 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5618 if(i>(ba[i]-start)>>2) invert=1;
5619 #endif
5620
5621 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5622 assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5623
5624 if(ooo)
5625 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
5626 {
5627 // Write-after-read dependency prevents out of order execution
5628 // First test branch condition, then execute delay slot, then branch
5629 ooo=0;
5630 }
5631 // TODO: Conditional branches w/link must execute in-order so that
5632 // condition test and write to r31 occur before cycle count test
5633
5634 if(ooo) {
5635 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5636 s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5637 }
5638 else {
5639 s1l=get_reg(i_regmap,rs1[i]);
5640 s1h=get_reg(i_regmap,rs1[i]|64);
5641 }
5642 if(rs1[i]==0)
5643 {
5644 if(opcode2[i]&1) unconditional=1;
5645 else nevertaken=1;
5646 // These are never taken (r0 is never less than zero)
5647 //assert(opcode2[i]!=0);
5648 //assert(opcode2[i]!=2);
5649 //assert(opcode2[i]!=0x10);
5650 //assert(opcode2[i]!=0x12);
5651 }
5652 else {
5653 only32=(regs[i].was32>>rs1[i])&1;
5654 }
5655
5656 if(ooo) {
5657 // Out of order execution (delay slot first)
5658 //printf("OOOE\n");
5659 address_generation(i+1,i_regs,regs[i].regmap_entry);
5660 ds_assemble(i+1,i_regs);
5661 int adj;
5662 uint64_t bc_unneeded=branch_regs[i].u;
5663 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5664 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5665 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5666 bc_unneeded|=1;
5667 bc_unneeded_upper|=1;
5668 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5669 bc_unneeded,bc_unneeded_upper);
5670 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5671 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5672 if(rt1[i]==31) {
5673 int rt,return_address;
5674 assert(rt1[i+1]!=31);
5675 assert(rt2[i+1]!=31);
5676 rt=get_reg(branch_regs[i].regmap,31);
5677 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5678 if(rt>=0) {
5679 // Save the PC even if the branch is not taken
5680 return_address=start+i*4+8;
5681 emit_movimm(return_address,rt); // PC into link register
5682 #ifdef IMM_PREFETCH
5683 if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5684 #endif
5685 }
5686 }
5687 cc=get_reg(branch_regs[i].regmap,CCREG);
5688 assert(cc==HOST_CCREG);
5689 if(unconditional)
5690 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5691 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5692 assem_debug("cycle count (adj)\n");
5693 if(unconditional) {
5694 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5695 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5696 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5697 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5698 if(internal)
5699 assem_debug("branch: internal\n");
5700 else
5701 assem_debug("branch: external\n");
5702 if(internal&&is_ds[(ba[i]-start)>>2]) {
5703 ds_assemble_entry(i);
5704 }
5705 else {
5706 add_to_linker((int)out,ba[i],internal);
5707 emit_jmp(0);
5708 }
5709 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5710 if(((u_int)out)&7) emit_addnop(0);
5711 #endif
5712 }
5713 }
5714 else if(nevertaken) {
5715 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5716 int jaddr=(int)out;
5717 emit_jns(0);
5718 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5719 }
5720 else {
5721 int nottaken=0;
5722 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5723 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5724 if(!only32)
5725 {
5726 assert(s1h>=0);
5727 if(opcode2[i]==0) // BLTZ
5728 {
5729 emit_test(s1h,s1h);
5730 if(invert){
5731 nottaken=(int)out;
5732 emit_jns(1);
5733 }else{
5734 add_to_linker((int)out,ba[i],internal);
5735 emit_js(0);
5736 }
5737 }
5738 if(opcode2[i]==1) // BGEZ
5739 {
5740 emit_test(s1h,s1h);
5741 if(invert){
5742 nottaken=(int)out;
5743 emit_js(1);
5744 }else{
5745 add_to_linker((int)out,ba[i],internal);
5746 emit_jns(0);
5747 }
5748 }
5749 } // if(!only32)
5750 else
5751 {
5752 assert(s1l>=0);
5753 if(opcode2[i]==0) // BLTZ
5754 {
5755 emit_test(s1l,s1l);
5756 if(invert){
5757 nottaken=(int)out;
5758 emit_jns(1);
5759 }else{
5760 add_to_linker((int)out,ba[i],internal);
5761 emit_js(0);
5762 }
5763 }
5764 if(opcode2[i]==1) // BGEZ
5765 {
5766 emit_test(s1l,s1l);
5767 if(invert){
5768 nottaken=(int)out;
5769 emit_js(1);
5770 }else{
5771 add_to_linker((int)out,ba[i],internal);
5772 emit_jns(0);
5773 }
5774 }
5775 } // if(!only32)
5776
5777 if(invert) {
5778 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5779 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5780 if(adj) {
5781 emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5782 add_to_linker((int)out,ba[i],internal);
5783 }else{
5784 emit_addnop(13);
5785 add_to_linker((int)out,ba[i],internal*2);
5786 }
5787 emit_jmp(0);
5788 }else
5789 #endif
5790 {
5791 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5792 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5793 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5794 if(internal)
5795 assem_debug("branch: internal\n");
5796 else
5797 assem_debug("branch: external\n");
5798 if(internal&&is_ds[(ba[i]-start)>>2]) {
5799 ds_assemble_entry(i);
5800 }
5801 else {
5802 add_to_linker((int)out,ba[i],internal);
5803 emit_jmp(0);
5804 }
5805 }
5806 set_jump_target(nottaken,(int)out);
5807 }
5808
5809 if(adj) {
5810 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5811 }
5812 } // (!unconditional)
5813 } // if(ooo)
5814 else
5815 {
5816 // In-order execution (branch first)
5817 //printf("IOE\n");
5818 int nottaken=0;
5819 if(!unconditional) {
5820 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5821 if(!only32)
5822 {
5823 assert(s1h>=0);
5824 if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5825 {
5826 emit_test(s1h,s1h);
5827 nottaken=(int)out;
5828 emit_jns(1);
5829 }
5830 if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5831 {
5832 emit_test(s1h,s1h);
5833 nottaken=(int)out;
5834 emit_js(1);
5835 }
5836 } // if(!only32)
5837 else
5838 {
5839 assert(s1l>=0);
5840 if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
5841 {
5842 emit_test(s1l,s1l);
5843 nottaken=(int)out;
5844 emit_jns(1);
5845 }
5846 if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
5847 {
5848 emit_test(s1l,s1l);
5849 nottaken=(int)out;
5850 emit_js(1);
5851 }
5852 }
5853 } // if(!unconditional)
5854 int adj;
5855 uint64_t ds_unneeded=branch_regs[i].u;
5856 uint64_t ds_unneeded_upper=branch_regs[i].uu;
5857 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5858 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5859 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5860 ds_unneeded|=1;
5861 ds_unneeded_upper|=1;
5862 // branch taken
5863 if(!nevertaken) {
5864 //assem_debug("1:\n");
5865 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5866 ds_unneeded,ds_unneeded_upper);
5867 // load regs
5868 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5869 address_generation(i+1,&branch_regs[i],0);
5870 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5871 ds_assemble(i+1,&branch_regs[i]);
5872 cc=get_reg(branch_regs[i].regmap,CCREG);
5873 if(cc==-1) {
5874 emit_loadreg(CCREG,cc=HOST_CCREG);
5875 // CHECK: Is the following instruction (fall thru) allocated ok?
5876 }
5877 assert(cc==HOST_CCREG);
5878 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5879 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5880 assem_debug("cycle count (adj)\n");
5881 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5882 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5883 if(internal)
5884 assem_debug("branch: internal\n");
5885 else
5886 assem_debug("branch: external\n");
5887 if(internal&&is_ds[(ba[i]-start)>>2]) {
5888 ds_assemble_entry(i);
5889 }
5890 else {
5891 add_to_linker((int)out,ba[i],internal);
5892 emit_jmp(0);
5893 }
5894 }
5895 // branch not taken
5896 cop1_usable=prev_cop1_usable;
5897 if(!unconditional) {
5898 set_jump_target(nottaken,(int)out);
5899 assem_debug("1:\n");
5900 if(!likely[i]) {
5901 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5902 ds_unneeded,ds_unneeded_upper);
5903 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5904 address_generation(i+1,&branch_regs[i],0);
5905 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5906 ds_assemble(i+1,&branch_regs[i]);
5907 }
5908 cc=get_reg(branch_regs[i].regmap,CCREG);
5909 if(cc==-1&&!likely[i]) {
5910 // Cycle count isn't in a register, temporarily load it then write it out
5911 emit_loadreg(CCREG,HOST_CCREG);
5912 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5913 int jaddr=(int)out;
5914 emit_jns(0);
5915 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5916 emit_storereg(CCREG,HOST_CCREG);
5917 }
5918 else{
5919 cc=get_reg(i_regmap,CCREG);
5920 assert(cc==HOST_CCREG);
5921 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5922 int jaddr=(int)out;
5923 emit_jns(0);
5924 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5925 }
5926 }
5927 }
5928}
5929
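// Assemble a floating-point conditional branch (BC1F/BC1T and the likely
// variants). The FP condition bit (0x800000 in the FP status register) is
// tested, and a COP1-unusable check is emitted first if cop1 use has not
// already been verified in this block.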
5930void fjump_assemble(int i,struct regstat *i_regs)
5931{
5932 signed char *i_regmap=i_regs->regmap;
5933 int cc;
5934 int match;
5935 match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5936 assem_debug("fmatch=%d\n",match);
5937 int fs,cs;
5938 int eaddr;
5939 int ooo=1;
5940 int invert=0;
5941 int internal=internal_branch(branch_regs[i].is32,ba[i]);
5942 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5943 if(likely[i]) ooo=0;
5944 if(!match) invert=1;
5945 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5946 if(i>(ba[i]-start)>>2) invert=1;
5947 #endif
5948
5949 if(ooo)
5950 if(itype[i+1]==FCOMP)
5951 {
5952 // Write-after-read dependency prevents out of order execution
5953 // First test branch condition, then execute delay slot, then branch
5954 ooo=0;
5955 }
5956
5957 if(ooo) {
5958 fs=get_reg(branch_regs[i].regmap,FSREG);
5959 address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5960 }
5961 else {
5962 fs=get_reg(i_regmap,FSREG);
5963 }
5964
5965 // Check cop1 unusable
5966 if(!cop1_usable) {
5967 cs=get_reg(i_regmap,CSREG);
5968 assert(cs>=0);
5969 emit_testimm(cs,0x20000000);
5970 eaddr=(int)out;
5971 emit_jeq(0);
5972 add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5973 cop1_usable=1;
5974 }
5975
5976 if(ooo) {
5977 // Out of order execution (delay slot first)
5978 //printf("OOOE\n");
5979 ds_assemble(i+1,i_regs);
5980 int adj;
5981 uint64_t bc_unneeded=branch_regs[i].u;
5982 uint64_t bc_unneeded_upper=branch_regs[i].uu;
5983 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5984 bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5985 bc_unneeded|=1;
5986 bc_unneeded_upper|=1;
5987 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5988 bc_unneeded,bc_unneeded_upper);
5989 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5990 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5991 cc=get_reg(branch_regs[i].regmap,CCREG);
5992 assert(cc==HOST_CCREG);
5993 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5994 assem_debug("cycle count (adj)\n");
5995 if(1) {
5996 int nottaken=0;
5997 if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5998 if(1) {
5999 assert(fs>=0);
6000 emit_testimm(fs,0x800000);
6001 if(source[i]&0x10000) // BC1T
6002 {
6003 if(invert){
6004 nottaken=(int)out;
6005 emit_jeq(1);
6006 }else{
6007 add_to_linker((int)out,ba[i],internal);
6008 emit_jne(0);
6009 }
6010 }
6011           else // BC1F
6012           {
6013             if(invert){
6014               nottaken=(int)out;
6015               emit_jne(1);
6016             }else{
6017               add_to_linker((int)out,ba[i],internal);
6018               emit_jeq(0);
6019             }
6020           }
6021         } // if(1)
6022
6023 if(invert) {
6024 if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6025 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6026 else if(match) emit_addnop(13);
6027 #endif
6028 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6029 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6030 if(internal)
6031 assem_debug("branch: internal\n");
6032 else
6033 assem_debug("branch: external\n");
6034 if(internal&&is_ds[(ba[i]-start)>>2]) {
6035 ds_assemble_entry(i);
6036 }
6037 else {
6038 add_to_linker((int)out,ba[i],internal);
6039 emit_jmp(0);
6040 }
6041 set_jump_target(nottaken,(int)out);
6042 }
6043
6044 if(adj) {
6045 if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6046 }
6047 } // (!unconditional)
6048 } // if(ooo)
6049 else
6050 {
6051 // In-order execution (branch first)
6052 //printf("IOE\n");
6053 int nottaken=0;
6054 if(1) {
6055 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6056 if(1) {
6057 assert(fs>=0);
6058 emit_testimm(fs,0x800000);
6059 if(source[i]&0x10000) // BC1T
6060 {
6061 nottaken=(int)out;
6062 emit_jeq(1);
6063 }
6064 else // BC1F
6065 {
6066 nottaken=(int)out;
6067 emit_jne(1);
6068 }
6069 }
6070 } // if(!unconditional)
6071 int adj;
6072 uint64_t ds_unneeded=branch_regs[i].u;
6073 uint64_t ds_unneeded_upper=branch_regs[i].uu;
6074 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6075 ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6076 if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6077 ds_unneeded|=1;
6078 ds_unneeded_upper|=1;
6079 // branch taken
6080 //assem_debug("1:\n");
6081 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6082 ds_unneeded,ds_unneeded_upper);
6083 // load regs
6084 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6085 address_generation(i+1,&branch_regs[i],0);
6086 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6087 ds_assemble(i+1,&branch_regs[i]);
6088 cc=get_reg(branch_regs[i].regmap,CCREG);
6089 if(cc==-1) {
6090 emit_loadreg(CCREG,cc=HOST_CCREG);
6091 // CHECK: Is the following instruction (fall thru) allocated ok?
6092 }
6093 assert(cc==HOST_CCREG);
6094 store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6095 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6096 assem_debug("cycle count (adj)\n");
6097 if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6098 load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6099 if(internal)
6100 assem_debug("branch: internal\n");
6101 else
6102 assem_debug("branch: external\n");
6103 if(internal&&is_ds[(ba[i]-start)>>2]) {
6104 ds_assemble_entry(i);
6105 }
6106 else {
6107 add_to_linker((int)out,ba[i],internal);
6108 emit_jmp(0);
6109 }
6110
6111 // branch not taken
6112 if(1) { // <- FIXME (don't need this)
6113 set_jump_target(nottaken,(int)out);
6114 assem_debug("1:\n");
6115 if(!likely[i]) {
6116 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6117 ds_unneeded,ds_unneeded_upper);
6118 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6119 address_generation(i+1,&branch_regs[i],0);
6120 load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6121 ds_assemble(i+1,&branch_regs[i]);
6122 }
6123 cc=get_reg(branch_regs[i].regmap,CCREG);
6124 if(cc==-1&&!likely[i]) {
6125 // Cycle count isn't in a register, temporarily load it then write it out
6126 emit_loadreg(CCREG,HOST_CCREG);
6127 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6128 int jaddr=(int)out;
6129 emit_jns(0);
6130 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6131 emit_storereg(CCREG,HOST_CCREG);
6132 }
6133 else{
6134 cc=get_reg(i_regmap,CCREG);
6135 assert(cc==HOST_CCREG);
6136 emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6137 int jaddr=(int)out;
6138 emit_jns(0);
6139 add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6140 }
6141 }
6142 }
6143}
6144
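// Assemble a branch whose delay slot falls outside the current page
// (a "page spanning" branch). The branch decision is resolved here and the
// eventual target is left in HOST_BTREG; control then leaves the block
// through an external jump stub, and the delay slot is compiled as the
// entry point of the next block (see pagespan_ds below).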
6145static void pagespan_assemble(int i,struct regstat *i_regs)
6146{
6147 int s1l=get_reg(i_regs->regmap,rs1[i]);
6148 int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6149 int s2l=get_reg(i_regs->regmap,rs2[i]);
6150 int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6151 void *nt_branch=NULL;
6152 int taken=0;
6153 int nottaken=0;
6154 int unconditional=0;
6155 if(rs1[i]==0)
6156 {
6157 s1l=s2l;s1h=s2h;
6158 s2l=s2h=-1;
6159 }
6160 else if(rs2[i]==0)
6161 {
6162 s2l=s2h=-1;
6163 }
6164 if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6165 s1h=s2h=-1;
6166 }
6167 int hr=0;
6168 int addr,alt,ntaddr;
6169 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6170 else {
6171 while(hr<HOST_REGS)
6172 {
6173 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6174 (i_regs->regmap[hr]&63)!=rs1[i] &&
6175 (i_regs->regmap[hr]&63)!=rs2[i] )
6176 {
6177 addr=hr++;break;
6178 }
6179 hr++;
6180 }
6181 }
6182 while(hr<HOST_REGS)
6183 {
6184 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6185 (i_regs->regmap[hr]&63)!=rs1[i] &&
6186 (i_regs->regmap[hr]&63)!=rs2[i] )
6187 {
6188 alt=hr++;break;
6189 }
6190 hr++;
6191 }
6192 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6193 {
6194 while(hr<HOST_REGS)
6195 {
6196 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6197 (i_regs->regmap[hr]&63)!=rs1[i] &&
6198 (i_regs->regmap[hr]&63)!=rs2[i] )
6199 {
6200 ntaddr=hr;break;
6201 }
6202 hr++;
6203 }
6204 }
6205 assert(hr<HOST_REGS);
6206 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6207 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6208 }
6209 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6210 if(opcode[i]==2) // J
6211 {
6212 unconditional=1;
6213 }
6214 if(opcode[i]==3) // JAL
6215 {
6216 // TODO: mini_ht
6217 int rt=get_reg(i_regs->regmap,31);
6218 emit_movimm(start+i*4+8,rt);
6219 unconditional=1;
6220 }
6221 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6222 {
6223 emit_mov(s1l,addr);
6224 if(opcode2[i]==9) // JALR
6225 {
6226 int rt=get_reg(i_regs->regmap,rt1[i]);
6227 emit_movimm(start+i*4+8,rt);
6228 }
6229 }
6230 if((opcode[i]&0x3f)==4) // BEQ
6231 {
6232 if(rs1[i]==rs2[i])
6233 {
6234 unconditional=1;
6235 }
6236 else
6237 #ifdef HAVE_CMOV_IMM
6238 if(s1h<0) {
6239 if(s2l>=0) emit_cmp(s1l,s2l);
6240 else emit_test(s1l,s1l);
6241 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6242 }
6243 else
6244 #endif
6245 {
6246 assert(s1l>=0);
6247 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6248 if(s1h>=0) {
6249 if(s2h>=0) emit_cmp(s1h,s2h);
6250 else emit_test(s1h,s1h);
6251 emit_cmovne_reg(alt,addr);
6252 }
6253 if(s2l>=0) emit_cmp(s1l,s2l);
6254 else emit_test(s1l,s1l);
6255 emit_cmovne_reg(alt,addr);
6256 }
6257 }
6258 if((opcode[i]&0x3f)==5) // BNE
6259 {
6260 #ifdef HAVE_CMOV_IMM
6261 if(s1h<0) {
6262 if(s2l>=0) emit_cmp(s1l,s2l);
6263 else emit_test(s1l,s1l);
6264 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6265 }
6266 else
6267 #endif
6268 {
6269 assert(s1l>=0);
6270 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6271 if(s1h>=0) {
6272 if(s2h>=0) emit_cmp(s1h,s2h);
6273 else emit_test(s1h,s1h);
6274 emit_cmovne_reg(alt,addr);
6275 }
6276 if(s2l>=0) emit_cmp(s1l,s2l);
6277 else emit_test(s1l,s1l);
6278 emit_cmovne_reg(alt,addr);
6279 }
6280 }
6281 if((opcode[i]&0x3f)==0x14) // BEQL
6282 {
6283 if(s1h>=0) {
6284 if(s2h>=0) emit_cmp(s1h,s2h);
6285 else emit_test(s1h,s1h);
6286 nottaken=(int)out;
6287 emit_jne(0);
6288 }
6289 if(s2l>=0) emit_cmp(s1l,s2l);
6290 else emit_test(s1l,s1l);
6291 if(nottaken) set_jump_target(nottaken,(int)out);
6292 nottaken=(int)out;
6293 emit_jne(0);
6294 }
6295 if((opcode[i]&0x3f)==0x15) // BNEL
6296 {
6297 if(s1h>=0) {
6298 if(s2h>=0) emit_cmp(s1h,s2h);
6299 else emit_test(s1h,s1h);
6300 taken=(int)out;
6301 emit_jne(0);
6302 }
6303 if(s2l>=0) emit_cmp(s1l,s2l);
6304 else emit_test(s1l,s1l);
6305 nottaken=(int)out;
6306 emit_jeq(0);
6307 if(taken) set_jump_target(taken,(int)out);
6308 }
6309 if((opcode[i]&0x3f)==6) // BLEZ
6310 {
6311 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6312 emit_cmpimm(s1l,1);
6313 if(s1h>=0) emit_mov(addr,ntaddr);
6314 emit_cmovl_reg(alt,addr);
6315 if(s1h>=0) {
6316 emit_test(s1h,s1h);
6317 emit_cmovne_reg(ntaddr,addr);
6318 emit_cmovs_reg(alt,addr);
6319 }
6320 }
6321 if((opcode[i]&0x3f)==7) // BGTZ
6322 {
6323 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6324 emit_cmpimm(s1l,1);
6325 if(s1h>=0) emit_mov(addr,alt);
6326 emit_cmovl_reg(ntaddr,addr);
6327 if(s1h>=0) {
6328 emit_test(s1h,s1h);
6329 emit_cmovne_reg(alt,addr);
6330 emit_cmovs_reg(ntaddr,addr);
6331 }
6332 }
6333 if((opcode[i]&0x3f)==0x16) // BLEZL
6334 {
6335 assert((opcode[i]&0x3f)!=0x16);
6336 }
6337 if((opcode[i]&0x3f)==0x17) // BGTZL
6338 {
6339 assert((opcode[i]&0x3f)!=0x17);
6340 }
6341 assert(opcode[i]!=1); // BLTZ/BGEZ
6342
6343 //FIXME: Check CSREG
6344 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6345 if((source[i]&0x30000)==0) // BC1F
6346 {
6347 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6348 emit_testimm(s1l,0x800000);
6349 emit_cmovne_reg(alt,addr);
6350 }
6351 if((source[i]&0x30000)==0x10000) // BC1T
6352 {
6353 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6354 emit_testimm(s1l,0x800000);
6355 emit_cmovne_reg(alt,addr);
6356 }
6357 if((source[i]&0x30000)==0x20000) // BC1FL
6358 {
6359 emit_testimm(s1l,0x800000);
6360 nottaken=(int)out;
6361 emit_jne(0);
6362 }
6363 if((source[i]&0x30000)==0x30000) // BC1TL
6364 {
6365 emit_testimm(s1l,0x800000);
6366 nottaken=(int)out;
6367 emit_jeq(0);
6368 }
6369 }
6370
6371 assert(i_regs->regmap[HOST_CCREG]==CCREG);
6372 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6373 if(likely[i]||unconditional)
6374 {
6375 emit_movimm(ba[i],HOST_BTREG);
6376 }
6377 else if(addr!=HOST_BTREG)
6378 {
6379 emit_mov(addr,HOST_BTREG);
6380 }
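  // Emit the jump out of this block: if the delay-slot entry of the next
  // page is already compiled, link straight to it, otherwise leave the
  // branch pointing at the external jump stub emitted just below.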
6381 void *branch_addr=out;
6382 emit_jmp(0);
6383 int target_addr=start+i*4+5;
6384 void *stub=out;
6385 void *compiled_target_addr=check_addr(target_addr);
6386 emit_extjump_ds((int)branch_addr,target_addr);
6387 if(compiled_target_addr) {
6388 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6389 add_link(target_addr,stub);
6390 }
6391 else set_jump_target((int)branch_addr,(int)stub);
6392 if(likely[i]) {
6393 // Not-taken path
6394 set_jump_target((int)nottaken,(int)out);
6395 wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6396 void *branch_addr=out;
6397 emit_jmp(0);
6398 int target_addr=start+i*4+8;
6399 void *stub=out;
6400 void *compiled_target_addr=check_addr(target_addr);
6401 emit_extjump_ds((int)branch_addr,target_addr);
6402 if(compiled_target_addr) {
6403 set_jump_target((int)branch_addr,(int)compiled_target_addr);
6404 add_link(target_addr,stub);
6405 }
6406 else set_jump_target((int)branch_addr,(int)stub);
6407 }
6408}
6409
6410// Assemble the delay slot for the above
6411static void pagespan_ds()
6412{
6413 assem_debug("initial delay slot:\n");
6414 u_int vaddr=start+1;
6415 u_int page=get_page(vaddr);
6416 u_int vpage=get_vpage(vaddr);
6417 ll_add(jump_dirty+vpage,vaddr,(void *)out);
6418 do_dirty_stub_ds();
6419 ll_add(jump_in+page,vaddr,(void *)out);
6420 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6421 if(regs[0].regmap[HOST_CCREG]!=CCREG)
6422 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6423 if(regs[0].regmap[HOST_BTREG]!=BTREG)
6424 emit_writeword(HOST_BTREG,(int)&branch_target);
6425 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6426 address_generation(0,&regs[0],regs[0].regmap_entry);
6427 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6428 load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6429 cop1_usable=0;
6430 is_delayslot=0;
6431 switch(itype[0]) {
6432 case ALU:
6433 alu_assemble(0,&regs[0]);break;
6434 case IMM16:
6435 imm16_assemble(0,&regs[0]);break;
6436 case SHIFT:
6437 shift_assemble(0,&regs[0]);break;
6438 case SHIFTIMM:
6439 shiftimm_assemble(0,&regs[0]);break;
6440 case LOAD:
6441 load_assemble(0,&regs[0]);break;
6442 case LOADLR:
6443 loadlr_assemble(0,&regs[0]);break;
6444 case STORE:
6445 store_assemble(0,&regs[0]);break;
6446 case STORELR:
6447 storelr_assemble(0,&regs[0]);break;
6448 case COP0:
6449 cop0_assemble(0,&regs[0]);break;
6450 case COP1:
6451 cop1_assemble(0,&regs[0]);break;
6452 case C1LS:
6453 c1ls_assemble(0,&regs[0]);break;
6454 case COP2:
6455 cop2_assemble(0,&regs[0]);break;
6456 case C2LS:
6457 c2ls_assemble(0,&regs[0]);break;
6458 case C2OP:
6459 c2op_assemble(0,&regs[0]);break;
6460 case FCONV:
6461 fconv_assemble(0,&regs[0]);break;
6462 case FLOAT:
6463 float_assemble(0,&regs[0]);break;
6464 case FCOMP:
6465 fcomp_assemble(0,&regs[0]);break;
6466 case MULTDIV:
6467 multdiv_assemble(0,&regs[0]);break;
6468 case MOV:
6469 mov_assemble(0,&regs[0]);break;
6470 case SYSCALL:
6471 case HLECALL:
6472 case SPAN:
6473 case UJUMP:
6474 case RJUMP:
6475 case CJUMP:
6476 case SJUMP:
6477 case FJUMP:
6478 printf("Jump in the delay slot. This is probably a bug.\n");
6479 }
6480 int btaddr=get_reg(regs[0].regmap,BTREG);
6481 if(btaddr<0) {
6482 btaddr=get_reg(regs[0].regmap,-1);
6483 emit_readword((int)&branch_target,btaddr);
6484 }
6485 assert(btaddr!=HOST_CCREG);
6486 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6487#ifdef HOST_IMM8
6488 emit_movimm(start+4,HOST_TEMPREG);
6489 emit_cmp(btaddr,HOST_TEMPREG);
6490#else
6491 emit_cmpimm(btaddr,start+4);
6492#endif
6493 int branch=(int)out;
6494 emit_jeq(0);
6495 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6496 emit_jmp(jump_vaddr_reg[btaddr]);
6497 set_jump_target(branch,(int)out);
6498 store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6499 load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6500}
6501
6502// Basic liveness analysis for MIPS registers
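// A set bit in u (lower 32 bits) or uu (upper 32 bits) means that MIPS
// register is dead at this point; bit 0 (r0) is always set. The block is
// walked backwards and the results are stored in unneeded_reg[] and
// unneeded_reg_upper[].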
6503void unneeded_registers(int istart,int iend,int r)
6504{
6505 int i;
6506 uint64_t u,uu,b,bu;
6507 uint64_t temp_u,temp_uu;
6508 uint64_t tdep;
6509 if(iend==slen-1) {
6510 u=1;uu=1;
6511 }else{
6512 u=unneeded_reg[iend+1];
6513 uu=unneeded_reg_upper[iend+1];
6514 u=1;uu=1;
6515 }
6516 for (i=iend;i>=istart;i--)
6517 {
6518 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6519 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6520 {
6521 // If subroutine call, flag return address as a possible branch target
6522 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6523
6524 if(ba[i]<start || ba[i]>=(start+slen*4))
6525 {
6526 // Branch out of this block, flush all regs
6527 u=1;
6528 uu=1;
6529 /* Hexagon hack
6530 if(itype[i]==UJUMP&&rt1[i]==31)
6531 {
6532 uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6533 }
6534 if(itype[i]==RJUMP&&rs1[i]==31)
6535 {
6536 uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6537 }
6538 if(start>0x80000400&&start<0x80800000) {
6539 if(itype[i]==UJUMP&&rt1[i]==31)
6540 {
6541 //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6542 uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6543 }
6544 if(itype[i]==RJUMP&&rs1[i]==31)
6545 {
6546 //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6547 uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6548 }
6549 }*/
6550 branch_unneeded_reg[i]=u;
6551 branch_unneeded_reg_upper[i]=uu;
6552 // Merge in delay slot
6553 tdep=(~uu>>rt1[i+1])&1;
6554 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6555 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6556 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6557 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6558 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6559 u|=1;uu|=1;
6560 // If branch is "likely" (and conditional)
6561 // then we skip the delay slot on the fall-thru path
6562 if(likely[i]) {
6563 if(i<slen-1) {
6564 u&=unneeded_reg[i+2];
6565 uu&=unneeded_reg_upper[i+2];
6566 }
6567 else
6568 {
6569 u=1;
6570 uu=1;
6571 }
6572 }
6573 }
6574 else
6575 {
6576 // Internal branch, flag target
6577 bt[(ba[i]-start)>>2]=1;
6578 if(ba[i]<=start+i*4) {
6579 // Backward branch
6580 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6581 {
6582 // Unconditional branch
6583 temp_u=1;temp_uu=1;
6584 } else {
6585 // Conditional branch (not taken case)
6586 temp_u=unneeded_reg[i+2];
6587 temp_uu=unneeded_reg_upper[i+2];
6588 }
6589 // Merge in delay slot
6590 tdep=(~temp_uu>>rt1[i+1])&1;
6591 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6592 temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6593 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6594 temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6595 temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6596 temp_u|=1;temp_uu|=1;
6597 // If branch is "likely" (and conditional)
6598 // then we skip the delay slot on the fall-thru path
6599 if(likely[i]) {
6600 if(i<slen-1) {
6601 temp_u&=unneeded_reg[i+2];
6602 temp_uu&=unneeded_reg_upper[i+2];
6603 }
6604 else
6605 {
6606 temp_u=1;
6607 temp_uu=1;
6608 }
6609 }
6610 tdep=(~temp_uu>>rt1[i])&1;
6611 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6612 temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6613 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6614 temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6615 temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6616 temp_u|=1;temp_uu|=1;
6617 unneeded_reg[i]=temp_u;
6618 unneeded_reg_upper[i]=temp_uu;
6619 // Only go three levels deep. This recursion can take an
6620 // excessive amount of time if there are a lot of nested loops.
6621 if(r<2) {
6622 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6623 }else{
6624 unneeded_reg[(ba[i]-start)>>2]=1;
6625 unneeded_reg_upper[(ba[i]-start)>>2]=1;
6626 }
6627 } /*else*/ if(1) {
6628 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6629 {
6630 // Unconditional branch
6631 u=unneeded_reg[(ba[i]-start)>>2];
6632 uu=unneeded_reg_upper[(ba[i]-start)>>2];
6633 branch_unneeded_reg[i]=u;
6634 branch_unneeded_reg_upper[i]=uu;
6635 //u=1;
6636 //uu=1;
6637 //branch_unneeded_reg[i]=u;
6638 //branch_unneeded_reg_upper[i]=uu;
6639 // Merge in delay slot
6640 tdep=(~uu>>rt1[i+1])&1;
6641 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6642 uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6643 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6644 uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6645 uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6646 u|=1;uu|=1;
6647 } else {
6648 // Conditional branch
6649 b=unneeded_reg[(ba[i]-start)>>2];
6650 bu=unneeded_reg_upper[(ba[i]-start)>>2];
6651 branch_unneeded_reg[i]=b;
6652 branch_unneeded_reg_upper[i]=bu;
6653 //b=1;
6654 //bu=1;
6655 //branch_unneeded_reg[i]=b;
6656 //branch_unneeded_reg_upper[i]=bu;
6657 // Branch delay slot
6658 tdep=(~uu>>rt1[i+1])&1;
6659 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6660 bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6661 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6662 bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6663 bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6664 b|=1;bu|=1;
6665 // If branch is "likely" then we skip the
6666 // delay slot on the fall-thru path
6667 if(likely[i]) {
6668 u=b;
6669 uu=bu;
6670 if(i<slen-1) {
6671 u&=unneeded_reg[i+2];
6672 uu&=unneeded_reg_upper[i+2];
6673 //u=1;
6674 //uu=1;
6675 }
6676 } else {
6677 u&=b;
6678 uu&=bu;
6679 //u=1;
6680 //uu=1;
6681 }
6682 if(i<slen-1) {
6683 branch_unneeded_reg[i]&=unneeded_reg[i+2];
6684 branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6685 //branch_unneeded_reg[i]=1;
6686 //branch_unneeded_reg_upper[i]=1;
6687 } else {
6688 branch_unneeded_reg[i]=1;
6689 branch_unneeded_reg_upper[i]=1;
6690 }
6691 }
6692 }
6693 }
6694 }
6695 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
6696 {
6697 // SYSCALL instruction (software interrupt)
6698 u=1;
6699 uu=1;
6700 }
6701 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6702 {
6703 // ERET instruction (return from interrupt)
6704 u=1;
6705 uu=1;
6706 }
6707 //u=uu=1; // DEBUG
6708 tdep=(~uu>>rt1[i])&1;
6709 // Written registers are unneeded
6710 u|=1LL<<rt1[i];
6711 u|=1LL<<rt2[i];
6712 uu|=1LL<<rt1[i];
6713 uu|=1LL<<rt2[i];
6714 // Accessed registers are needed
6715 u&=~(1LL<<rs1[i]);
6716 u&=~(1LL<<rs2[i]);
6717 uu&=~(1LL<<us1[i]);
6718 uu&=~(1LL<<us2[i]);
6719 // Source-target dependencies
6720 uu&=~(tdep<<dep1[i]);
6721 uu&=~(tdep<<dep2[i]);
6722 // R0 is always unneeded
6723 u|=1;uu|=1;
6724 // Save it
6725 unneeded_reg[i]=u;
6726 unneeded_reg_upper[i]=uu;
6727 /*
6728 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6729 printf("U:");
6730 int r;
6731 for(r=1;r<=CCREG;r++) {
6732 if((unneeded_reg[i]>>r)&1) {
6733 if(r==HIREG) printf(" HI");
6734 else if(r==LOREG) printf(" LO");
6735 else printf(" r%d",r);
6736 }
6737 }
6738 printf(" UU:");
6739 for(r=1;r<=CCREG;r++) {
6740 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6741 if(r==HIREG) printf(" HI");
6742 else if(r==LOREG) printf(" LO");
6743 else printf(" r%d",r);
6744 }
6745 }
6746 printf("\n");*/
6747 }
6748#ifdef FORCE32
6749 for (i=iend;i>=istart;i--)
6750 {
6751 unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6752 }
6753#endif
6754}
6755
6756// Identify registers which are likely to contain 32-bit values
6757// This is used to predict whether any branches will jump to a
6758// location with 64-bit values in registers.
6759static void provisional_32bit()
6760{
6761 int i,j;
6762 uint64_t is32=1;
6763 uint64_t lastbranch=1;
6764
6765 for(i=0;i<slen;i++)
6766 {
6767 if(i>0) {
6768 if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
6769 if(i>1) is32=lastbranch;
6770 else is32=1;
6771 }
6772 }
6773 if(i>1)
6774 {
6775 if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
6776 if(likely[i-2]) {
6777 if(i>2) is32=lastbranch;
6778 else is32=1;
6779 }
6780 }
6781 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
6782 {
6783 if(rs1[i-2]==0||rs2[i-2]==0)
6784 {
6785 if(rs1[i-2]) {
6786 is32|=1LL<<rs1[i-2];
6787 }
6788 if(rs2[i-2]) {
6789 is32|=1LL<<rs2[i-2];
6790 }
6791 }
6792 }
6793 }
6794 // If something jumps here with 64-bit values
6795 // then promote those registers to 64 bits
6796 if(bt[i])
6797 {
6798 uint64_t temp_is32=is32;
6799 for(j=i-1;j>=0;j--)
6800 {
6801 if(ba[j]==start+i*4)
6802 //temp_is32&=branch_regs[j].is32;
6803 temp_is32&=p32[j];
6804 }
6805 for(j=i;j<slen;j++)
6806 {
6807 if(ba[j]==start+i*4)
6808 temp_is32=1;
6809 }
6810 is32=temp_is32;
6811 }
6812 int type=itype[i];
6813 int op=opcode[i];
6814 int op2=opcode2[i];
6815 int rt=rt1[i];
6816 int s1=rs1[i];
6817 int s2=rs2[i];
6818 if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
6819 // Branches don't write registers, consider the delay slot instead.
6820 type=itype[i+1];
6821 op=opcode[i+1];
6822 op2=opcode2[i+1];
6823 rt=rt1[i+1];
6824 s1=rs1[i+1];
6825 s2=rs2[i+1];
6826 lastbranch=is32;
6827 }
6828 switch(type) {
6829 case LOAD:
6830 if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
6831 opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
6832 is32&=~(1LL<<rt);
6833 else
6834 is32|=1LL<<rt;
6835 break;
6836 case STORE:
6837 case STORELR:
6838 break;
6839 case LOADLR:
6840 if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
6841 if(op==0x22) is32|=1LL<<rt; // LWL
6842 break;
6843 case IMM16:
6844 if (op==0x08||op==0x09|| // ADDI/ADDIU
6845 op==0x0a||op==0x0b|| // SLTI/SLTIU
6846 op==0x0c|| // ANDI
6847 op==0x0f) // LUI
6848 {
6849 is32|=1LL<<rt;
6850 }
6851 if(op==0x18||op==0x19) { // DADDI/DADDIU
6852 is32&=~(1LL<<rt);
6853 //if(imm[i]==0)
6854 // is32|=((is32>>s1)&1LL)<<rt;
6855 }
6856 if(op==0x0d||op==0x0e) { // ORI/XORI
6857 uint64_t sr=((is32>>s1)&1LL);
6858 is32&=~(1LL<<rt);
6859 is32|=sr<<rt;
6860 }
6861 break;
6862 case UJUMP:
6863 break;
6864 case RJUMP:
6865 break;
6866 case CJUMP:
6867 break;
6868 case SJUMP:
6869 break;
6870 case FJUMP:
6871 break;
6872 case ALU:
6873 if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
6874 is32|=1LL<<rt;
6875 }
6876 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6877 is32|=1LL<<rt;
6878 }
6879 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6880 uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
6881 is32&=~(1LL<<rt);
6882 is32|=sr<<rt;
6883 }
6884 else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
6885 if(s1==0&&s2==0) {
6886 is32|=1LL<<rt;
6887 }
6888 else if(s2==0) {
6889 uint64_t sr=((is32>>s1)&1LL);
6890 is32&=~(1LL<<rt);
6891 is32|=sr<<rt;
6892 }
6893 else if(s1==0) {
6894 uint64_t sr=((is32>>s2)&1LL);
6895 is32&=~(1LL<<rt);
6896 is32|=sr<<rt;
6897 }
6898 else {
6899 is32&=~(1LL<<rt);
6900 }
6901 }
6902 else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
6903 if(s1==0&&s2==0) {
6904 is32|=1LL<<rt;
6905 }
6906 else if(s2==0) {
6907 uint64_t sr=((is32>>s1)&1LL);
6908 is32&=~(1LL<<rt);
6909 is32|=sr<<rt;
6910 }
6911 else {
6912 is32&=~(1LL<<rt);
6913 }
6914 }
6915 break;
6916 case MULTDIV:
6917 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6918 is32&=~((1LL<<HIREG)|(1LL<<LOREG));
6919 }
6920 else {
6921 is32|=(1LL<<HIREG)|(1LL<<LOREG);
6922 }
6923 break;
6924 case MOV:
6925 {
6926 uint64_t sr=((is32>>s1)&1LL);
6927 is32&=~(1LL<<rt);
6928 is32|=sr<<rt;
6929 }
6930 break;
6931 case SHIFT:
6932 if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
6933 else is32|=1LL<<rt; // SLLV/SRLV/SRAV
6934 break;
6935 case SHIFTIMM:
6936 is32|=1LL<<rt;
6937 // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
6938 if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
6939 break;
6940 case COP0:
6941 if(op2==0) is32|=1LL<<rt; // MFC0
6942 break;
6943 case COP1:
6944 case COP2:
6945 if(op2==0) is32|=1LL<<rt; // MFC1
6946 if(op2==1) is32&=~(1LL<<rt); // DMFC1
6947 if(op2==2) is32|=1LL<<rt; // CFC1
6948 break;
6949 case C1LS:
6950 case C2LS:
6951 break;
6952 case FLOAT:
6953 case FCONV:
6954 break;
6955 case FCOMP:
6956 break;
6957 case C2OP:
6958 case SYSCALL:
6959 case HLECALL:
6960 break;
6961 default:
6962 break;
6963 }
6964 is32|=1;
6965 p32[i]=is32;
6966
6967 if(i>0)
6968 {
6969 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
6970 {
6971 if(rt1[i-1]==31) // JAL/JALR
6972 {
6973 // Subroutine call will return here, don't alloc any registers
6974 is32=1;
6975 }
6976 else if(i+1<slen)
6977 {
6978 // Internal branch will jump here, match registers to caller
6979 is32=0x3FFFFFFFFLL;
6980 }
6981 }
6982 }
6983 }
6984}
6985
6986// Identify registers which may be assumed to contain 32-bit values
6987// and where optimizations will rely on this.
6988// This is used to determine whether backward branches can safely
6989// jump to a location with 64-bit values in registers.
6990static void provisional_r32()
6991{
6992 u_int r32=0;
6993 int i;
6994
6995 for (i=slen-1;i>=0;i--)
6996 {
6997 int hr;
6998 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6999 {
7000 if(ba[i]<start || ba[i]>=(start+slen*4))
7001 {
7002 // Branch out of this block, don't need anything
7003 r32=0;
7004 }
7005 else
7006 {
7007 // Internal branch
7008 // Need whatever matches the target
7009 // (and doesn't get overwritten by the delay slot instruction)
7010 r32=0;
7011 int t=(ba[i]-start)>>2;
7012 if(ba[i]>start+i*4) {
7013 // Forward branch
7014 //if(!(requires_32bit[t]&~regs[i].was32))
7015 // r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7016 if(!(pr32[t]&~regs[i].was32))
7017 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7018 }else{
7019 // Backward branch
7020 if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7021 r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7022 }
7023 }
7024 // Conditional branch may need registers for following instructions
7025 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7026 {
7027 if(i<slen-2) {
7028 //r32|=requires_32bit[i+2];
7029 r32|=pr32[i+2];
7030 r32&=regs[i].was32;
7031 // Mark this address as a branch target since it may be called
7032 // upon return from interrupt
7033 //bt[i+2]=1;
7034 }
7035 }
7036 // Merge in delay slot
7037 if(!likely[i]) {
7038 // These are overwritten unless the branch is "likely"
7039 // and the delay slot is nullified if not taken
7040 r32&=~(1LL<<rt1[i+1]);
7041 r32&=~(1LL<<rt2[i+1]);
7042 }
7043 // Assume these are needed (delay slot)
7044 if(us1[i+1]>0)
7045 {
7046 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7047 }
7048 if(us2[i+1]>0)
7049 {
7050 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7051 }
7052 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7053 {
7054 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7055 }
7056 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7057 {
7058 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7059 }
7060 }
7061 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7062 {
7063 // SYSCALL instruction (software interrupt)
7064 r32=0;
7065 }
7066 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7067 {
7068 // ERET instruction (return from interrupt)
7069 r32=0;
7070 }
7071 // Check 32 bits
7072 r32&=~(1LL<<rt1[i]);
7073 r32&=~(1LL<<rt2[i]);
7074 if(us1[i]>0)
7075 {
7076 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7077 }
7078 if(us2[i]>0)
7079 {
7080 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7081 }
7082 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7083 {
7084 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7085 }
7086 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7087 {
7088 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7089 }
7090 //requires_32bit[i]=r32;
7091 pr32[i]=r32;
7092
7093 // Dirty registers which are 32-bit, require 32-bit input
7094 // as they will be written as 32-bit values
7095 for(hr=0;hr<HOST_REGS;hr++)
7096 {
7097 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7098 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7099 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7100 pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7101 //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7102 }
7103 }
7104 }
7105 }
7106}
7107
7108// Write back dirty registers as soon as we will no longer modify them,
7109// so that we don't end up with lots of writes at the branches.
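// Backward pass over per-host-register bitmaps (will_dirty/wont_dirty)
// describing how the cached registers get modified later in the block.
// With wr nonzero the results are folded into the dirty/wasdirty bits of
// regs[] and branch_regs[]; with wr==0 only the bitmaps are computed.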
7110void clean_registers(int istart,int iend,int wr)
7111{
7112 int i;
7113 int r;
7114 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7115 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7116 if(iend==slen-1) {
7117 will_dirty_i=will_dirty_next=0;
7118 wont_dirty_i=wont_dirty_next=0;
7119 }else{
7120 will_dirty_i=will_dirty_next=will_dirty[iend+1];
7121 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7122 }
7123 for (i=iend;i>=istart;i--)
7124 {
7125 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7126 {
7127 if(ba[i]<start || ba[i]>=(start+slen*4))
7128 {
7129 // Branch out of this block, flush all regs
7130 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7131 {
7132 // Unconditional branch
7133 will_dirty_i=0;
7134 wont_dirty_i=0;
7135 // Merge in delay slot (will dirty)
7136 for(r=0;r<HOST_REGS;r++) {
7137 if(r!=EXCLUDE_REG) {
7138 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7139 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7140 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7141 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7142 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7143 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7144 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7145 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7146 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7147 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7148 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7149 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7150 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7151 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7152 }
7153 }
7154 }
7155 else
7156 {
7157 // Conditional branch
7158 will_dirty_i=0;
7159 wont_dirty_i=wont_dirty_next;
7160 // Merge in delay slot (will dirty)
7161 for(r=0;r<HOST_REGS;r++) {
7162 if(r!=EXCLUDE_REG) {
7163 if(!likely[i]) {
7164 // Might not dirty if likely branch is not taken
7165 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7166 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7167 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7168 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7169 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7170 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7171 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7172 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7173 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7174 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7175 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7176 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7177 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7178 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7179 }
7180 }
7181 }
7182 }
7183 // Merge in delay slot (wont dirty)
7184 for(r=0;r<HOST_REGS;r++) {
7185 if(r!=EXCLUDE_REG) {
7186 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7187 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7188 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7189 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7190 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7191 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7192 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7193 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7194 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7195 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7196 }
7197 }
7198 if(wr) {
7199 #ifndef DESTRUCTIVE_WRITEBACK
7200 branch_regs[i].dirty&=wont_dirty_i;
7201 #endif
7202 branch_regs[i].dirty|=will_dirty_i;
7203 }
7204 }
7205 else
7206 {
7207 // Internal branch
7208 if(ba[i]<=start+i*4) {
7209 // Backward branch
7210 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7211 {
7212 // Unconditional branch
7213 temp_will_dirty=0;
7214 temp_wont_dirty=0;
7215 // Merge in delay slot (will dirty)
7216 for(r=0;r<HOST_REGS;r++) {
7217 if(r!=EXCLUDE_REG) {
7218 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7219 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7220 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7221 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7222 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7223 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7224 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7225 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7226 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7227 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7228 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7229 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7230 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7231 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7232 }
7233 }
7234 } else {
7235 // Conditional branch (not taken case)
7236 temp_will_dirty=will_dirty_next;
7237 temp_wont_dirty=wont_dirty_next;
7238 // Merge in delay slot (will dirty)
7239 for(r=0;r<HOST_REGS;r++) {
7240 if(r!=EXCLUDE_REG) {
7241 if(!likely[i]) {
7242 // Will not dirty if likely branch is not taken
7243 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7244 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7245 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7246 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7247 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7248 if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7249 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7250 //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7251 //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7252 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7253 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7254 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7255 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7256 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7257 }
7258 }
7259 }
7260 }
7261 // Merge in delay slot (wont dirty)
7262 for(r=0;r<HOST_REGS;r++) {
7263 if(r!=EXCLUDE_REG) {
7264 if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7265 if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7266 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7267 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7268 if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7269 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7270 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7271 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7272 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7273 if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7274 }
7275 }
7276 // Deal with changed mappings
7277 if(i<iend) {
7278 for(r=0;r<HOST_REGS;r++) {
7279 if(r!=EXCLUDE_REG) {
7280 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7281 temp_will_dirty&=~(1<<r);
7282 temp_wont_dirty&=~(1<<r);
7283 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7284 temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7285 temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7286 } else {
7287 temp_will_dirty|=1<<r;
7288 temp_wont_dirty|=1<<r;
7289 }
7290 }
7291 }
7292 }
7293 }
7294 if(wr) {
7295 will_dirty[i]=temp_will_dirty;
7296 wont_dirty[i]=temp_wont_dirty;
7297 clean_registers((ba[i]-start)>>2,i-1,0);
7298 }else{
7299 // Limit recursion. It can take an excessive amount
7300 // of time if there are a lot of nested loops.
7301 will_dirty[(ba[i]-start)>>2]=0;
7302 wont_dirty[(ba[i]-start)>>2]=-1;
7303 }
7304 }
7305 /*else*/ if(1)
7306 {
7307 if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7308 {
7309 // Unconditional branch
7310 will_dirty_i=0;
7311 wont_dirty_i=0;
7312 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7313 for(r=0;r<HOST_REGS;r++) {
7314 if(r!=EXCLUDE_REG) {
7315 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7316 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7317 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7318 }
7319 }
7320 }
7321 //}
7322 // Merge in delay slot
7323 for(r=0;r<HOST_REGS;r++) {
7324 if(r!=EXCLUDE_REG) {
7325 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7326 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7327 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7328 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7329 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7330 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7331 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7332 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7333 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7334 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7335 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7336 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7337 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7338 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7339 }
7340 }
7341 } else {
7342 // Conditional branch
7343 will_dirty_i=will_dirty_next;
7344 wont_dirty_i=wont_dirty_next;
7345 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7346 for(r=0;r<HOST_REGS;r++) {
7347 if(r!=EXCLUDE_REG) {
7348 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7349 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7350 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7351 }
7352 else
7353 {
7354 will_dirty_i&=~(1<<r);
7355 }
7356 // Treat delay slot as part of branch too
7357 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7358 will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7359 wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7360 }
7361 else
7362 {
7363 will_dirty[i+1]&=~(1<<r);
7364 }*/
7365 }
7366 }
7367 //}
7368 // Merge in delay slot
7369 for(r=0;r<HOST_REGS;r++) {
7370 if(r!=EXCLUDE_REG) {
7371 if(!likely[i]) {
7372 // Might not dirty if likely branch is not taken
7373 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7374 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7375 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7376 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7377 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7378 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7379 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7380 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7381 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7382 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7383 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7384 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7385 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7386 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7387 }
7388 }
7389 }
7390 }
7391 // Merge in delay slot
7392 for(r=0;r<HOST_REGS;r++) {
7393 if(r!=EXCLUDE_REG) {
7394 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7395 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7396 if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7397 if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7398 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7399 if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7400 if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7401 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7402 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7403 if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7404 }
7405 }
7406 if(wr) {
7407 #ifndef DESTRUCTIVE_WRITEBACK
7408 branch_regs[i].dirty&=wont_dirty_i;
7409 #endif
7410 branch_regs[i].dirty|=will_dirty_i;
7411 }
7412 }
7413 }
7414 }
7415 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
7416 {
7417 // SYSCALL instruction (software interrupt)
7418 will_dirty_i=0;
7419 wont_dirty_i=0;
7420 }
7421 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7422 {
7423 // ERET instruction (return from interrupt)
7424 will_dirty_i=0;
7425 wont_dirty_i=0;
7426 }
7427 will_dirty_next=will_dirty_i;
7428 wont_dirty_next=wont_dirty_i;
7429 for(r=0;r<HOST_REGS;r++) {
7430 if(r!=EXCLUDE_REG) {
7431 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7432 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7433 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7434 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7435 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7436 if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7437 if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7438 if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7439 if(i>istart) {
7440 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7441 {
7442 // Don't store a register immediately after writing it,
7443 // may prevent dual-issue.
7444 if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7445 if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7446 }
7447 }
7448 }
7449 }
7450 // Save it
7451 will_dirty[i]=will_dirty_i;
7452 wont_dirty[i]=wont_dirty_i;
7453 // Mark registers that won't be dirtied as not dirty
7454 if(wr) {
7455 /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7456 for(r=0;r<HOST_REGS;r++) {
7457 if((will_dirty_i>>r)&1) {
7458 printf(" r%d",r);
7459 }
7460 }
7461 printf("\n");*/
7462
7463 //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7464 regs[i].dirty|=will_dirty_i;
7465 #ifndef DESTRUCTIVE_WRITEBACK
7466 regs[i].dirty&=wont_dirty_i;
7467 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7468 {
7469 if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7470 for(r=0;r<HOST_REGS;r++) {
7471 if(r!=EXCLUDE_REG) {
7472 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7473 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7474                }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7475 }
7476 }
7477 }
7478 }
7479 else
7480 {
7481 if(i<iend) {
7482 for(r=0;r<HOST_REGS;r++) {
7483 if(r!=EXCLUDE_REG) {
7484 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7485 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7486                }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7487 }
7488 }
7489 }
7490 }
7491 #endif
7492 //}
7493 }
7494 // Deal with changed mappings
7495 temp_will_dirty=will_dirty_i;
7496 temp_wont_dirty=wont_dirty_i;
7497 for(r=0;r<HOST_REGS;r++) {
7498 if(r!=EXCLUDE_REG) {
7499 int nr;
7500 if(regs[i].regmap[r]==regmap_pre[i][r]) {
7501 if(wr) {
7502 #ifndef DESTRUCTIVE_WRITEBACK
7503 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7504 #endif
7505 regs[i].wasdirty|=will_dirty_i&(1<<r);
7506 }
7507 }
7508 else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7509 // Register moved to a different register
7510 will_dirty_i&=~(1<<r);
7511 wont_dirty_i&=~(1<<r);
7512 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7513 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7514 if(wr) {
7515 #ifndef DESTRUCTIVE_WRITEBACK
7516 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7517 #endif
7518 regs[i].wasdirty|=will_dirty_i&(1<<r);
7519 }
7520 }
7521 else {
7522 will_dirty_i&=~(1<<r);
7523 wont_dirty_i&=~(1<<r);
7524 if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7525 will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7526 wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7527 } else {
7528 wont_dirty_i|=1<<r;
7529            /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7530 }
7531 }
7532 }
7533 }
7534 }
7535}
7536
7537 /* disassembly */
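// Print one decoded instruction (debug aid); a leading '*' marks a known
// branch target.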
7538void disassemble_inst(int i)
7539{
7540 if (bt[i]) printf("*"); else printf(" ");
7541 switch(itype[i]) {
7542 case UJUMP:
7543 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7544 case CJUMP:
7545 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7546 case SJUMP:
7547 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7548 case FJUMP:
7549 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7550 case RJUMP:
7551 if (rt1[i]!=31)
7552 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7553 else
7554 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7555 break;
7556 case SPAN:
7557 printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7558 case IMM16:
7559 if(opcode[i]==0xf) //LUI
7560 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7561 else
7562 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7563 break;
7564 case LOAD:
7565 case LOADLR:
7566 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7567 break;
7568 case STORE:
7569 case STORELR:
7570 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7571 break;
7572 case ALU:
7573 case SHIFT:
7574 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7575 break;
7576 case MULTDIV:
7577 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7578 break;
7579 case SHIFTIMM:
7580 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7581 break;
7582 case MOV:
7583 if((opcode2[i]&0x1d)==0x10)
7584 printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7585 else if((opcode2[i]&0x1d)==0x11)
7586 printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7587 else
7588 printf (" %x: %s\n",start+i*4,insn[i]);
7589 break;
7590 case COP0:
7591 if(opcode2[i]==0)
7592 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7593 else if(opcode2[i]==4)
7594 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7595 else printf (" %x: %s\n",start+i*4,insn[i]);
7596 break;
7597 case COP1:
7598 if(opcode2[i]<3)
7599 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7600 else if(opcode2[i]>3)
7601 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7602 else printf (" %x: %s\n",start+i*4,insn[i]);
7603 break;
7604 case COP2:
7605 if(opcode2[i]<3)
7606 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7607 else if(opcode2[i]>3)
7608 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7609 else printf (" %x: %s\n",start+i*4,insn[i]);
7610 break;
7611 case C1LS:
7612 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7613 break;
7614 case C2LS:
7615 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7616 break;
7617 default:
7618 //printf (" %s %8x\n",insn[i],source[i]);
7619 printf (" %x: %s\n",start+i*4,insn[i]);
7620 }
7621}
7622
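// One-time setup: map the translation cache at BASE_ADDR (read/write/exec),
// reset the hash tables, flag all RDRAM pages in invalid_code[], and build
// the initial memory_map (0x80000000-0x807fffff -> rdram). Ends by calling
// tlb_hacks() and arch_init().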
7623void new_dynarec_init()
7624{
7625 printf("Init new dynarec\n");
7626 out=(u_char *)BASE_ADDR;
7627 if (mmap (out, 1<<TARGET_SIZE_2,
7628 PROT_READ | PROT_WRITE | PROT_EXEC,
7629 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7630            -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7631#ifdef MUPEN64
7632 rdword=&readmem_dword;
7633 fake_pc.f.r.rs=&readmem_dword;
7634 fake_pc.f.r.rt=&readmem_dword;
7635 fake_pc.f.r.rd=&readmem_dword;
7636#endif
7637 int n;
7638 for(n=0x80000;n<0x80800;n++)
7639 invalid_code[n]=1;
7640 for(n=0;n<65536;n++)
7641 hash_table[n][0]=hash_table[n][2]=-1;
7642 memset(mini_ht,-1,sizeof(mini_ht));
7643 memset(restore_candidate,0,sizeof(restore_candidate));
7644 copy=shadow;
7645 expirep=16384; // Expiry pointer, +2 blocks
7646 pending_exception=0;
7647 literalcount=0;
7648#ifdef HOST_IMM8
7649 // Copy this into local area so we don't have to put it in every literal pool
7650 invc_ptr=invalid_code;
7651#endif
7652 stop_after_jal=0;
7653 // TLB
7654 using_tlb=0;
7655 for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7656 memory_map[n]=-1;
7657 for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7658 memory_map[n]=((u_int)rdram-0x80000000)>>2;
7659 for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7660 memory_map[n]=-1;
7661#ifdef MUPEN64
7662 for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7663 writemem[n] = write_nomem_new;
7664 writememb[n] = write_nomemb_new;
7665 writememh[n] = write_nomemh_new;
7666#ifndef FORCE32
7667 writememd[n] = write_nomemd_new;
7668#endif
7669 readmem[n] = read_nomem_new;
7670 readmemb[n] = read_nomemb_new;
7671 readmemh[n] = read_nomemh_new;
7672#ifndef FORCE32
7673 readmemd[n] = read_nomemd_new;
7674#endif
7675 }
7676 for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7677 writemem[n] = write_rdram_new;
7678 writememb[n] = write_rdramb_new;
7679 writememh[n] = write_rdramh_new;
7680#ifndef FORCE32
7681 writememd[n] = write_rdramd_new;
7682#endif
7683 }
7684 for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7685 writemem[n] = write_nomem_new;
7686 writememb[n] = write_nomemb_new;
7687 writememh[n] = write_nomemh_new;
7688#ifndef FORCE32
7689 writememd[n] = write_nomemd_new;
7690#endif
7691 readmem[n] = read_nomem_new;
7692 readmemb[n] = read_nomemb_new;
7693 readmemh[n] = read_nomemh_new;
7694#ifndef FORCE32
7695 readmemd[n] = read_nomemd_new;
7696#endif
7697 }
7698#endif
7699 tlb_hacks();
7700 arch_init();
7701}
7702
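// Tear everything down: unmap the translation cache and clear the
// jump_in/jump_out/jump_dirty linked lists.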
7703void new_dynarec_cleanup()
7704{
7705 int n;
7706 if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7707 for(n=0;n<4096;n++) ll_clear(jump_in+n);
7708 for(n=0;n<4096;n++) ll_clear(jump_out+n);
7709 for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7710 #ifdef ROM_COPY
7711 if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7712 #endif
7713}
7714
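// Compile one block of guest code starting at addr. First decide which host
// memory backs the guest address (RAM, BIOS region, or a TLB-mapped page),
// then run the passes listed below, emitting code at 'out'. Returns nonzero
// if the address cannot be compiled so the caller can raise an exception.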
7715int new_recompile_block(int addr)
7716{
7717/*
7718 if(addr==0x800cd050) {
7719 int block;
7720 for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7721 int n;
7722 for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7723 }
7724*/
7725 //if(Count==365117028) tracedebug=1;
7726 assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7727 //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7728 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7729 //if(debug)
7730 //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7731 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7732 /*if(Count>=312978186) {
7733 rlist();
7734 }*/
7735 //rlist();
7736 start = (u_int)addr&~3;
7737 //assert(((u_int)addr&1)==0);
7738#ifdef PCSX
7739 if (Config.HLE && start == 0x80001000) {
7740 // XXX: is this enough? Maybe check hleSoftCall?
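 // Emit a stub that stores the target PC to pcaddr and exits via
 // new_dyna_leave, so the HLE BIOS call is serviced outside the dynarec.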
7741 u_int beginning=(u_int)out;
7742 u_int page=get_page(start);
7743 ll_add(jump_in+page,start,out);
7744 invalid_code[start>>12]=0;
7745 emit_movimm(start,0);
7746 emit_writeword(0,(int)&pcaddr);
7747 emit_jmp((int)new_dyna_leave);
7748#ifdef __arm__
7749 __clear_cache((void *)beginning,out);
7750#endif
7751 return 0;
7752 }
7753 else if ((u_int)addr < 0x00200000) {
7754 // used for BIOS calls mostly?
7755 source = (u_int *)((u_int)rdram+start-0);
7756 pagelimit = 0x00200000;
7757 }
7758 else
7759#endif
7760#ifdef MUPEN64
7761 if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
7762 source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
7763 pagelimit = 0xa4001000;
7764 }
7765 else
7766#endif
7767 if ((int)addr >= 0x80000000 && (int)addr < 0x80800000) {
7768 source = (u_int *)((u_int)rdram+start-0x80000000);
7769 pagelimit = 0x80800000;
7770 }
7771#ifndef DISABLE_TLB
7772 else if ((signed int)addr >= (signed int)0xC0000000) {
7773 //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
7774 //if(tlb_LUT_r[start>>12])
7775 //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
7776 if((signed int)memory_map[start>>12]>=0) {
7777 source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
7778 pagelimit=(start+4096)&0xFFFFF000;
7779 int map=memory_map[start>>12];
7780 int i;
7781 for(i=0;i<5;i++) {
7782 //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
7783 if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
7784 }
7785 assem_debug("pagelimit=%x\n",pagelimit);
7786 assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
7787 }
7788 else {
7789 assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
7790 //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
7791 return 1; // Caller will invoke exception handler
7792 }
7793 //printf("source= %x\n",(int)source);
7794 }
7795#endif
7796 else {
7797 printf("Compile at bogus memory address: %x \n", (int)addr);
7798 exit(1);
7799 }
7800
7801 /* Pass 1: disassemble */
7802 /* Pass 2: register dependencies, branch targets */
7803 /* Pass 3: register allocation */
7804 /* Pass 4: branch dependencies */
7805 /* Pass 5: pre-alloc */
7806 /* Pass 6: optimize clean/dirty state */
7807 /* Pass 7: flag 32-bit registers */
7808 /* Pass 8: assembly */
7809 /* Pass 9: linker */
7810 /* Pass 10: garbage collection / free memory */
7811
7812 int i,j;
7813 int done=0;
7814 unsigned int type,op,op2;
7815
7816 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7817
7818 /* Pass 1 disassembly */
7819
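// Decode each 32-bit word by its primary opcode (bits 31:26); SPECIAL ops use
// the function field (bits 5:0), REGIMM ops the rt field (bits 20:16), and
// coprocessor ops the rs field (bits 25:21) as the sub-opcode.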
7820 for(i=0;!done;i++) {
7821 bt[i]=0;likely[i]=0;op2=0;
7822 opcode[i]=op=source[i]>>26;
7823 switch(op)
7824 {
7825 case 0x00: strcpy(insn[i],"special"); type=NI;
7826 op2=source[i]&0x3f;
7827 switch(op2)
7828 {
7829 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7830 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7831 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7832 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7833 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7834 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7835 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7836 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7837 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7838 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7839 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7840 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7841 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7842 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7843 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7844 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7845 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7846 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7847 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7848 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7849 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7850 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7851 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7852 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7853 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7854 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7855 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7856 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7857 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7858 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7859 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7860 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7861 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7862 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7863 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7864 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7865 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7866 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7867 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7868 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7869 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7870 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7871 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7872 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7873 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7874 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7875 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7876 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7877 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7878 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7879 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7880 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7881 }
7882 break;
7883 case 0x01: strcpy(insn[i],"regimm"); type=NI;
7884 op2=(source[i]>>16)&0x1f;
7885 switch(op2)
7886 {
7887 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7888 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7889 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7890 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7891 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7892 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7893 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7894 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7895 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7896 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7897 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7898 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7899 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7900 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7901 }
7902 break;
7903 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7904 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7905 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7906 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7907 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7908 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7909 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7910 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7911 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7912 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7913 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7914 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7915 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7916 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7917 case 0x10: strcpy(insn[i],"cop0"); type=NI;
7918 op2=(source[i]>>21)&0x1f;
7919 switch(op2)
7920 {
7921 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7922 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7923 case 0x10: strcpy(insn[i],"tlb"); type=NI;
7924 switch(source[i]&0x3f)
7925 {
7926 case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7927 case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7928 case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7929 case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7930 case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7931 }
7932 }
7933 break;
7934 case 0x11: strcpy(insn[i],"cop1"); type=NI;
7935 op2=(source[i]>>21)&0x1f;
7936 switch(op2)
7937 {
7938 case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7939 case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7940 case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7941 case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7942 case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7943 case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7944 case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7945 switch((source[i]>>16)&0x3)
7946 {
7947 case 0x00: strcpy(insn[i],"BC1F"); break;
7948 case 0x01: strcpy(insn[i],"BC1T"); break;
7949 case 0x02: strcpy(insn[i],"BC1FL"); break;
7950 case 0x03: strcpy(insn[i],"BC1TL"); break;
7951 }
7952 break;
7953 case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7954 switch(source[i]&0x3f)
7955 {
7956 case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7957 case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7958 case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7959 case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7960 case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7961 case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7962 case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7963 case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7964 case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7965 case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7966 case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7967 case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7968 case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7969 case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7970 case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7971 case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7972 case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7973 case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7974 case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7975 case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7976 case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7977 case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7978 case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7979 case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7980 case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7981 case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7982 case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7983 case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7984 case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7985 case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7986 case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7987 case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7988 case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7989 case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7990 case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7991 }
7992 break;
7993 case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7994 switch(source[i]&0x3f)
7995 {
7996 case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7997 case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
7998 case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
7999 case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8000 case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8001 case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8002 case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8003 case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8004 case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8005 case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8006 case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8007 case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8008 case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8009 case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8010 case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8011 case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8012 case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8013 case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8014 case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8015 case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8016 case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8017 case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8018 case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8019 case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8020 case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8021 case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8022 case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8023 case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8024 case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8025 case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8026 case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8027 case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8028 case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8029 case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8030 case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8031 }
8032 break;
8033 case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8034 switch(source[i]&0x3f)
8035 {
8036 case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8037 case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8038 }
8039 break;
8040 case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8041 switch(source[i]&0x3f)
8042 {
8043 case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8044 case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8045 }
8046 break;
8047 }
8048 break;
8049 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8050 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8051 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8052 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8053 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8054 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8055 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8056 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8057 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8058 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8059 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8060 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8061 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8062 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8063 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8064 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8065 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8066 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8067 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8068 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8069 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8070 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8071 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8072 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8073 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8074 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8075 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8076 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8077 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8078 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8079 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8080 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8081 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8082 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8083#ifdef PCSX
8084 case 0x12: strcpy(insn[i],"COP2"); type=NI;
8085 op2=(source[i]>>21)&0x1f;
8086 switch(op2)
8087 {
8088 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8089 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8090 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8091 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8092 default:
8093 if (gte_handlers[source[i]&0x3f]!=NULL) {
8094 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8095 type=C2OP;
8096 }
8097 break;
8098 }
8099 break;
8100 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8101 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8102 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8103#endif
8104 default: strcpy(insn[i],"???"); type=NI;
8105 printf("NI %08x @%08x\n", source[i], addr + i*4);
8106 break;
8107 }
8108 itype[i]=type;
8109 opcode2[i]=op2;
8110 /* Get registers/immediates */
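 // rs1/rs2 are the guest source registers, rt1/rt2 the written registers,
 // us1/us2 mark sources whose full 64-bit value is required, and dep1/dep2
 // mark sources whose upper 32 bits determine the upper 32 bits of the result
 // (used for sign-extension tracking). Fields follow the MIPS layout:
 // rs = bits 25:21, rt = bits 20:16, rd = bits 15:11, imm = low 16 bits.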
8111 lt1[i]=0;
8112 us1[i]=0;
8113 us2[i]=0;
8114 dep1[i]=0;
8115 dep2[i]=0;
8116 switch(type) {
8117 case LOAD:
8118 rs1[i]=(source[i]>>21)&0x1f;
8119 rs2[i]=0;
8120 rt1[i]=(source[i]>>16)&0x1f;
8121 rt2[i]=0;
8122 imm[i]=(short)source[i];
8123 break;
8124 case STORE:
8125 case STORELR:
8126 rs1[i]=(source[i]>>21)&0x1f;
8127 rs2[i]=(source[i]>>16)&0x1f;
8128 rt1[i]=0;
8129 rt2[i]=0;
8130 imm[i]=(short)source[i];
8131 if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8132 break;
8133 case LOADLR:
8134 // LWL/LWR only load part of the register,
8135 // therefore the target register must be treated as a source too
8136 rs1[i]=(source[i]>>21)&0x1f;
8137 rs2[i]=(source[i]>>16)&0x1f;
8138 rt1[i]=(source[i]>>16)&0x1f;
8139 rt2[i]=0;
8140 imm[i]=(short)source[i];
8141 if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8142 if(op==0x26) dep1[i]=rt1[i]; // LWR
8143 break;
8144 case IMM16:
8145 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8146 else rs1[i]=(source[i]>>21)&0x1f;
8147 rs2[i]=0;
8148 rt1[i]=(source[i]>>16)&0x1f;
8149 rt2[i]=0;
8150 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8151 imm[i]=(unsigned short)source[i];
8152 }else{
8153 imm[i]=(short)source[i];
8154 }
8155 if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8156 if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8157 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8158 break;
8159 case UJUMP:
8160 rs1[i]=0;
8161 rs2[i]=0;
8162 rt1[i]=0;
8163 rt2[i]=0;
8164 // The JAL instruction writes to r31.
8165 if (op&1) {
8166 rt1[i]=31;
8167 }
8168 rs2[i]=CCREG;
8169 break;
8170 case RJUMP:
8171 rs1[i]=(source[i]>>21)&0x1f;
8172 rs2[i]=0;
8173 rt1[i]=0;
8174 rt2[i]=0;
8175 // The JALR instruction writes to rd.
8176 if (op2&1) {
8177 rt1[i]=(source[i]>>11)&0x1f;
8178 }
8179 rs2[i]=CCREG;
8180 break;
8181 case CJUMP:
8182 rs1[i]=(source[i]>>21)&0x1f;
8183 rs2[i]=(source[i]>>16)&0x1f;
8184 rt1[i]=0;
8185 rt2[i]=0;
8186 if(op&2) { // BGTZ/BLEZ
8187 rs2[i]=0;
8188 }
8189 us1[i]=rs1[i];
8190 us2[i]=rs2[i];
8191 likely[i]=op>>4;
8192 break;
8193 case SJUMP:
8194 rs1[i]=(source[i]>>21)&0x1f;
8195 rs2[i]=CCREG;
8196 rt1[i]=0;
8197 rt2[i]=0;
8198 us1[i]=rs1[i];
8199 if(op2&0x10) { // BxxAL
8200 rt1[i]=31;
8201 // NOTE: If the branch is not taken, r31 is still overwritten
8202 }
8203 likely[i]=(op2&2)>>1;
8204 break;
8205 case FJUMP:
8206 rs1[i]=FSREG;
8207 rs2[i]=CSREG;
8208 rt1[i]=0;
8209 rt2[i]=0;
8210 likely[i]=((source[i])>>17)&1;
8211 break;
8212 case ALU:
8213 rs1[i]=(source[i]>>21)&0x1f; // source
8214 rs2[i]=(source[i]>>16)&0x1f; // subtract amount
8215 rt1[i]=(source[i]>>11)&0x1f; // destination
8216 rt2[i]=0;
8217 if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8218 us1[i]=rs1[i];us2[i]=rs2[i];
8219 }
8220 else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8221 dep1[i]=rs1[i];dep2[i]=rs2[i];
8222 }
8223 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8224 dep1[i]=rs1[i];dep2[i]=rs2[i];
8225 }
8226 break;
8227 case MULTDIV:
8228 rs1[i]=(source[i]>>21)&0x1f; // source
8229 rs2[i]=(source[i]>>16)&0x1f; // divisor
8230 rt1[i]=HIREG;
8231 rt2[i]=LOREG;
8232 if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8233 us1[i]=rs1[i];us2[i]=rs2[i];
8234 }
8235 break;
8236 case MOV:
8237 rs1[i]=0;
8238 rs2[i]=0;
8239 rt1[i]=0;
8240 rt2[i]=0;
8241 if(op2==0x10) rs1[i]=HIREG; // MFHI
8242 if(op2==0x11) rt1[i]=HIREG; // MTHI
8243 if(op2==0x12) rs1[i]=LOREG; // MFLO
8244 if(op2==0x13) rt1[i]=LOREG; // MTLO
8245 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8246 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8247 dep1[i]=rs1[i];
8248 break;
8249 case SHIFT:
8250 rs1[i]=(source[i]>>16)&0x1f; // target of shift
8251 rs2[i]=(source[i]>>21)&0x1f; // shift amount
8252 rt1[i]=(source[i]>>11)&0x1f; // destination
8253 rt2[i]=0;
8254 // DSLLV/DSRLV/DSRAV are 64-bit
8255 if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8256 break;
8257 case SHIFTIMM:
8258 rs1[i]=(source[i]>>16)&0x1f;
8259 rs2[i]=0;
8260 rt1[i]=(source[i]>>11)&0x1f;
8261 rt2[i]=0;
8262 imm[i]=(source[i]>>6)&0x1f;
8263 // DSxx32 instructions
8264 if(op2>=0x3c) imm[i]|=0x20;
8265 // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8266 if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8267 break;
8268 case COP0:
8269 rs1[i]=0;
8270 rs2[i]=0;
8271 rt1[i]=0;
8272 rt2[i]=0;
8273 if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8274 if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8275 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8276 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8277 break;
8278 case COP1:
8279 case COP2:
8280 rs1[i]=0;
8281 rs2[i]=0;
8282 rt1[i]=0;
8283 rt2[i]=0;
8284 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8285 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8286 if(op2==5) us1[i]=rs1[i]; // DMTC1
8287 rs2[i]=CSREG;
8288 break;
8289 case C1LS:
8290 rs1[i]=(source[i]>>21)&0x1F;
8291 rs2[i]=CSREG;
8292 rt1[i]=0;
8293 rt2[i]=0;
8294 imm[i]=(short)source[i];
8295 break;
8296 case C2LS:
8297 rs1[i]=(source[i]>>21)&0x1F;
8298 rs2[i]=0;
8299 rt1[i]=0;
8300 rt2[i]=0;
8301 imm[i]=(short)source[i];
8302 break;
8303 case FLOAT:
8304 case FCONV:
8305 rs1[i]=0;
8306 rs2[i]=CSREG;
8307 rt1[i]=0;
8308 rt2[i]=0;
8309 break;
8310 case FCOMP:
8311 rs1[i]=FSREG;
8312 rs2[i]=CSREG;
8313 rt1[i]=FSREG;
8314 rt2[i]=0;
8315 break;
8316 case SYSCALL:
8317 case HLECALL:
8318 rs1[i]=CCREG;
8319 rs2[i]=0;
8320 rt1[i]=0;
8321 rt2[i]=0;
8322 break;
8323 default:
8324 rs1[i]=0;
8325 rs2[i]=0;
8326 rt1[i]=0;
8327 rt2[i]=0;
8328 }
8329 /* Calculate branch target addresses */
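 // J/JAL: target = (address of the delay slot & 0xF0000000) | (26-bit index << 2).
 // Conditional branches: target = address of the delay slot + (sign-extended
 // 16-bit offset << 2). ba[i] stays -1 for non-branch instructions.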
8330 if(type==UJUMP)
8331 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8332 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8333 ba[i]=start+i*4+8; // Ignore never taken branch
8334 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8335 ba[i]=start+i*4+8; // Ignore never taken branch
8336 else if(type==CJUMP||type==SJUMP||type==FJUMP)
8337 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8338 else ba[i]=-1;
8339 /* Is this the end of the block? */
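 // A block normally ends after an unconditional jump (J, JR, or "beq $0,$0",
 // recognized by 0x1000 in the upper halfword), unless an earlier branch in
 // the block targets an address beyond it.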
8340 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8341 if(rt1[i-1]==0) { // Not a linking jump (J/JR/b): end the block; JAL/JALR continue past the subroutine call below
8342 done=1;
8343 // Does the block continue due to a branch?
8344 for(j=i-1;j>=0;j--)
8345 {
8346 if(ba[j]==start+i*4+4) done=j=0;
8347 if(ba[j]==start+i*4+8) done=j=0;
8348 }
8349 }
8350 else {
8351 if(stop_after_jal) done=1;
8352 // Stop on BREAK
8353 if((source[i+1]&0xfc00003f)==0x0d) done=1;
8354 }
8355 // Don't recompile stuff that's already compiled
8356 if(check_addr(start+i*4+4)) done=1;
8357 // Don't get too close to the limit
8358 if(i>MAXBLOCK/2) done=1;
8359 }
8360 if(i>0&&itype[i-1]==SYSCALL&&stop_after_jal) done=1;
8361 if(i>0&&itype[i-1]==HLECALL) done=1;
8362 assert(i<MAXBLOCK-1);
8363 if(start+i*4==pagelimit-4) done=1;
8364 assert(start+i*4<pagelimit);
8365 if (i==MAXBLOCK-1) done=1;
8366 // Stop if we're compiling junk
8367 if(itype[i]==NI&&opcode[i]==0x11) {
8368 done=stop_after_jal=1;
8369 printf("Disabled speculative precompilation\n");
8370 }
8371 }
8372 slen=i;
8373 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8374 if(start+i*4==pagelimit) {
8375 itype[i-1]=SPAN;
8376 }
8377 }
8378 assert(slen>0);
8379
8380 /* Pass 2 - Register dependencies and branch targets */
8381
8382 unneeded_registers(0,slen-1,0);
8383
8384 /* Pass 3 - Register allocation */
8385
8386 struct regstat current; // Current register allocations/status
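 // regmap[hr] is the guest register cached in host register hr (-1 = free),
 // dirty marks host registers that must be written back, is32 marks guest
 // registers holding sign-extended 32-bit values, u/uu mark guest registers
 // whose lower/upper halves are not needed afterwards, and isconst/constmap
 // track host registers holding known constant values.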
8387 current.is32=1;
8388 current.dirty=0;
8389 current.u=unneeded_reg[0];
8390 current.uu=unneeded_reg_upper[0];
8391 clear_all_regs(current.regmap);
8392 alloc_reg(&current,0,CCREG);
8393 dirty_reg(&current,CCREG);
8394 current.isconst=0;
8395 current.wasconst=0;
8396 int ds=0;
8397 int cc=0;
8398 int hr;
8399
8400 provisional_32bit();
8401
8402 if((u_int)addr&1) {
8403 // First instruction is delay slot
8404 cc=-1;
8405 bt[1]=1;
8406 ds=1;
8407 unneeded_reg[0]=1;
8408 unneeded_reg_upper[0]=1;
8409 current.regmap[HOST_BTREG]=BTREG;
8410 }
8411
8412 for(i=0;i<slen;i++)
8413 {
8414 if(bt[i])
8415 {
8416 int hr;
8417 for(hr=0;hr<HOST_REGS;hr++)
8418 {
8419 // Is this really necessary?
8420 if(current.regmap[hr]==0) current.regmap[hr]=-1;
8421 }
8422 current.isconst=0;
8423 }
8424 if(i>1)
8425 {
8426 if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8427 {
8428 if(rs1[i-2]==0||rs2[i-2]==0)
8429 {
8430 if(rs1[i-2]) {
8431 current.is32|=1LL<<rs1[i-2];
8432 int hr=get_reg(current.regmap,rs1[i-2]|64);
8433 if(hr>=0) current.regmap[hr]=-1;
8434 }
8435 if(rs2[i-2]) {
8436 current.is32|=1LL<<rs2[i-2];
8437 int hr=get_reg(current.regmap,rs2[i-2]|64);
8438 if(hr>=0) current.regmap[hr]=-1;
8439 }
8440 }
8441 }
8442 }
8443 // If something jumps here with 64-bit values
8444 // then promote those registers to 64 bits
8445 if(bt[i])
8446 {
8447 uint64_t temp_is32=current.is32;
8448 for(j=i-1;j>=0;j--)
8449 {
8450 if(ba[j]==start+i*4)
8451 temp_is32&=branch_regs[j].is32;
8452 }
8453 for(j=i;j<slen;j++)
8454 {
8455 if(ba[j]==start+i*4)
8456 //temp_is32=1;
8457 temp_is32&=p32[j];
8458 }
8459 if(temp_is32!=current.is32) {
8460 //printf("dumping 32-bit regs (%x)\n",start+i*4);
8461 #ifdef DESTRUCTIVE_WRITEBACK
8462 for(hr=0;hr<HOST_REGS;hr++)
8463 {
8464 int r=current.regmap[hr];
8465 if(r>0&&r<64)
8466 {
8467 if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8468 temp_is32|=1LL<<r;
8469 //printf("restore %d\n",r);
8470 }
8471 }
8472 }
8473 #endif
8474 current.is32=temp_is32;
8475 }
8476 }
8477#ifdef FORCE32
8478 memset(p32, 0xff, sizeof(p32));
8479 current.is32=-1LL;
8480#endif
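// With FORCE32 (32-bit-only target) every guest register is treated as a
// sign-extended 32-bit value, so the 64-bit tracking above is effectively disabled.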
8481
8482 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8483 regs[i].wasconst=current.isconst;
8484 regs[i].was32=current.is32;
8485 regs[i].wasdirty=current.dirty;
8486 #ifdef DESTRUCTIVE_WRITEBACK
8487 // To change a dirty register from 32 to 64 bits, we must write
8488 // it out during the previous cycle (for branches, 2 cycles)
8489 if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8490 {
8491 uint64_t temp_is32=current.is32;
8492 for(j=i-1;j>=0;j--)
8493 {
8494 if(ba[j]==start+i*4+4)
8495 temp_is32&=branch_regs[j].is32;
8496 }
8497 for(j=i;j<slen;j++)
8498 {
8499 if(ba[j]==start+i*4+4)
8500 //temp_is32=1;
8501 temp_is32&=p32[j];
8502 }
8503 if(temp_is32!=current.is32) {
8504 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8505 for(hr=0;hr<HOST_REGS;hr++)
8506 {
8507 int r=current.regmap[hr];
8508 if(r>0)
8509 {
8510 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8511 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8512 {
8513 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8514 {
8515 //printf("dump %d/r%d\n",hr,r);
8516 current.regmap[hr]=-1;
8517 if(get_reg(current.regmap,r|64)>=0)
8518 current.regmap[get_reg(current.regmap,r|64)]=-1;
8519 }
8520 }
8521 }
8522 }
8523 }
8524 }
8525 }
8526 else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8527 {
8528 uint64_t temp_is32=current.is32;
8529 for(j=i-1;j>=0;j--)
8530 {
8531 if(ba[j]==start+i*4+8)
8532 temp_is32&=branch_regs[j].is32;
8533 }
8534 for(j=i;j<slen;j++)
8535 {
8536 if(ba[j]==start+i*4+8)
8537 //temp_is32=1;
8538 temp_is32&=p32[j];
8539 }
8540 if(temp_is32!=current.is32) {
8541 //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8542 for(hr=0;hr<HOST_REGS;hr++)
8543 {
8544 int r=current.regmap[hr];
8545 if(r>0)
8546 {
8547 if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8548 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8549 {
8550 //printf("dump %d/r%d\n",hr,r);
8551 current.regmap[hr]=-1;
8552 if(get_reg(current.regmap,r|64)>=0)
8553 current.regmap[get_reg(current.regmap,r|64)]=-1;
8554 }
8555 }
8556 }
8557 }
8558 }
8559 }
8560 #endif
8561 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8562 if(i+1<slen) {
8563 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8564 current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8565 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8566 current.u|=1;
8567 current.uu|=1;
8568 } else {
8569 current.u=1;
8570 current.uu=1;
8571 }
8572 } else {
8573 if(i+1<slen) {
8574 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8575 current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8576 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8577 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8578 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8579 current.u|=1;
8580 current.uu|=1;
8581 } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8582 }
8583 is_ds[i]=ds;
8584 if(ds) {
8585 ds=0; // Skip delay slot, already allocated as part of branch
8586 // ...but we need to alloc it in case something jumps here
8587 if(i+1<slen) {
8588 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8589 current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8590 }else{
8591 current.u=branch_unneeded_reg[i-1];
8592 current.uu=branch_unneeded_reg_upper[i-1];
8593 }
8594 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8595 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8596 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8597 current.u|=1;
8598 current.uu|=1;
8599 struct regstat temp;
8600 memcpy(&temp,&current,sizeof(current));
8601 temp.wasdirty=temp.dirty;
8602 temp.was32=temp.is32;
8603 // TODO: Take into account unconditional branches, as below
8604 delayslot_alloc(&temp,i);
8605 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8606 regs[i].wasdirty=temp.wasdirty;
8607 regs[i].was32=temp.was32;
8608 regs[i].dirty=temp.dirty;
8609 regs[i].is32=temp.is32;
8610 regs[i].isconst=0;
8611 regs[i].wasconst=0;
8612 current.isconst=0;
8613 // Create entry (branch target) regmap
8614 for(hr=0;hr<HOST_REGS;hr++)
8615 {
8616 int r=temp.regmap[hr];
8617 if(r>=0) {
8618 if(r!=regmap_pre[i][hr]) {
8619 regs[i].regmap_entry[hr]=-1;
8620 }
8621 else
8622 {
8623 if(r<64){
8624 if((current.u>>r)&1) {
8625 regs[i].regmap_entry[hr]=-1;
8626 regs[i].regmap[hr]=-1;
8627 //Don't clear regs in the delay slot as the branch might need them
8628 //current.regmap[hr]=-1;
8629 }else
8630 regs[i].regmap_entry[hr]=r;
8631 }
8632 else {
8633 if((current.uu>>(r&63))&1) {
8634 regs[i].regmap_entry[hr]=-1;
8635 regs[i].regmap[hr]=-1;
8636 //Don't clear regs in the delay slot as the branch might need them
8637 //current.regmap[hr]=-1;
8638 }else
8639 regs[i].regmap_entry[hr]=r;
8640 }
8641 }
8642 } else {
8643 // First instruction expects CCREG to be allocated
8644 if(i==0&&hr==HOST_CCREG)
8645 regs[i].regmap_entry[hr]=CCREG;
8646 else
8647 regs[i].regmap_entry[hr]=-1;
8648 }
8649 }
8650 }
8651 else { // Not delay slot
8652 switch(itype[i]) {
8653 case UJUMP:
8654 //current.isconst=0; // DEBUG
8655 //current.wasconst=0; // DEBUG
8656 //regs[i].wasconst=0; // DEBUG
8657 clear_const(&current,rt1[i]);
8658 alloc_cc(&current,i);
8659 dirty_reg(&current,CCREG);
8660 if (rt1[i]==31) {
8661 alloc_reg(&current,i,31);
8662 dirty_reg(&current,31);
8663 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8664 #ifdef REG_PREFETCH
8665 alloc_reg(&current,i,PTEMP);
8666 #endif
8667 //current.is32|=1LL<<rt1[i];
8668 }
8669 delayslot_alloc(&current,i+1);
8670 //current.isconst=0; // DEBUG
8671 ds=1;
8672 //printf("i=%d, isconst=%x\n",i,current.isconst);
8673 break;
8674 case RJUMP:
8675 //current.isconst=0;
8676 //current.wasconst=0;
8677 //regs[i].wasconst=0;
8678 clear_const(&current,rs1[i]);
8679 clear_const(&current,rt1[i]);
8680 alloc_cc(&current,i);
8681 dirty_reg(&current,CCREG);
8682 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8683 alloc_reg(&current,i,rs1[i]);
8684 if (rt1[i]!=0) {
8685 alloc_reg(&current,i,rt1[i]);
8686 dirty_reg(&current,rt1[i]);
8687 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8688 #ifdef REG_PREFETCH
8689 alloc_reg(&current,i,PTEMP);
8690 #endif
8691 }
8692 #ifdef USE_MINI_HT
8693 if(rs1[i]==31) { // JALR
8694 alloc_reg(&current,i,RHASH);
8695 #ifndef HOST_IMM_ADDR32
8696 alloc_reg(&current,i,RHTBL);
8697 #endif
8698 }
8699 #endif
8700 delayslot_alloc(&current,i+1);
8701 } else {
8702 // The delay slot overwrites our source register,
8703 // allocate a temporary register to hold the old value.
8704 current.isconst=0;
8705 current.wasconst=0;
8706 regs[i].wasconst=0;
8707 delayslot_alloc(&current,i+1);
8708 current.isconst=0;
8709 alloc_reg(&current,i,RTEMP);
8710 }
8711 //current.isconst=0; // DEBUG
8712 ds=1;
8713 break;
8714 case CJUMP:
8715 //current.isconst=0;
8716 //current.wasconst=0;
8717 //regs[i].wasconst=0;
8718 clear_const(&current,rs1[i]);
8719 clear_const(&current,rs2[i]);
8720 if((opcode[i]&0x3E)==4) // BEQ/BNE
8721 {
8722 alloc_cc(&current,i);
8723 dirty_reg(&current,CCREG);
8724 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8725 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8726 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8727 {
8728 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8729 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8730 }
8731 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8732 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8733 // The delay slot overwrites one of our conditions.
8734 // Allocate the branch condition registers instead.
8735 // Note that such a sequence of instructions could
8736 // be considered a bug since the branch can not be
8737 // re-executed if an exception occurs.
8738 current.isconst=0;
8739 current.wasconst=0;
8740 regs[i].wasconst=0;
8741 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8742 if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8743 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8744 {
8745 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8746 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8747 }
8748 }
8749 else delayslot_alloc(&current,i+1);
8750 }
8751 else
8752 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8753 {
8754 alloc_cc(&current,i);
8755 dirty_reg(&current,CCREG);
8756 alloc_reg(&current,i,rs1[i]);
8757 if(!(current.is32>>rs1[i]&1))
8758 {
8759 alloc_reg64(&current,i,rs1[i]);
8760 }
8761 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8762 // The delay slot overwrites one of our conditions.
8763 // Allocate the branch condition registers instead.
8764 // Note that such a sequence of instructions could
8765 // be considered a bug since the branch can not be
8766 // re-executed if an exception occurs.
8767 current.isconst=0;
8768 current.wasconst=0;
8769 regs[i].wasconst=0;
8770 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8771 if(!((current.is32>>rs1[i])&1))
8772 {
8773 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8774 }
8775 }
8776 else delayslot_alloc(&current,i+1);
8777 }
8778 else
8779 // Don't alloc the delay slot yet because we might not execute it
8780 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8781 {
8782 current.isconst=0;
8783 current.wasconst=0;
8784 regs[i].wasconst=0;
8785 alloc_cc(&current,i);
8786 dirty_reg(&current,CCREG);
8787 alloc_reg(&current,i,rs1[i]);
8788 alloc_reg(&current,i,rs2[i]);
8789 if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8790 {
8791 alloc_reg64(&current,i,rs1[i]);
8792 alloc_reg64(&current,i,rs2[i]);
8793 }
8794 }
8795 else
8796 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8797 {
8798 current.isconst=0;
8799 current.wasconst=0;
8800 regs[i].wasconst=0;
8801 alloc_cc(&current,i);
8802 dirty_reg(&current,CCREG);
8803 alloc_reg(&current,i,rs1[i]);
8804 if(!(current.is32>>rs1[i]&1))
8805 {
8806 alloc_reg64(&current,i,rs1[i]);
8807 }
8808 }
8809 ds=1;
8810 //current.isconst=0;
8811 break;
8812 case SJUMP:
8813 //current.isconst=0;
8814 //current.wasconst=0;
8815 //regs[i].wasconst=0;
8816 clear_const(&current,rs1[i]);
8817 clear_const(&current,rt1[i]);
8818 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8819 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8820 {
8821 alloc_cc(&current,i);
8822 dirty_reg(&current,CCREG);
8823 alloc_reg(&current,i,rs1[i]);
8824 if(!(current.is32>>rs1[i]&1))
8825 {
8826 alloc_reg64(&current,i,rs1[i]);
8827 }
8828 if (rt1[i]==31) { // BLTZAL/BGEZAL
8829 alloc_reg(&current,i,31);
8830 dirty_reg(&current,31);
8831 assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8832 //#ifdef REG_PREFETCH
8833 //alloc_reg(&current,i,PTEMP);
8834 //#endif
8835 //current.is32|=1LL<<rt1[i];
8836 }
8837 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8838 // The delay slot overwrites the branch condition.
8839 // Allocate the branch condition registers instead.
8840 // Note that such a sequence of instructions could
8841 // be considered a bug since the branch can not be
8842 // re-executed if an exception occurs.
8843 current.isconst=0;
8844 current.wasconst=0;
8845 regs[i].wasconst=0;
8846 if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8847 if(!((current.is32>>rs1[i])&1))
8848 {
8849 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8850 }
8851 }
8852 else delayslot_alloc(&current,i+1);
8853 }
8854 else
8855 // Don't alloc the delay slot yet because we might not execute it
8856 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8857 {
8858 current.isconst=0;
8859 current.wasconst=0;
8860 regs[i].wasconst=0;
8861 alloc_cc(&current,i);
8862 dirty_reg(&current,CCREG);
8863 alloc_reg(&current,i,rs1[i]);
8864 if(!(current.is32>>rs1[i]&1))
8865 {
8866 alloc_reg64(&current,i,rs1[i]);
8867 }
8868 }
8869 ds=1;
8870 //current.isconst=0;
8871 break;
8872 case FJUMP:
8873 current.isconst=0;
8874 current.wasconst=0;
8875 regs[i].wasconst=0;
8876 if(likely[i]==0) // BC1F/BC1T
8877 {
8878 // TODO: Theoretically we can run out of registers here on x86.
8879 // The delay slot can allocate up to six, and we need to check
8880 // CSREG before executing the delay slot. Possibly we can drop
8881 // the cycle count and then reload it after checking that the
8882 // FPU is in a usable state, or don't do out-of-order execution.
8883 alloc_cc(&current,i);
8884 dirty_reg(&current,CCREG);
8885 alloc_reg(&current,i,FSREG);
8886 alloc_reg(&current,i,CSREG);
8887 if(itype[i+1]==FCOMP) {
8888 // The delay slot overwrites the branch condition.
8889 // Allocate the branch condition registers instead.
8890 // Note that such a sequence of instructions could
8891 // be considered a bug since the branch can not be
8892 // re-executed if an exception occurs.
8893 alloc_cc(&current,i);
8894 dirty_reg(&current,CCREG);
8895 alloc_reg(&current,i,CSREG);
8896 alloc_reg(&current,i,FSREG);
8897 }
8898 else {
8899 delayslot_alloc(&current,i+1);
8900 alloc_reg(&current,i+1,CSREG);
8901 }
8902 }
8903 else
8904 // Don't alloc the delay slot yet because we might not execute it
8905 if(likely[i]) // BC1FL/BC1TL
8906 {
8907 alloc_cc(&current,i);
8908 dirty_reg(&current,CCREG);
8909 alloc_reg(&current,i,CSREG);
8910 alloc_reg(&current,i,FSREG);
8911 }
8912 ds=1;
8913 current.isconst=0;
8914 break;
8915 case IMM16:
8916 imm16_alloc(&current,i);
8917 break;
8918 case LOAD:
8919 case LOADLR:
8920 load_alloc(&current,i);
8921 break;
8922 case STORE:
8923 case STORELR:
8924 store_alloc(&current,i);
8925 break;
8926 case ALU:
8927 alu_alloc(&current,i);
8928 break;
8929 case SHIFT:
8930 shift_alloc(&current,i);
8931 break;
8932 case MULTDIV:
8933 multdiv_alloc(&current,i);
8934 break;
8935 case SHIFTIMM:
8936 shiftimm_alloc(&current,i);
8937 break;
8938 case MOV:
8939 mov_alloc(&current,i);
8940 break;
8941 case COP0:
8942 cop0_alloc(&current,i);
8943 break;
8944 case COP1:
8945 case COP2:
8946 cop1_alloc(&current,i);
8947 break;
8948 case C1LS:
8949 c1ls_alloc(&current,i);
8950 break;
8951 case C2LS:
8952 c2ls_alloc(&current,i);
8953 break;
8954 case C2OP:
8955 c2op_alloc(&current,i);
8956 break;
8957 case FCONV:
8958 fconv_alloc(&current,i);
8959 break;
8960 case FLOAT:
8961 float_alloc(&current,i);
8962 break;
8963 case FCOMP:
8964 fcomp_alloc(&current,i);
8965 break;
8966 case SYSCALL:
8967 case HLECALL:
8968 syscall_alloc(&current,i);
8969 break;
8970 case SPAN:
8971 pagespan_alloc(&current,i);
8972 break;
8973 }
8974
8975 // Drop the upper half of registers that have become 32-bit
8976 current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8977 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8978 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8979 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8980 current.uu|=1;
8981 } else {
8982 current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8983 current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8984 if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8985 current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8986 current.uu|=1;
8987 }
8988
8989 // Create entry (branch target) regmap
8990 for(hr=0;hr<HOST_REGS;hr++)
8991 {
8992 int r,or,er;
8993 r=current.regmap[hr];
8994 if(r>=0) {
8995 if(r!=regmap_pre[i][hr]) {
8996 // TODO: delay slot (?)
8997 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
8998 if(or<0||(r&63)>=TEMPREG){
8999 regs[i].regmap_entry[hr]=-1;
9000 }
9001 else
9002 {
9003 // Just move it to a different register
9004 regs[i].regmap_entry[hr]=r;
9005 // If it was dirty before, it's still dirty
9006 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9007 }
9008 }
9009 else
9010 {
9011 // Unneeded
9012 if(r==0){
9013 regs[i].regmap_entry[hr]=0;
9014 }
9015 else
9016 if(r<64){
9017 if((current.u>>r)&1) {
9018 regs[i].regmap_entry[hr]=-1;
9019 //regs[i].regmap[hr]=-1;
9020 current.regmap[hr]=-1;
9021 }else
9022 regs[i].regmap_entry[hr]=r;
9023 }
9024 else {
9025 if((current.uu>>(r&63))&1) {
9026 regs[i].regmap_entry[hr]=-1;
9027 //regs[i].regmap[hr]=-1;
9028 current.regmap[hr]=-1;
9029 }else
9030 regs[i].regmap_entry[hr]=r;
9031 }
9032 }
9033 } else {
9034 // Branches expect CCREG to be allocated at the target
9035 if(regmap_pre[i][hr]==CCREG)
9036 regs[i].regmap_entry[hr]=CCREG;
9037 else
9038 regs[i].regmap_entry[hr]=-1;
9039 }
9040 }
9041 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9042 }
9043 /* Branch post-alloc */
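 // branch_regs[i-1] holds the register state assumed on the taken path of the
 // branch at i-1 (i.e. including its delay slot); internal branches that
 // target this block match their registers against it.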
9044 if(i>0)
9045 {
9046 current.was32=current.is32;
9047 current.wasdirty=current.dirty;
9048 switch(itype[i-1]) {
9049 case UJUMP:
9050 memcpy(&branch_regs[i-1],&current,sizeof(current));
9051 branch_regs[i-1].isconst=0;
9052 branch_regs[i-1].wasconst=0;
9053 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9054 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9055 alloc_cc(&branch_regs[i-1],i-1);
9056 dirty_reg(&branch_regs[i-1],CCREG);
9057 if(rt1[i-1]==31) { // JAL
9058 alloc_reg(&branch_regs[i-1],i-1,31);
9059 dirty_reg(&branch_regs[i-1],31);
9060 branch_regs[i-1].is32|=1LL<<31;
9061 }
9062 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9063 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9064 break;
9065 case RJUMP:
9066 memcpy(&branch_regs[i-1],&current,sizeof(current));
9067 branch_regs[i-1].isconst=0;
9068 branch_regs[i-1].wasconst=0;
9069 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9070 branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9071 alloc_cc(&branch_regs[i-1],i-1);
9072 dirty_reg(&branch_regs[i-1],CCREG);
9073 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9074 if(rt1[i-1]!=0) { // JALR
9075 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9076 dirty_reg(&branch_regs[i-1],rt1[i-1]);
9077 branch_regs[i-1].is32|=1LL<<rt1[i-1];
9078 }
9079 #ifdef USE_MINI_HT
9080 if(rs1[i-1]==31) { // JALR
9081 alloc_reg(&branch_regs[i-1],i-1,RHASH);
9082 #ifndef HOST_IMM_ADDR32
9083 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9084 #endif
9085 }
9086 #endif
9087 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9088 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9089 break;
9090 case CJUMP:
9091 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9092 {
9093 alloc_cc(&current,i-1);
9094 dirty_reg(&current,CCREG);
9095 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9096 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9097 // The delay slot overwrote one of our conditions
9098 // Delay slot goes after the test (in order)
9099 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9100 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9101 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9102 current.u|=1;
9103 current.uu|=1;
9104 delayslot_alloc(&current,i);
9105 current.isconst=0;
9106 }
9107 else
9108 {
9109 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9110 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9111 // Alloc the branch condition registers
9112 if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9113 if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9114 if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9115 {
9116 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9117 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9118 }
9119 }
9120 memcpy(&branch_regs[i-1],&current,sizeof(current));
9121 branch_regs[i-1].isconst=0;
9122 branch_regs[i-1].wasconst=0;
9123 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9124 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9125 }
9126 else
9127 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9128 {
9129 alloc_cc(&current,i-1);
9130 dirty_reg(&current,CCREG);
9131 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9132 // The delay slot overwrote the branch condition
9133 // Delay slot goes after the test (in order)
9134 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9135 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9136 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9137 current.u|=1;
9138 current.uu|=1;
9139 delayslot_alloc(&current,i);
9140 current.isconst=0;
9141 }
9142 else
9143 {
9144 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9145 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9146 // Alloc the branch condition register
9147 alloc_reg(&current,i-1,rs1[i-1]);
9148 if(!(current.is32>>rs1[i-1]&1))
9149 {
9150 alloc_reg64(&current,i-1,rs1[i-1]);
9151 }
9152 }
9153 memcpy(&branch_regs[i-1],&current,sizeof(current));
9154 branch_regs[i-1].isconst=0;
9155 branch_regs[i-1].wasconst=0;
9156 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9157 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9158 }
9159 else
9160 // Alloc the delay slot in case the branch is taken
9161 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9162 {
9163 memcpy(&branch_regs[i-1],&current,sizeof(current));
9164 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9165 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9166 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9167 alloc_cc(&branch_regs[i-1],i);
9168 dirty_reg(&branch_regs[i-1],CCREG);
9169 delayslot_alloc(&branch_regs[i-1],i);
9170 branch_regs[i-1].isconst=0;
9171 alloc_reg(&current,i,CCREG); // Not taken path
9172 dirty_reg(&current,CCREG);
9173 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9174 }
9175 else
9176 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9177 {
9178 memcpy(&branch_regs[i-1],&current,sizeof(current));
9179 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9180 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9181 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9182 alloc_cc(&branch_regs[i-1],i);
9183 dirty_reg(&branch_regs[i-1],CCREG);
9184 delayslot_alloc(&branch_regs[i-1],i);
9185 branch_regs[i-1].isconst=0;
9186 alloc_reg(&current,i,CCREG); // Not taken path
9187 dirty_reg(&current,CCREG);
9188 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9189 }
9190 break;
9191 case SJUMP:
9192 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9193 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9194 {
9195 alloc_cc(&current,i-1);
9196 dirty_reg(&current,CCREG);
9197 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9198 // The delay slot overwrote the branch condition
9199 // Delay slot goes after the test (in order)
9200 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9201 current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9202 if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9203 current.u|=1;
9204 current.uu|=1;
9205 delayslot_alloc(&current,i);
9206 current.isconst=0;
9207 }
9208 else
9209 {
9210 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9211 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9212 // Alloc the branch condition register
9213 alloc_reg(&current,i-1,rs1[i-1]);
9214 if(!(current.is32>>rs1[i-1]&1))
9215 {
9216 alloc_reg64(&current,i-1,rs1[i-1]);
9217 }
9218 }
9219 memcpy(&branch_regs[i-1],&current,sizeof(current));
9220 branch_regs[i-1].isconst=0;
9221 branch_regs[i-1].wasconst=0;
9222 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9223 memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9224 }
9225 else
9226 // Alloc the delay slot in case the branch is taken
9227 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9228 {
9229 memcpy(&branch_regs[i-1],&current,sizeof(current));
9230 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9231 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9232 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9233 alloc_cc(&branch_regs[i-1],i);
9234 dirty_reg(&branch_regs[i-1],CCREG);
9235 delayslot_alloc(&branch_regs[i-1],i);
9236 branch_regs[i-1].isconst=0;
9237 alloc_reg(&current,i,CCREG); // Not taken path
9238 dirty_reg(&current,CCREG);
9239 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9240 }
9241 // FIXME: BLTZAL/BGEZAL
9242 if(opcode2[i-1]&0x10) { // BxxZAL
9243 alloc_reg(&branch_regs[i-1],i-1,31);
9244 dirty_reg(&branch_regs[i-1],31);
9245 branch_regs[i-1].is32|=1LL<<31;
9246 }
9247 break;
9248 case FJUMP:
9249 if(likely[i-1]==0) // BC1F/BC1T
9250 {
9251 alloc_cc(&current,i-1);
9252 dirty_reg(&current,CCREG);
9253 if(itype[i]==FCOMP) {
9254 // The delay slot overwrote the branch condition
9255 // Delay slot goes after the test (in order)
9256 delayslot_alloc(&current,i);
9257 current.isconst=0;
9258 }
9259 else
9260 {
9261 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9262 current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9263 // Alloc the branch condition register
9264 alloc_reg(&current,i-1,FSREG);
9265 }
9266 memcpy(&branch_regs[i-1],&current,sizeof(current));
9267 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9268 }
9269 else // BC1FL/BC1TL
9270 {
9271 // Alloc the delay slot in case the branch is taken
9272 memcpy(&branch_regs[i-1],&current,sizeof(current));
9273 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9274 branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9275 if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9276 alloc_cc(&branch_regs[i-1],i);
9277 dirty_reg(&branch_regs[i-1],CCREG);
9278 delayslot_alloc(&branch_regs[i-1],i);
9279 branch_regs[i-1].isconst=0;
9280 alloc_reg(&current,i,CCREG); // Not taken path
9281 dirty_reg(&current,CCREG);
9282 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9283 }
9284 break;
9285 }
9286
9287 if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9288 {
9289 if(rt1[i-1]==31) // JAL/JALR
9290 {
9291 // Subroutine call will return here, don't alloc any registers
9292 current.is32=1;
9293 current.dirty=0;
9294 clear_all_regs(current.regmap);
9295 alloc_reg(&current,i,CCREG);
9296 dirty_reg(&current,CCREG);
9297 }
9298 else if(i+1<slen)
9299 {
9300 // Internal branch will jump here, match registers to caller
9301 current.is32=0x3FFFFFFFFLL;
9302 current.dirty=0;
9303 clear_all_regs(current.regmap);
9304 alloc_reg(&current,i,CCREG);
9305 dirty_reg(&current,CCREG);
9306 for(j=i-1;j>=0;j--)
9307 {
9308 if(ba[j]==start+i*4+4) {
9309 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9310 current.is32=branch_regs[j].is32;
9311 current.dirty=branch_regs[j].dirty;
9312 break;
9313 }
9314 }
9315 while(j>=0) {
9316 if(ba[j]==start+i*4+4) {
9317 for(hr=0;hr<HOST_REGS;hr++) {
9318 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9319 current.regmap[hr]=-1;
9320 }
9321 current.is32&=branch_regs[j].is32;
9322 current.dirty&=branch_regs[j].dirty;
9323 }
9324 }
9325 j--;
9326 }
9327 }
9328 }
9329 }
9330
9331 // Count cycles in between branches
9332 ccadj[i]=cc;
9333 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9334 {
9335 cc=0;
9336 }
9337 else
9338 {
9339 cc++;
9340 }
9341
9342 flush_dirty_uppers(&current);
9343 if(!is_ds[i]) {
9344 regs[i].is32=current.is32;
9345 regs[i].dirty=current.dirty;
9346 regs[i].isconst=current.isconst;
9347 memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9348 }
9349 for(hr=0;hr<HOST_REGS;hr++) {
9350 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9351 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9352 regs[i].wasconst&=~(1<<hr);
9353 }
9354 }
9355 }
9356 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9357 }
9358
9359 /* Pass 4 - Cull unused host registers */
9360
9361 uint64_t nr=0;
9362
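 // nr is a bitmask over host registers: bit hr is set while the value cached
 // in host register hr is still needed by a later instruction or branch
 // target, and cleared once that value is overwritten or unmapped.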
9363 for (i=slen-1;i>=0;i--)
9364 {
9365 int hr;
9366 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9367 {
9368 if(ba[i]<start || ba[i]>=(start+slen*4))
9369 {
9370 // Branch out of this block, don't need anything
9371 nr=0;
9372 }
9373 else
9374 {
9375 // Internal branch
9376 // Need whatever matches the target
9377 nr=0;
9378 int t=(ba[i]-start)>>2;
9379 for(hr=0;hr<HOST_REGS;hr++)
9380 {
9381 if(regs[i].regmap_entry[hr]>=0) {
9382 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9383 }
9384 }
9385 }
9386 // Conditional branch may need registers for following instructions
9387 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9388 {
9389 if(i<slen-2) {
9390 nr|=needed_reg[i+2];
9391 for(hr=0;hr<HOST_REGS;hr++)
9392 {
9393 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9394 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9395 }
9396 }
9397 }
9398 // Don't need stuff which is overwritten
9399 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9400 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9401 // Merge in delay slot
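           // The delay slot (instruction i+1) executes together with the
           // branch, so its source registers must be kept live across the
           // branch here.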
9402 for(hr=0;hr<HOST_REGS;hr++)
9403 {
9404 if(!likely[i]) {
9405 // These are overwritten unless the branch is "likely"
9406 // and the delay slot is nullified if not taken
9407 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9408 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9409 }
9410 if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9411 if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9412 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9413 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9414 if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9415 if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9416 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9417 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9418 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9419 if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9420 if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9421 }
9422 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9423 if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9424 if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9425 }
9426 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9427 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9428 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9429 }
9430 }
9431 }
9432 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
9433 {
9434 // SYSCALL instruction (software interrupt)
9435 nr=0;
9436 }
9437 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9438 {
9439 // ERET instruction (return from interrupt)
9440 nr=0;
9441 }
9442 else // Non-branch
9443 {
9444 if(i<slen-1) {
9445 for(hr=0;hr<HOST_REGS;hr++) {
9446 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9447 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9448 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9449 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9450 }
9451 }
9452 }
9453 for(hr=0;hr<HOST_REGS;hr++)
9454 {
9455 // Overwritten registers are not needed
9456 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9457 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9458 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9459 // Source registers are needed
9460 if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9461 if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9462 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9463 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9464 if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9465 if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9466 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9467 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9468 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9469 if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9470 if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9471 }
9472 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9473 if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9474 if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9475 }
9476 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9477 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9478 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9479 }
9480       // Don't store a register immediately after writing it,
9481       // as doing so may prevent dual-issue.
9482       // But do store it if this is a branch target, otherwise we
9483       // might have to load the register before the branch.
9484 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9485 if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9486 (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9487 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9488 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9489 }
9490 if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9491 (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9492 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9493 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9494 }
9495 }
9496 }
9497 // Cycle count is needed at branches. Assume it is needed at the target too.
9498 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9499 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9500 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9501 }
9502 // Save it
9503 needed_reg[i]=nr;
9504
9505 // Deallocate unneeded registers
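     // A register is still kept, even when its bit in nr is clear, if it holds
     // a source or destination of this instruction (or of its delay slot), the
     // cycle count (CCREG), or one of the temporaries the assembler relies on.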
9506 for(hr=0;hr<HOST_REGS;hr++)
9507 {
9508 if(!((nr>>hr)&1)) {
9509 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9510 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9511 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9512 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9513 {
9514 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9515 {
9516 if(likely[i]) {
9517 regs[i].regmap[hr]=-1;
9518 regs[i].isconst&=~(1<<hr);
9519 if(i<slen-2) regmap_pre[i+2][hr]=-1;
9520 }
9521 }
9522 }
9523 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9524 {
9525 int d1=0,d2=0,map=0,temp=0;
9526 if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9527 {
9528 d1=dep1[i+1];
9529 d2=dep2[i+1];
9530 }
9531 if(using_tlb) {
9532 if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9533 itype[i+1]==STORE || itype[i+1]==STORELR ||
9534 itype[i+1]==C1LS || itype[i+1]==C2LS)
9535 map=TLREG;
9536 } else
9537 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9538 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9539 map=INVCP;
9540 }
9541 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9542 itype[i+1]==C1LS || itype[i+1]==C2LS)
9543 temp=FTEMP;
9544 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9545 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9546 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9547 (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9548 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9549 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9550 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9551 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9552 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9553 regs[i].regmap[hr]!=map )
9554 {
9555 regs[i].regmap[hr]=-1;
9556 regs[i].isconst&=~(1<<hr);
9557 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9558 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9559 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9560 (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9561 (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9562 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9563 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9564 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9565 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9566 branch_regs[i].regmap[hr]!=map)
9567 {
9568 branch_regs[i].regmap[hr]=-1;
9569 branch_regs[i].regmap_entry[hr]=-1;
9570 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9571 {
9572 if(!likely[i]&&i<slen-2) {
9573 regmap_pre[i+2][hr]=-1;
9574 }
9575 }
9576 }
9577 }
9578 }
9579 else
9580 {
9581 // Non-branch
9582 if(i>0)
9583 {
9584 int d1=0,d2=0,map=-1,temp=-1;
9585 if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9586 {
9587 d1=dep1[i];
9588 d2=dep2[i];
9589 }
9590 if(using_tlb) {
9591 if(itype[i]==LOAD || itype[i]==LOADLR ||
9592 itype[i]==STORE || itype[i]==STORELR ||
9593 itype[i]==C1LS || itype[i]==C2LS)
9594 map=TLREG;
9595 } else if(itype[i]==STORE || itype[i]==STORELR ||
9596 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9597 map=INVCP;
9598 }
9599 if(itype[i]==LOADLR || itype[i]==STORELR ||
9600 itype[i]==C1LS || itype[i]==C2LS)
9601 temp=FTEMP;
9602 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9603 (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9604 (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9605 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9606 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9607 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9608 {
9609 if(i<slen-1&&!is_ds[i]) {
9610 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9611 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9612 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9613 {
9614 printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9615 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9616 }
9617 regmap_pre[i+1][hr]=-1;
9618 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9619 }
9620 regs[i].regmap[hr]=-1;
9621 regs[i].isconst&=~(1<<hr);
9622 }
9623 }
9624 }
9625 }
9626 }
9627 }
9628
9629 /* Pass 5 - Pre-allocate registers */
9630
9631  // If a register is allocated during a loop, try to keep it allocated for
9632  // the entire loop, if possible.  This avoids loading/storing registers
9633  // inside the loop.
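  // For each backward branch (i.e. a loop), f_regmap records the register
  // assignments in effect at the branch.  The code below then walks the loop
  // body [t..i] and, wherever the same host register can keep holding the
  // same guest register throughout, rewrites regmap_entry/regmap/regmap_pre
  // over that range so the value stays in the register for the whole loop.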
9634
9635 signed char f_regmap[HOST_REGS];
9636 clear_all_regs(f_regmap);
9637 for(i=0;i<slen-1;i++)
9638 {
9639 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9640 {
9641 if(ba[i]>=start && ba[i]<(start+i*4))
9642 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9643 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9644 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9645 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9646 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9647 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9648 {
9649 int t=(ba[i]-start)>>2;
9650 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9651 if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
9652 for(hr=0;hr<HOST_REGS;hr++)
9653 {
9654 if(regs[i].regmap[hr]>64) {
9655 if(!((regs[i].dirty>>hr)&1))
9656 f_regmap[hr]=regs[i].regmap[hr];
9657 else f_regmap[hr]=-1;
9658 }
9659 else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9660 if(branch_regs[i].regmap[hr]>64) {
9661 if(!((branch_regs[i].dirty>>hr)&1))
9662 f_regmap[hr]=branch_regs[i].regmap[hr];
9663 else f_regmap[hr]=-1;
9664 }
9665 else if(branch_regs[i].regmap[hr]>=0) f_regmap[hr]=branch_regs[i].regmap[hr];
9666 if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9667 ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9668 ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9669 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9670 {
9671                // Test both, in case the delay slot is assembled out of
9672                // order (ooo); this could be done better...
9673 if(count_free_regs(branch_regs[i].regmap)<2
9674 ||count_free_regs(regs[i].regmap)<2)
9675 f_regmap[hr]=branch_regs[i].regmap[hr];
9676 }
9677 // Avoid dirty->clean transition
9678 // #ifdef DESTRUCTIVE_WRITEBACK here?
9679 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9680 if(f_regmap[hr]>0) {
9681 if(regs[t].regmap_entry[hr]<0) {
9682 int r=f_regmap[hr];
9683 for(j=t;j<=i;j++)
9684 {
9685 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9686 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9687 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9688 if(r>63) {
9689 // NB This can exclude the case where the upper-half
9690 // register is lower numbered than the lower-half
9691 // register. Not sure if it's worth fixing...
9692 if(get_reg(regs[j].regmap,r&63)<0) break;
9693 if(regs[j].is32&(1LL<<(r&63))) break;
9694 }
9695 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9696 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9697 int k;
9698 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9699 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9700 if(r>63) {
9701 if(get_reg(regs[i].regmap,r&63)<0) break;
9702 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9703 }
9704 k=i;
9705 while(k>1&&regs[k-1].regmap[hr]==-1) {
9706 if(itype[k-1]==STORE||itype[k-1]==STORELR
9707 ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
9708 ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
9709 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9710 if(count_free_regs(regs[k-1].regmap)<2) {
9711 //printf("no free regs for store %x\n",start+(k-1)*4);
9712 break;
9713 }
9714 }
9715 else
9716 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9717 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9718 //printf("no-match due to different register\n");
9719 break;
9720 }
9721 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9722 //printf("no-match due to branch\n");
9723 break;
9724 }
9725 // call/ret fast path assumes no registers allocated
9726 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
9727 break;
9728 }
9729 if(r>63) {
9730 // NB This can exclude the case where the upper-half
9731 // register is lower numbered than the lower-half
9732 // register. Not sure if it's worth fixing...
9733 if(get_reg(regs[k-1].regmap,r&63)<0) break;
9734 if(regs[k-1].is32&(1LL<<(r&63))) break;
9735 }
9736 k--;
9737 }
9738 if(i<slen-1) {
9739 if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9740 (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9741 //printf("bad match after branch\n");
9742 break;
9743 }
9744 }
9745 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9746 //printf("Extend r%d, %x ->\n",hr,start+k*4);
9747 while(k<i) {
9748 regs[k].regmap_entry[hr]=f_regmap[hr];
9749 regs[k].regmap[hr]=f_regmap[hr];
9750 regmap_pre[k+1][hr]=f_regmap[hr];
9751 regs[k].wasdirty&=~(1<<hr);
9752 regs[k].dirty&=~(1<<hr);
9753 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9754 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9755 regs[k].wasconst&=~(1<<hr);
9756 regs[k].isconst&=~(1<<hr);
9757 k++;
9758 }
9759 }
9760 else {
9761 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9762 break;
9763 }
9764 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9765 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9766 //printf("OK fill %x (r%d)\n",start+i*4,hr);
9767 regs[i].regmap_entry[hr]=f_regmap[hr];
9768 regs[i].regmap[hr]=f_regmap[hr];
9769 regs[i].wasdirty&=~(1<<hr);
9770 regs[i].dirty&=~(1<<hr);
9771 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9772 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9773 regs[i].wasconst&=~(1<<hr);
9774 regs[i].isconst&=~(1<<hr);
9775 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9776 branch_regs[i].wasdirty&=~(1<<hr);
9777 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9778 branch_regs[i].regmap[hr]=f_regmap[hr];
9779 branch_regs[i].dirty&=~(1<<hr);
9780 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9781 branch_regs[i].wasconst&=~(1<<hr);
9782 branch_regs[i].isconst&=~(1<<hr);
9783 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9784 regmap_pre[i+2][hr]=f_regmap[hr];
9785 regs[i+2].wasdirty&=~(1<<hr);
9786 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9787 assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9788 (regs[i+2].was32&(1LL<<f_regmap[hr])));
9789 }
9790 }
9791 }
9792 for(k=t;k<j;k++) {
9793 regs[k].regmap_entry[hr]=f_regmap[hr];
9794 regs[k].regmap[hr]=f_regmap[hr];
9795 regmap_pre[k+1][hr]=f_regmap[hr];
9796 regs[k+1].wasdirty&=~(1<<hr);
9797 regs[k].dirty&=~(1<<hr);
9798 regs[k].wasconst&=~(1<<hr);
9799 regs[k].isconst&=~(1<<hr);
9800 }
9801 if(regs[j].regmap[hr]==f_regmap[hr])
9802 regs[j].regmap_entry[hr]=f_regmap[hr];
9803 break;
9804 }
9805 if(j==i) break;
9806 if(regs[j].regmap[hr]>=0)
9807 break;
9808 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9809 //printf("no-match due to different register\n");
9810 break;
9811 }
9812 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9813 //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9814 break;
9815 }
9816 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9817 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9818 ||itype[j]==FCOMP||itype[j]==FCONV
9819 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9820 if(count_free_regs(regs[j].regmap)<2) {
9821 //printf("No free regs for store %x\n",start+j*4);
9822 break;
9823 }
9824 }
9825 else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9826 if(f_regmap[hr]>=64) {
9827 if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9828 break;
9829 }
9830 else
9831 {
9832 if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9833 break;
9834 }
9835 }
9836 }
9837 }
9838 }
9839 }
9840 }
9841 }
9842 }else{
9843 int count=0;
9844 for(hr=0;hr<HOST_REGS;hr++)
9845 {
9846 if(hr!=EXCLUDE_REG) {
9847 if(regs[i].regmap[hr]>64) {
9848 if(!((regs[i].dirty>>hr)&1))
9849 f_regmap[hr]=regs[i].regmap[hr];
9850 }
9851 else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
9852 else if(regs[i].regmap[hr]<0) count++;
9853 }
9854 }
9855 // Try to restore cycle count at branch targets
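        // Scan forward from this branch target for the next point where the
        // cycle count is already in HOST_CCREG, then extend that CCREG
        // allocation over the instructions in between so the cycle count does
        // not have to be reloaded right after the target.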
9856 if(bt[i]) {
9857 for(j=i;j<slen-1;j++) {
9858 if(regs[j].regmap[HOST_CCREG]!=-1) break;
9859 if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
9860 ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
9861 ||itype[j]==FCOMP||itype[j]==FCONV
9862 ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
9863 if(count_free_regs(regs[j].regmap)<2) {
9864 //printf("no free regs for store %x\n",start+j*4);
9865 break;
9866 }
9867 }
9868 else
9869 if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
9870 }
9871 if(regs[j].regmap[HOST_CCREG]==CCREG) {
9872 int k=i;
9873 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9874 while(k<j) {
9875 regs[k].regmap_entry[HOST_CCREG]=CCREG;
9876 regs[k].regmap[HOST_CCREG]=CCREG;
9877 regmap_pre[k+1][HOST_CCREG]=CCREG;
9878 regs[k+1].wasdirty|=1<<HOST_CCREG;
9879 regs[k].dirty|=1<<HOST_CCREG;
9880 regs[k].wasconst&=~(1<<HOST_CCREG);
9881 regs[k].isconst&=~(1<<HOST_CCREG);
9882 k++;
9883 }
9884 regs[j].regmap_entry[HOST_CCREG]=CCREG;
9885 }
9886 // Work backwards from the branch target
9887 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9888 {
9889 //printf("Extend backwards\n");
9890 int k;
9891 k=i;
9892 while(regs[k-1].regmap[HOST_CCREG]==-1) {
9893 if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
9894 ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
9895 ||itype[k-1]==FCONV||itype[k-1]==FCOMP
9896 ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
9897 if(count_free_regs(regs[k-1].regmap)<2) {
9898 //printf("no free regs for store %x\n",start+(k-1)*4);
9899 break;
9900 }
9901 }
9902 else
9903 if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
9904 k--;
9905 }
9906 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9907 //printf("Extend CC, %x ->\n",start+k*4);
9908 while(k<=i) {
9909 regs[k].regmap_entry[HOST_CCREG]=CCREG;
9910 regs[k].regmap[HOST_CCREG]=CCREG;
9911 regmap_pre[k+1][HOST_CCREG]=CCREG;
9912 regs[k+1].wasdirty|=1<<HOST_CCREG;
9913 regs[k].dirty|=1<<HOST_CCREG;
9914 regs[k].wasconst&=~(1<<HOST_CCREG);
9915 regs[k].isconst&=~(1<<HOST_CCREG);
9916 k++;
9917 }
9918 }
9919 else {
9920 //printf("Fail Extend CC, %x ->\n",start+k*4);
9921 }
9922 }
9923 }
9924 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9925 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9926 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9927 itype[i]!=FCONV&&itype[i]!=FCOMP&&
9928 itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
9929 {
9930 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9931 }
9932 }
9933 }
9934
9935 // This allocates registers (if possible) one instruction prior
9936 // to use, which can avoid a load-use penalty on certain CPUs.
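  // For example, if instruction i+1 reads a guest register into host register
  // hr and hr is free at instruction i, the mapping (and any known constant)
  // is moved up to i so the value is already loaded when i+1 executes.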
9937 for(i=0;i<slen-1;i++)
9938 {
9939 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9940 {
9941 if(!bt[i+1])
9942 {
9943 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9944 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9945 {
9946 if(rs1[i+1]) {
9947 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9948 {
9949 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9950 {
9951 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9952 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9953 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9954 regs[i].isconst&=~(1<<hr);
9955 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9956 constmap[i][hr]=constmap[i+1][hr];
9957 regs[i+1].wasdirty&=~(1<<hr);
9958 regs[i].dirty&=~(1<<hr);
9959 }
9960 }
9961 }
9962 if(rs2[i+1]) {
9963 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9964 {
9965 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9966 {
9967 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9968 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9969 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9970 regs[i].isconst&=~(1<<hr);
9971 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9972 constmap[i][hr]=constmap[i+1][hr];
9973 regs[i+1].wasdirty&=~(1<<hr);
9974 regs[i].dirty&=~(1<<hr);
9975 }
9976 }
9977 }
9978 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9979 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9980 {
9981 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9982 {
9983 regs[i].regmap[hr]=rs1[i+1];
9984 regmap_pre[i+1][hr]=rs1[i+1];
9985 regs[i+1].regmap_entry[hr]=rs1[i+1];
9986 regs[i].isconst&=~(1<<hr);
9987 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9988 constmap[i][hr]=constmap[i+1][hr];
9989 regs[i+1].wasdirty&=~(1<<hr);
9990 regs[i].dirty&=~(1<<hr);
9991 }
9992 }
9993 }
9994 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9995 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9996 {
9997 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9998 {
9999 regs[i].regmap[hr]=rs1[i+1];
10000 regmap_pre[i+1][hr]=rs1[i+1];
10001 regs[i+1].regmap_entry[hr]=rs1[i+1];
10002 regs[i].isconst&=~(1<<hr);
10003 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10004 constmap[i][hr]=constmap[i+1][hr];
10005 regs[i+1].wasdirty&=~(1<<hr);
10006 regs[i].dirty&=~(1<<hr);
10007 }
10008 }
10009 }
10010 #ifndef HOST_IMM_ADDR32
10011 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10012 hr=get_reg(regs[i+1].regmap,TLREG);
10013 if(hr>=0) {
10014 int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10015 if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10016 int nr;
10017 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10018 {
10019 regs[i].regmap[hr]=MGEN1+((i+1)&1);
10020 regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10021 regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10022 regs[i].isconst&=~(1<<hr);
10023 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10024 constmap[i][hr]=constmap[i+1][hr];
10025 regs[i+1].wasdirty&=~(1<<hr);
10026 regs[i].dirty&=~(1<<hr);
10027 }
10028 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10029 {
10030 // move it to another register
10031 regs[i+1].regmap[hr]=-1;
10032 regmap_pre[i+2][hr]=-1;
10033 regs[i+1].regmap[nr]=TLREG;
10034 regmap_pre[i+2][nr]=TLREG;
10035 regs[i].regmap[nr]=MGEN1+((i+1)&1);
10036 regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10037 regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10038 regs[i].isconst&=~(1<<nr);
10039 regs[i+1].isconst&=~(1<<nr);
10040 regs[i].dirty&=~(1<<nr);
10041 regs[i+1].wasdirty&=~(1<<nr);
10042 regs[i+1].dirty&=~(1<<nr);
10043 regs[i+2].wasdirty&=~(1<<nr);
10044 }
10045 }
10046 }
10047 }
10048 #endif
10049 if(itype[i+1]==STORE||itype[i+1]==STORELR
10050 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10051 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10052 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10053 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10054 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10055 assert(hr>=0);
10056 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10057 {
10058 regs[i].regmap[hr]=rs1[i+1];
10059 regmap_pre[i+1][hr]=rs1[i+1];
10060 regs[i+1].regmap_entry[hr]=rs1[i+1];
10061 regs[i].isconst&=~(1<<hr);
10062 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10063 constmap[i][hr]=constmap[i+1][hr];
10064 regs[i+1].wasdirty&=~(1<<hr);
10065 regs[i].dirty&=~(1<<hr);
10066 }
10067 }
10068 }
10069 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10070 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10071 int nr;
10072 hr=get_reg(regs[i+1].regmap,FTEMP);
10073 assert(hr>=0);
10074 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10075 {
10076 regs[i].regmap[hr]=rs1[i+1];
10077 regmap_pre[i+1][hr]=rs1[i+1];
10078 regs[i+1].regmap_entry[hr]=rs1[i+1];
10079 regs[i].isconst&=~(1<<hr);
10080 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10081 constmap[i][hr]=constmap[i+1][hr];
10082 regs[i+1].wasdirty&=~(1<<hr);
10083 regs[i].dirty&=~(1<<hr);
10084 }
10085 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10086 {
10087 // move it to another register
10088 regs[i+1].regmap[hr]=-1;
10089 regmap_pre[i+2][hr]=-1;
10090 regs[i+1].regmap[nr]=FTEMP;
10091 regmap_pre[i+2][nr]=FTEMP;
10092 regs[i].regmap[nr]=rs1[i+1];
10093 regmap_pre[i+1][nr]=rs1[i+1];
10094 regs[i+1].regmap_entry[nr]=rs1[i+1];
10095 regs[i].isconst&=~(1<<nr);
10096 regs[i+1].isconst&=~(1<<nr);
10097 regs[i].dirty&=~(1<<nr);
10098 regs[i+1].wasdirty&=~(1<<nr);
10099 regs[i+1].dirty&=~(1<<nr);
10100 regs[i+2].wasdirty&=~(1<<nr);
10101 }
10102 }
10103 }
10104          if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10105 if(itype[i+1]==LOAD)
10106 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10107 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10108 hr=get_reg(regs[i+1].regmap,FTEMP);
10109 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10110 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10111 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10112 }
10113 if(hr>=0&&regs[i].regmap[hr]<0) {
10114 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10115 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10116 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10117 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10118 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10119 regs[i].isconst&=~(1<<hr);
10120 regs[i+1].wasdirty&=~(1<<hr);
10121 regs[i].dirty&=~(1<<hr);
10122 }
10123 }
10124 }
10125 }
10126 }
10127 }
10128 }
10129
10130 /* Pass 6 - Optimize clean/dirty state */
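  // clean_registers() propagates the clean/dirty state of each host register
  // across the whole block (filling in the will_dirty/wont_dirty sets), so
  // that dirty values are written back only where they actually need to be.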
10131 clean_registers(0,slen-1,1);
10132
10133 /* Pass 7 - Identify 32-bit registers */
10134
10135 provisional_r32();
10136
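  // r32 is a bitmask over guest registers: bit r set means register r must
  // hold a valid 32-bit (sign-extended) value on entry to this instruction.
  // The requires_32bit[] values computed here are used later to restrict
  // which entry points (jump_in) may be used to enter the block.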
10137  uint64_t r32=0; // 64-bit: guest registers above r31 (HI/LO) use bits 32+
10138
10139 for (i=slen-1;i>=0;i--)
10140 {
10141 int hr;
10142 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10143 {
10144 if(ba[i]<start || ba[i]>=(start+slen*4))
10145 {
10146 // Branch out of this block, don't need anything
10147 r32=0;
10148 }
10149 else
10150 {
10151 // Internal branch
10152 // Need whatever matches the target
10153 // (and doesn't get overwritten by the delay slot instruction)
10154 r32=0;
10155 int t=(ba[i]-start)>>2;
10156 if(ba[i]>start+i*4) {
10157 // Forward branch
10158 if(!(requires_32bit[t]&~regs[i].was32))
10159 r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10160 }else{
10161 // Backward branch
10162 //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10163 // r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10164 if(!(pr32[t]&~regs[i].was32))
10165 r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10166 }
10167 }
10168 // Conditional branch may need registers for following instructions
10169 if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10170 {
10171 if(i<slen-2) {
10172 r32|=requires_32bit[i+2];
10173 r32&=regs[i].was32;
10174 // Mark this address as a branch target since it may be called
10175 // upon return from interrupt
10176 bt[i+2]=1;
10177 }
10178 }
10179 // Merge in delay slot
10180 if(!likely[i]) {
10181 // These are overwritten unless the branch is "likely"
10182 // and the delay slot is nullified if not taken
10183 r32&=~(1LL<<rt1[i+1]);
10184 r32&=~(1LL<<rt2[i+1]);
10185 }
10186 // Assume these are needed (delay slot)
10187 if(us1[i+1]>0)
10188 {
10189 if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10190 }
10191 if(us2[i+1]>0)
10192 {
10193 if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10194 }
10195 if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10196 {
10197 if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10198 }
10199 if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10200 {
10201 if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10202 }
10203 }
10204 else if(itype[i]==SYSCALL||itype[i]==HLECALL)
10205 {
10206 // SYSCALL instruction (software interrupt)
10207 r32=0;
10208 }
10209 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10210 {
10211 // ERET instruction (return from interrupt)
10212 r32=0;
10213 }
10214     // Update the 32-bit requirements for this instruction's own targets and sources
10215 r32&=~(1LL<<rt1[i]);
10216 r32&=~(1LL<<rt2[i]);
10217 if(us1[i]>0)
10218 {
10219 if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10220 }
10221 if(us2[i]>0)
10222 {
10223 if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10224 }
10225 if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10226 {
10227 if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10228 }
10229 if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10230 {
10231 if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10232 }
10233 requires_32bit[i]=r32;
10234
10235     // Dirty registers which are 32-bit require 32-bit input,
10236     // as they will be written back as 32-bit values
10237 for(hr=0;hr<HOST_REGS;hr++)
10238 {
10239 if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10240 if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10241 if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10242 requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10243 }
10244 }
10245 }
10246 //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10247 }
10248
10249 if(itype[slen-1]==SPAN) {
10250 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10251 }
10252
10253 /* Debug/disassembly */
10254 if((void*)assem_debug==(void*)printf)
10255 for(i=0;i<slen;i++)
10256 {
10257 printf("U:");
10258 int r;
10259 for(r=1;r<=CCREG;r++) {
10260 if((unneeded_reg[i]>>r)&1) {
10261 if(r==HIREG) printf(" HI");
10262 else if(r==LOREG) printf(" LO");
10263 else printf(" r%d",r);
10264 }
10265 }
10266#ifndef FORCE32
10267 printf(" UU:");
10268 for(r=1;r<=CCREG;r++) {
10269 if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10270 if(r==HIREG) printf(" HI");
10271 else if(r==LOREG) printf(" LO");
10272 else printf(" r%d",r);
10273 }
10274 }
10275 printf(" 32:");
10276 for(r=0;r<=CCREG;r++) {
10277 //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10278 if((regs[i].was32>>r)&1) {
10279 if(r==CCREG) printf(" CC");
10280 else if(r==HIREG) printf(" HI");
10281 else if(r==LOREG) printf(" LO");
10282 else printf(" r%d",r);
10283 }
10284 }
10285#endif
10286 printf("\n");
10287 #if defined(__i386__) || defined(__x86_64__)
10288 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10289 #endif
10290 #ifdef __arm__
10291 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10292 #endif
10293 printf("needs: ");
10294 if(needed_reg[i]&1) printf("eax ");
10295 if((needed_reg[i]>>1)&1) printf("ecx ");
10296 if((needed_reg[i]>>2)&1) printf("edx ");
10297 if((needed_reg[i]>>3)&1) printf("ebx ");
10298 if((needed_reg[i]>>5)&1) printf("ebp ");
10299 if((needed_reg[i]>>6)&1) printf("esi ");
10300 if((needed_reg[i]>>7)&1) printf("edi ");
10301 printf("r:");
10302 for(r=0;r<=CCREG;r++) {
10303 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10304 if((requires_32bit[i]>>r)&1) {
10305 if(r==CCREG) printf(" CC");
10306 else if(r==HIREG) printf(" HI");
10307 else if(r==LOREG) printf(" LO");
10308 else printf(" r%d",r);
10309 }
10310 }
10311 printf("\n");
10312 /*printf("pr:");
10313 for(r=0;r<=CCREG;r++) {
10314 //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10315 if((pr32[i]>>r)&1) {
10316 if(r==CCREG) printf(" CC");
10317 else if(r==HIREG) printf(" HI");
10318 else if(r==LOREG) printf(" LO");
10319 else printf(" r%d",r);
10320 }
10321 }
10322 if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10323 printf("\n");*/
10324 #if defined(__i386__) || defined(__x86_64__)
10325 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10326 printf("dirty: ");
10327 if(regs[i].wasdirty&1) printf("eax ");
10328 if((regs[i].wasdirty>>1)&1) printf("ecx ");
10329 if((regs[i].wasdirty>>2)&1) printf("edx ");
10330 if((regs[i].wasdirty>>3)&1) printf("ebx ");
10331 if((regs[i].wasdirty>>5)&1) printf("ebp ");
10332 if((regs[i].wasdirty>>6)&1) printf("esi ");
10333 if((regs[i].wasdirty>>7)&1) printf("edi ");
10334 #endif
10335 #ifdef __arm__
10336 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10337 printf("dirty: ");
10338 if(regs[i].wasdirty&1) printf("r0 ");
10339 if((regs[i].wasdirty>>1)&1) printf("r1 ");
10340 if((regs[i].wasdirty>>2)&1) printf("r2 ");
10341 if((regs[i].wasdirty>>3)&1) printf("r3 ");
10342 if((regs[i].wasdirty>>4)&1) printf("r4 ");
10343 if((regs[i].wasdirty>>5)&1) printf("r5 ");
10344 if((regs[i].wasdirty>>6)&1) printf("r6 ");
10345 if((regs[i].wasdirty>>7)&1) printf("r7 ");
10346 if((regs[i].wasdirty>>8)&1) printf("r8 ");
10347 if((regs[i].wasdirty>>9)&1) printf("r9 ");
10348 if((regs[i].wasdirty>>10)&1) printf("r10 ");
10349 if((regs[i].wasdirty>>12)&1) printf("r12 ");
10350 #endif
10351 printf("\n");
10352 disassemble_inst(i);
10353 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
10354 #if defined(__i386__) || defined(__x86_64__)
10355 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
10356 if(regs[i].dirty&1) printf("eax ");
10357 if((regs[i].dirty>>1)&1) printf("ecx ");
10358 if((regs[i].dirty>>2)&1) printf("edx ");
10359 if((regs[i].dirty>>3)&1) printf("ebx ");
10360 if((regs[i].dirty>>5)&1) printf("ebp ");
10361 if((regs[i].dirty>>6)&1) printf("esi ");
10362 if((regs[i].dirty>>7)&1) printf("edi ");
10363 #endif
10364 #ifdef __arm__
10365 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
10366 if(regs[i].dirty&1) printf("r0 ");
10367 if((regs[i].dirty>>1)&1) printf("r1 ");
10368 if((regs[i].dirty>>2)&1) printf("r2 ");
10369 if((regs[i].dirty>>3)&1) printf("r3 ");
10370 if((regs[i].dirty>>4)&1) printf("r4 ");
10371 if((regs[i].dirty>>5)&1) printf("r5 ");
10372 if((regs[i].dirty>>6)&1) printf("r6 ");
10373 if((regs[i].dirty>>7)&1) printf("r7 ");
10374 if((regs[i].dirty>>8)&1) printf("r8 ");
10375 if((regs[i].dirty>>9)&1) printf("r9 ");
10376 if((regs[i].dirty>>10)&1) printf("r10 ");
10377 if((regs[i].dirty>>12)&1) printf("r12 ");
10378 #endif
10379 printf("\n");
10380 if(regs[i].isconst) {
10381 printf("constants: ");
10382 #if defined(__i386__) || defined(__x86_64__)
10383 if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
10384 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
10385 if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
10386 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
10387 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
10388 if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
10389 if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
10390 #endif
10391 #ifdef __arm__
10392 if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
10393 if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
10394 if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
10395 if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
10396 if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
10397 if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
10398 if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
10399 if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
10400 if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
10401 if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
10402 if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
10403 if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
10404 #endif
10405 printf("\n");
10406 }
10407#ifndef FORCE32
10408 printf(" 32:");
10409 for(r=0;r<=CCREG;r++) {
10410 if((regs[i].is32>>r)&1) {
10411 if(r==CCREG) printf(" CC");
10412 else if(r==HIREG) printf(" HI");
10413 else if(r==LOREG) printf(" LO");
10414 else printf(" r%d",r);
10415 }
10416 }
10417 printf("\n");
10418#endif
10419 /*printf(" p32:");
10420 for(r=0;r<=CCREG;r++) {
10421 if((p32[i]>>r)&1) {
10422 if(r==CCREG) printf(" CC");
10423 else if(r==HIREG) printf(" HI");
10424 else if(r==LOREG) printf(" LO");
10425 else printf(" r%d",r);
10426 }
10427 }
10428 if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
10429 else printf("\n");*/
10430 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10431 #if defined(__i386__) || defined(__x86_64__)
10432 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
10433 if(branch_regs[i].dirty&1) printf("eax ");
10434 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
10435 if((branch_regs[i].dirty>>2)&1) printf("edx ");
10436 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
10437 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
10438 if((branch_regs[i].dirty>>6)&1) printf("esi ");
10439 if((branch_regs[i].dirty>>7)&1) printf("edi ");
10440 #endif
10441 #ifdef __arm__
10442 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
10443 if(branch_regs[i].dirty&1) printf("r0 ");
10444 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
10445 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
10446 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
10447 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
10448 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
10449 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
10450 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
10451 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
10452 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
10453 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
10454 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
10455 #endif
10456#ifndef FORCE32
10457 printf(" 32:");
10458 for(r=0;r<=CCREG;r++) {
10459 if((branch_regs[i].is32>>r)&1) {
10460 if(r==CCREG) printf(" CC");
10461 else if(r==HIREG) printf(" HI");
10462 else if(r==LOREG) printf(" LO");
10463 else printf(" r%d",r);
10464 }
10465 }
10466 printf("\n");
10467#endif
10468 }
10469 }
10470
10471 /* Pass 8 - Assembly */
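  // For each instruction: write back or invalidate registers that do not
  // carry over from the previous mapping, record the entry point in
  // instr_addr[], load the mapped guest registers and any known constants,
  // then dispatch to the per-itype assembler.  Delay slots are emitted by
  // the branch assemblers themselves, so the main loop skips them (ds flag).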
10472 linkcount=0;stubcount=0;
10473 ds=0;is_delayslot=0;
10474 cop1_usable=0;
10475 uint64_t is32_pre=0;
10476 u_int dirty_pre=0;
10477 u_int beginning=(u_int)out;
10478 if((u_int)addr&1) {
10479 ds=1;
10480 pagespan_ds();
10481 }
10482 for(i=0;i<slen;i++)
10483 {
10484 //if(ds) printf("ds: ");
10485 if((void*)assem_debug==(void*)printf) disassemble_inst(i);
10486 if(ds) {
10487 ds=0; // Skip delay slot
10488 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10489 instr_addr[i]=0;
10490 } else {
10491 #ifndef DESTRUCTIVE_WRITEBACK
10492 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10493 {
10494 wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
10495 unneeded_reg[i],unneeded_reg_upper[i]);
10496 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10497 unneeded_reg[i],unneeded_reg_upper[i]);
10498 }
10499 is32_pre=regs[i].is32;
10500 dirty_pre=regs[i].dirty;
10501 #endif
10502 // write back
10503 if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10504 {
10505 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10506 unneeded_reg[i],unneeded_reg_upper[i]);
10507 loop_preload(regmap_pre[i],regs[i].regmap_entry);
10508 }
10509 // branch target entry point
10510 instr_addr[i]=(u_int)out;
10511 assem_debug("<->\n");
10512 // load regs
10513 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10514 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10515 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10516 address_generation(i,&regs[i],regs[i].regmap_entry);
10517 load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10518 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10519 {
10520 // Load the delay slot registers if necessary
10521 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10522 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10523 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10524 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10525 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10526 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10527 }
10528 else if(i+1<slen)
10529 {
10530 // Preload registers for following instruction
10531 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10532 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10533 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10534 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10535 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10536 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10537 }
10538 // TODO: if(is_ooo(i)) address_generation(i+1);
10539 if(itype[i]==CJUMP||itype[i]==FJUMP)
10540 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10541 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10542 load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10543 if(bt[i]) cop1_usable=0;
10544 // assemble
10545 switch(itype[i]) {
10546 case ALU:
10547 alu_assemble(i,&regs[i]);break;
10548 case IMM16:
10549 imm16_assemble(i,&regs[i]);break;
10550 case SHIFT:
10551 shift_assemble(i,&regs[i]);break;
10552 case SHIFTIMM:
10553 shiftimm_assemble(i,&regs[i]);break;
10554 case LOAD:
10555 load_assemble(i,&regs[i]);break;
10556 case LOADLR:
10557 loadlr_assemble(i,&regs[i]);break;
10558 case STORE:
10559 store_assemble(i,&regs[i]);break;
10560 case STORELR:
10561 storelr_assemble(i,&regs[i]);break;
10562 case COP0:
10563 cop0_assemble(i,&regs[i]);break;
10564 case COP1:
10565 cop1_assemble(i,&regs[i]);break;
10566 case C1LS:
10567 c1ls_assemble(i,&regs[i]);break;
10568 case COP2:
10569 cop2_assemble(i,&regs[i]);break;
10570 case C2LS:
10571 c2ls_assemble(i,&regs[i]);break;
10572 case C2OP:
10573 c2op_assemble(i,&regs[i]);break;
10574 case FCONV:
10575 fconv_assemble(i,&regs[i]);break;
10576 case FLOAT:
10577 float_assemble(i,&regs[i]);break;
10578 case FCOMP:
10579 fcomp_assemble(i,&regs[i]);break;
10580 case MULTDIV:
10581 multdiv_assemble(i,&regs[i]);break;
10582 case MOV:
10583 mov_assemble(i,&regs[i]);break;
10584 case SYSCALL:
10585 syscall_assemble(i,&regs[i]);break;
10586 case HLECALL:
10587 hlecall_assemble(i,&regs[i]);break;
10588 case UJUMP:
10589 ujump_assemble(i,&regs[i]);ds=1;break;
10590 case RJUMP:
10591 rjump_assemble(i,&regs[i]);ds=1;break;
10592 case CJUMP:
10593 cjump_assemble(i,&regs[i]);ds=1;break;
10594 case SJUMP:
10595 sjump_assemble(i,&regs[i]);ds=1;break;
10596 case FJUMP:
10597 fjump_assemble(i,&regs[i]);ds=1;break;
10598 case SPAN:
10599 pagespan_assemble(i,&regs[i]);break;
10600 }
10601 if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10602 literal_pool(1024);
10603 else
10604 literal_pool_jumpover(256);
10605 }
10606 }
10607 //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10608 // If the block did not end with an unconditional branch,
10609 // add a jump to the next instruction.
10610 if(i>1) {
10611 if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10612 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10613 assert(i==slen);
10614 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10615 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10616 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10617 emit_loadreg(CCREG,HOST_CCREG);
10618 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10619 }
10620 else if(!likely[i-2])
10621 {
10622 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10623 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10624 }
10625 else
10626 {
10627 store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10628 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10629 }
10630 add_to_linker((int)out,start+i*4,0);
10631 emit_jmp(0);
10632 }
10633 }
10634 else
10635 {
10636 assert(i>0);
10637 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10638 store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10639 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10640 emit_loadreg(CCREG,HOST_CCREG);
10641 emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
10642 add_to_linker((int)out,start+i*4,0);
10643 emit_jmp(0);
10644 }
10645
10646 // TODO: delay slot stubs?
10647 // Stubs
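  // Stubs are out-of-line pieces of code placed after the main block for the
  // slow paths: loads/stores that take the slow path, cycle count (CC) checks,
  // invalid-code checks, coprocessor-unusable exceptions and unaligned stores.
  // stubs[i][0] holds the type recorded when the inline code was generated.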
10648 for(i=0;i<stubcount;i++)
10649 {
10650 switch(stubs[i][0])
10651 {
10652 case LOADB_STUB:
10653 case LOADH_STUB:
10654 case LOADW_STUB:
10655 case LOADD_STUB:
10656 case LOADBU_STUB:
10657 case LOADHU_STUB:
10658 do_readstub(i);break;
10659 case STOREB_STUB:
10660 case STOREH_STUB:
10661 case STOREW_STUB:
10662 case STORED_STUB:
10663 do_writestub(i);break;
10664 case CC_STUB:
10665 do_ccstub(i);break;
10666 case INVCODE_STUB:
10667 do_invstub(i);break;
10668 case FP_STUB:
10669 do_cop1stub(i);break;
10670 case STORELR_STUB:
10671 do_unalignedwritestub(i);break;
10672 }
10673 }
10674
10675 /* Pass 9 - Linker */
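  // link_addr[] records every branch that still needs its target patched in.
  // For an external target (link_addr[i][2]==0) an extjump stub is emitted;
  // if check_addr() finds already-compiled code the branch is patched to it
  // directly, otherwise it is pointed at the stub.  Internal targets are
  // simply patched to instr_addr[target].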
10676 for(i=0;i<linkcount;i++)
10677 {
10678 assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10679 literal_pool(64);
10680 if(!link_addr[i][2])
10681 {
10682 void *stub=out;
10683 void *addr=check_addr(link_addr[i][1]);
10684 emit_extjump(link_addr[i][0],link_addr[i][1]);
10685 if(addr) {
10686 set_jump_target(link_addr[i][0],(int)addr);
10687 add_link(link_addr[i][1],stub);
10688 }
10689 else set_jump_target(link_addr[i][0],(int)stub);
10690 }
10691 else
10692 {
10693 // Internal branch
10694 int target=(link_addr[i][1]-start)>>2;
10695 assert(target>=0&&target<slen);
10696 assert(instr_addr[target]);
10697 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10698 //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10699 //#else
10700 set_jump_target(link_addr[i][0],instr_addr[target]);
10701 //#endif
10702 }
10703 }
10704 // External Branch Targets (jump_in)
10705 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10706 for(i=0;i<slen;i++)
10707 {
10708 if(bt[i]||i==0)
10709 {
10710 if(instr_addr[i]) // TODO - delay slots (=null)
10711 {
10712 u_int vaddr=start+i*4;
10713 u_int page=get_page(vaddr);
10714 u_int vpage=get_vpage(vaddr);
10715 literal_pool(256);
10716 //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
10717 if(!requires_32bit[i])
10718 {
10719 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10720 assem_debug("jump_in: %x\n",start+i*4);
10721 ll_add(jump_dirty+vpage,vaddr,(void *)out);
10722 int entry_point=do_dirty_stub(i);
10723 ll_add(jump_in+page,vaddr,(void *)entry_point);
10724 // If there was an existing entry in the hash table,
10725 // replace it with the new address.
10726 // Don't add new entries. We'll insert the
10727 // ones that actually get used in check_addr().
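          // Each hash bin caches two (vaddr, entry point) pairs:
          //   ht_bin[0]/ht_bin[1] and ht_bin[2]/ht_bin[3].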
10728 int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10729 if(ht_bin[0]==vaddr) {
10730 ht_bin[1]=entry_point;
10731 }
10732 if(ht_bin[2]==vaddr) {
10733 ht_bin[3]=entry_point;
10734 }
10735 }
10736 else
10737 {
10738 u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
10739 assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10740 assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
10741 //int entry_point=(int)out;
10742 ////assem_debug("entry_point: %x\n",entry_point);
10743 //load_regs_entry(i);
10744 //if(entry_point==(int)out)
10745 // entry_point=instr_addr[i];
10746 //else
10747 // emit_jmp(instr_addr[i]);
10748 //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10749 ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
10750 int entry_point=do_dirty_stub(i);
10751 ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
10752 }
10753 }
10754 }
10755 }
10756 // Write out the literal pool if necessary
10757 literal_pool(0);
10758 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10759 // Align code
10760 if(((u_int)out)&7) emit_addnop(13);
10761 #endif
10762 assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
10763 //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10764 memcpy(copy,source,slen*4);
10765 copy+=slen*4;
10766
10767 #ifdef __arm__
10768 __clear_cache((void *)beginning,out);
10769 #endif
10770
10771 // If we're within 256K of the end of the buffer,
10772 // start over from the beginning. (Is 256K enough?)
10773 if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10774
10775 // Trap writes to any of the pages we compiled
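  // Clearing invalid_code marks these pages as containing compiled code, so
  // stores to them (checked through INVCP in the generated code) will cause
  // the affected blocks to be invalidated.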
10776 for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10777 invalid_code[i]=0;
10778#ifndef DISABLE_TLB
10779 memory_map[i]|=0x40000000;
10780 if((signed int)start>=(signed int)0xC0000000) {
10781 assert(using_tlb);
10782 j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
10783 invalid_code[j]=0;
10784 memory_map[j]|=0x40000000;
10785 //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
10786 }
10787#endif
10788 }
10789
10790 /* Pass 10 - Free memory by expiring oldest blocks */
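  // The translation buffer is used as a ring: expirep sweeps a region ahead of
  // the current output pointer and, phase by phase, drops jump_in/jump_dirty
  // entries, stale jump_out pointers and hash table entries that refer to the
  // area that is about to be overwritten.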
10791
10792 int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10793 while(expirep!=end)
10794 {
10795 int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10796 int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10797 inv_debug("EXP: Phase %d\n",expirep);
10798 switch((expirep>>11)&3)
10799 {
10800 case 0:
10801 // Clear jump_in and jump_dirty
10802 ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10803 ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10804 ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10805 ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10806 break;
10807 case 1:
10808 // Clear pointers
10809 ll_kill_pointers(jump_out[expirep&2047],base,shift);
10810 ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10811 break;
10812 case 2:
10813 // Clear hash table
10814 for(i=0;i<32;i++) {
10815 int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10816 if((ht_bin[3]>>shift)==(base>>shift) ||
10817 ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10818 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10819 ht_bin[2]=ht_bin[3]=-1;
10820 }
10821 if((ht_bin[1]>>shift)==(base>>shift) ||
10822 ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10823 inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10824 ht_bin[0]=ht_bin[2];
10825 ht_bin[1]=ht_bin[3];
10826 ht_bin[2]=ht_bin[3]=-1;
10827 }
10828 }
10829 break;
10830 case 3:
10831 // Clear jump_out
10832 ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10833 ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10834 break;
10835 }
10836 expirep=(expirep+1)&65535;
10837 }
10838 return 0;
10839}
10840
10841// vim:shiftwidth=2:expandtab