X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=libpcsxcore%2Fnew_dynarec%2Fnew_dynarec.c;h=1383b2f55f9894ad29c555178e8b54ac21d23ab1;hp=d19fcad0dad1bd0bf5695e40a91babc8fef1237a;hb=687b45804b5c028dd5644bda85981c0235eb4d32;hpb=00fa9369a9c361f2308306685e84c8e302c682b7

diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index d19fcad0..1383b2f5 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -57,6 +57,9 @@ static int sceBlock;
 #ifdef __arm__
 #include "assem_arm.h"
 #endif
+#ifdef __aarch64__
+#include "assem_arm64.h"
+#endif
 
 #define MAXBLOCK 4096
 #define MAX_OUTPUT_BLOCK_SIZE 262144
@@ -83,8 +86,6 @@ struct regstat
 {
   signed char regmap_entry[HOST_REGS];
   signed char regmap[HOST_REGS];
-  uint64_t was32;
-  uint64_t is32;
   uint64_t wasdirty;
   uint64_t dirty;
   uint64_t u;
@@ -197,8 +198,14 @@ struct link_entry
 
 int new_dynarec_hacks;
 int new_dynarec_did_compile;
+
+extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
+extern int last_count; // last absolute target, often = next_interupt
+extern int pcaddr;
+extern int pending_exception;
+extern int branch_target;
+extern u_int mini_ht[32][2];
 extern u_char restore_candidate[512];
-extern int cycle_count;
 
 /* registers that may be allocated */
 /* 1-31 gpr */
@@ -243,7 +250,7 @@ struct link_entry
 #define COP0 15 // Coprocessor 0
 #define COP1 16 // Coprocessor 1
 #define C1LS 17 // Coprocessor 1 load/store
-#define FJUMP 18 // Conditional branch (floating point)
+//#define FJUMP 18 // Conditional branch (floating point)
 //#define FLOAT 19 // Floating point unit
 //#define FCONV 20 // Convert integer to float
 //#define FCOMP 21 // Floating point compare (sets FSREG)
@@ -282,13 +289,13 @@ void jump_intcall();
 
 void new_dyna_leave();
 
 // Needed by assembler
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
-static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
-static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
+static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
+static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
+static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
 static void load_all_regs(signed char i_regmap[]);
 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
 static void load_regs_entry(int t);
-static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
+static void load_all_consts(signed char regmap[],u_int dirty,int i);
 static int verify_dirty(u_int *ptr);
 static int get_final_value(int hr, int i, int *value);
@@ -297,6 +304,10 @@ static void add_stub(enum stub_type type, void *addr, void *retaddr,
 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
   int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist);
 static void add_to_linker(void *addr, u_int target, int ext);
+static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
+static void *get_direct_memhandler(void *table, u_int addr,
+  enum stub_type type, uintptr_t *addr_host);
+static void pass_args(int a0, int a1);
 
 static void mprotect_w_x(void *start, void *end, int is_x)
 {
@@ -339,6 +350,8 @@ static void end_tcache_write(void *start, void *end)
   __clear_cache(start, end);
 #endif
   (void)len;
+#else
+  __clear_cache(start, end);
 #endif
 
   mprotect_w_x(start, end, 1);
@@ -515,23 
+528,6 @@ void dirty_reg(struct regstat *cur,signed char reg) } } -// If we dirty the lower half of a 64 bit register which is now being -// sign-extended, we need to dump the upper half. -// Note: Do this only after completion of the instruction, because -// some instructions may need to read the full 64-bit value even if -// overwriting it (eg SLTI, DSRA32). -static void flush_dirty_uppers(struct regstat *cur) -{ - int hr,reg; - for (hr=0;hrdirty>>hr)&1) { - reg=cur->regmap[hr]; - if(reg>=64) - if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1; - } - } -} - void set_const(struct regstat *cur,signed char reg,uint64_t value) { int hr; @@ -621,7 +617,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg) hsn[INVCP]=j; } #endif - if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP)) + if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP)) { hsn[CCREG]=j; b=j; @@ -645,7 +641,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg) // TODO: preferred register based on backward branch } // Delay slot should preferably not overwrite branch conditions or cycle count - if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) { + if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) { if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1; if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1; hsn[CCREG]=1; @@ -707,7 +703,7 @@ int needed_again(int r, int i) if(rs1[i+j]==r) rn=j; if(rs2[i+j]==r) rn=j; if((unneeded_reg[i+j]>>r)&1) rn=10; - if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP)) + if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP)) { b=j; } @@ -756,14 +752,14 @@ int loop_reg(int i, int r, int hr) } k=0; if(i>0){ - if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) + if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP) k--; } for(;k>r)&1) return hr; - if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP)) + if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP)) { if(ba[i+k]>=start && ba[i+k]<(start+i*4)) { @@ -802,6 +798,47 @@ void alloc_all(struct regstat *cur,int i) } } +#ifdef DRC_DBG +extern void gen_interupt(); +extern void do_insn_cmp(); +#define FUNCNAME(f) { (intptr_t)f, " " #f } +static const struct { + intptr_t addr; + const char *name; +} function_names[] = { + FUNCNAME(cc_interrupt), + FUNCNAME(gen_interupt), + FUNCNAME(get_addr_ht), + FUNCNAME(get_addr), + FUNCNAME(jump_handler_read8), + FUNCNAME(jump_handler_read16), + FUNCNAME(jump_handler_read32), + FUNCNAME(jump_handler_write8), + FUNCNAME(jump_handler_write16), + FUNCNAME(jump_handler_write32), + FUNCNAME(invalidate_addr), + FUNCNAME(verify_code_vm), + FUNCNAME(verify_code), + FUNCNAME(jump_hlecall), + FUNCNAME(jump_syscall_hle), + FUNCNAME(new_dyna_leave), + FUNCNAME(pcsx_mtc0), + FUNCNAME(pcsx_mtc0_ds), + FUNCNAME(do_insn_cmp), +}; + +static const char *func_name(intptr_t a) +{ + int i; + for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++) + if (function_names[i].addr == a) + return function_names[i].name; + return ""; +} +#else +#define func_name(x) "" +#endif + #ifdef __i386__ #include "assem_x86.c" #endif @@ -811,6 +848,9 @@ void alloc_all(struct regstat *cur,int i) #ifdef __arm__ #include "assem_arm.c" #endif +#ifdef __aarch64__ +#include "assem_arm64.c" +#endif // Add virtual address mapping to linked list void ll_add(struct ll_entry **head,int 
vaddr,void *addr) @@ -939,7 +979,7 @@ static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift) { inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr); void *host_addr=find_extjump_insn(head->addr); - #ifdef __arm__ + #if defined(__arm__) || defined(__aarch64__) mark_clear_cache(host_addr); #endif set_jump_target(host_addr, head->addr); @@ -967,7 +1007,7 @@ void invalidate_page(u_int page) while(head!=NULL) { inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr); void *host_addr=find_extjump_insn(head->addr); - #ifdef __arm__ + #if defined(__arm__) || defined(__aarch64__) mark_clear_cache(host_addr); #endif set_jump_target(host_addr, head->addr); @@ -992,7 +1032,7 @@ static void invalidate_block_range(u_int block, u_int first, u_int last) for(first=page+1;firstis32>>rs1[i])&1) { - //alloc_reg64(current,i,rs1[i]); - assert(0); - } else { - //alloc_reg(current,i,rs1[i]); - alloc_reg(current,i,rt1[i]); - current->is32|=(1LL<u>>reg)&1) return; + + // see if it's already allocated + for(hr=0;hrregmap[hr]==reg) return; + } + + // Keep the same mapping if the register was already allocated in a loop + preferred_reg = loop_reg(i,reg,preferred_reg); + + // Try to allocate the preferred register + if(cur->regmap[preferred_reg]==-1) { + cur->regmap[preferred_reg]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]; + assert(r < 64); + if((cur->u>>r)&1) { + cur->regmap[preferred_reg]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]; + if(r>=0) { + assert(r < 64); + if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;} + } + } + // Try to allocate any available register, but prefer + // registers that have not been used recently. + if(i>0) { + for(hr=0;hrregmap[hr]==-1) { + if(regs[i-1].regmap[hr]!=rs1[i-1]&®s[i-1].regmap[hr]!=rs2[i-1]&®s[i-1].regmap[hr]!=rt1[i-1]&®s[i-1].regmap[hr]!=rt2[i-1]) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==-1) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]); + //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]); + if(i>0) { + // Don't evict the cycle count at entry points, otherwise the entry + // stub will have to write it. 
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2; + if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2; + for(j=10;j>=3;j--) + { + // Alloc preferred register if available + if(hsn[r=cur->regmap[preferred_reg]&63]==j) { + for(hr=0;hrregmap[hr]&63)==r) { + cur->regmap[hr]=-1; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]=reg; + return; + } + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==reg) return; + } + + // Try to allocate any available register + for(hr=HOST_REGS-1;hr>=0;hr--) { + if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;hr--) + { + r=cur->regmap[hr]; + if(r>=0) { + assert(r < 64); + if((cur->u>>r)&1) { + if(i==0||((unneeded_reg[i-1]>>r)&1)) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]); + if(i>0) { + // Don't evict the cycle count at entry points, otherwise the entry + // stub will have to write it. + if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2; + if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2; + for(j=10;j>=3;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) { + for(hr=0;hr2) { + if(cur->regmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<2) { + if(cur->regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<is32|=1LL<is32|=1LL<=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU if(rt1[i]) { @@ -1265,22 +1567,13 @@ void alu_alloc(struct regstat *current,int i) } alloc_reg(current,i,rt1[i]); } - current->is32|=1LL<is32>>rs1[i])&(current->is32>>rs2[i])&1)) - { - alloc_reg64(current,i,rs1[i]); - alloc_reg64(current,i,rs2[i]); - alloc_reg(current,i,rt1[i]); - } else { - alloc_reg(current,i,rs1[i]); - alloc_reg(current,i,rs2[i]); - alloc_reg(current,i,rt1[i]); - } + alloc_reg(current,i,rs1[i]); + alloc_reg(current,i,rs2[i]); + alloc_reg(current,i,rt1[i]); } - current->is32|=1LL<=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR if(rt1[i]) { @@ -1294,15 +1587,6 @@ void alu_alloc(struct regstat *current,int i) if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]); } alloc_reg(current,i,rt1[i]); - if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1)) - { - if(get_reg(current->regmap,rt1[i]|64)>=0) { - assert(0); - } - current->is32&=~(1LL<is32|=1LL<=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU @@ -1314,7 +1598,7 @@ void alu_alloc(struct regstat *current,int i) dirty_reg(current,rt1[i]); } -void imm16_alloc(struct regstat *current,int i) +static void imm16_alloc(struct regstat *current,int i) { if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]); else lt1[i]=rs1[i]; @@ -1323,20 +1607,10 @@ void 
imm16_alloc(struct regstat *current,int i) assert(0); } else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU - if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]); - current->is32|=1LL<=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI - if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) { - if(rs1[i]!=rt1[i]) { - if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]); - alloc_reg64(current,i,rt1[i]); - current->is32&=~(1LL<is32|=1LL<is32|=1LL<is32|=1LL<regmap,rt1[i])>=0); if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD { - current->is32&=~(1LL<is32&=~(1LL<is32|=1LL<is32|=1LL<is32|=1LL<is32|=1LL<is32|=1LL<is32|=1LL<is32|=1LL<is32>>rs1[i])&(current->is32>>rs2[i])&1)) - { - assert(0); - } } else if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL { if(rs1[i]) alloc_reg(current,i,rs1[i]); - if(!((current->is32>>rs1[i])&1)) - { - assert(0); - } } //else ... } @@ -1717,17 +1965,37 @@ static void add_stub_r(enum stub_type type, void *addr, void *retaddr, } // Write out a single register -void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32) +static void wb_register(signed char r,signed char regmap[],uint64_t dirty) { int hr; for(hr=0;hr>hr)&1) { - if(regmap[hr]<64) { - emit_storereg(r,hr); - }else{ - emit_storereg(r|64,hr); + assert(regmap[hr]<64); + emit_storereg(r,hr); + } + } + } + } +} + +static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u) +{ + //if(dirty_pre==dirty) return; + int hr,reg; + for(hr=0;hr>(reg&63))&1) { + if(reg>0) { + if(((dirty_pre&~dirty)>>hr)&1) { + if(reg>0&®<34) { + emit_storereg(reg,hr); + } + else if(reg>=64) { + assert(0); + } } } } @@ -1744,7 +2012,24 @@ void rlist() printf("\n"); } -void alu_assemble(int i,struct regstat *i_regs) +// trashes r2 +static void pass_args(int a0, int a1) +{ + if(a0==1&&a1==0) { + // must swap + emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0); + } + else if(a0!=0&&a1==0) { + emit_mov(a1,1); + if (a0>=0) emit_mov(a0,0); + } + else { + if(a0>=0&&a0!=0) emit_mov(a0,0); + if(a1>=0&&a1!=1) emit_mov(a1,1); + } +} + +static void alu_assemble(int i,struct regstat *i_regs) { if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU if(rt1[i]) { @@ -1782,42 +2067,8 @@ void alu_assemble(int i,struct regstat *i_regs) } if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU if(rt1[i]) { - signed char s1l,s1h,s2l,s2h,t; - if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)) + signed char s1l,s2l,t; { - t=get_reg(i_regs->regmap,rt1[i]); - //assert(t>=0); - if(t>=0) { - s1l=get_reg(i_regs->regmap,rs1[i]); - s1h=get_reg(i_regs->regmap,rs1[i]|64); - s2l=get_reg(i_regs->regmap,rs2[i]); - s2h=get_reg(i_regs->regmap,rs2[i]|64); - if(rs2[i]==0) // rx=0); - if(opcode2[i]==0x2a) // SLT - emit_shrimm(s1h,31,t); - else // SLTU (unsigned can not be less than zero) - emit_zeroreg(t); - } - else if(rs1[i]==0) // r0=0); - if(opcode2[i]==0x2a) // SLT - emit_set_gz64_32(s2h,s2l,t); - else // SLTU (set if not zero) - emit_set_nz64_32(s2h,s2l,t); - } - else { - assert(s1l>=0);assert(s1h>=0); - assert(s2l>=0);assert(s2h>=0); - if(opcode2[i]==0x2a) // SLT - emit_set_if_less64_32(s1h,s1l,s2h,s2l,t); - else // SLTU - emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t); - } - } - } else { t=get_reg(i_regs->regmap,rt1[i]); //assert(t>=0); if(t>=0) { @@ -1852,101 +2103,9 @@ void alu_assemble(int i,struct regstat *i_regs) } if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR if(rt1[i]) { - signed char s1l,s1h,s2l,s2h,th,tl; + signed char s1l,s2l,tl; tl=get_reg(i_regs->regmap,rt1[i]); - 
th=get_reg(i_regs->regmap,rt1[i]|64); - if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0) - { - assert(tl>=0); - if(tl>=0) { - s1l=get_reg(i_regs->regmap,rs1[i]); - s1h=get_reg(i_regs->regmap,rs1[i]|64); - s2l=get_reg(i_regs->regmap,rs2[i]); - s2h=get_reg(i_regs->regmap,rs2[i]|64); - if(rs1[i]&&rs2[i]) { - assert(s1l>=0);assert(s1h>=0); - assert(s2l>=0);assert(s2h>=0); - if(opcode2[i]==0x24) { // AND - emit_and(s1l,s2l,tl); - emit_and(s1h,s2h,th); - } else - if(opcode2[i]==0x25) { // OR - emit_or(s1l,s2l,tl); - emit_or(s1h,s2h,th); - } else - if(opcode2[i]==0x26) { // XOR - emit_xor(s1l,s2l,tl); - emit_xor(s1h,s2h,th); - } else - if(opcode2[i]==0x27) { // NOR - emit_or(s1l,s2l,tl); - emit_or(s1h,s2h,th); - emit_not(tl,tl); - emit_not(th,th); - } - } - else - { - if(opcode2[i]==0x24) { // AND - emit_zeroreg(tl); - emit_zeroreg(th); - } else - if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR - if(rs1[i]){ - if(s1l>=0) emit_mov(s1l,tl); - else emit_loadreg(rs1[i],tl); - if(s1h>=0) emit_mov(s1h,th); - else emit_loadreg(rs1[i]|64,th); - } - else - if(rs2[i]){ - if(s2l>=0) emit_mov(s2l,tl); - else emit_loadreg(rs2[i],tl); - if(s2h>=0) emit_mov(s2h,th); - else emit_loadreg(rs2[i]|64,th); - } - else{ - emit_zeroreg(tl); - emit_zeroreg(th); - } - } else - if(opcode2[i]==0x27) { // NOR - if(rs1[i]){ - if(s1l>=0) emit_not(s1l,tl); - else{ - emit_loadreg(rs1[i],tl); - emit_not(tl,tl); - } - if(s1h>=0) emit_not(s1h,th); - else{ - emit_loadreg(rs1[i]|64,th); - emit_not(th,th); - } - } - else - if(rs2[i]){ - if(s2l>=0) emit_not(s2l,tl); - else{ - emit_loadreg(rs2[i],tl); - emit_not(tl,tl); - } - if(s2h>=0) emit_not(s2h,th); - else{ - emit_loadreg(rs2[i]|64,th); - emit_not(th,th); - } - } - else { - emit_movimm(-1,tl); - emit_movimm(-1,th); - } - } - } - } - } - else { - // 32 bit if(tl>=0) { s1l=get_reg(i_regs->regmap,rs1[i]); s2l=get_reg(i_regs->regmap,rs2[i]); @@ -2078,15 +2237,12 @@ void imm16_assemble(int i,struct regstat *i_regs) else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU if(rt1[i]) { //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug - signed char sh,sl,t; + signed char sl,t; t=get_reg(i_regs->regmap,rt1[i]); - sh=get_reg(i_regs->regmap,rs1[i]|64); sl=get_reg(i_regs->regmap,rs1[i]); //assert(t>=0); if(t>=0) { if(rs1[i]>0) { - if(sh<0) assert((i_regs->was32>>rs1[i])&1); - if(sh<0||((i_regs->was32>>rs1[i])&1)) { if(opcode[i]==0x0a) { // SLTI if(sl<0) { if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t); @@ -2103,13 +2259,6 @@ void imm16_assemble(int i,struct regstat *i_regs) emit_sltiu32(sl,imm[i],t); } } - }else{ // 64-bit - assert(sl>=0); - if(opcode[i]==0x0a) // SLTI - emit_slti64_32(sh,sl,imm[i],t); - else // SLTIU - emit_sltiu64_32(sh,sl,imm[i],t); - } }else{ // SLTI(U) with r0 is just stupid, // nonetheless examples can be found @@ -2259,7 +2408,124 @@ void shift_assemble(int i,struct regstat *i_regs) } #endif -void load_assemble(int i,struct regstat *i_regs) +enum { + MTYPE_8000 = 0, + MTYPE_8020, + MTYPE_0000, + MTYPE_A000, + MTYPE_1F80, +}; + +static int get_ptr_mem_type(u_int a) +{ + if(a < 0x00200000) { + if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0)) + // return wrong, must use memhandler for BIOS self-test to pass + // 007 does similar stuff from a00 mirror, weird stuff + return MTYPE_8000; + return MTYPE_0000; + } + if(0x1f800000 <= a && a < 0x1f801000) + return MTYPE_1F80; + if(0x80200000 <= a && a < 0x80800000) + return MTYPE_8020; + if(0xa0000000 <= a && a < 0xa0200000) + return MTYPE_A000; + return MTYPE_8000; +} + 
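/*
 * Illustrative sketch, not part of the patch: get_ptr_mem_type() above
 * classifies a guest address into the PSX mirrors (KUSEG at 0x00000000,
 * the 0x80200000+ RAM mirror, KSEG1 at 0xa0000000, scratchpad at
 * 0x1f800000).  emit_fastpath_cmp_jump() below then emits host code that
 * folds each mirror back onto the canonical KSEG0 RAM window before a
 * single RAM_SIZE compare.  A plain-C version of that normalization,
 * assuming the MTYPE_* values and RAM_SIZE used in this file:
 */
static inline u_int normalize_ram_mirror(u_int a)
{
  switch (get_ptr_mem_type(a)) {
  case MTYPE_8020: return a & ~0x00e00000; // 0x80200000+ mirror -> 0x80xxxxxx
  case MTYPE_0000: return a |  0x80000000; // KUSEG mirror -> KSEG0
  case MTYPE_A000: return a & ~0x20000000; // KSEG1 mirror -> KSEG0
  default:         return a;               // MTYPE_8000 as-is; MTYPE_1F80 handled separately below
  }
}
/* The emitted fast path does the same with one AND/OR into HOST_TEMPREG,
 * then a single emit_cmpimm(addr,RAM_SIZE) + emit_jno() to the slow stub. */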
+static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override) +{ + void *jaddr = NULL; + int type=0; + int mr=rs1[i]; + if(((smrv_strong|smrv_weak)>>mr)&1) { + type=get_ptr_mem_type(smrv[mr]); + //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type); + } + else { + // use the mirror we are running on + type=get_ptr_mem_type(start); + //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type); + } + + if(type==MTYPE_8020) { // RAM 80200000+ mirror + emit_andimm(addr,~0x00e00000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_0000) { // RAM 0 mirror + emit_orimm(addr,0x80000000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_A000) { // RAM A mirror + emit_andimm(addr,~0x20000000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_1F80) { // scratchpad + if (psxH == (void *)0x1f800000) { + emit_addimm(addr,-0x1f800000,HOST_TEMPREG); + emit_cmpimm(HOST_TEMPREG,0x1000); + jaddr=out; + emit_jc(0); + } + else { + // do the usual RAM check, jump will go to the right handler + type=0; + } + } + + if(type==0) + { + emit_cmpimm(addr,RAM_SIZE); + jaddr=out; + #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK + // Hint to branch predictor that the branch is unlikely to be taken + if(rs1[i]>=28) + emit_jno_unlikely(0); + else + #endif + emit_jno(0); + if(ram_offset!=0) { + emit_addimm(addr,ram_offset,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + } + } + + return jaddr; +} + +// return memhandler, or get directly accessable address and return 0 +static void *get_direct_memhandler(void *table, u_int addr, + enum stub_type type, uintptr_t *addr_host) +{ + uintptr_t l1, l2 = 0; + l1 = ((uintptr_t *)table)[addr>>12]; + if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) { + uintptr_t v = l1 << 1; + *addr_host = v + addr; + return NULL; + } + else { + l1 <<= 1; + if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB) + l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)]; + else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB) + l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2]; + else + l2=((uintptr_t *)l1)[(addr&0xfff)/4]; + if ((l2 & (1<<31)) == 0) { + uintptr_t v = l2 << 1; + *addr_host = v + (addr&0xfff); + return NULL; + } + return (void *)(l2 << 1); + } +} + +static void load_assemble(int i,struct regstat *i_regs) { int s,th,tl,addr; int offset; @@ -2527,8 +2793,8 @@ void store_assemble(int i,struct regstat *i_regs) SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4); assert(i_regs->regmap==regs[i].regmap); // not delay slot if(i_regs->regmap==regs[i].regmap) { - load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i); - wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty); + load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i); + wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty); emit_movimm(start+i*4+4,0); emit_writeword(0,&pcaddr); emit_jmp(do_interrupt); @@ -2700,12 +2966,236 @@ void storelr_assemble(int i,struct regstat *i_regs) } } -void c1ls_assemble(int i,struct regstat *i_regs) +static void cop0_assemble(int i,struct regstat *i_regs) +{ + if(opcode2[i]==0) // MFC0 + { + signed char t=get_reg(i_regs->regmap,rt1[i]); + u_int copr=(source[i]>>11)&0x1f; + //assert(t>=0); // Why does this happen? 
OOT is weird + if(t>=0&&rt1[i]!=0) { + emit_readword(®_cop0[copr],t); + } + } + else if(opcode2[i]==4) // MTC0 + { + signed char s=get_reg(i_regs->regmap,rs1[i]); + char copr=(source[i]>>11)&0x1f; + assert(s>=0); + wb_register(rs1[i],i_regs->regmap,i_regs->dirty); + if(copr==9||copr==11||copr==12||copr==13) { + emit_readword(&last_count,HOST_TEMPREG); + emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc + emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG); + emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); + emit_writeword(HOST_CCREG,&Count); + } + // What a mess. The status register (12) can enable interrupts, + // so needs a special case to handle a pending interrupt. + // The interrupt must be taken immediately, because a subsequent + // instruction might disable interrupts again. + if(copr==12||copr==13) { + if (is_delayslot) { + // burn cycles to cause cc_interrupt, which will + // reschedule next_interupt. Relies on CCREG from above. + assem_debug("MTC0 DS %d\n", copr); + emit_writeword(HOST_CCREG,&last_count); + emit_movimm(0,HOST_CCREG); + emit_storereg(CCREG,HOST_CCREG); + emit_loadreg(rs1[i],1); + emit_movimm(copr,0); + emit_call(pcsx_mtc0_ds); + emit_loadreg(rs1[i],s); + return; + } + emit_movimm(start+i*4+4,HOST_TEMPREG); + emit_writeword(HOST_TEMPREG,&pcaddr); + emit_movimm(0,HOST_TEMPREG); + emit_writeword(HOST_TEMPREG,&pending_exception); + } + //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12); + //else + if(s==HOST_CCREG) + emit_loadreg(rs1[i],1); + else if(s!=1) + emit_mov(s,1); + emit_movimm(copr,0); + emit_call(pcsx_mtc0); + if(copr==9||copr==11||copr==12||copr==13) { + emit_readword(&Count,HOST_CCREG); + emit_readword(&next_interupt,HOST_TEMPREG); + emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG); + emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG); + emit_writeword(HOST_TEMPREG,&last_count); + emit_storereg(CCREG,HOST_CCREG); + } + if(copr==12||copr==13) { + assert(!is_delayslot); + emit_readword(&pending_exception,14); + emit_test(14,14); + emit_jne(&do_interrupt); + } + emit_loadreg(rs1[i],s); + if(get_reg(i_regs->regmap,rs1[i]|64)>=0) + emit_loadreg(rs1[i]|64,get_reg(i_regs->regmap,rs1[i]|64)); + } + else + { + assert(opcode2[i]==0x10); + //if((source[i]&0x3f)==0x10) // RFE + { + emit_readword(&Status,0); + emit_andimm(0,0x3c,1); + emit_andimm(0,~0xf,0); + emit_orrshr_imm(1,2,0); + emit_writeword(0,&Status); + } + } +} + +static void cop1_unusable(int i,struct regstat *i_regs) +{ + // XXX: should just just do the exception instead + //if(!cop1_usable) + { + void *jaddr=out; + emit_jmp(0); + add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0); + } +} + +static void cop1_assemble(int i,struct regstat *i_regs) +{ + cop1_unusable(i, i_regs); +} + +static void c1ls_assemble(int i,struct regstat *i_regs) { cop1_unusable(i, i_regs); } -void c2ls_assemble(int i,struct regstat *i_regs) +// FP_STUB +static void do_cop1stub(int n) +{ + literal_pool(256); + assem_debug("do_cop1stub %x\n",start+stubs[n].a*4); + set_jump_target(stubs[n].addr, out); + int i=stubs[n].a; +// int rs=stubs[n].b; + struct regstat *i_regs=(struct regstat *)stubs[n].c; + int ds=stubs[n].d; + if(!ds) { + load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i); + //if(i_regs!=®s[i]) printf("oops: regs[i]=%x i_regs=%x",(int)®s[i],(int)i_regs); + } + //else {printf("fp exception in delay slot\n");} + wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty); + if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG); + emit_movimm(start+(i-ds)*4,EAX); // Get PC + 
emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle... + emit_jmp(ds?fp_exception_ds:fp_exception); +} + +static void cop2_get_dreg(u_int copr,signed char tl,signed char temp) +{ + switch (copr) { + case 1: + case 3: + case 5: + case 8: + case 9: + case 10: + case 11: + emit_readword(®_cop2d[copr],tl); + emit_signextend16(tl,tl); + emit_writeword(tl,®_cop2d[copr]); // hmh + break; + case 7: + case 16: + case 17: + case 18: + case 19: + emit_readword(®_cop2d[copr],tl); + emit_andimm(tl,0xffff,tl); + emit_writeword(tl,®_cop2d[copr]); + break; + case 15: + emit_readword(®_cop2d[14],tl); // SXY2 + emit_writeword(tl,®_cop2d[copr]); + break; + case 28: + case 29: + emit_readword(®_cop2d[9],temp); + emit_testimm(temp,0x8000); // do we need this? + emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_shrimm(temp,7,tl); + emit_readword(®_cop2d[10],temp); + emit_testimm(temp,0x8000); + emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_orrshr_imm(temp,2,tl); + emit_readword(®_cop2d[11],temp); + emit_testimm(temp,0x8000); + emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_orrshl_imm(temp,3,tl); + emit_writeword(tl,®_cop2d[copr]); + break; + default: + emit_readword(®_cop2d[copr],tl); + break; + } +} + +static void cop2_put_dreg(u_int copr,signed char sl,signed char temp) +{ + switch (copr) { + case 15: + emit_readword(®_cop2d[13],temp); // SXY1 + emit_writeword(sl,®_cop2d[copr]); + emit_writeword(temp,®_cop2d[12]); // SXY0 + emit_readword(®_cop2d[14],temp); // SXY2 + emit_writeword(sl,®_cop2d[14]); + emit_writeword(temp,®_cop2d[13]); // SXY1 + break; + case 28: + emit_andimm(sl,0x001f,temp); + emit_shlimm(temp,7,temp); + emit_writeword(temp,®_cop2d[9]); + emit_andimm(sl,0x03e0,temp); + emit_shlimm(temp,2,temp); + emit_writeword(temp,®_cop2d[10]); + emit_andimm(sl,0x7c00,temp); + emit_shrimm(temp,3,temp); + emit_writeword(temp,®_cop2d[11]); + emit_writeword(sl,®_cop2d[28]); + break; + case 30: + emit_movs(sl,temp); + emit_mvnmi(temp,temp); +#if defined(HAVE_ARMV5) || defined(__aarch64__) + emit_clz(temp,temp); +#else + emit_movs(temp,HOST_TEMPREG); + emit_movimm(0,temp); + emit_jeq((int)out+4*4); + emit_addpl_imm(temp,1,temp); + emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG); + emit_jns((int)out-2*4); +#endif + emit_writeword(sl,®_cop2d[30]); + emit_writeword(temp,®_cop2d[31]); + break; + case 31: + break; + default: + emit_writeword(sl,®_cop2d[copr]); + break; + } +} + +static void c2ls_assemble(int i,struct regstat *i_regs) { int s,tl; int ar; @@ -2799,6 +3289,57 @@ void c2ls_assemble(int i,struct regstat *i_regs) } } +static void cop2_assemble(int i,struct regstat *i_regs) +{ + u_int copr=(source[i]>>11)&0x1f; + signed char temp=get_reg(i_regs->regmap,-1); + if (opcode2[i]==0) { // MFC2 + signed char tl=get_reg(i_regs->regmap,rt1[i]); + if(tl>=0&&rt1[i]!=0) + cop2_get_dreg(copr,tl,temp); + } + else if (opcode2[i]==4) { // MTC2 + signed char sl=get_reg(i_regs->regmap,rs1[i]); + cop2_put_dreg(copr,sl,temp); + } + else if (opcode2[i]==2) // CFC2 + { + signed char tl=get_reg(i_regs->regmap,rt1[i]); + if(tl>=0&&rt1[i]!=0) + emit_readword(®_cop2c[copr],tl); + } + else if (opcode2[i]==6) // CTC2 + { + signed char sl=get_reg(i_regs->regmap,rs1[i]); + switch(copr) { + case 4: + case 12: + case 20: + case 26: + case 27: + case 29: + case 30: + emit_signextend16(sl,temp); + break; + case 31: + //value = value & 0x7ffff000; + //if (value & 0x7f87e000) value |= 0x80000000; + 
emit_shrimm(sl,12,temp); + emit_shlimm(temp,12,temp); + emit_testimm(temp,0x7f000000); + emit_testeqimm(temp,0x00870000); + emit_testeqimm(temp,0x0000e000); + emit_orrne_imm(temp,0x80000000,temp); + break; + default: + temp=sl; + break; + } + emit_writeword(temp,®_cop2c[copr]); + assert(sl>=0); + } +} + #ifndef multdiv_assemble void multdiv_assemble(int i,struct regstat *i_regs) { @@ -2868,6 +3409,98 @@ void intcall_assemble(int i,struct regstat *i_regs) emit_jmp(jump_intcall); } +static void speculate_mov(int rs,int rt) +{ + if(rt!=0) { + smrv_strong_next|=1<>rs1[i])&1) speculate_mov(rs1[i],rt1[i]); + else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]); + else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]); + else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]); + else { + smrv_strong_next&=~(1<=0) { + if(get_final_value(hr,i,&value)) + smrv[rt1[i]]=value; + else smrv[rt1[i]]=constmap[i][hr]; + smrv_strong_next|=1<>rs1[i])&1) speculate_mov(rs1[i],rt1[i]); + else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]); + } + break; + case LOAD: + if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) { + // special case for BIOS + smrv[rt1[i]]=0xa0000000; + smrv_strong_next|=1<>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst); +#endif +} + void ds_assemble(int i,struct regstat *i_regs) { speculate_register_values(i); @@ -2913,14 +3546,13 @@ void ds_assemble(int i,struct regstat *i_regs) case RJUMP: case CJUMP: case SJUMP: - case FJUMP: SysPrintf("Jump in the delay slot. This is probably a bug.\n"); } is_delayslot=0; } // Is the branch target a valid internal jump? -int internal_branch(uint64_t i_is32,int addr) +static int internal_branch(int addr) { if(addr&1) return 0; // Indirect (register) jump if(addr>=start && addr=0&&(pre[hr]&63)=0) { - emit_mov(hr,nr); - } - } - } - } - } -} - -// Load the specified registers -// This only loads the registers given as arguments because -// we don't want to load things that will be overwritten -void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2) -{ - int hr; - // Load 32-bit regs - for(hr=0;hr=0) { - if(entry[hr]!=regmap[hr]) { - if(regmap[hr]==rs1||regmap[hr]==rs2) - { - if(regmap[hr]==0) { - emit_zeroreg(hr); - } - else - { - emit_loadreg(regmap[hr],hr); + emit_mov(hr,nr); } } } } } - //Load 64-bit regs +} + +// Load the specified registers +// This only loads the registers given as arguments because +// we don't want to load things that will be overwritten +static void load_regs(signed char entry[],signed char regmap[],int rs1,int rs2) +{ + int hr; + // Load 32-bit regs for(hr=0;hr=0) { if(entry[hr]!=regmap[hr]) { - if(regmap[hr]-64==rs1||regmap[hr]-64==rs2) + if(regmap[hr]==rs1||regmap[hr]==rs2) { - assert(regmap[hr]!=64); - if((is32>>(regmap[hr]&63))&1) { - int lr=get_reg(regmap,regmap[hr]-64); - if(lr>=0) - emit_sarimm(lr,31,hr); - else - emit_loadreg(regmap[hr],hr); + if(regmap[hr]==0) { + emit_zeroreg(hr); } else { @@ -3187,7 +3797,7 @@ static int get_final_value(int hr, int i, int *value) } // Load registers with known constants -void load_consts(signed char pre[],signed char regmap[],int is32,int i) +static void load_consts(signed char pre[],signed char regmap[],int i) { int hr,hr2; // propagate loaded constant flags @@ -3207,7 +3817,8 @@ void load_consts(signed char pre[],signed char regmap[],int is32,int i) if(hr!=EXCLUDE_REG&®map[hr]>=0) { //if(entry[hr]!=regmap[hr]) { if(!((regs[i].loadedconst>>hr)&1)) { - if(((regs[i].isconst>>hr)&1)&®map[hr]<64&®map[hr]>0) 
{ + assert(regmap[hr]<64); + if(((regs[i].isconst>>hr)&1)&®map[hr]>0) { int value,similar=0; if(get_final_value(hr,i,&value)) { // see if some other register has similar value @@ -3238,41 +3849,16 @@ void load_consts(signed char pre[],signed char regmap[],int is32,int i) } } } - // Load 64-bit regs - for(hr=0;hr=0) { - //if(entry[hr]!=regmap[hr]) { - if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) { - if(((regs[i].isconst>>hr)&1)&®map[hr]>64) { - if((is32>>(regmap[hr]&63))&1) { - int lr=get_reg(regmap,regmap[hr]-64); - assert(lr>=0); - emit_sarimm(lr,31,hr); - } - else - { - int value; - if(get_final_value(hr,i,&value)) { - if(value==0) { - emit_zeroreg(hr); - } - else { - emit_movimm(value,hr); - } - } - } - } - } - } - } } -void load_all_consts(signed char regmap[],int is32,u_int dirty,int i) + +void load_all_consts(signed char regmap[], u_int dirty, int i) { int hr; // Load 32-bit regs for(hr=0;hr=0&&((dirty>>hr)&1)) { - if(((regs[i].isconst>>hr)&1)&®map[hr]<64&®map[hr]>0) { + assert(regmap[hr] < 64); + if(((regs[i].isconst>>hr)&1)&®map[hr]>0) { int value=constmap[i][hr]; if(value==0) { emit_zeroreg(hr); @@ -3283,32 +3869,10 @@ void load_all_consts(signed char regmap[],int is32,u_int dirty,int i) } } } - // Load 64-bit regs - for(hr=0;hr=0&&((dirty>>hr)&1)) { - if(((regs[i].isconst>>hr)&1)&®map[hr]>64) { - if((is32>>(regmap[hr]&63))&1) { - int lr=get_reg(regmap,regmap[hr]-64); - assert(lr>=0); - emit_sarimm(lr,31,hr); - } - else - { - int value=constmap[i][hr]; - if(value==0) { - emit_zeroreg(hr); - } - else { - emit_movimm(value,hr); - } - } - } - } - } } // Write out all dirty registers (except cycle count) -void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty) +static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty) { int hr; for(hr=0;hr>2; @@ -3334,7 +3899,7 @@ void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,in if(hr!=EXCLUDE_REG) { if(i_regmap[hr]>0) { if(i_regmap[hr]!=CCREG) { - if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32)>>(i_regmap[hr]&63))&1)) { + if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) { if((i_dirty>>hr)&1) { assert(i_regmap[hr]<64); emit_storereg(i_regmap[hr],hr); @@ -3405,39 +3970,19 @@ void load_regs_entry(int t) } } } - // Load 64-bit regs - for(hr=0;hr=64&®s[t].regmap_entry[hr]>(regs[t].regmap_entry[hr]&63))&1) { - int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64); - if(lr<0) { - emit_loadreg(regs[t].regmap_entry[hr],hr); - } - else - { - emit_sarimm(lr,31,hr); - } - } - else - { - emit_loadreg(regs[t].regmap_entry[hr],hr); - } - } - } } // Store dirty registers prior to branch -void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr) +void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr) { - if(internal_branch(i_is32,addr)) + if(internal_branch(addr)) { int t=(addr-start)>>2; int hr; for(hr=0;hr0 && i_regmap[hr]!=CCREG) { - if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32)>>(i_regmap[hr]&63))&1)) { + if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) { if((i_dirty>>hr)&1) { assert(i_regmap[hr]<64); if(!((unneeded_reg[t]>>i_regmap[hr])&1)) @@ -3451,15 +3996,15 @@ void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int a else { // Branch out of this block, write out all dirty regs - wb_dirtys(i_regmap,i_is32,i_dirty); + wb_dirtys(i_regmap,i_dirty); } } // Load all needed 
registers for branch target -void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr) +static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr) { //if(addr>=start && addr<(start+slen*4)) - if(internal_branch(i_is32,addr)) + if(internal_branch(addr)) { int t=(addr-start)>>2; int hr; @@ -3484,37 +4029,10 @@ void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int ad } } } - //Load 64-bit regs - for(hr=0;hr=64&®s[t].regmap_entry[hr]>(regs[t].regmap_entry[hr]&63))&1) { - int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64); - if(lr<0) { - emit_loadreg(regs[t].regmap_entry[hr],hr); - } - else - { - emit_sarimm(lr,31,hr); - } - } - else - { - emit_loadreg(regs[t].regmap_entry[hr],hr); - } - } - else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) { - int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64); - assert(lr>=0); - emit_sarimm(lr,31,hr); - } - } - } } } -int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr) +static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr) { if(addr>=start && addr0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0; + //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP)) return 0; // Delay slots require additional processing, so do not match if(is_ds[t]) return 0; } @@ -3622,11 +4140,11 @@ void ds_assemble_entry(int i) assem_debug("<->\n"); drc_dbg_emit_do_cmp(t); if(regs[t].regmap_entry[HOST_CCREG]==CCREG&®s[t].regmap[HOST_CCREG]!=CCREG) - wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32); - load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]); + wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty); + load_regs(regs[t].regmap_entry,regs[t].regmap,rs1[t],rs2[t]); address_generation(t,®s[t],regs[t].regmap_entry); if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a) - load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP); + load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP); is_delayslot=0; switch(itype[t]) { case ALU: @@ -3669,17 +4187,16 @@ void ds_assemble_entry(int i) case RJUMP: case CJUMP: case SJUMP: - case FJUMP: SysPrintf("Jump in the delay slot. 
This is probably a bug.\n"); } - store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4); - load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4); - if(internal_branch(regs[t].is32,ba[i]+4)) + store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4); + load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4); + if(internal_branch(ba[i]+4)) assem_debug("branch: internal\n"); else assem_debug("branch: external\n"); - assert(internal_branch(regs[t].is32,ba[i]+4)); - add_to_linker(out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4)); + assert(internal_branch(ba[i]+4)); + add_to_linker(out,ba[i]+4,internal_branch(ba[i]+4)); emit_jmp(0); } @@ -3694,7 +4211,7 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert) *adj=0; } //if(ba[i]>=start && ba[i]<(start+slen*4)) - if(internal_branch(branch_regs[i].is32,ba[i])) + if(internal_branch(ba[i])) { t=(ba[i]-start)>>2; if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle @@ -3738,19 +4255,19 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert) static void do_ccstub(int n) { literal_pool(256); - assem_debug("do_ccstub %x\n",start+stubs[n].b*4); + assem_debug("do_ccstub %lx\n",start+stubs[n].b*4); set_jump_target(stubs[n].addr, out); int i=stubs[n].b; if(stubs[n].d==NULLDS) { // Delay slot instruction is nullified ("likely" branch) - wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty); + wb_dirtys(regs[i].regmap,regs[i].dirty); } else if(stubs[n].d!=TAKEN) { - wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty); + wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty); } else { - if(internal_branch(branch_regs[i].is32,ba[i])) - wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + if(internal_branch(ba[i])) + wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); } if(stubs[n].c!=-1) { @@ -3761,36 +4278,31 @@ static void do_ccstub(int n) else { // Return address depends on which way the branch goes - if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) + if(itype[i]==CJUMP||itype[i]==SJUMP) { int s1l=get_reg(branch_regs[i].regmap,rs1[i]); - int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64); int s2l=get_reg(branch_regs[i].regmap,rs2[i]); - int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64); if(rs1[i]==0) { - s1l=s2l;s1h=s2h; - s2l=s2h=-1; + s1l=s2l; + s2l=-1; } else if(rs2[i]==0) { - s2l=s2h=-1; - } - if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) { - s1h=s2h=-1; + s2l=-1; } assert(s1l>=0); #ifdef DESTRUCTIVE_WRITEBACK if(rs1[i]) { - if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1) + if((branch_regs[i].dirty>>s1l)&&1) emit_loadreg(rs1[i],s1l); } else { - if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1) + if((branch_regs[i].dirty>>s1l)&1) emit_loadreg(rs2[i],s1l); } if(s2l>=0) - if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1) + if((branch_regs[i].dirty>>s2l)&1) emit_loadreg(rs2[i],s2l); #endif int hr=0; @@ -3832,46 +4344,28 @@ static void do_ccstub(int n) if((opcode[i]&0x2f)==4) // BEQ { #ifdef HAVE_CMOV_IMM - if(s1h<0) { - if(s2l>=0) emit_cmp(s1l,s2l); - else emit_test(s1l,s1l); - emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr); - } - else + if(s2l>=0) emit_cmp(s1l,s2l); + else emit_test(s1l,s1l); + emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr); + #else + emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt); + if(s2l>=0) emit_cmp(s1l,s2l); + else emit_test(s1l,s1l); + emit_cmovne_reg(alt,addr); #endif - { - 
emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt); - if(s1h>=0) { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - emit_cmovne_reg(alt,addr); - } - if(s2l>=0) emit_cmp(s1l,s2l); - else emit_test(s1l,s1l); - emit_cmovne_reg(alt,addr); - } } if((opcode[i]&0x2f)==5) // BNE { #ifdef HAVE_CMOV_IMM - if(s1h<0) { - if(s2l>=0) emit_cmp(s1l,s2l); - else emit_test(s1l,s1l); - emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr); - } - else + if(s2l>=0) emit_cmp(s1l,s2l); + else emit_test(s1l,s1l); + emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr); + #else + emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt); + if(s2l>=0) emit_cmp(s1l,s2l); + else emit_test(s1l,s1l); + emit_cmovne_reg(alt,addr); #endif - { - emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt); - if(s1h>=0) { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - emit_cmovne_reg(alt,addr); - } - if(s2l>=0) emit_cmp(s1l,s2l); - else emit_test(s1l,s1l); - emit_cmovne_reg(alt,addr); - } } if((opcode[i]&0x2f)==6) // BLEZ { @@ -3879,13 +4373,7 @@ static void do_ccstub(int n) //emit_movimm(start+i*4+8,addr); emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr); emit_cmpimm(s1l,1); - if(s1h>=0) emit_mov(addr,ntaddr); emit_cmovl_reg(alt,addr); - if(s1h>=0) { - emit_test(s1h,s1h); - emit_cmovne_reg(ntaddr,addr); - emit_cmovs_reg(alt,addr); - } } if((opcode[i]&0x2f)==7) // BGTZ { @@ -3893,21 +4381,14 @@ static void do_ccstub(int n) //emit_movimm(start+i*4+8,ntaddr); emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr); emit_cmpimm(s1l,1); - if(s1h>=0) emit_mov(addr,alt); emit_cmovl_reg(ntaddr,addr); - if(s1h>=0) { - emit_test(s1h,s1h); - emit_cmovne_reg(alt,addr); - emit_cmovs_reg(ntaddr,addr); - } } if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ { //emit_movimm(ba[i],alt); //emit_movimm(start+i*4+8,addr); emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr); - if(s1h>=0) emit_test(s1h,s1h); - else emit_test(s1l,s1l); + emit_test(s1l,s1l); emit_cmovs_reg(alt,addr); } if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ @@ -3915,8 +4396,7 @@ static void do_ccstub(int n) //emit_movimm(ba[i],addr); //emit_movimm(start+i*4+8,alt); emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt); - if(s1h>=0) emit_test(s1h,s1h); - else emit_test(s1l,s1l); + emit_test(s1l,s1l); emit_cmovs_reg(alt,addr); } if(opcode[i]==0x11 && opcode2[i]==0x08 ) { @@ -3956,7 +4436,7 @@ static void do_ccstub(int n) emit_call(cc_interrupt); if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG); if(stubs[n].d==TAKEN) { - if(internal_branch(branch_regs[i].is32,ba[i])) + if(internal_branch(ba[i])) load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry); else if(itype[i]==RJUMP) { if(get_reg(branch_regs[i].regmap,RTEMP)>=0) @@ -3996,7 +4476,7 @@ static void ujump_assemble_write_ra(int i) return_address=start+i*4+8; if(rt>=0) { #ifdef USE_MINI_HT - if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) { + if(internal_branch(return_address)&&rt1[i+1]!=31) { int temp=-1; // note: must be ds-safe #ifdef HOST_TEMPREG temp=HOST_TEMPREG; @@ -4043,29 +4523,29 @@ void ujump_assemble(int i,struct regstat *i_regs) ds_assemble(i+1,i_regs); uint64_t bc_unneeded=branch_regs[i].u; bc_unneeded|=1|(1LL<=0) emit_prefetchreg(temp); #endif do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0); if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); - if(internal_branch(branch_regs[i].is32,ba[i])) + 
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); + if(internal_branch(ba[i])) assem_debug("branch: internal\n"); else assem_debug("branch: external\n"); - if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) { + if(internal_branch(ba[i])&&is_ds[(ba[i]-start)>>2]) { ds_assemble_entry(i); } else { - add_to_linker(out,ba[i],internal_branch(branch_regs[i].is32,ba[i])); + add_to_linker(out,ba[i],internal_branch(ba[i])); emit_jmp(0); } } @@ -4131,8 +4611,8 @@ void rjump_assemble(int i,struct regstat *i_regs) uint64_t bc_unneeded=branch_regs[i].u; bc_unneeded|=1|(1LL<>rs)&(branch_regs[i].is32>>rs1[i])&1) { + if((branch_regs[i].dirty>>rs)&1) { if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) { emit_loadreg(rs1[i],rs); } @@ -4173,7 +4653,7 @@ void rjump_assemble(int i,struct regstat *i_regs) emit_jmp(0); else emit_jns(0); - //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1); + //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1); #ifdef USE_MINI_HT if(rs1[i]==31) { do_miniht_jump(rs,rh,ht); @@ -4193,13 +4673,12 @@ void cjump_assemble(int i,struct regstat *i_regs) signed char *i_regmap=i_regs->regmap; int cc; int match; - match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); assem_debug("match=%d\n",match); - int s1h,s1l,s2h,s2l; + int s1l,s2l; int unconditional=0,nop=0; - int only32=0; int invert=0; - int internal=internal_branch(branch_regs[i].is32,ba[i]); + int internal=internal_branch(ba[i]); if(i==(ba[i]-start)>>2) assem_debug("idle loop\n"); if(!match) invert=1; #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK @@ -4208,15 +4687,11 @@ void cjump_assemble(int i,struct regstat *i_regs) if(ooo[i]) { s1l=get_reg(branch_regs[i].regmap,rs1[i]); - s1h=get_reg(branch_regs[i].regmap,rs1[i]|64); s2l=get_reg(branch_regs[i].regmap,rs2[i]); - s2h=get_reg(branch_regs[i].regmap,rs2[i]|64); } else { s1l=get_reg(i_regmap,rs1[i]); - s1h=get_reg(i_regmap,rs1[i]|64); s2l=get_reg(i_regmap,rs2[i]); - s2h=get_reg(i_regmap,rs2[i]|64); } if(rs1[i]==0&&rs2[i]==0) { @@ -4229,17 +4704,12 @@ void cjump_assemble(int i,struct regstat *i_regs) } else if(rs1[i]==0) { - s1l=s2l;s1h=s2h; - s2l=s2h=-1; - only32=(regs[i].was32>>rs2[i])&1; + s1l=s2l; + s2l=-1; } else if(rs2[i]==0) { - s2l=s2h=-1; - only32=(regs[i].was32>>rs1[i])&1; - } - else { - only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1; + s2l=-1; } if(ooo[i]) { @@ -4251,20 +4721,20 @@ void cjump_assemble(int i,struct regstat *i_regs) uint64_t bc_unneeded=branch_regs[i].u; bc_unneeded&=~((1LL<>2 || source[i+1]!=0) { if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); if(internal) assem_debug("branch: internal\n"); else @@ -4291,43 +4761,6 @@ void cjump_assemble(int i,struct regstat *i_regs) void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL; do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert); if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - if(!only32) - { - assert(s1h>=0); - if(opcode[i]==4) // BEQ - { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - nottaken1=out; - emit_jne((void *)1l); - } - if(opcode[i]==5) // BNE - { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - if(invert) taken=out; - else add_to_linker(out,ba[i],internal); - emit_jne(0); - } - if(opcode[i]==6) // BLEZ - { - emit_test(s1h,s1h); - 
if(invert) taken=out; - else add_to_linker(out,ba[i],internal); - emit_js(0); - nottaken1=out; - emit_jne((void *)1l); - } - if(opcode[i]==7) // BGTZ - { - emit_test(s1h,s1h); - nottaken1=out; - emit_js(1); - if(invert) taken=out; - else add_to_linker(out,ba[i],internal); - emit_jne(0); - } - } // if(!only32) //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]); assert(s1l>=0); @@ -4393,8 +4826,8 @@ void cjump_assemble(int i,struct regstat *i_regs) #endif { if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc); - store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); - load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); + load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); if(internal) assem_debug("branch: internal\n"); else @@ -4424,41 +4857,6 @@ void cjump_assemble(int i,struct regstat *i_regs) //printf("IOE\n"); void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL; if(!unconditional&&!nop) { - if(!only32) - { - assert(s1h>=0); - if((opcode[i]&0x2f)==4) // BEQ - { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - nottaken1=out; - emit_jne((void *)2l); - } - if((opcode[i]&0x2f)==5) // BNE - { - if(s2h>=0) emit_cmp(s1h,s2h); - else emit_test(s1h,s1h); - taken=out; - emit_jne((void *)1l); - } - if((opcode[i]&0x2f)==6) // BLEZ - { - emit_test(s1h,s1h); - taken=out; - emit_js(1); - nottaken1=out; - emit_jne((void *)2l); - } - if((opcode[i]&0x2f)==7) // BGTZ - { - emit_test(s1h,s1h); - nottaken1=out; - emit_js(2); - taken=out; - emit_jne((void *)1l); - } - } // if(!only32) - //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]); assert(s1l>=0); if((opcode[i]&0x2f)==4) // BEQ @@ -4496,11 +4894,11 @@ void cjump_assemble(int i,struct regstat *i_regs) if(!nop) { if(taken) set_jump_target(taken, out); assem_debug("1:\n"); - wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded); + wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded); // load regs - load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]); + load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]); address_generation(i+1,&branch_regs[i],0); - load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP); + load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP); ds_assemble(i+1,&branch_regs[i]); cc=get_reg(branch_regs[i].regmap,CCREG); if(cc==-1) { @@ -4508,11 +4906,11 @@ void cjump_assemble(int i,struct regstat *i_regs) // CHECK: Is the following instruction (fall thru) allocated ok? 
} assert(cc==HOST_CCREG); - store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); do_cc(i,i_regmap,&adj,ba[i],TAKEN,0); assem_debug("cycle count (adj)\n"); if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); if(internal) assem_debug("branch: internal\n"); else @@ -4531,10 +4929,10 @@ void cjump_assemble(int i,struct regstat *i_regs) set_jump_target(nottaken, out); assem_debug("2:\n"); if(!likely[i]) { - wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded); - load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]); + wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded); + load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]); address_generation(i+1,&branch_regs[i],0); - load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG); + load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG); ds_assemble(i+1,&branch_regs[i]); } cc=get_reg(branch_regs[i].regmap,CCREG); @@ -4564,13 +4962,12 @@ void sjump_assemble(int i,struct regstat *i_regs) signed char *i_regmap=i_regs->regmap; int cc; int match; - match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); assem_debug("smatch=%d\n",match); - int s1h,s1l; + int s1l; int unconditional=0,nevertaken=0; - int only32=0; int invert=0; - int internal=internal_branch(branch_regs[i].is32,ba[i]); + int internal=internal_branch(ba[i]); if(i==(ba[i]-start)>>2) assem_debug("idle loop\n"); if(!match) invert=1; #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK @@ -4582,11 +4979,9 @@ void sjump_assemble(int i,struct regstat *i_regs) if(ooo[i]) { s1l=get_reg(branch_regs[i].regmap,rs1[i]); - s1h=get_reg(branch_regs[i].regmap,rs1[i]|64); } else { s1l=get_reg(i_regmap,rs1[i]); - s1h=get_reg(i_regmap,rs1[i]|64); } if(rs1[i]==0) { @@ -4598,9 +4993,6 @@ void sjump_assemble(int i,struct regstat *i_regs) //assert(opcode2[i]!=0x10); //assert(opcode2[i]!=0x12); } - else { - only32=(regs[i].was32>>rs1[i])&1; - } if(ooo[i]) { // Out of order execution (delay slot first) @@ -4611,9 +5003,9 @@ void sjump_assemble(int i,struct regstat *i_regs) uint64_t bc_unneeded=branch_regs[i].u; bc_unneeded&=~((1LL<>2 || source[i+1]!=0) { if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]); + load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]); if(internal) assem_debug("branch: internal\n"); else @@ -4664,33 +5056,6 @@ void sjump_assemble(int i,struct regstat *i_regs) void *nottaken = NULL; do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert); if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc); - if(!only32) - { - assert(s1h>=0); - if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL - { - emit_test(s1h,s1h); - if(invert){ - nottaken=out; - emit_jns(1); - }else{ - add_to_linker(out,ba[i],internal); - emit_js(0); - } - } - if((opcode2[i]&0xf)==1) // BGEZ/BLTZAL - { - emit_test(s1h,s1h); - if(invert){ - nottaken=out; - emit_js(1); - }else{ - add_to_linker(out,ba[i],internal); - emit_jns(0); - } - } - } // if(!only32) - else { assert(s1l>=0); if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL @@ -4715,7 +5080,7 @@ void sjump_assemble(int i,struct regstat *i_regs) 
            emit_jns(0);
          }
        }
-      } // if(!only32)
+      }
      if(invert) {
        #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
@@ -4732,8 +5097,8 @@ void sjump_assemble(int i,struct regstat *i_regs)
        #endif
        {
          if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
-          store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
-          load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+          store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
+          load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
          if(internal)
            assem_debug("branch: internal\n");
          else
@@ -4773,24 +5138,6 @@ void sjump_assemble(int i,struct regstat *i_regs)
    }
    if(!unconditional) {
      //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-      if(!only32)
-      {
-        assert(s1h>=0);
-        if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
-        {
-          emit_test(s1h,s1h);
-          nottaken=out;
-          emit_jns(1);
-        }
-        if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
-        {
-          emit_test(s1h,s1h);
-          nottaken=out;
-          emit_js(1);
-        }
-      } // if(!only32)
-      else
-      {
      assert(s1l>=0);
      if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
      {
@@ -4804,7 +5151,6 @@ void sjump_assemble(int i,struct regstat *i_regs)
        nottaken=out;
        emit_js(1);
      }
-      }
    } // if(!unconditional)
    int adj;
    uint64_t ds_unneeded=branch_regs[i].u;
@@ -4813,11 +5159,11 @@ void sjump_assemble(int i,struct regstat *i_regs)
    // branch taken
    if(!nevertaken) {
      //assem_debug("1:\n");
-      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
+      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
      // load regs
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
      address_generation(i+1,&branch_regs[i],0);
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
      ds_assemble(i+1,&branch_regs[i]);
      cc=get_reg(branch_regs[i].regmap,CCREG);
      if(cc==-1) {
@@ -4825,11 +5171,11 @@ void sjump_assemble(int i,struct regstat *i_regs)
        // CHECK: Is the following instruction (fall thru) allocated ok?
   }
   assert(cc==HOST_CCREG);
-  store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+  store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
   assem_debug("cycle count (adj)\n");
   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-  load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+  load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   if(internal)
     assem_debug("branch: internal\n");
   else
@@ -4847,10 +5193,10 @@ void sjump_assemble(int i,struct regstat *i_regs)
   set_jump_target(nottaken, out);
   assem_debug("1:\n");
   if(!likely[i]) {
-    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
     address_generation(i+1,&branch_regs[i],0);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
     ds_assemble(i+1,&branch_regs[i]);
   }
   cc=get_reg(branch_regs[i].regmap,CCREG);
@@ -4878,23 +5224,18 @@ void sjump_assemble(int i,struct regstat *i_regs)
 static void pagespan_assemble(int i,struct regstat *i_regs)
 {
   int s1l=get_reg(i_regs->regmap,rs1[i]);
-  int s1h=get_reg(i_regs->regmap,rs1[i]|64);
   int s2l=get_reg(i_regs->regmap,rs2[i]);
-  int s2h=get_reg(i_regs->regmap,rs2[i]|64);
   void *taken = NULL;
   void *nottaken = NULL;
   int unconditional=0;
   if(rs1[i]==0)
   {
-    s1l=s2l;s1h=s2h;
-    s2l=s2h=-1;
+    s1l=s2l;
+    s2l=-1;
   }
   else if(rs2[i]==0)
   {
-    s2l=s2h=-1;
-  }
-  if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
-    s1h=s2h=-1;
+    s2l=-1;
   }
   int hr=0;
   int addr=-1,alt=-1,ntaddr=-1;
@@ -4936,7 +5277,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
   assert(hr=0) emit_cmp(s1l,s2l);
   else emit_test(s1l,s1l);
   emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
@@ -4977,11 +5318,6 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   {
     assert(s1l>=0);
     emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
-    if(s1h>=0) {
-      if(s2h>=0) emit_cmp(s1h,s2h);
-      else emit_test(s1h,s1h);
-      emit_cmovne_reg(alt,addr);
-    }
     if(s2l>=0) emit_cmp(s1l,s2l);
     else emit_test(s1l,s1l);
     emit_cmovne_reg(alt,addr);
@@ -4990,34 +5326,19 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if((opcode[i]&0x3f)==5) // BNE
   {
     #ifdef HAVE_CMOV_IMM
-    if(s1h<0) {
-      if(s2l>=0) emit_cmp(s1l,s2l);
-      else emit_test(s1l,s1l);
-      emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
-    }
-    else
+    if(s2l>=0) emit_cmp(s1l,s2l);
+    else emit_test(s1l,s1l);
+    emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
+    #else
+    assert(s1l>=0);
+    emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
+    if(s2l>=0) emit_cmp(s1l,s2l);
+    else emit_test(s1l,s1l);
+    emit_cmovne_reg(alt,addr);
     #endif
-    {
-      assert(s1l>=0);
-      emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
-      if(s1h>=0) {
-        if(s2h>=0) emit_cmp(s1h,s2h);
-        else emit_test(s1h,s1h);
-        emit_cmovne_reg(alt,addr);
-      }
-      if(s2l>=0) emit_cmp(s1l,s2l);
-      else emit_test(s1l,s1l);
-      emit_cmovne_reg(alt,addr);
-    }
   }
   if((opcode[i]&0x3f)==0x14) // BEQL
   {
-    if(s1h>=0) {
-      if(s2h>=0) emit_cmp(s1h,s2h);
-      else emit_test(s1h,s1h);
-      nottaken=out;
-      emit_jne(0);
-    }
     if(s2l>=0) emit_cmp(s1l,s2l);
     else emit_test(s1l,s1l);
     if(nottaken) set_jump_target(nottaken, out);
@@ -5026,12 +5347,6 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
   if((opcode[i]&0x3f)==0x15) // BNEL
   {
-    if(s1h>=0) {
-      if(s2h>=0) emit_cmp(s1h,s2h);
-      else emit_test(s1h,s1h);
-      taken=out;
-      emit_jne(0);
-    }
     if(s2l>=0) emit_cmp(s1l,s2l);
     else emit_test(s1l,s1l);
     nottaken=out;
@@ -5042,25 +5357,13 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   {
     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
     emit_cmpimm(s1l,1);
-    if(s1h>=0) emit_mov(addr,ntaddr);
     emit_cmovl_reg(alt,addr);
-    if(s1h>=0) {
-      emit_test(s1h,s1h);
-      emit_cmovne_reg(ntaddr,addr);
-      emit_cmovs_reg(alt,addr);
-    }
   }
   if((opcode[i]&0x3f)==7) // BGTZ
   {
     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
     emit_cmpimm(s1l,1);
-    if(s1h>=0) emit_mov(addr,alt);
     emit_cmovl_reg(ntaddr,addr);
-    if(s1h>=0) {
-      emit_test(s1h,s1h);
-      emit_cmovne_reg(alt,addr);
-      emit_cmovs_reg(ntaddr,addr);
-    }
   }
   if((opcode[i]&0x3f)==0x16) // BLEZL
   {
@@ -5101,7 +5404,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
   assert(i_regs->regmap[HOST_CCREG]==CCREG);
-  wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
+  wb_dirtys(regs[i].regmap,regs[i].dirty);
   if(likely[i]||unconditional)
   {
     emit_movimm(ba[i],HOST_BTREG);
@@ -5124,7 +5427,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if(likely[i]) {
     // Not-taken path
     set_jump_target(nottaken, out);
-    wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
+    wb_dirtys(regs[i].regmap,regs[i].dirty);
     void *branch_addr=out;
     emit_jmp(0);
     int target_addr=start+i*4+8;
@@ -5151,13 +5454,13 @@ static void pagespan_ds()
   ll_add(jump_in+page,vaddr,(void *)out);
   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
   if(regs[0].regmap[HOST_CCREG]!=CCREG)
-    wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
+    wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty);
   if(regs[0].regmap[HOST_BTREG]!=BTREG)
     emit_writeword(HOST_BTREG,&branch_target);
-  load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
+  load_regs(regs[0].regmap_entry,regs[0].regmap,rs1[0],rs2[0]);
   address_generation(0,&regs[0],regs[0].regmap_entry);
   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
-    load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
+    load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
   is_delayslot=0;
   switch(itype[0]) {
     case ALU:
@@ -5200,7 +5503,6 @@ static void pagespan_ds()
     case RJUMP:
     case CJUMP:
    case SJUMP:
-    case FJUMP:
      SysPrintf("Jump in the delay slot. This is probably a bug.\n");
   }
   int btaddr=get_reg(regs[0].regmap,BTREG);
@@ -5218,11 +5520,11 @@ static void pagespan_ds()
   #endif
   void *branch = out;
   emit_jeq(0);
-  store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
+  store_regs_bt(regs[0].regmap,regs[0].dirty,-1);
   emit_jmp(jump_vaddr_reg[btaddr]);
   set_jump_target(branch, out);
-  store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
-  load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
+  store_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
+  load_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
 }
 // Basic liveness analysis for MIPS registers
@@ -5246,7 +5548,7 @@ void unneeded_registers(int istart,int iend,int r)
   for (i=iend;i>=istart;i--)
   {
     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
     {
       // If subroutine call, flag return address as a possible branch target
      if(rt1[i]==31 && i=istart;i--)
  {
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
    {
      if(ba[i]=(start+slen*4))
      {
@@ -5753,7 +6055,7 @@ void clean_registers(int istart,int iend,int wr)
      if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<istart) {
-        if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
+        if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP)
        {
          // Don't store a register immediately after writing it,
          // may prevent dual-issue.
@@ -5776,11 +6078,11 @@ void clean_registers(int istart,int iend,int wr)
    }
    printf("\n");*/
-    //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
+    //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP)) {
      regs[i].dirty|=will_dirty_i;
      #ifndef DESTRUCTIVE_WRITEBACK
      regs[i].dirty&=wont_dirty_i;
-      if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+      if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
      {
        if(i>16)!=0x1000) {
          for(r=0;r>14):*ba);break;
      case SJUMP:
        printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
-      case FJUMP:
-        printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
      case RJUMP:
        if (opcode[i]==0x9&&rt1[i]!=31)
          printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
@@ -5945,25 +6245,39 @@ static void disassemble_inst(int i) {}
 #define DRC_TEST_VAL 0x74657374
-static int new_dynarec_test(void)
+static void new_dynarec_test(void)
 {
-  int (*testfunc)(void) = (void *)out;
+  int (*testfunc)(void);
   void *beginning;
-  int ret;
+  int ret[2];
+  size_t i;
-  beginning = start_block();
-  emit_movimm(DRC_TEST_VAL,0); // test
-  emit_jmpreg(14);
-  literal_pool(0);
-  end_block(beginning);
-  SysPrintf("testing if we can run recompiled code..\n");
-  ret = testfunc();
-  if (ret == DRC_TEST_VAL)
+  // check structure linkage
+  if ((void *)reg != (void *)&psxRegs
+      || (u_char *)rcnts - (u_char *)reg != sizeof(psxRegs))
+  {
+    SysPrintf("linkage_arm miscompilation/breakage detected.\n");
+  }
+
+  SysPrintf("testing if we can run recompiled code...\n");
+  ((volatile u_int *)out)[0]++; // make cache dirty
+
+  for (i = 0; i < ARRAY_SIZE(ret); i++) {
+    out = translation_cache;
+    beginning = start_block();
+    emit_movimm(DRC_TEST_VAL + i, 0); // test
+    emit_ret();
+    literal_pool(0);
+    end_block(beginning);
+    testfunc = beginning;
+    ret[i] = testfunc();
+  }
+
+  if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
     SysPrintf("test passed.\n");
   else
-    SysPrintf("test failed: %08x\n", ret);
+    SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
   out = translation_cache;
-  return ret == DRC_TEST_VAL;
 }
 // clear the state completely, instead of just marking
@@ -6407,7 +6721,7 @@ int new_recompile_block(int addr)
 #endif
       case 0x12: strcpy(insn[i],"COP2"); type=NI;
         op2=(source[i]>>21)&0x1f;
-        //if (op2 & 0x10) {
+        //if (op2 & 0x10)
        if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
          if (gte_handlers[source[i]&0x3f]!=NULL) {
            if (gte_regnames[source[i]&0x3f]!=NULL)
@@ -6530,13 +6844,6 @@ int new_recompile_block(int addr)
      }
      likely[i]=(op2&2)>>1;
      break;
-    case FJUMP:
-      rs1[i]=FSREG;
-      rs2[i]=CSREG;
-      rt1[i]=0;
-      rt2[i]=0;
-      likely[i]=((source[i])>>17)&1;
-      break;
    case ALU:
      rs1[i]=(source[i]>>21)&0x1f; // source
      rs2[i]=(source[i]>>16)&0x1f; // subtract amount
@@ -6682,13 +6989,13 @@ int new_recompile_block(int addr)
      ba[i]=start+i*4+8; // Ignore never taken branch
    else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
      ba[i]=start+i*4+8; // Ignore never taken branch
-    else if(type==CJUMP||type==SJUMP||type==FJUMP)
+    else if(type==CJUMP||type==SJUMP)
      ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
    else ba[i]=-1;
-    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
+    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
      int do_in_intrp=0;
      // branch in delay slot?
-      if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
+      if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) {
        // don't handle first branch and call interpreter if it's hit
        SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
        do_in_intrp=1;
@@ -6755,7 +7062,7 @@ int new_recompile_block(int addr)
    }
  }
  slen=i;
-  if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
+  if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP) {
    if(start+i*4==pagelimit) {
      itype[i-1]=SPAN;
    }
@@ -6769,7 +7076,6 @@ int new_recompile_block(int addr)
  /* Pass 3 - Register allocation */
  struct regstat current; // Current register allocations/status
-  current.is32=1;
  current.dirty=0;
  current.u=unneeded_reg[0];
  clear_all_regs(current.regmap);
@@ -6811,26 +7117,22 @@ int new_recompile_block(int addr)
      if(rs1[i-2]==0||rs2[i-2]==0)
      {
        if(rs1[i-2]) {
-          current.is32|=1LL<=0) current.regmap[hr]=-1;
        }
        if(rs2[i-2]) {
-          current.is32|=1LL<=0) current.regmap[hr]=-1;
        }
      }
    }
  }
-  current.is32=-1LL;
  memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
  regs[i].wasconst=current.isconst;
-  regs[i].was32=current.is32;
  regs[i].wasdirty=current.dirty;
  regs[i].loadedconst=0;
-  if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
+  if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP) {
    if(i+1>rs1[i])&(current.is32>>rs2[i])&1))
-      {
-        assert(0);
-      }
      if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
        (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
        // The delay slot overwrites one of our conditions.
@@ -6991,10 +7285,6 @@ int new_recompile_block(int addr)
        regs[i].wasconst=0;
        if(rs1[i]) alloc_reg(&current,i,rs1[i]);
        if(rs2[i]) alloc_reg(&current,i,rs2[i]);
-        if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
-        {
-          assert(0);
-        }
      }
      else
      {
@@ -7008,10 +7298,6 @@ int new_recompile_block(int addr)
      alloc_cc(&current,i);
      dirty_reg(&current,CCREG);
      alloc_reg(&current,i,rs1[i]);
-      if(!(current.is32>>rs1[i]&1))
-      {
-        assert(0);
-      }
      if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
        // The delay slot overwrites one of our conditions.
        // Allocate the branch condition registers instead.
@@ -7019,10 +7305,6 @@ int new_recompile_block(int addr)
        current.wasconst=0;
        regs[i].wasconst=0;
        if(rs1[i]) alloc_reg(&current,i,rs1[i]);
-        if(!((current.is32>>rs1[i])&1))
-        {
-          assert(0);
-        }
      }
      else
      {
@@ -7041,10 +7323,6 @@ int new_recompile_block(int addr)
        dirty_reg(&current,CCREG);
        alloc_reg(&current,i,rs1[i]);
        alloc_reg(&current,i,rs2[i]);
-        if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
-        {
-          assert(0);
-        }
      }
      else
      if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
@@ -7055,10 +7333,6 @@ int new_recompile_block(int addr)
        alloc_cc(&current,i);
        dirty_reg(&current,CCREG);
        alloc_reg(&current,i,rs1[i]);
-        if(!(current.is32>>rs1[i]&1))
-        {
-          assert(0);
-        }
      }
      ds=1;
      //current.isconst=0;
@@ -7075,17 +7349,12 @@ int new_recompile_block(int addr)
        alloc_cc(&current,i);
        dirty_reg(&current,CCREG);
        alloc_reg(&current,i,rs1[i]);
-        if(!(current.is32>>rs1[i]&1))
-        {
-          assert(0);
-        }
        if (rt1[i]==31) { // BLTZAL/BGEZAL
          alloc_reg(&current,i,31);
          dirty_reg(&current,31);
          //#ifdef REG_PREFETCH
          //alloc_reg(&current,i,PTEMP);
          //#endif
-          //current.is32|=1LL<>rs1[i])&1))
-        {
-          assert(0);
-        }
      }
      else
      {
@@ -7115,17 +7380,10 @@ int new_recompile_block(int addr)
        alloc_cc(&current,i);
        dirty_reg(&current,CCREG);
        alloc_reg(&current,i,rs1[i]);
-        if(!(current.is32>>rs1[i]&1))
-        {
-          assert(0);
-        }
      }
      ds=1;
      //current.isconst=0;
      break;
-    case FJUMP:
-      assert(0);
-      break;
    case IMM16:
      imm16_alloc(&current,i);
      break;
@@ -7238,7 +7496,6 @@ int new_recompile_block(int addr)
    /* Branch post-alloc */
    if(i>0)
    {
-      current.was32=current.is32;
      current.wasdirty=current.dirty;
      switch(itype[i-1]) {
        case UJUMP:
@@ -7251,7 +7508,6 @@ int new_recompile_block(int addr)
          if(rt1[i-1]==31) { // JAL
            alloc_reg(&branch_regs[i-1],i-1,31);
            dirty_reg(&branch_regs[i-1],31);
-            branch_regs[i-1].is32|=1LL<<31;
          }
          memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
          memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
@@ -7267,7 +7523,6 @@ int new_recompile_block(int addr)
          if(rt1[i-1]!=0) { // JALR
            alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
            dirty_reg(&branch_regs[i-1],rt1[i-1]);
-            branch_regs[i-1].is32|=1LL<>rs1[i-1])&(current.is32>>rs2[i-1])&1))
-            {
-              assert(0);
-            }
          }
          memcpy(&branch_regs[i-1],&current,sizeof(current));
          branch_regs[i-1].isconst=0;
@@ -7327,10 +7578,6 @@ int new_recompile_block(int addr)
            current.u=branch_unneeded_reg[i-1]&~(1LL<>rs1[i-1]&1))
-            {
-              assert(0);
-            }
          }
          memcpy(&branch_regs[i-1],&current,sizeof(current));
          branch_regs[i-1].isconst=0;
@@ -7385,10 +7632,6 @@ int new_recompile_block(int addr)
            current.u=branch_unneeded_reg[i-1]&~(1LL<>rs1[i-1]&1))
-            {
-              assert(0);
-            }
          }
          memcpy(&branch_regs[i-1],&current,sizeof(current));
          branch_regs[i-1].isconst=0;
@@ -7414,12 +7657,8 @@ int new_recompile_block(int addr)
          if(opcode2[i-1]&0x10) { // BxxZAL
            alloc_reg(&branch_regs[i-1],i-1,31);
            dirty_reg(&branch_regs[i-1],31);
-            branch_regs[i-1].is32|=1LL<<31;
          }
          break;
-        case FJUMP:
-          assert(0);
-          break;
      }
      if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
      {
        if(rt1[i-1]==31) // JAL/JALR
        {
          // Subroutine call will return here, don't alloc any registers
-          current.is32=1;
          current.dirty=0;
          clear_all_regs(current.regmap);
          alloc_reg(&current,i,CCREG);
@@ -7436,7 +7674,6 @@ int new_recompile_block(int addr)
        else if(i+10&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
+    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
    {
      cc=0;
    }
@@ -7496,9 +7731,7 @@ int new_recompile_block(int addr)
      cc++;
    }
-    flush_dirty_uppers(&current);
    if(!is_ds[i]) {
-      regs[i].is32=current.is32;
      regs[i].dirty=current.dirty;
      regs[i].isconst=current.isconst;
      memcpy(constmap[i],current_constmap,sizeof(current_constmap));
@@ -7521,7 +7754,7 @@ int new_recompile_block(int addr)
  for (i=slen-1;i>=0;i--)
  {
    int hr;
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
    {
      if(ba[i]=(start+slen*4))
      {
@@ -7635,7 +7868,7 @@ int new_recompile_block(int addr)
      }
    }
    // Cycle count is needed at branches. Assume it is needed at the target too.
-    if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
+    if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==SPAN) {
      if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
@@ -7740,9 +7973,9 @@ int new_recompile_block(int addr)
          (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
      {
        if(i>(regs[i].regmap[hr]&63))&1)) {
          SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
          assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
@@ -7770,7 +8003,7 @@ int new_recompile_block(int addr)
  clear_all_regs(f_regmap);
  for(i=0;i=start && ba[i]<(start+i*4))
      if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
      ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
      {
        int t=(ba[i]-start)>>2;
-        if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
+        if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP)) // loop_preload can't handle jumps into delay slots
        if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
        for(hr=0;hr2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
                break;
              }
-              if(r>63) {
-                // NB This can exclude the case where the upper-half
-                // register is lower numbered than the lower-half
-                // register. Not sure if it's worth fixing...
-                if(get_reg(regs[k-1].regmap,r&63)<0) break;
-                if(regs[k-1].is32&(1LL<<(r&63))) break;
-              }
+              assert(r < 64);
              k--;
            }
-            if(i\n",hr,start+k*4);
            while(k>16)!=0x1000) {
                regmap_pre[k+2][hr]=f_regmap[hr];
                regs[k+2].wasdirty&=~(1<>16)==0x1000) {
                // Stop on unconditional branch
                break;
              }
-              if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
+              if(itype[j]==CJUMP||itype[j]==SJUMP)
              {
                if(ooo[j]) {
                  if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
@@ -7999,17 +8211,7 @@ int new_recompile_block(int addr)
                  //printf("No free regs for store %x\n",start+j*4);
                  break;
                }
-                if(f_regmap[hr]>=64) {
-                  if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
-                    break;
-                  }
-                  else
-                  {
-                    if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
-                      break;
-                    }
-                  }
-                }
+                assert(f_regmap[hr]<64);
              }
            }
          }
@@ -8106,7 +8308,7 @@ int new_recompile_block(int addr)
  // to use, which can avoid a load-use penalty on certain CPUs.
  for(i=0;i=0;i--)
  {
-    if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==CJUMP||itype[i]==SJUMP)
    {
      // Conditional branch
      if((source[i]>>16)!=0x1000&&i>16)!=0x1000))
    {
-      wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
-          unneeded_reg[i]);
+      wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
    }
-    if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
-      is32_pre=branch_regs[i].is32;
+    if((itype[i]==CJUMP||itype[i]==SJUMP)&&!likely[i]) {
      dirty_pre=branch_regs[i].dirty;
    }else{
-      is32_pre=regs[i].is32;
      dirty_pre=regs[i].dirty;
    }
    #endif
    // write back
    if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
    {
-      wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,unneeded_reg[i]);
+      wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
      loop_preload(regmap_pre[i],regs[i].regmap_entry);
    }
    // branch target entry point
@@ -8478,35 +8676,35 @@ int new_recompile_block(int addr)
    // load regs
    if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
-      wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
-    load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
+      wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
+    load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i],rs2[i]);
    address_generation(i,&regs[i],regs[i].regmap_entry);
-    load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
+    load_consts(regmap_pre[i],regs[i].regmap,i);
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
    {
      // Load the delay slot registers if necessary
      if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
-        load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
+        load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
      if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
-        load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
+        load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
      if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
-        load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
+        load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
    }
    else if(i+11)
  {
    if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
-      assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
+      assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
      assert(i==slen);
-      if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
-        store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
+      if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) {
+        store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
        if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
          emit_loadreg(CCREG,HOST_CCREG);
        emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
      }
      else if(!likely[i-2])
      {
-        store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
+        store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
        assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
      }
      else
      {
-        store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
+        store_regs_bt(regs[i-2].regmap,regs[i-2].dirty,start+i*4);
        assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
      }
      add_to_linker(out,start+i*4,0);
@@ -8596,8 +8792,8 @@ int new_recompile_block(int addr)
  else
  {
    assert(i>0);
-    assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
+    assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
-    store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
+    store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
    if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
      emit_loadreg(CCREG,HOST_CCREG);
    emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
@@ -8773,7 +8969,7 @@ int new_recompile_block(int addr)
      break;
    case 3:
      // Clear jump_out
-      #ifdef __arm__
+      #if defined(__arm__) || defined(__aarch64__)
      if((expirep&2047)==0)
        do_clear_cache();
      #endif