X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=libpcsxcore%2Fnew_dynarec%2Fnew_dynarec.c;h=e0cff62ca622b1b5b04dd942626a76e89d4ab5f0;hp=9ce1f069a92bdd5766adb1db2e175aedf489c746;hb=e3c6bdb5e46f72f063bb7f588da6588ac1893b17;hpb=3968e69e7fa8f9cb0d44ac79477d5929b9649271 diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c index 9ce1f069..e0cff62c 100644 --- a/libpcsxcore/new_dynarec/new_dynarec.c +++ b/libpcsxcore/new_dynarec/new_dynarec.c @@ -37,12 +37,16 @@ static int sceBlock; #include "new_dynarec_config.h" #include "../psxhle.h" #include "../psxinterpreter.h" -#include "emu_if.h" //emulator interface +#include "../gte.h" +#include "emu_if.h" // emulator interface #define noinline __attribute__((noinline,noclone)) #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) #endif +#ifndef min +#define min(a, b) ((b) < (a) ? (b) : (a)) +#endif //#define DISASM //#define assem_debug printf @@ -63,9 +67,27 @@ static int sceBlock; #include "assem_arm64.h" #endif +#define RAM_SIZE 0x200000 #define MAXBLOCK 4096 #define MAX_OUTPUT_BLOCK_SIZE 262144 +struct ndrc_mem +{ + u_char translation_cache[1 << TARGET_SIZE_2]; + struct + { + struct tramp_insns ops[2048 / sizeof(struct tramp_insns)]; + const void *f[2048 / sizeof(void *)]; + } tramp; +}; + +#ifdef BASE_ADDR_DYNAMIC +static struct ndrc_mem *ndrc; +#else +static struct ndrc_mem ndrc_ __attribute__((aligned(4096))); +static struct ndrc_mem *ndrc = &ndrc_; +#endif + // stubs enum stub_type { CC_STUB = 1, @@ -168,8 +190,10 @@ struct link_entry static uint64_t unneeded_reg[MAXBLOCK]; static uint64_t branch_unneeded_reg[MAXBLOCK]; static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i? - static uint64_t current_constmap[HOST_REGS]; - static uint64_t constmap[MAXBLOCK][HOST_REGS]; + // contains 'real' consts at [i] insn, but may differ from what's actually + // loaded in host reg as 'final' value is always loaded, see get_final_value() + static uint32_t current_constmap[HOST_REGS]; + static uint32_t constmap[MAXBLOCK][HOST_REGS]; static struct regstat regs[MAXBLOCK]; static struct regstat branch_regs[MAXBLOCK]; static signed char minimum_free_regs[MAXBLOCK]; @@ -197,8 +221,11 @@ struct link_entry #endif int new_dynarec_hacks; + int new_dynarec_hacks_pergame; int new_dynarec_did_compile; + #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x)) + extern int cycle_count; // ... 
until end of the timeslice, counts -N -> 0 extern int last_count; // last absolute target, often = next_interupt extern int pcaddr; @@ -286,6 +313,7 @@ void cc_interrupt(); void fp_exception(); void fp_exception_ds(); void jump_to_new_pc(); +void call_gteStall(); void new_dyna_leave(); // Needed by assembler @@ -296,18 +324,22 @@ static void load_all_regs(signed char i_regmap[]); static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]); static void load_regs_entry(int t); static void load_all_consts(signed char regmap[],u_int dirty,int i); +static u_int get_host_reglist(const signed char *regmap); static int verify_dirty(const u_int *ptr); static int get_final_value(int hr, int i, int *value); static void add_stub(enum stub_type type, void *addr, void *retaddr, u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e); static void add_stub_r(enum stub_type type, void *addr, void *retaddr, - int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist); + int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist); static void add_to_linker(void *addr, u_int target, int ext); static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override); static void *get_direct_memhandler(void *table, u_int addr, enum stub_type type, uintptr_t *addr_host); +static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist); static void pass_args(int a0, int a1); +static void emit_far_jump(const void *f); +static void emit_far_call(const void *f); static void mprotect_w_x(void *start, void *end, int is_x) { @@ -336,7 +368,7 @@ static void start_tcache_write(void *start, void *end) static void end_tcache_write(void *start, void *end) { -#ifdef __arm__ +#if defined(__arm__) || defined(__aarch64__) size_t len = (char *)end - (char *)start; #if defined(__BLACKBERRY_QNX__) msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE); @@ -346,12 +378,14 @@ static void end_tcache_write(void *start, void *end) sceKernelSyncVMDomain(sceBlock, start, len); #elif defined(_3DS) ctr_flush_invalidate_cache(); + #elif defined(__aarch64__) + // as of 2021, __clear_cache() is still broken on arm64 + // so here is a custom one :( + clear_cache_arm64(start, end); #else __clear_cache(start, end); #endif (void)len; -#else - __clear_cache(start, end); #endif mprotect_w_x(start, end, 1); @@ -360,8 +394,8 @@ static void end_tcache_write(void *start, void *end) static void *start_block(void) { u_char *end = out + MAX_OUTPUT_BLOCK_SIZE; - if (end > translation_cache + (1< ndrc->translation_cache + sizeof(ndrc->translation_cache)) + end = ndrc->translation_cache + sizeof(ndrc->translation_cache); start_tcache_write(out, end); return out; } @@ -371,16 +405,74 @@ static void end_block(void *start) end_tcache_write(start, out); } +// also takes care of w^x mappings when patching code +static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)]; + +static void mark_clear_cache(void *target) +{ + uintptr_t offset = (u_char *)target - ndrc->translation_cache; + u_int mask = 1u << ((offset >> 12) & 31); + if (!(needs_clear_cache[offset >> 17] & mask)) { + char *start = (char *)((uintptr_t)target & ~4095l); + start_tcache_write(start, start + 4095); + needs_clear_cache[offset >> 17] |= mask; + } +} + +// Clearing the cache is rather slow on ARM Linux, so mark the areas +// that need to be cleared, and then only clear these areas once. 
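A minimal sketch (not part of the diff) of the page-bitmap granularity used by mark_clear_cache() above and walked by do_clear_cache() below: each u_int of needs_clear_cache[] covers 128 KiB of the translation cache, one bit per 4 KiB page; the helper name is illustrative only.

static void mark_page_sketch(u_int *bitmap, uintptr_t offset) // offset into the translation cache
{
  u_int word = offset >> 17;         // 32 pages x 4 KiB = 128 KiB per bitmap word
  u_int bit  = (offset >> 12) & 31;  // one bit per 4 KiB page
  bitmap[word] |= 1u << bit;         // page gets flushed on the next do_clear_cache()
}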
+static void do_clear_cache(void) +{ + int i, j; + for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++) + { + u_int bitmap = needs_clear_cache[i]; + if (!bitmap) + continue; + for (j = 0; j < 32; j++) + { + u_char *start, *end; + if (!(bitmap & (1<translation_cache + i*131072 + j*4096; + end = start + 4095; + for (j++; j < 32; j++) { + if (!(bitmap & (1<>31)|1; - return (x * cycle_multiplier + s * 50) / 100; + return (x * m + s * 50) / 100; +} + +// is the op an unconditional jump? +static int is_ujump(int i) +{ + return itype[i] == UJUMP || itype[i] == RJUMP + || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset +} + +static int is_jump(int i) +{ + return itype[i] == RJUMP || itype[i] == UJUMP || itype[i] == CJUMP || itype[i] == SJUMP; } static u_int get_page(u_int vaddr) @@ -528,7 +620,7 @@ void dirty_reg(struct regstat *cur,signed char reg) } } -void set_const(struct regstat *cur,signed char reg,uint64_t value) +static void set_const(struct regstat *cur, signed char reg, uint32_t value) { int hr; if(!reg) return; @@ -540,7 +632,7 @@ void set_const(struct regstat *cur,signed char reg,uint64_t value) } } -void clear_const(struct regstat *cur,signed char reg) +static void clear_const(struct regstat *cur, signed char reg) { int hr; if(!reg) return; @@ -551,7 +643,7 @@ void clear_const(struct regstat *cur,signed char reg) } } -int is_const(struct regstat *cur,signed char reg) +static int is_const(struct regstat *cur, signed char reg) { int hr; if(reg<0) return 0; @@ -563,7 +655,8 @@ int is_const(struct regstat *cur,signed char reg) } return 0; } -uint64_t get_const(struct regstat *cur,signed char reg) + +static uint32_t get_const(struct regstat *cur, signed char reg) { int hr; if(!reg) return 0; @@ -589,7 +682,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg) j=slen-i-1; break; } - if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000) + if (is_ujump(i+j)) { // Don't go past an unconditonal jump j++; @@ -637,7 +730,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg) // TODO: preferred register based on backward branch } // Delay slot should preferably not overwrite branch conditions or cycle count - if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) { + if (i > 0 && is_jump(i-1)) { if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1; if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1; hsn[CCREG]=1; @@ -672,7 +765,7 @@ int needed_again(int r, int i) int b=-1; int rn=10; - if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) + if (i > 0 && is_ujump(i-1)) { if(ba[i-1]start+slen*4-4) return 0; // Don't need any registers if exiting the block @@ -683,7 +776,7 @@ int needed_again(int r, int i) j=slen-i-1; break; } - if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000) + if (is_ujump(i+j)) { // Don't go past an unconditonal jump j++; @@ -739,7 +832,7 @@ int loop_reg(int i, int r, int hr) j=slen-i-1; break; } - if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000) + if (is_ujump(i+j)) { // Don't go past an unconditonal jump j++; @@ -832,6 +925,7 @@ static const struct { FUNCNAME(jump_handler_write32), FUNCNAME(invalidate_addr), FUNCNAME(jump_to_new_pc), + FUNCNAME(call_gteStall), FUNCNAME(new_dyna_leave), FUNCNAME(pcsx_mtc0), FUNCNAME(pcsx_mtc0_ds), @@ -866,6 +960,48 @@ static const char *func_name(const void *a) #include "assem_arm64.c" #endif +static void *get_trampoline(const void *f) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) { + if (ndrc->tramp.f[i] == f || 
ndrc->tramp.f[i] == NULL) + break; + } + if (i == ARRAY_SIZE(ndrc->tramp.f)) { + SysPrintf("trampoline table is full, last func %p\n", f); + abort(); + } + if (ndrc->tramp.f[i] == NULL) { + start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]); + ndrc->tramp.f[i] = f; + end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]); + } + return &ndrc->tramp.ops[i]; +} + +static void emit_far_jump(const void *f) +{ + if (can_jump_or_call(f)) { + emit_jmp(f); + return; + } + + f = get_trampoline(f); + emit_jmp(f); +} + +static void emit_far_call(const void *f) +{ + if (can_jump_or_call(f)) { + emit_call(f); + return; + } + + f = get_trampoline(f); + emit_call(f); +} + // Add virtual address mapping to linked list void ll_add(struct ll_entry **head,int vaddr,void *addr) { @@ -993,9 +1129,7 @@ static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift) { inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr); void *host_addr=find_extjump_insn(head->addr); - #if defined(__arm__) || defined(__aarch64__) - mark_clear_cache(host_addr); - #endif + mark_clear_cache(host_addr); set_jump_target(host_addr, head->addr); } head=head->next; @@ -1021,9 +1155,7 @@ static void invalidate_page(u_int page) while(head!=NULL) { inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr); void *host_addr=find_extjump_insn(head->addr); - #if defined(__arm__) || defined(__aarch64__) - mark_clear_cache(host_addr); - #endif + mark_clear_cache(host_addr); set_jump_target(host_addr, head->addr); next=head->next; free(head); @@ -1046,9 +1178,7 @@ static void invalidate_block_range(u_int block, u_int first, u_int last) for(first=page+1;first3) // MTC1/CTC1 + else if (opcode2[i] > 3) // MTC2/CTC2 { if(rs1[i]){ clear_const(current,rs1[i]); @@ -1818,13 +1949,15 @@ static void cop12_alloc(struct regstat *current,int i) current->u&=~1LL; alloc_reg(current,i,0); } - alloc_reg_temp(current,i,-1); } + alloc_reg_temp(current,i,-1); minimum_free_regs[i]=1; } void c2op_alloc(struct regstat *current,int i) { + alloc_cc(current,i); // for stalls + dirty_reg(current,CCREG); alloc_reg_temp(current,i,-1); } @@ -1881,8 +2014,9 @@ void delayslot_alloc(struct regstat *current,int i) cop0_alloc(current,i); break; case COP1: + break; case COP2: - cop12_alloc(current,i); + cop2_alloc(current,i); break; case C1LS: c1ls_alloc(current,i); @@ -1948,7 +2082,7 @@ static void add_stub(enum stub_type type, void *addr, void *retaddr, } static void add_stub_r(enum stub_type type, void *addr, void *retaddr, - int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist) + int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist) { add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist); } @@ -2056,10 +2190,11 @@ static void alu_assemble(int i,struct regstat *i_regs) s2l=get_reg(i_regs->regmap,rs2[i]); if(rs2[i]==0) // rx=0); - if(opcode2[i]==0x2a) // SLT + if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT + assert(s1l>=0); emit_shrimm(s1l,31,t); - else // SLTU (unsigned can not be less than zero) + } + else // SLTU (unsigned can not be less than zero, 0<0) emit_zeroreg(t); } else if(rs1[i]==0) // r0= 0) + reglist |= 1 << hr; + } + return reglist; +} + +static u_int reglist_exclude(u_int reglist, int r1, int r2) +{ + if (r1 >= 0) + reglist &= ~(1u << r1); + if (r2 >= 0) + reglist &= ~(1u << r2); + return reglist; +} + +// find a temp caller-saved register not in reglist (so assumed to be free) +static int reglist_find_free(u_int reglist) +{ + u_int free_regs = 
~reglist & CALLER_SAVE_REGS; + if (free_regs == 0) + return -1; + return __builtin_ctz(free_regs); +} + +static void load_assemble(int i, const struct regstat *i_regs) { int s,tl,addr; int offset; void *jaddr=0; int memtarget=0,c=0; int fastio_reg_override=-1; - u_int hr,reglist=0; + u_int reglist=get_host_reglist(i_regs->regmap); tl=get_reg(i_regs->regmap,rt1[i]); s=get_reg(i_regs->regmap,rs1[i]); offset=imm[i]; - for(hr=0;hrregmap[hr]>=0) reglist|=1<regmap[HOST_CCREG]==CCREG) reglist&=~(1<=0) { c=(i_regs->wasconst>>s)&1; @@ -2664,14 +2824,14 @@ static void load_assemble(int i,struct regstat *i_regs) } #ifndef loadlr_assemble -static void loadlr_assemble(int i,struct regstat *i_regs) +static void loadlr_assemble(int i, const struct regstat *i_regs) { int s,tl,temp,temp2,addr; int offset; void *jaddr=0; int memtarget=0,c=0; int fastio_reg_override=-1; - u_int hr,reglist=0; + u_int reglist=get_host_reglist(i_regs->regmap); tl=get_reg(i_regs->regmap,rt1[i]); s=get_reg(i_regs->regmap,rs1[i]); temp=get_reg(i_regs->regmap,-1); @@ -2679,9 +2839,6 @@ static void loadlr_assemble(int i,struct regstat *i_regs) addr=get_reg(i_regs->regmap,AGEN1+(i&1)); assert(addr<0); offset=imm[i]; - for(hr=0;hrregmap[hr]>=0) reglist|=1<regmap); tl=get_reg(i_regs->regmap,rs2[i]); s=get_reg(i_regs->regmap,rs1[i]); temp=get_reg(i_regs->regmap,agr); @@ -2771,9 +2928,6 @@ void store_assemble(int i,struct regstat *i_regs) } assert(tl>=0); assert(temp>=0); - for(hr=0;hrregmap[hr]>=0) reglist|=1<regmap[HOST_CCREG]==CCREG) reglist&=~(1<waswritten&(1<waswritten&(1<regmap); tl=get_reg(i_regs->regmap,rs2[i]); s=get_reg(i_regs->regmap,rs1[i]); temp=get_reg(i_regs->regmap,agr); @@ -2894,9 +3048,6 @@ static void storelr_assemble(int i,struct regstat *i_regs) } } assert(tl>=0); - for(hr=0;hrregmap[hr]>=0) reglist|=1<=0); if(!c) { emit_cmpimm(s<0||offset?temp:s,RAM_SIZE); @@ -2989,7 +3140,7 @@ static void storelr_assemble(int i,struct regstat *i_regs) set_jump_target(done2, out); if(!c||!memtarget) add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist); - if(!(i_regs->waswritten&(1<waswritten&(1<regmap,INVCP); @@ -3046,7 +3197,7 @@ static void cop0_assemble(int i,struct regstat *i_regs) emit_storereg(CCREG,HOST_CCREG); emit_loadreg(rs1[i],1); emit_movimm(copr,0); - emit_call(pcsx_mtc0_ds); + emit_far_call(pcsx_mtc0_ds); emit_loadreg(rs1[i],s); return; } @@ -3055,14 +3206,12 @@ static void cop0_assemble(int i,struct regstat *i_regs) emit_movimm(0,HOST_TEMPREG); emit_writeword(HOST_TEMPREG,&pending_exception); } - //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12); - //else if(s==HOST_CCREG) emit_loadreg(rs1[i],1); else if(s!=1) emit_mov(s,1); emit_movimm(copr,0); - emit_call(pcsx_mtc0); + emit_far_call(pcsx_mtc0); if(copr==9||copr==11||copr==12||copr==13) { emit_readword(&Count,HOST_CCREG); emit_readword(&next_interupt,HOST_TEMPREG); @@ -3079,7 +3228,7 @@ static void cop0_assemble(int i,struct regstat *i_regs) emit_jeq(0); emit_readword(&pcaddr, 0); emit_addimm(HOST_CCREG,2,HOST_CCREG); - emit_call(get_addr_ht); + emit_far_call(get_addr_ht); emit_jmpreg(0); set_jump_target(jaddr, out); } @@ -3139,7 +3288,131 @@ static void do_cop1stub(int n) if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG); emit_movimm(start+(i-ds)*4,EAX); // Get PC emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle... 
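The cop2 stall machinery added below models the GTE running in parallel with the CPU: each GTE operation records in psxRegs.gteBusyCycle the cycle at which it will finish, and a later GTE access stalls only if it arrives within 44 cycles of that point (the same bound the check uses for its "can't stall" case). A minimal C model of what the emitted runtime check computes, with illustrative names, not part of the diff:

static void gte_stall_model(u_int *cc, u_int busy) // cc: current cycle, busy: gteBusyCycle
{
  u_int remaining = busy - *cc;  // unsigned: wraps to a huge value once the GTE is already idle
  if (remaining < 44)            // same threshold as the emit_cmpimm(..., 44) check below
    *cc = busy;                  // charge the stall by advancing the cycle counter
}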
- emit_jmp(ds?fp_exception_ds:fp_exception); + emit_far_jump(ds?fp_exception_ds:fp_exception); +} + +static int cop2_is_stalling_op(int i, int *cycles) +{ + if (opcode[i] == 0x3a) { // SWC2 + *cycles = 0; + return 1; + } + if (itype[i] == COP2 && (opcode2[i] == 0 || opcode2[i] == 2)) { // MFC2/CFC2 + *cycles = 0; + return 1; + } + if (itype[i] == C2OP) { + *cycles = gte_cycletab[source[i] & 0x3f]; + return 1; + } + // ... what about MTC2/CTC2/LWC2? + return 0; +} + +#if 0 +static void log_gte_stall(int stall, u_int cycle) +{ + if ((u_int)stall <= 44) + printf("x stall %2d %u\n", stall, cycle + last_count); + if (cycle + last_count > 1215348544) exit(1); +} + +static void emit_log_gte_stall(int i, int stall, u_int reglist) +{ + save_regs(reglist); + if (stall > 0) + emit_movimm(stall, 0); + else + emit_mov(HOST_TEMPREG, 0); + emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1); + emit_far_call(log_gte_stall); + restore_regs(reglist); +} +#endif + +static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist) +{ + int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed; + int rtmp = reglist_find_free(reglist); + + if (HACK_ENABLED(NDHACK_GTE_NO_STALL)) + return; + //assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG); + if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) { + // happens occasionally... cc evicted? Don't bother then + //printf("no cc %08x\n", start + i*4); + return; + } + if (!bt[i]) { + for (j = i - 1; j >= 0; j--) { + //if (is_ds[j]) break; + if (cop2_is_stalling_op(j, &other_gte_op_cycles) || bt[j]) + break; + } + } + cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]); + if (other_gte_op_cycles >= 0) + stall = other_gte_op_cycles - cycles_passed; + else if (cycles_passed >= 44) + stall = 0; // can't stall + if (stall == -MAXBLOCK && rtmp >= 0) { + // unknown stall, do the expensive runtime check + assem_debug("; cop2_call_stall_check\n"); +#if 0 // too slow + save_regs(reglist); + emit_movimm(gte_cycletab[op], 0); + emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1); + emit_far_call(call_gteStall); + restore_regs(reglist); +#else + host_tempreg_acquire(); + emit_readword(&psxRegs.gteBusyCycle, rtmp); + emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp); + emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG); + emit_cmpimm(HOST_TEMPREG, 44); + emit_cmovb_reg(rtmp, HOST_CCREG); + //emit_log_gte_stall(i, 0, reglist); + host_tempreg_release(); +#endif + } + else if (stall > 0) { + //emit_log_gte_stall(i, stall, reglist); + emit_addimm(HOST_CCREG, stall, HOST_CCREG); + } + + // save gteBusyCycle, if needed + if (gte_cycletab[op] == 0) + return; + other_gte_op_cycles = -1; + for (j = i + 1; j < slen; j++) { + if (cop2_is_stalling_op(j, &other_gte_op_cycles)) + break; + if (is_jump(j)) { + // check ds + if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles)) + j++; + break; + } + } + if (other_gte_op_cycles >= 0) + // will handle stall when assembling that op + return; + cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen -1)] - ccadj[i]); + if (cycles_passed >= 44) + return; + assem_debug("; save gteBusyCycle\n"); + host_tempreg_acquire(); +#if 0 + emit_readword(&last_count, HOST_TEMPREG); + emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG); + emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG); + emit_addimm(HOST_TEMPREG, gte_cycletab[op]), HOST_TEMPREG); + emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle); +#else + emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG); + emit_writeword(HOST_TEMPREG, 
&psxRegs.gteBusyCycle); +#endif + host_tempreg_release(); } static void cop2_get_dreg(u_int copr,signed char tl,signed char temp) @@ -3225,7 +3498,7 @@ static void cop2_put_dreg(u_int copr,signed char sl,signed char temp) } } -static void c2ls_assemble(int i,struct regstat *i_regs) +static void c2ls_assemble(int i, const struct regstat *i_regs) { int s,tl; int ar; @@ -3235,7 +3508,7 @@ static void c2ls_assemble(int i,struct regstat *i_regs) enum stub_type type; int agr=AGEN1+(i&1); int fastio_reg_override=-1; - u_int hr,reglist=0; + u_int reglist=get_host_reglist(i_regs->regmap); u_int copr=(source[i]>>16)&0x1f; s=get_reg(i_regs->regmap,rs1[i]); tl=get_reg(i_regs->regmap,FTEMP); @@ -3243,9 +3516,6 @@ static void c2ls_assemble(int i,struct regstat *i_regs) assert(rs1[i]>0); assert(tl>=0); - for(hr=0;hrregmap[hr]>=0) reglist|=1<regmap[HOST_CCREG]==CCREG) reglist&=~(1<=0); if (opcode[i]==0x3a) { // SWC2 + cop2_call_stall_check(0, i, i_regs, reglist_exclude(reglist, tl, -1)); cop2_get_dreg(copr,tl,-1); type=STOREW_STUB; } @@ -3301,7 +3572,7 @@ static void c2ls_assemble(int i,struct regstat *i_regs) if(jaddr2) add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist); if(opcode[i]==0x3a) // SWC2 - if(!(i_regs->waswritten&(1<waswritten&(1<regmap,INVCP); assert(ir>=0); @@ -3324,10 +3595,18 @@ static void c2ls_assemble(int i,struct regstat *i_regs) } } -static void cop2_assemble(int i,struct regstat *i_regs) +static void cop2_assemble(int i, const struct regstat *i_regs) { - u_int copr=(source[i]>>11)&0x1f; - signed char temp=get_reg(i_regs->regmap,-1); + u_int copr = (source[i]>>11) & 0x1f; + signed char temp = get_reg(i_regs->regmap, -1); + + if (opcode2[i] == 0 || opcode2[i] == 2) { // MFC2/CFC2 + if (!HACK_ENABLED(NDHACK_GTE_NO_STALL)) { + signed char tl = get_reg(i_regs->regmap, rt1[i]); + u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), tl, temp); + cop2_call_stall_check(0, i, i_regs, reglist); + } + } if (opcode2[i]==0) { // MFC2 signed char tl=get_reg(i_regs->regmap,rt1[i]); if(tl>=0&&rt1[i]!=0) @@ -3396,7 +3675,7 @@ static void do_unalignedwritestub(int n) if(cc<0) emit_loadreg(CCREG,2); emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2); - emit_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr)); + emit_far_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr)); emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc); if(cc<0) emit_storereg(CCREG,2); @@ -3490,8 +3769,8 @@ static void call_c_cpu_handler(int i, const struct regstat *i_regs, u_int pc, vo emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX emit_add(2,HOST_CCREG,2); emit_writeword(2,&psxRegs.cycle); - emit_call(func); - emit_jmp(jump_to_new_pc); + emit_far_call(func); + emit_far_jump(jump_to_new_pc); } static void syscall_assemble(int i,struct regstat *i_regs) @@ -4220,19 +4499,33 @@ static void drc_dbg_emit_do_cmp(int i) { extern void do_insn_cmp(); //extern int cycle; - u_int hr,reglist=0; + u_int hr, reglist = get_host_reglist(regs[i].regmap); - for(hr=0;hr=0) reglist|=1< 0 && !bt[i]) { + for (hr = 0; hr < HOST_REGS; hr++) { + int reg = regs[i-1].regmap[hr]; + if (hr == EXCLUDE_REG || reg < 0) + continue; + if (!((regs[i-1].isconst >> hr) & 1)) + continue; + if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr]) + continue; + emit_movimm(constmap[i-1][hr],0); + emit_storereg(reg, 0); + } + } emit_movimm(start+i*4,0); emit_writeword(0,&pcaddr); - emit_call(do_insn_cmp); + emit_far_call(do_insn_cmp); //emit_readword(&cycle,0); //emit_addimm(0,2,0); 
//emit_writeword(0,&cycle); (void)get_reg2; restore_regs(reglist); + assem_debug("\\\\do_insn_cmp\n"); } #else #define drc_dbg_emit_do_cmp(x) @@ -4359,11 +4652,13 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert) else if(*adj==0||invert) { int cycles=CLOCK_ADJUST(count+2); // faster loop HACK +#if 0 if (t&&*adj) { int rel=t-i; if(-NO_CYCLE_PENALTY_THR>2]=1; if(ba[i]<=start+i*4) { // Backward branch - if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000) + if(is_ujump(i)) { // Unconditional branch temp_u=1; @@ -5766,7 +6061,7 @@ void unneeded_registers(int istart,int iend,int r) gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown; } } /*else*/ if(1) { - if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000) + if (is_ujump(i)) { // Unconditional branch u=unneeded_reg[(ba[i]-start)>>2]; @@ -5876,7 +6171,7 @@ void clean_registers(int istart,int iend,int wr) if(ba[i]=(start+slen*4)) { // Branch out of this block, flush all regs - if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000) + if (is_ujump(i)) { // Unconditional branch will_dirty_i=0; @@ -5956,7 +6251,7 @@ void clean_registers(int istart,int iend,int wr) // Internal branch if(ba[i]<=start+i*4) { // Backward branch - if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000) + if (is_ujump(i)) { // Unconditional branch temp_will_dirty=0; @@ -6053,7 +6348,7 @@ void clean_registers(int istart,int iend,int wr) } /*else*/ if(1) { - if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000) + if (is_ujump(i)) { // Unconditional branch will_dirty_i=0; @@ -6220,7 +6515,7 @@ void clean_registers(int istart,int iend,int wr) regs[i].dirty&=wont_dirty_i; if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP) { - if(i>16)!=0x1000) { + if (i < iend-1 && !is_ujump(i)) { for(r=0;rtranslation_cache; beginning = start_block(); emit_movimm(DRC_TEST_VAL + i, 0); // test emit_ret(); @@ -6412,15 +6707,15 @@ static void new_dynarec_test(void) SysPrintf("test passed.\n"); else SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]); - out = translation_cache; + out = ndrc->translation_cache; } // clear the state completely, instead of just marking // things invalid like invalidate_all_pages() does -void new_dynarec_clear_full() +void new_dynarec_clear_full(void) { int n; - out = translation_cache; + out = ndrc->translation_cache; memset(invalid_code,1,sizeof(invalid_code)); memset(hash_table,0xff,sizeof(hash_table)); memset(mini_ht,-1,sizeof(mini_ht)); @@ -6438,34 +6733,28 @@ void new_dynarec_clear_full() for(n=0;n<4096;n++) ll_clear(jump_dirty+n); } -void new_dynarec_init() +void new_dynarec_init(void) { SysPrintf("Init new dynarec\n"); - // allocate/prepare a buffer for translation cache - // see assem_arm.h for some explanation -#if defined(BASE_ADDR_FIXED) - if (mmap(translation_cache, 1 << TARGET_SIZE_2, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANONYMOUS, - -1, 0) != translation_cache) { - SysPrintf("mmap() failed: %s\n", strerror(errno)); - SysPrintf("disable BASE_ADDR_FIXED and recompile\n"); - abort(); - } -#elif defined(BASE_ADDR_DYNAMIC) +#ifdef BASE_ADDR_DYNAMIC #ifdef VITA sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2); if (sceBlock < 0) SysPrintf("sceKernelAllocMemBlockForVM failed\n"); - int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache); + int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc); if (ret < 0) SysPrintf("sceKernelGetMemBlockBase failed\n"); #else - translation_cache = mmap 
(NULL, 1 << TARGET_SIZE_2, + uintptr_t desired_addr = 0; + #ifdef __ELF__ + extern char _end; + desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl; + #endif + ndrc = mmap((void *)desired_addr, sizeof(*ndrc), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (translation_cache == MAP_FAILED) { + if (ndrc == MAP_FAILED) { SysPrintf("mmap() failed: %s\n", strerror(errno)); abort(); } @@ -6473,11 +6762,12 @@ void new_dynarec_init() #else #ifndef NO_WRITE_EXEC // not all systems allow execute in data segment by default - if (mprotect(translation_cache, 1<translation_cache) + sizeof(ndrc->tramp.ops), + PROT_READ | PROT_WRITE | PROT_EXEC) != 0) SysPrintf("mprotect() failed: %s\n", strerror(errno)); #endif #endif - out = translation_cache; + out = ndrc->translation_cache; cycle_multiplier=200; new_dynarec_clear_full(); #ifdef HOST_IMM8 @@ -6493,15 +6783,15 @@ void new_dynarec_init() SysPrintf("warning: RAM is not directly mapped, performance will suffer\n"); } -void new_dynarec_cleanup() +void new_dynarec_cleanup(void) { int n; -#if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC) +#ifdef BASE_ADDR_DYNAMIC #ifdef VITA sceKernelFreeMemBlock(sceBlock); sceBlock = -1; #else - if (munmap(translation_cache, 1<>12]=0; emit_movimm(start,0); emit_writeword(0,&pcaddr); - emit_jmp(new_dyna_leave); + emit_far_jump(new_dyna_leave); literal_pool(0); end_block(beginning); ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning); @@ -7107,7 +7406,7 @@ int new_recompile_block(u_int addr) else if(type==CJUMP||type==SJUMP) ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14); else ba[i]=-1; - if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) { + if (i > 0 && is_jump(i-1)) { int do_in_intrp=0; // branch in delay slot? if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) { @@ -7125,7 +7424,7 @@ int new_recompile_block(u_int addr) bt[t+1]=1; // expected return from interpreter } else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&& - !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) { + !(i>=3&&is_jump(i-3))) { // v0 overwrite like this is a sign of trouble, bail out SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr); do_in_intrp=1; @@ -7141,7 +7440,7 @@ int new_recompile_block(u_int addr) } } /* Is this the end of the block? 
*/ - if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) { + if (i > 0 && is_ujump(i-1)) { if(rt1[i-1]==0) { // Continue past subroutine call (JAL) done=2; } @@ -7508,8 +7807,9 @@ int new_recompile_block(u_int addr) cop0_alloc(¤t,i); break; case COP1: + break; case COP2: - cop12_alloc(¤t,i); + cop2_alloc(¤t,i); break; case C1LS: c1ls_alloc(¤t,i); @@ -7602,7 +7902,7 @@ int new_recompile_block(u_int addr) dirty_reg(&branch_regs[i-1],31); } memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap)); - memcpy(constmap[i],constmap[i-1],sizeof(current_constmap)); + memcpy(constmap[i],constmap[i-1],sizeof(constmap[i])); break; case RJUMP: memcpy(&branch_regs[i-1],¤t,sizeof(current)); @@ -7623,7 +7923,7 @@ int new_recompile_block(u_int addr) } #endif memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap)); - memcpy(constmap[i],constmap[i-1],sizeof(current_constmap)); + memcpy(constmap[i],constmap[i-1],sizeof(constmap[i])); break; case CJUMP: if((opcode[i-1]&0x3E)==4) // BEQ/BNE @@ -7650,7 +7950,7 @@ int new_recompile_block(u_int addr) branch_regs[i-1].isconst=0; branch_regs[i-1].wasconst=0; memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap)); - memcpy(constmap[i],constmap[i-1],sizeof(current_constmap)); + memcpy(constmap[i],constmap[i-1],sizeof(constmap[i])); } else if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ @@ -7675,7 +7975,7 @@ int new_recompile_block(u_int addr) branch_regs[i-1].isconst=0; branch_regs[i-1].wasconst=0; memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap)); - memcpy(constmap[i],constmap[i-1],sizeof(current_constmap)); + memcpy(constmap[i],constmap[i-1],sizeof(constmap[i])); } else // Alloc the delay slot in case the branch is taken @@ -7729,7 +8029,7 @@ int new_recompile_block(u_int addr) branch_regs[i-1].isconst=0; branch_regs[i-1].wasconst=0; memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap)); - memcpy(constmap[i],constmap[i-1],sizeof(current_constmap)); + memcpy(constmap[i],constmap[i-1],sizeof(constmap[i])); } else // Alloc the delay slot in case the branch is taken @@ -7753,7 +8053,7 @@ int new_recompile_block(u_int addr) break; } - if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000) + if (is_ujump(i-1)) { if(rt1[i-1]==31) // JAL/JALR { @@ -7802,12 +8102,10 @@ int new_recompile_block(u_int addr) #if !defined(DRC_DBG) else if(itype[i]==C2OP&>e_cycletab[source[i]&0x3f]>2) { - // GTE runs in parallel until accessed, divide by 2 for a rough guess - cc+=gte_cycletab[source[i]&0x3f]/2; - } - else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues - { - cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER) + // this should really be removed since the real stalls have been implemented, + // but doing so causes sizeable perf regression against the older version + u_int gtec = gte_cycletab[source[i] & 0x3f]; + cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? gtec/2 : 2; } else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i]) { @@ -7815,7 +8113,8 @@ int new_recompile_block(u_int addr) } else if(itype[i]==C2LS) { - cc+=4; + // same as with C2OP + cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? 
4 : 2; } #endif else @@ -7826,7 +8125,7 @@ int new_recompile_block(u_int addr) if(!is_ds[i]) { regs[i].dirty=current.dirty; regs[i].isconst=current.isconst; - memcpy(constmap[i],current_constmap,sizeof(current_constmap)); + memcpy(constmap[i],current_constmap,sizeof(constmap[i])); } for(hr=0;hr=0) { @@ -7867,7 +8166,7 @@ int new_recompile_block(u_int addr) } } // Conditional branch may need registers for following instructions - if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) + if (!is_ujump(i)) { if(i>16)!=0x1000) + if (!is_ujump(i)) { if(likely[i]) { regs[i].regmap[hr]=-1; @@ -8012,7 +8311,7 @@ int new_recompile_block(u_int addr) { branch_regs[i].regmap[hr]=-1; branch_regs[i].regmap_entry[hr]=-1; - if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) + if (!is_ujump(i)) { if(!likely[i]&&i>16)!=0x1000) { + if (!is_ujump(i)) { regmap_pre[i+2][hr]=f_regmap[hr]; regs[i+2].wasdirty&=~(1<>16)!=0x1000) { + if (!is_ujump(k)) { regmap_pre[k+2][hr]=f_regmap[hr]; regs[k+2].wasdirty&=~(1<>16)==0x1000) + if (is_ujump(j)) { // Stop on unconditional branch break; @@ -8696,7 +8995,7 @@ int new_recompile_block(u_int addr) emit_cmp(0,1); #ifdef __aarch64__ emit_jeq(out + 4*2); - emit_jmp(new_dyna_leave); + emit_far_jump(new_dyna_leave); #else emit_jne(new_dyna_leave); #endif @@ -8712,7 +9011,7 @@ int new_recompile_block(u_int addr) } else { speculate_register_values(i); #ifndef DESTRUCTIVE_WRITEBACK - if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000)) + if (i < 2 || !is_ujump(i-2)) { wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]); } @@ -8723,7 +9022,7 @@ int new_recompile_block(u_int addr) } #endif // write back - if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000)) + if (i < 2 || !is_ujump(i-2)) { wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]); loop_preload(regmap_pre[i],regs[i].regmap_entry); @@ -8815,17 +9114,17 @@ int new_recompile_block(u_int addr) case SPAN: pagespan_assemble(i,®s[i]);break; } - if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000) + if (is_ujump(i)) literal_pool(1024); else literal_pool_jumpover(256); } } - //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000); + //assert(is_ujump(i-2)); // If the block did not end with an unconditional branch, // add a jump to the next instruction. if(i>1) { - if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) { + if(!is_ujump(i-2)&&itype[i-1]!=SPAN) { assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP); assert(i==slen); if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) { @@ -8968,8 +9267,8 @@ int new_recompile_block(u_int addr) // If we're within 256K of the end of the buffer, // start over from the beginning. (Is 256K enough?) 
-  if (out > translation_cache+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE)
-    out = translation_cache;
+  if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
+    out = ndrc->translation_cache;
 
   // Trap writes to any of the pages we compiled
   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
@@ -8986,11 +9285,11 @@ int new_recompile_block(u_int addr)
 
   /* Pass 10 - Free memory by expiring oldest blocks */
 
-  int end=(((out-translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
+  int end=(((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
   while(expirep!=end)
   {
     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
-    uintptr_t base=(uintptr_t)translation_cache+((expirep>>13)<<shift);
+    uintptr_t base=(uintptr_t)ndrc->translation_cache+((expirep>>13)<<shift);
     inv_debug("EXP: Phase %d\n",expirep);
     switch((expirep>>11)&3)
     {
@@ -9028,10 +9327,8 @@ int new_recompile_block(u_int addr)
       break;
       case 3:
       // Clear jump_out
-      #if defined(__arm__) || defined(__aarch64__)
       if((expirep&2047)==0)
         do_clear_cache();
-      #endif
       ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
       ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
       break;
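For reference, a sketch of the phase decoding that Pass 10 above relies on; this is an interpretation of the visible index arithmetic (expirep>>13 feeding the block base, the switch on (expirep>>11)&3, and the +16384 in the 'end' computation), not text from the patch:

static void decode_expire_phase(u_int expirep, u_int *region, u_int *table)
{
  *region = expirep >> 13;        // bits 15..13: one of 8 equal slices of the translation cache
  *table  = (expirep >> 11) & 3;  // bits 12..11: which list the switch cases 0..3 clean for that slice
}
// 'end' places the target phase 16384/65536 = 1/4 of the ring ahead of the current
// write position 'out', so expiry keeps running ahead of where new code is emitted.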