diff --git a/libpcsxcore/new_dynarec/linkage_arm.S b/libpcsxcore/new_dynarec/linkage_arm.S
index fcb4e1a..e2547c4 100644
--- a/libpcsxcore/new_dynarec/linkage_arm.S
+++ b/libpcsxcore/new_dynarec/linkage_arm.S
@@ -439,7 +439,7 @@ FUNCTION(cc_interrupt):
 	str	r1, [fp, #LO_pending_exception]
 	and	r2, r2, r10, lsr #17
 	add	r3, fp, #LO_restore_candidate
-	str	r10, [fp, #LO_cycle]		/* PCSX cycles */
+@@@	str	r10, [fp, #LO_cycle]		/* PCSX cycles */
 @@	str	r10, [fp, #LO_reg_cop0+36]	/* Count */
 	ldr	r4, [r2, r3]
 	mov	r10, lr
@@ -519,7 +519,7 @@ FUNCTION(jump_syscall_hle):
 	mov	r1, #0    /* in delay slot */
 	add	r2, r2, r10
 	mov	r0, #0x20 /* cause */
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
 	bl	psxException
 
 	/* note: psxException might do recursive recompiler call from it's HLE code,
@@ -540,7 +540,7 @@ FUNCTION(jump_hlecall):
 	str	r0, [fp, #LO_pcaddr]
 	add	r2, r2, r10
 	adr	lr, pcsx_return
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
 	bx	r1
 	.size	jump_hlecall, .-jump_hlecall
 
@@ -550,7 +550,7 @@ FUNCTION(jump_intcall):
 	str	r0, [fp, #LO_pcaddr]
 	add	r2, r2, r10
 	adr	lr, pcsx_return
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
 	b	execI
 	.size	jump_hlecall, .-jump_hlecall
 
@@ -559,7 +559,7 @@ FUNCTION(new_dyna_leave):
 	ldr	r0, [fp, #LO_last_count]
 	add	r12, fp, #28
 	add	r10, r0, r10
-	str	r10, [fp, #LO_cycle]
+@@@	str	r10, [fp, #LO_cycle]
 	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
 	.size	new_dyna_leave, .-new_dyna_leave
 
@@ -676,7 +676,7 @@ FUNCTION(new_dyna_start):
 	\readop	r0, [r1, r3, lsl #\tab_shift]
 	.endif
 	movcc	pc, lr
-	str	r2, [fp, #LO_cycle]
+@@@	str	r2, [fp, #LO_cycle]
 	bx	r1
 .endm
 
@@ -711,7 +711,7 @@ FUNCTION(jump_handler_read32):
 	mov	r0, r1
 	add	r2, r2, r12
 	push	{r2, lr}
-	str	r2, [fp, #LO_cycle]
+@@@	str	r2, [fp, #LO_cycle]
 	blx	r3
 
 	ldr	r0, [fp, #LO_next_interupt]
@@ -739,7 +739,7 @@ FUNCTION(jump_handler_write_h):
 	add	r2, r2, r12
 	mov	r0, r1
 	push	{r2, lr}
-	str	r2, [fp, #LO_cycle]
+@@@	str	r2, [fp, #LO_cycle]
 	blx	r3
 
 	ldr	r0, [fp, #LO_next_interupt]
diff --git a/libpcsxcore/new_dynarec/linkage_arm64.S b/libpcsxcore/new_dynarec/linkage_arm64.S
index 060ac48..e3007d3 100644
--- a/libpcsxcore/new_dynarec/linkage_arm64.S
+++ b/libpcsxcore/new_dynarec/linkage_arm64.S
@@ -131,7 +131,7 @@ FUNCTION(cc_interrupt):
 	str	wzr, [rFP, #LO_pending_exception]
 	and	w2, w2, rCC, lsr #17
 	add	x3, rFP, #LO_restore_candidate
-	str	rCC, [rFP, #LO_cycle]		/* PCSX cycles */
+##	str	rCC, [rFP, #LO_cycle]		/* PCSX cycles */
 #	str	rCC, [rFP, #LO_reg_cop0+36]	/* Count */
 	ldr	w19, [x3, w2, uxtw]
 	mov	x21, lr
@@ -254,7 +254,7 @@ FUNCTION(new_dyna_start):
 FUNCTION(new_dyna_leave):
 	ldr	w0, [rFP, #LO_last_count]
 	add	rCC, rCC, w0
-	str	rCC, [rFP, #LO_cycle]
+##	str	rCC, [rFP, #LO_cycle]
 	ldp	x19, x20, [sp, #16*1]
 	ldp	x21, x22, [sp, #16*2]
 	ldp	x23, x24, [sp, #16*3]
@@ -272,7 +272,7 @@ FUNCTION(new_dyna_leave):
 	/* w0 = adddr/data, x1 = rhandler, w2 = cycles, x3 = whandler */
 	ldr	w4, [rFP, #LO_last_count]
 	add	w4, w4, w2
-	str	w4, [rFP, #LO_cycle]
+##	str	w4, [rFP, #LO_cycle]
 .endm
 
 .macro memhandler_post
diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index e7b55b6..caa06d0 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -43,10 +43,10 @@ static int sceBlock;
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #endif
 
-//#define DISASM
-//#define assem_debug printf
+#define DISASM
+#define assem_debug printf
 //#define inv_debug printf
-#define assem_debug(...)
+//#define assem_debug(...)
 #define inv_debug(...)
 
 #ifdef __i386__
@@ -424,6 +424,9 @@ static int doesnt_expire_soon(void *tcaddr)
 // This is called from the recompiled JR/JALR instructions
 void noinline *get_addr(u_int vaddr)
 {
+#ifdef DRC_DBG
+printf("get_addr %08x, pc=%08x\n", vaddr, psxRegs.pc);
+#endif
   u_int page=get_page(vaddr);
   u_int vpage=get_vpage(vaddr);
   struct ll_entry *head;
@@ -4221,13 +4224,15 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
     }
     emit_addimm_and_set_flags(cycles,HOST_CCREG);
     jaddr=out;
-    emit_jns(0);
+    //emit_jns(0);
+    emit_jmp(0);
   }
   else
   {
     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
     jaddr=out;
-    emit_jns(0);
+    //emit_jns(0);
+    emit_jmp(0);
   }
   add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
 }
@@ -4635,7 +4640,8 @@ static void rjump_assemble(int i,struct regstat *i_regs)
     // special case for RFE
     emit_jmp(0);
   else
-    emit_jns(0);
+    //emit_jns(0);
+    emit_jmp(0);
   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
 #ifdef USE_MINI_HT
   if(rs1[i]==31) {
@@ -4737,7 +4743,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
     else if(nop) {
       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       void *jaddr=out;
-      emit_jns(0);
+      //emit_jns(0);
+      emit_jmp(0);
       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
     }
     else {
@@ -4924,7 +4931,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
         emit_loadreg(CCREG,HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+        emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
         emit_storereg(CCREG,HOST_CCREG);
       }
@@ -4933,7 +4941,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
         assert(cc==HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+        emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
       }
     }
@@ -5032,7 +5041,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
     else if(nevertaken) {
       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       void *jaddr=out;
-      emit_jns(0);
+      //emit_jns(0);
+      emit_jmp(0);
       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
     }
     else {
@@ -5188,7 +5198,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
         emit_loadreg(CCREG,HOST_CCREG);
        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+        emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
         emit_storereg(CCREG,HOST_CCREG);
       }
@@ -5197,7 +5208,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
         assert(cc==HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+        emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
       }
     }
@@ -5685,7 +5697,7 @@ void unneeded_registers(int istart,int iend,int r)
     // R0 is always unneeded
     u|=1;
     // Save it
-    unneeded_reg[i]=u;
+    unneeded_reg[i]=1;//u;
     gte_unneeded[i]=gte_u;
     /*
     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
@@ -8209,6 +8221,7 @@ int new_recompile_block(int addr)
 
   // This allocates registers (if possible) one instruction prior
   // to use, which can avoid a load-use penalty on certain CPUs.
+#if 0
   for(i=0;i
>16)==0x1000)
       literal_pool(1024);
     else
@@ -8767,7 +8786,7 @@ int new_recompile_block(int addr)
     }
   }
   // External Branch Targets (jump_in)
-  if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
+  if(copy+slen*4>(void *)shadow+sizeof(shadow)) {copy=shadow;printf("shadow overflow\n");}
   for(i=0;i