From: notaz
Date: Mon, 22 Nov 2021 18:55:37 +0000 (+0200)
Subject: drc: minor adjustments
X-Git-Tag: r23~65
X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3d680478922d5f28e3dbe471308cc27a70e31fdf;p=pcsx_rearmed.git

drc: minor adjustments

like not marking INTCALL as compiled code
---

diff --git a/libpcsxcore/new_dynarec/assem_arm.c b/libpcsxcore/new_dynarec/assem_arm.c
index d68aea6c..4ccd19fe 100644
--- a/libpcsxcore/new_dynarec/assem_arm.c
+++ b/libpcsxcore/new_dynarec/assem_arm.c
@@ -206,7 +206,7 @@ static void *get_pointer(void *stub)
 {
   //printf("get_pointer(%x)\n",(int)stub);
   int *i_ptr=find_extjump_insn(stub);
-  assert((*i_ptr&0x0f000000)==0x0a000000);
+  assert((*i_ptr&0x0f000000)==0x0a000000); // b
   return (u_char *)i_ptr+((*i_ptr<<8)>>6)+8;
 }
 
@@ -1946,26 +1946,26 @@ static void inline_writestub(enum stub_type type, int i, u_int addr,
 }
 
 // this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
-static void do_dirty_stub_emit_args(u_int arg0)
+static void do_dirty_stub_emit_args(u_int arg0, u_int source_len)
 {
 #ifndef HAVE_ARMV7
   emit_loadlp((int)source, 1);
   emit_loadlp((int)copy, 2);
-  emit_loadlp(slen*4, 3);
+  emit_loadlp(source_len, 3);
 #else
   emit_movw(((u_int)source)&0x0000FFFF, 1);
   emit_movw(((u_int)copy)&0x0000FFFF, 2);
   emit_movt(((u_int)source)&0xFFFF0000, 1);
   emit_movt(((u_int)copy)&0xFFFF0000, 2);
-  emit_movw(slen*4, 3);
+  emit_movw(source_len, 3);
 #endif
   emit_movimm(arg0, 0);
 }
 
-static void *do_dirty_stub(int i)
+static void *do_dirty_stub(int i, u_int source_len)
 {
   assem_debug("do_dirty_stub %x\n",start+i*4);
-  do_dirty_stub_emit_args(start + i*4);
+  do_dirty_stub_emit_args(start + i*4, source_len);
   emit_far_call(verify_code);
   void *entry = out;
   load_regs_entry(i);
@@ -1975,9 +1975,9 @@ static void *do_dirty_stub(int i)
   return entry;
 }
 
-static void do_dirty_stub_ds()
+static void do_dirty_stub_ds(u_int source_len)
 {
-  do_dirty_stub_emit_args(start + 1);
+  do_dirty_stub_emit_args(start + 1, source_len);
   emit_far_call(verify_code_ds);
 }
 
diff --git a/libpcsxcore/new_dynarec/assem_arm64.c b/libpcsxcore/new_dynarec/assem_arm64.c
index 070c80fc..14d71563 100644
--- a/libpcsxcore/new_dynarec/assem_arm64.c
+++ b/libpcsxcore/new_dynarec/assem_arm64.c
@@ -44,7 +44,7 @@ static void set_jump_target(void *addr, void *target)
       || (*ptr&0x7e000000) == 0x34000000) { // cbz/cbnz
     // Conditional branch are limited to +/- 1MB
     // block max size is 256k so branching beyond the +/- 1MB limit
-    // should only happen when jumping to an already compiled block (see add_link)
+    // should only happen when jumping to an already compiled block (see add_jump_out)
     // a workaround would be to do a trampoline jump via a stub at the end of the block
     assert(-1048576 <= offset && offset < 1048576);
     *ptr=(*ptr&0xFF00000F)|(((offset>>2)&0x7ffff)<<5);
@@ -1665,12 +1665,12 @@ static int verify_code_arm64(const void *source, const void *copy, u_int size)
 }
 
 // this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
-static void do_dirty_stub_base(u_int vaddr)
+static void do_dirty_stub_base(u_int vaddr, u_int source_len)
 {
-  assert(slen <= MAXBLOCK);
+  assert(source_len <= MAXBLOCK*4);
   emit_loadlp_ofs(0, 0); // ldr x1, source
   emit_loadlp_ofs(0, 1); // ldr x2, copy
-  emit_movz(slen*4, 2);
+  emit_movz(source_len, 2);
   emit_far_call(verify_code_arm64);
   void *jmp = out;
   emit_cbz(0, 0);
@@ -1685,7 +1685,7 @@ static void assert_dirty_stub(const u_int *ptr)
 {
   assert((ptr[0] & 0xff00001f) == 0x58000000); // ldr x0, source
   assert((ptr[1] & 0xff00001f) == 0x58000001); // ldr x1, copy
-  assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #slen*4
+  assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #source_len
   assert( ptr[8] == 0xd61f0000); // br x0
 }
 
@@ -1706,11 +1706,11 @@ static void do_dirty_stub_emit_literals(u_int *loadlps)
   output_w64((uintptr_t)copy);
 }
 
-static void *do_dirty_stub(int i)
+static void *do_dirty_stub(int i, u_int source_len)
 {
   assem_debug("do_dirty_stub %x\n",start+i*4);
   u_int *loadlps = (void *)out;
-  do_dirty_stub_base(start + i*4);
+  do_dirty_stub_base(start + i*4, source_len);
   void *entry = out;
   load_regs_entry(i);
   if (entry == out)
@@ -1720,10 +1720,10 @@ static void *do_dirty_stub(int i)
   return entry;
 }
 
-static void do_dirty_stub_ds(void)
+static void do_dirty_stub_ds(u_int source_len)
 {
   u_int *loadlps = (void *)out;
-  do_dirty_stub_base(start + 1);
+  do_dirty_stub_base(start + 1, source_len);
   void *lit_jumpover = out;
   emit_jmp(out + 8*2);
   do_dirty_stub_emit_literals(loadlps);
@@ -1760,7 +1760,7 @@ static int verify_dirty(const u_int *ptr)
   assert_dirty_stub(ptr);
   source = (void *)get_from_ldr_literal(&ptr[0]); // ldr x1, source
   copy = (void *)get_from_ldr_literal(&ptr[1]); // ldr x1, copy
-  len = get_from_movz(&ptr[2]); // movz w3, #slen*4
+  len = get_from_movz(&ptr[2]); // movz w3, #source_len
   return !memcmp(source, copy, len);
 }
 
@@ -1780,7 +1780,7 @@ static void get_bounds(void *addr, u_char **start, u_char **end)
   const u_int *ptr = addr;
   assert_dirty_stub(ptr);
   *start = (u_char *)get_from_ldr_literal(&ptr[0]); // ldr x1, source
-  *end = *start + get_from_movz(&ptr[2]); // movz w3, #slen*4
+  *end = *start + get_from_movz(&ptr[2]); // movz w3, #source_len
 }
 
 /* Special assem */
diff --git a/libpcsxcore/new_dynarec/linkage_arm.S b/libpcsxcore/new_dynarec/linkage_arm.S
index f18488ce..970d91c7 100644
--- a/libpcsxcore/new_dynarec/linkage_arm.S
+++ b/libpcsxcore/new_dynarec/linkage_arm.S
@@ -26,7 +26,7 @@
 
 #ifdef __MACH__
 #define dynarec_local ESYM(dynarec_local)
-#define add_link ESYM(add_link)
+#define add_jump_out ESYM(add_jump_out)
 #define new_recompile_block ESYM(new_recompile_block)
 #define get_addr ESYM(get_addr)
 #define get_addr_ht ESYM(get_addr_ht)
@@ -177,7 +177,7 @@ ptr_hash_table:
 	orrcs	r2, r6, #2048
 	ldr	r5, [r3, r2, lsl #2]
 	lsl	r12, r12, #8
-	add	r6, r1, r12, asr #6
+	add	r6, r1, r12, asr #6 /* old target */
 	mov	r8, #0
 	/* jump_in lookup */
 1:
@@ -197,7 +197,7 @@ ptr_hash_table:
 
 	mov	r5, r1
 	mov	r1, r6
-	bl	add_link
+	bl	add_jump_out
 	sub	r2, r8, r5
 	and	r1, r7, #0xff000000
 	lsl	r2, r2, #6
diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index bd553b88..f81c9853 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -1172,7 +1172,7 @@ static void invalidate_page(u_int page)
     inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
     void *host_addr=find_extjump_insn(head->addr);
     mark_clear_cache(host_addr);
-    set_jump_target(host_addr, head->addr);
+    set_jump_target(host_addr, head->addr); // point back to dyna_linker
     next=head->next;
     free(head);
     head=next;
@@ -1321,14 +1321,13 @@ static void do_invstub(int n)
 
 // Add an entry to jump_out after making a link
 // src should point to code by emit_extjump2()
-void add_link(u_int vaddr,void *src)
+void add_jump_out(u_int vaddr,void *src)
 {
   u_int page=get_page(vaddr);
-  inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
+  inv_debug("add_jump_out: %p -> %x (%d)\n",src,vaddr,page);
   check_extjump2(src);
   ll_add(jump_out+page,vaddr,src);
-  //void *ptr=get_pointer(src);
-  //inv_debug("add_link: Pointer is to %p\n",ptr);
+  //inv_debug("add_jump_out: to %p\n",get_pointer(src));
 }
 
 // If a code block was found to be unmodified (bit was set in
@@ -5972,7 +5971,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
     emit_extjump_ds(branch_addr, target_addr);
     if(compiled_target_addr) {
       set_jump_target(branch_addr, compiled_target_addr);
-      add_link(target_addr,stub);
+      add_jump_out(target_addr,stub);
     }
     else set_jump_target(branch_addr, stub);
     if(likely[i]) {
@@ -5987,7 +5986,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
     emit_extjump_ds(branch_addr, target_addr);
     if(compiled_target_addr) {
       set_jump_target(branch_addr, compiled_target_addr);
-      add_link(target_addr,stub);
+      add_jump_out(target_addr,stub);
     }
     else set_jump_target(branch_addr, stub);
   }
@@ -6001,7 +6000,7 @@ static void pagespan_ds()
   u_int page=get_page(vaddr);
   u_int vpage=get_vpage(vaddr);
   ll_add(jump_dirty+vpage,vaddr,(void *)out);
-  do_dirty_stub_ds();
+  do_dirty_stub_ds(slen*4);
   ll_add(jump_in+page,vaddr,(void *)out);
   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
   if(regs[0].regmap[HOST_CCREG]!=CCREG)
@@ -9249,10 +9248,14 @@ int new_recompile_block(u_int addr)
       literal_pool_jumpover(256);
     }
   }
-  //assert(is_ujump(i-2));
+
+  assert(slen > 0);
+  if (itype[slen-1] == INTCALL) {
+    // no ending needed for this block since INTCALL never returns
+  }
   // If the block did not end with an unconditional branch,
   // add a jump to the next instruction.
-  if(i>1) {
+  else if (i > 1) {
     if(!is_ujump(i-2)&&itype[i-1]!=SPAN) {
       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
       assert(i==slen);
@@ -9332,7 +9335,7 @@ int new_recompile_block(u_int addr)
       emit_extjump(link_addr[i].addr, link_addr[i].target);
       if (addr) {
         set_jump_target(link_addr[i].addr, addr);
-        add_link(link_addr[i].target,stub);
+        add_jump_out(link_addr[i].target,stub);
       }
       else
         set_jump_target(link_addr[i].addr, stub);
@@ -9350,8 +9353,17 @@ int new_recompile_block(u_int addr)
 //#endif
     }
   }
+
+  u_int source_len = slen*4;
+  if (itype[slen-1] == INTCALL && source_len > 4)
+    // no need to treat the last instruction as compiled
+    // as interpreter fully handles it
+    source_len -= 4;
+
+  if ((u_char *)copy + source_len > (u_char *)shadow + sizeof(shadow))
+    copy = shadow;
+
   // External Branch Targets (jump_in)
-  if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
   for(i=0;i