X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=libpcsxcore%2Fnew_dynarec%2Fassem_arm64.c;h=17517eff7c65925772b3046cc5bb864adc3991fd;hp=a0c628b5892242ff230593bf9e5f6160bd212a2c;hb=e3c6bdb5e46f72f063bb7f588da6588ac1893b17;hpb=d1e4ebd9988a9a5d9fb38b89f19e24b9ab6029d7 diff --git a/libpcsxcore/new_dynarec/assem_arm64.c b/libpcsxcore/new_dynarec/assem_arm64.c index a0c628b5..17517eff 100644 --- a/libpcsxcore/new_dynarec/assem_arm64.c +++ b/libpcsxcore/new_dynarec/assem_arm64.c @@ -20,16 +20,9 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#include "pcnt.h" #include "arm_features.h" -#if defined(BASE_ADDR_FIXED) -#elif defined(BASE_ADDR_DYNAMIC) -u_char *translation_cache; -#else -u_char translation_cache[1 << TARGET_SIZE_2] __attribute__((aligned(4096))); -#endif -static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)]; - #define CALLER_SAVE_REGS 0x0007ffff #define unused __attribute__((unused)) @@ -43,25 +36,26 @@ static void set_jump_target(void *addr, void *target) u_int *ptr = addr; intptr_t offset = (u_char *)target - (u_char *)addr; - if((*ptr&0xFC000000)==0x14000000) { + if ((*ptr&0xFC000000) == 0x14000000) { // b assert(offset>=-134217728LL&&offset<134217728LL); *ptr=(*ptr&0xFC000000)|((offset>>2)&0x3ffffff); } - else if((*ptr&0xff000000)==0x54000000) { + else if ((*ptr&0xff000000) == 0x54000000 // b.cond + || (*ptr&0x7e000000) == 0x34000000) { // cbz/cbnz // Conditional branch are limited to +/- 1MB // block max size is 256k so branching beyond the +/- 1MB limit // should only happen when jumping to an already compiled block (see add_link) // a workaround would be to do a trampoline jump via a stub at the end of the block - assert(offset>=-1048576LL&&offset<1048576LL); + assert(-1048576 <= offset && offset < 1048576); *ptr=(*ptr&0xFF00000F)|(((offset>>2)&0x7ffff)<<5); } - else if((*ptr&0x9f000000)==0x10000000) { //adr + else if((*ptr&0x9f000000)==0x10000000) { // adr // generated by do_miniht_insert assert(offset>=-1048576LL&&offset<1048576LL); *ptr=(*ptr&0x9F00001F)|(offset&0x3)<<29|((offset>>2)&0x7ffff)<<5; } else - assert(0); // should not happen + abort(); // should not happen } // from a pointer to external jump stub (which was produced by emit_extjump2) @@ -75,62 +69,21 @@ static void *find_extjump_insn(void *stub) } // find where external branch is liked to using addr of it's stub: -// get address that insn one after stub loads (dyna_linker arg1), +// get address that the stub loads (dyna_linker arg1), // treat it as a pointer to branch insn, // return addr where that branch jumps to static void *get_pointer(void *stub) { int *i_ptr = find_extjump_insn(stub); - assert((*i_ptr&0xfc000000) == 0x14000000); // b - return (u_char *)i_ptr+(((signed int)(*i_ptr<<6)>>6)<<2); -} - -// Find the "clean" entry point from a "dirty" entry point -// by skipping past the call to verify_code -static void *get_clean_addr(void *addr) -{ + if ((*i_ptr&0xfc000000) == 0x14000000) // b + return i_ptr + ((signed int)(*i_ptr<<6)>>6); + if ((*i_ptr&0xff000000) == 0x54000000 // b.cond + || (*i_ptr&0x7e000000) == 0x34000000) // cbz/cbnz + return i_ptr + ((signed int)(*i_ptr<<8)>>13); assert(0); return NULL; } -static int verify_dirty(u_int *ptr) -{ - assert(0); - return 0; -} - -static int isclean(void *addr) -{ - u_int *ptr = addr; - return (*ptr >> 24) != 0x58; // the only place ldr (literal) is used -} - -static uint64_t get_from_ldr_literal(const u_int *i) -{ - 
signed int ofs; - assert((i[0] & 0xff000000) == 0x58000000); - ofs = i[0] << 8; - ofs >>= 5+8; - return *(uint64_t *)(i + ofs); -} - -static uint64_t get_from_movz(const u_int *i) -{ - assert((i[0] & 0x7fe00000) == 0x52800000); - return (i[0] >> 5) & 0xffff; -} - -// get source that block at addr was compiled from (host pointers) -static void get_bounds(void *addr, u_char **start, u_char **end) -{ - const u_int *ptr = addr; - assert((ptr[0] & 0xff00001f) == 0x58000001); // ldr x1, source - assert((ptr[1] & 0xff00001f) == 0x58000002); // ldr x2, copy - assert((ptr[2] & 0xffe0001f) == 0x52800003); // movz w3, #slen*4 - *start = (u_char *)get_from_ldr_literal(&ptr[0]); - *end = *start + get_from_movz(&ptr[2]); -} - // Allocate a specific ARM register. static void alloc_arm_reg(struct regstat *cur,int i,signed char reg,int hr) { @@ -208,6 +161,13 @@ static u_int rm_rd(u_int rm, u_int rd) } */ +static u_int rn_rd(u_int rn, u_int rd) +{ + assert(rn < 31); + assert(rd < 31); + return (rn << 5) | rd; +} + static u_int rm_rn_rd(u_int rm, u_int rn, u_int rd) { assert(rm < 32); @@ -216,6 +176,12 @@ static u_int rm_rn_rd(u_int rm, u_int rn, u_int rd) return (rm << 16) | (rn << 5) | rd; } +static u_int rm_ra_rn_rd(u_int rm, u_int ra, u_int rn, u_int rd) +{ + assert(ra < 32); + return rm_rn_rd(rm, rn, rd) | (ra << 10); +} + static u_int imm7_rt2_rn_rt(u_int imm7, u_int rt2, u_int rn, u_int rt) { assert(imm7 < 0x80); @@ -304,7 +270,7 @@ static uint32_t is_mask(u_int value) // non-empty sequence of ones (possibly rotated) with the remainder zero. static uint32_t is_rotated_mask(u_int value) { - if (value == 0) + if (value == 0 || value == ~0) return 0; if (is_mask((value - 1) | value)) return 1; @@ -328,11 +294,11 @@ static void gen_logical_imm(u_int value, u_int *immr, u_int *imms) lzeros = __builtin_clz(value); tzeros = __builtin_ctz(value); ones = 32 - lzeros - tzeros; - *immr = 31 - tzeros; + *immr = lzeros; *imms = 31 - ones; return; } - assert(0); + abort(); } static void emit_mov(u_int rs, u_int rt) @@ -347,13 +313,6 @@ static void emit_mov64(u_int rs, u_int rt) output_w32(0xaa000000 | rm_rn_rd(rs, WZR, rt)); } -static void emit_movs(u_int rs, u_int rt) -{ - assert(0); // misleading - assem_debug("movs %s,%s\n", regname[rt], regname[rs]); - output_w32(0x31000000 | imm12_rn_rd(0, rs, rt)); -} - static void emit_add(u_int rs1, u_int rs2, u_int rt) { assem_debug("add %s,%s,%s\n", regname[rt], regname[rs1], regname[rs2]); @@ -366,16 +325,9 @@ static void emit_add64(u_int rs1, u_int rs2, u_int rt) output_w32(0x8b000000 | rm_rn_rd(rs2, rs1, rt)); } -#pragma GCC diagnostic ignored "-Wunused-function" -static void emit_adds(u_int rs1, u_int rs2, u_int rt) -{ - assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); - output_w32(0x2b000000 | rm_rn_rd(rs2, rs1, rt)); -} - static void emit_adds64(u_int rs1, u_int rs2, u_int rt) { - assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + assem_debug("adds %s,%s,%s\n",regname64[rt],regname64[rs1],regname64[rs2]); output_w32(0xab000000 | rm_rn_rd(rs2, rs1, rt)); } @@ -391,6 +343,12 @@ static void emit_sub(u_int rs1, u_int rs2, u_int rt) output_w32(0x4b000000 | rm_imm6_rn_rd(rs2, 0, rs1, rt)); } +static void emit_sub_asrimm(u_int rs1, u_int rs2, u_int shift, u_int rt) +{ + assem_debug("sub %s,%s,%s,asr #%u\n",regname[rt],regname[rs1],regname[rs2],shift); + output_w32(0x4b800000 | rm_imm6_rn_rd(rs2, shift, rs1, rt)); +} + static void emit_movz(u_int imm, u_int rt) { assem_debug("movz %s,#%#x\n", regname[rt], imm); @@ -424,7 +382,7 @@ 
static void emit_movk(u_int imm,u_int rt) static void emit_movk_lsl16(u_int imm,u_int rt) { assert(imm<65536); - assem_debug("movk %s, #%#x, lsl #16\n", regname[rt], imm); + assem_debug("movk %s,#%#x,lsl #16\n", regname[rt], imm); output_w32(0x72a00000 | imm16_rd(imm, rt)); } @@ -463,7 +421,7 @@ static void emit_readword(void *addr, u_int rt) output_w32(0xb9400000 | imm12_rn_rd(offset >> 2, FP, rt)); } else - assert(0); + abort(); } static void emit_readdword(void *addr, u_int rt) @@ -473,6 +431,17 @@ static void emit_readdword(void *addr, u_int rt) assem_debug("ldr %s,[x%d+%#lx]\n", regname64[rt], FP, offset); output_w32(0xf9400000 | imm12_rn_rd(offset >> 3, FP, rt)); } + else + abort(); +} + +static void emit_readshword(void *addr, u_int rt) +{ + uintptr_t offset = (u_char *)addr - (u_char *)&dynarec_local; + if (!(offset & 1) && offset <= 8190) { + assem_debug("ldrsh %s,[x%d+%#lx]\n", regname[rt], FP, offset); + output_w32(0x79c00000 | imm12_rn_rd(offset >> 1, FP, rt)); + } else assert(0); } @@ -516,10 +485,10 @@ static void emit_writedword(u_int rt, void *addr) uintptr_t offset = (u_char *)addr - (u_char *)&dynarec_local; if (!(offset & 7) && offset <= 32760) { assem_debug("str %s,[x%d+%#lx]\n", regname64[rt], FP, offset); - output_w32(0xf9000000 | imm12_rn_rd(offset >> 2, FP, rt)); + output_w32(0xf9000000 | imm12_rn_rd(offset >> 3, FP, rt)); } else - assert(0); + abort(); } static void emit_storereg(u_int r, u_int hr) @@ -547,13 +516,7 @@ static void emit_testimm(u_int rs, u_int imm) assem_debug("tst %s,#%#x\n", regname[rs], imm); assert(is_rotated_mask(imm)); // good enough for PCSX gen_logical_imm(imm, &immr, &imms); - output_w32(0xb9000000 | n_immr_imms_rn_rd(0, immr, imms, rs, WZR)); -} - -static void emit_testeqimm(u_int rs,int imm) -{ - assem_debug("tsteq %s,$%d\n",regname[rs],imm); - assert(0); // TODO eliminate emit_testeqimm + output_w32(0x72000000 | n_immr_imms_rn_rd(0, immr, imms, rs, WZR)); } static void emit_not(u_int rs,u_int rt) @@ -562,12 +525,6 @@ static void emit_not(u_int rs,u_int rt) output_w32(0x2a200000 | rm_rn_rd(rs, WZR, rt)); } -static void emit_mvnmi(u_int rs,u_int rt) -{ - assem_debug("mvnmi %s,%s\n",regname[rt],regname[rs]); - assert(0); // eliminate -} - static void emit_and(u_int rs1,u_int rs2,u_int rt) { assem_debug("and %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); @@ -580,6 +537,12 @@ static void emit_or(u_int rs1,u_int rs2,u_int rt) output_w32(0x2a000000 | rm_rn_rd(rs2, rs1, rt)); } +static void emit_bic(u_int rs1,u_int rs2,u_int rt) +{ + assem_debug("bic %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x0a200000 | rm_rn_rd(rs2, rs1, rt)); +} + static void emit_orrshl_imm(u_int rs,u_int imm,u_int rt) { assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs],imm); @@ -592,12 +555,24 @@ static void emit_orrshr_imm(u_int rs,u_int imm,u_int rt) output_w32(0x2a400000 | rm_imm6_rn_rd(rs, imm, rt, rt)); } +static void emit_bicsar_imm(u_int rs,u_int imm,u_int rt) +{ + assem_debug("bic %s,%s,%s,asr #%d\n",regname[rt],regname[rt],regname[rs],imm); + output_w32(0x0aa00000 | rm_imm6_rn_rd(rs, imm, rt, rt)); +} + static void emit_xor(u_int rs1,u_int rs2,u_int rt) { assem_debug("eor %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); output_w32(0x4a000000 | rm_rn_rd(rs2, rs1, rt)); } +static void emit_xorsar_imm(u_int rs1, u_int rs2, u_int imm, u_int rt) +{ + assem_debug("eor %s,%s,%s,asr #%d\n",regname[rt],regname[rs1],regname[rs2],imm); + output_w32(0x4a800000 | rm_imm6_rn_rd(rs2, imm, rs1, rt)); +} + static void 
emit_addimm_s(u_int s, u_int is64, u_int rs, uintptr_t imm, u_int rt) { unused const char *st = s ? "s" : ""; @@ -608,7 +583,7 @@ static void emit_addimm_s(u_int s, u_int is64, u_int rs, uintptr_t imm, u_int rt output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm, rs, rt)); } else if (-imm < 4096) { - assem_debug("sub%s %s,%s,%#lx\n", st, regname[rt], regname[rs], imm); + assem_debug("sub%s %s,%s,%#lx\n", st, regname[rt], regname[rs], -imm); output_w32(0x51000000 | is64 | s | imm12_rn_rd(-imm, rs, rt)); } else if (imm < 16777216) { @@ -616,7 +591,7 @@ static void emit_addimm_s(u_int s, u_int is64, u_int rs, uintptr_t imm, u_int rt output_w32(0x11400000 | is64 | imm12_rn_rd(imm >> 12, rs, rt)); if ((imm & 0xfff) || s) { assem_debug("add%s %s,%s,#%#lx\n",st,regname[rt],regname[rs],imm&0xfff); - output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm, rt, rt)); + output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm & 0xfff, rt, rt)); } } else if (-imm < 16777216) { @@ -628,7 +603,7 @@ static void emit_addimm_s(u_int s, u_int is64, u_int rs, uintptr_t imm, u_int rt } } else - assert(0); + abort(); } static void emit_addimm(u_int rs, uintptr_t imm, u_int rt) @@ -720,15 +695,16 @@ static void emit_shlimm(u_int rs,u_int imm,u_int rt) output_w32(0x53000000 | n_immr_imms_rn_rd(0, (31-imm)+1, 31-imm, rs, rt)); } -static unused void emit_lslpls_imm(u_int rs,int imm,u_int rt) +static void emit_shrimm(u_int rs,u_int imm,u_int rt) { - assert(0); // eliminate + assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x53000000 | n_immr_imms_rn_rd(0, imm, 31, rs, rt)); } -static void emit_shrimm(u_int rs,u_int imm,u_int rt) +static void emit_shrimm64(u_int rs,u_int imm,u_int rt) { assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm); - output_w32(0x53000000 | n_immr_imms_rn_rd(0, imm, 31, rs, rt)); + output_w32(0xd3400000 | n_immr_imms_rn_rd(0, imm, 63, rs, rt)); } static void emit_sarimm(u_int rs,u_int imm,u_int rt) @@ -739,7 +715,7 @@ static void emit_sarimm(u_int rs,u_int imm,u_int rt) static void emit_rorimm(u_int rs,u_int imm,u_int rt) { - assem_debug("ror %s,%s,#%d",regname[rt],regname[rs],imm); + assem_debug("ror %s,%s,#%d\n",regname[rt],regname[rs],imm); output_w32(0x13800000 | rm_imm6_rn_rd(rs, imm, rs, rt)); } @@ -751,7 +727,7 @@ static void emit_signextend16(u_int rs, u_int rt) static void emit_shl(u_int rs,u_int rshift,u_int rt) { - assem_debug("lsl %s,%s,%s",regname[rt],regname[rs],regname[rshift]); + assem_debug("lsl %s,%s,%s\n",regname[rt],regname[rs],regname[rshift]); output_w32(0x1ac02000 | rm_rn_rd(rshift, rs, rt)); } @@ -778,7 +754,7 @@ static void emit_cmpimm(u_int rs, u_int imm) output_w32(0x31000000 | imm12_rn_rd(-imm, rs, WZR)); } else if (imm < 16777216 && !(imm & 0xfff)) { - assem_debug("cmp %s,#%#x,lsl #12\n", regname[rs], imm >> 12); + assem_debug("cmp %s,#%#x\n", regname[rs], imm); output_w32(0x71400000 | imm12_rn_rd(imm >> 12, rs, WZR)); } else { @@ -819,9 +795,10 @@ static void emit_cmovb_imm(int imm,u_int rt) emit_cmov_imm(COND_CC, COND_CS, imm, rt); } -static void emit_cmovs_imm(int imm,u_int rt) +static void emit_cmoveq_reg(u_int rs,u_int rt) { - emit_cmov_imm(COND_MI, COND_PL, imm, rt); + assem_debug("csel %s,%s,%s,eq\n",regname[rt],regname[rs],regname[rt]); + output_w32(0x1a800000 | (COND_EQ << 12) | rm_rn_rd(rt, rs, rt)); } static void emit_cmovne_reg(u_int rs,u_int rt) @@ -836,12 +813,24 @@ static void emit_cmovl_reg(u_int rs,u_int rt) output_w32(0x1a800000 | (COND_LT << 12) | rm_rn_rd(rt, rs, rt)); } +static void emit_cmovb_reg(u_int rs,u_int rt) +{ + 
assem_debug("csel %s,%s,%s,cc\n",regname[rt],regname[rs],regname[rt]); + output_w32(0x1a800000 | (COND_CC << 12) | rm_rn_rd(rt, rs, rt)); +} + static void emit_cmovs_reg(u_int rs,u_int rt) { assem_debug("csel %s,%s,%s,mi\n",regname[rt],regname[rs],regname[rt]); output_w32(0x1a800000 | (COND_MI << 12) | rm_rn_rd(rt, rs, rt)); } +static void emit_csinvle_reg(u_int rs1,u_int rs2,u_int rt) +{ + assem_debug("csinv %s,%s,%s,le\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x5a800000 | (COND_LE << 12) | rm_rn_rd(rs2, rs1, rt)); +} + static void emit_slti32(u_int rs,int imm,u_int rt) { if(rs!=rt) emit_zeroreg(rt); @@ -898,6 +887,12 @@ static void emit_set_if_carry32(u_int rs1, u_int rs2, u_int rt) emit_cmovb_imm(1,rt); } +static int can_jump_or_call(const void *a) +{ + intptr_t diff = (u_char *)a - out; + return (-134217728 <= diff && diff <= 134217727); +} + static void emit_call(const void *a) { intptr_t diff = (u_char *)a - out; @@ -906,7 +901,7 @@ static void emit_call(const void *a) if (-134217728 <= diff && diff <= 134217727) output_w32(0x94000000 | ((diff >> 2) & 0x03ffffff)); else - assert(0); + abort(); } static void emit_jmp(const void *a) @@ -972,16 +967,23 @@ static void emit_jc(const void *a) output_w32(0x54000000 | (offset << 5) | COND_CS); } -static void emit_jcc(const void *a) +static void emit_cb(u_int isnz, u_int is64, const void *a, u_int r) { - assem_debug("bcc %p\n", a); + assem_debug("cb%sz %s,%p\n", isnz?"n":"", is64?regname64[r]:regname[r], a); u_int offset = genjmpcc(a); - output_w32(0x54000000 | (offset << 5) | COND_CC); + is64 = is64 ? 0x80000000 : 0; + isnz = isnz ? 0x01000000 : 0; + output_w32(0x34000000 | is64 | isnz | imm19_rt(offset, r)); +} + +static void emit_cbz(const void *a, u_int r) +{ + emit_cb(0, 0, a, r); } static void emit_jmpreg(u_int r) { - assem_debug("br %s", regname64[r]); + assem_debug("br %s\n", regname64[r]); output_w32(0xd61f0000 | rm_rn_rd(0, r, 0)); } @@ -1000,10 +1002,21 @@ static void emit_adr(void *addr, u_int rt) { intptr_t offset = (u_char *)addr - out; assert(-1048576 <= offset && offset < 1048576); + assert(rt < 31); assem_debug("adr x%d,#%#lx\n", rt, offset); output_w32(0x10000000 | ((offset&0x3) << 29) | (((offset>>2)&0x7ffff) << 5) | rt); } +static void emit_adrp(void *addr, u_int rt) +{ + intptr_t offset = ((intptr_t)addr & ~0xfffl) - ((intptr_t)out & ~0xfffl); + assert(-4294967296l <= offset && offset < 4294967296l); + assert(rt < 31); + offset >>= 12; + assem_debug("adrp %s,#%#lx(000)\n",regname64[rt],offset); + output_w32(0x90000000 | ((offset&0x3)<<29) | (((offset>>2)&0x7ffff)<<5) | rt); +} + static void emit_readword_indexed(int offset, u_int rs, u_int rt) { assem_debug("ldur %s,[%s+%#x]\n",regname[rt],regname64[rs],offset); @@ -1095,55 +1108,80 @@ static void emit_movzwl_indexed(int offset, u_int rs, u_int rt) static void emit_writeword_indexed(u_int rt, int offset, u_int rs) { - assem_debug("str %s,[%s+%#x]\n", regname[rt], regname[rs], offset); - if (!(offset & 3) && offset <= 16380) + if (!(offset & 3) && (u_int)offset <= 16380) { + assem_debug("str %s,[%s+%#x]\n", regname[rt], regname[rs], offset); output_w32(0xb9000000 | imm12_rn_rd(offset >> 2, rs, rt)); + } + else if (-256 <= offset && offset < 256) { + assem_debug("stur %s,[%s+%#x]\n", regname[rt], regname[rs], offset); + output_w32(0xb8000000 | imm9_rn_rt(offset & 0x1ff, rs, rt)); + } else assert(0); } static void emit_writehword_indexed(u_int rt, int offset, u_int rs) { - assem_debug("strh %s,[%s+%#x]\n", regname[rt], regname[rs], offset); - if (!(offset & 1) 
&& offset <= 8190) + if (!(offset & 1) && (u_int)offset <= 8190) { + assem_debug("strh %s,[%s+%#x]\n", regname[rt], regname64[rs], offset); output_w32(0x79000000 | imm12_rn_rd(offset >> 1, rs, rt)); + } + else if (-256 <= offset && offset < 256) { + assem_debug("sturh %s,[%s+%#x]\n", regname[rt], regname64[rs], offset); + output_w32(0x78000000 | imm9_rn_rt(offset & 0x1ff, rs, rt)); + } else assert(0); } static void emit_writebyte_indexed(u_int rt, int offset, u_int rs) { - assem_debug("strb %s,[%s+%#x]\n", regname[rt], regname[rs], offset); - if ((u_int)offset < 4096) + if ((u_int)offset < 4096) { + assem_debug("strb %s,[%s+%#x]\n", regname[rt], regname64[rs], offset); output_w32(0x39000000 | imm12_rn_rd(offset, rs, rt)); + } + else if (-256 <= offset && offset < 256) { + assem_debug("sturb %s,[%s+%#x]\n", regname[rt], regname64[rs], offset); + output_w32(0x38000000 | imm9_rn_rt(offset & 0x1ff, rs, rt)); + } else assert(0); } -static void emit_umull(u_int rs1, u_int rs2, u_int hi, u_int lo) +static void emit_umull(u_int rs1, u_int rs2, u_int rt) { - assem_debug("umull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]); - assert(rs1<16); - assert(rs2<16); - assert(hi<16); - assert(lo<16); - assert(0); + assem_debug("umull %s,%s,%s\n",regname64[rt],regname[rs1],regname[rs2]); + output_w32(0x9ba00000 | rm_ra_rn_rd(rs2, WZR, rs1, rt)); } -static void emit_smull(u_int rs1, u_int rs2, u_int hi, u_int lo) +static void emit_smull(u_int rs1, u_int rs2, u_int rt) { - assem_debug("smull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]); - assert(rs1<16); - assert(rs2<16); - assert(hi<16); - assert(lo<16); - assert(0); + assem_debug("smull %s,%s,%s\n",regname64[rt],regname[rs1],regname[rs2]); + output_w32(0x9b200000 | rm_ra_rn_rd(rs2, WZR, rs1, rt)); } -static void emit_clz(u_int rs,u_int rt) +static void emit_msub(u_int rs1, u_int rs2, u_int rs3, u_int rt) +{ + assem_debug("msub %s,%s,%s,%s\n",regname[rt],regname[rs1],regname[rs2],regname[rs3]); + output_w32(0x1b008000 | rm_ra_rn_rd(rs2, rs3, rs1, rt)); +} + +static void emit_sdiv(u_int rs1, u_int rs2, u_int rt) +{ + assem_debug("sdiv %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x1ac00c00 | rm_rn_rd(rs2, rs1, rt)); +} + +static void emit_udiv(u_int rs1, u_int rs2, u_int rt) +{ + assem_debug("udiv %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x1ac00800 | rm_rn_rd(rs2, rs1, rt)); +} + +static void emit_clz(u_int rs, u_int rt) { assem_debug("clz %s,%s\n",regname[rt],regname[rs]); - assert(0); + output_w32(0x5ac01000 | rn_rd(rs, rt)); } // special case for checking invalid_code @@ -1151,28 +1189,23 @@ static void emit_cmpmem_indexedsr12_reg(u_int rbase, u_int r, u_int imm) { host_tempreg_acquire(); emit_shrimm(r, 12, HOST_TEMPREG); - assem_debug("ldrb %s,[%s,%s]",regname[HOST_TEMPREG],regname64[rbase],regname64[HOST_TEMPREG]); - output_w32(0x38606800 | rm_rn_rd(HOST_TEMPREG, rbase, HOST_TEMPREG)); + assem_debug("ldrb %s,[%s,%s,uxtw]\n",regname[HOST_TEMPREG],regname64[rbase],regname[HOST_TEMPREG]); + output_w32(0x38604800 | rm_rn_rd(HOST_TEMPREG, rbase, HOST_TEMPREG)); emit_cmpimm(HOST_TEMPREG, imm); host_tempreg_release(); } -static void emit_orrne_imm(u_int rs,int imm,u_int rt) -{ - assem_debug("orrne %s,%s,#%#x\n",regname[rt],regname[rs],imm); - assert(0); -} - -static void emit_andne_imm(u_int rs,int imm,u_int rt) +// special for loadlr_assemble, rs2 is destroyed +static void emit_bic_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt) { - assem_debug("andne 
%s,%s,#%#x\n",regname[rt],regname[rs],imm); - assert(0); + emit_shl(rs2, shift, rs2); + emit_bic(rs1, rs2, rt); } -static unused void emit_addpl_imm(u_int rs,int imm,u_int rt) +static void emit_bic_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt) { - assem_debug("addpl %s,%s,#%#x\n",regname[rt],regname[rs],imm); - assert(0); + emit_shr(rs2, shift, rs2); + emit_bic(rs1, rs2, rt); } static void emit_loadlp_ofs(u_int ofs, u_int rt) @@ -1266,7 +1299,7 @@ static void emit_extjump2(u_char *addr, u_int target, void *linker) // addr is in the current recompiled block (max 256k) // offset shouldn't exceed +/-1MB emit_adr(addr, 1); - emit_jmp(linker); + emit_far_jump(linker); } static void check_extjump2(void *src) @@ -1280,9 +1313,11 @@ static void check_extjump2(void *src) static void emit_movimm_from(u_int rs_val, u_int rs, u_int rt_val, u_int rt) { int diff = rt_val - rs_val; - if ((-4096 <= diff && diff < 4096) - || (-16777216 <= diff && diff < 16777216 && !(diff & 0xfff))) + if ((-4096 < diff && diff < 4096) + || (-16777216 < diff && diff < 16777216 && !(diff & 0xfff))) emit_addimm(rs, diff, rt); + else if (rt_val == ~rs_val) + emit_not(rs, rt); else if (is_rotated_mask(rs_val ^ rt_val)) emit_xorimm(rs, rs_val ^ rt_val, rt); else @@ -1293,8 +1328,9 @@ static void emit_movimm_from(u_int rs_val, u_int rs, u_int rt_val, u_int rt) static int is_similar_value(u_int v1, u_int v2) { int diff = v1 - v2; - return (-4096 <= diff && diff < 4096) - || (-16777216 <= diff && diff < 16777216 && !(diff & 0xfff)) + return (-4096 < diff && diff < 4096) + || (-16777216 < diff && diff < 16777216 && !(diff & 0xfff)) + || v1 == ~v2 || is_rotated_mask(v1 ^ v2); } @@ -1326,7 +1362,7 @@ static void loadstore_extend(enum stub_type type, u_int rs, u_int rt) case STOREH_STUB: emit_ubfm(rs, 15, rt); break; case LOADW_STUB: case STOREW_STUB: if (rs != rt) emit_mov(rs, rt); break; - default: assert(0); + default: assert(0); } } @@ -1381,7 +1417,7 @@ static void do_readstub(int n) case LOADH_STUB: emit_ldrsh_dualindexed(temp2,rs,rt); break; case LOADHU_STUB: emit_ldrh_dualindexed(temp2,rs,rt); break; case LOADW_STUB: emit_ldr_dualindexed(temp2,rs,rt); break; - default: assert(0); + default: assert(0); } } if(regs_saved) { @@ -1406,8 +1442,8 @@ static void do_readstub(int n) int cc=get_reg(i_regmap,CCREG); if(cc<0) emit_loadreg(CCREG,2); - emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2); - emit_call(handler); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2); + emit_far_call(handler); // (no cycle reload after read) if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) { loadstore_extend(type,0,rt); @@ -1418,7 +1454,8 @@ static void do_readstub(int n) emit_jmp(stubs[n].retaddr); } -static void inline_readstub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist) +static void inline_readstub(enum stub_type type, int i, u_int addr, + const signed char regmap[], int target, int adj, u_int reglist) { int rs=get_reg(regmap,target); int rt=get_reg(regmap,target); @@ -1428,7 +1465,7 @@ static void inline_readstub(enum stub_type type, int i, u_int addr, signed char uintptr_t host_addr = 0; void *handler; int cc=get_reg(regmap,CCREG); - //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt)) + //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj),cc,target?rs:-1,rt)) // return; handler = get_direct_memhandler(mem_rtab, addr, type, &host_addr); if (handler == NULL) { @@ -1469,13 +1506,16 @@ static void inline_readstub(enum stub_type type, int i, u_int addr, signed 
char emit_mov(rs,0); if(cc<0) emit_loadreg(CCREG,2); - emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2); - if(is_dynamic) - emit_readdword(&mem_rtab,1); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj),2); + if(is_dynamic) { + uintptr_t l1 = ((uintptr_t *)mem_rtab)[addr>>12] << 1; + emit_adrp((void *)l1, 1); + emit_addimm64(1, l1 & 0xfff, 1); + } else - emit_call(do_memhandler_pre); + emit_far_call(do_memhandler_pre); - emit_call(handler); + emit_far_call(handler); // (no cycle reload after read) if(rt>=0&&rt1[i]!=0) @@ -1551,7 +1591,7 @@ static void do_writestub(int n) case STOREB_STUB: handler=jump_handler_write8; break; case STOREH_STUB: handler=jump_handler_write16; break; case STOREW_STUB: handler=jump_handler_write32; break; - default: assert(0); + default: assert(0); } assert(handler); pass_args(rs,rt); @@ -1562,10 +1602,10 @@ static void do_writestub(int n) int cc=get_reg(i_regmap,CCREG); if(cc<0) emit_loadreg(CCREG,2); - emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2); // returns new cycle_count - emit_call(handler); - emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc); + emit_far_call(handler); + emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d),cc<0?2:cc); if(cc<0) emit_storereg(CCREG,2); if(restore_jump) @@ -1574,7 +1614,8 @@ static void do_writestub(int n) emit_jmp(stubs[n].retaddr); } -static void inline_writestub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist) +static void inline_writestub(enum stub_type type, int i, u_int addr, + const signed char regmap[], int target, int adj, u_int reglist) { int rs = get_reg(regmap,-1); int rt = get_reg(regmap,target); @@ -1605,21 +1646,47 @@ static void inline_writestub(enum stub_type type, int i, u_int addr, signed char cc = cc_use = get_reg(regmap, CCREG); if (cc < 0) emit_loadreg(CCREG, (cc_use = 2)); - emit_addimm(cc_use, CLOCK_ADJUST(adj+1), 2); + emit_addimm(cc_use, CLOCK_ADJUST(adj), 2); - emit_call(do_memhandler_pre); - emit_call(handler); - emit_call(do_memhandler_post); - emit_addimm(0, -CLOCK_ADJUST(adj+1), cc_use); + emit_far_call(do_memhandler_pre); + emit_far_call(handler); + emit_far_call(do_memhandler_post); + emit_addimm(0, -CLOCK_ADJUST(adj), cc_use); if (cc < 0) emit_storereg(CCREG, cc_use); restore_regs(reglist); } -static void do_unalignedwritestub(int n) +static int verify_code_arm64(const void *source, const void *copy, u_int size) { - assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4); - assert(0); + int ret = memcmp(source, copy, size); + //printf("%s %p,%#x = %d\n", __func__, source, size, ret); + return ret; +} + +// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr +static void do_dirty_stub_base(u_int vaddr) +{ + assert(slen <= MAXBLOCK); + emit_loadlp_ofs(0, 0); // ldr x1, source + emit_loadlp_ofs(0, 1); // ldr x2, copy + emit_movz(slen*4, 2); + emit_far_call(verify_code_arm64); + void *jmp = out; + emit_cbz(0, 0); + emit_movz(vaddr & 0xffff, 0); + emit_movk_lsl16(vaddr >> 16, 0); + emit_far_call(get_addr); + emit_jmpreg(0); + set_jump_target(jmp, out); +} + +static void assert_dirty_stub(const u_int *ptr) +{ + assert((ptr[0] & 0xff00001f) == 0x58000000); // ldr x0, source + assert((ptr[1] & 0xff00001f) == 0x58000001); // ldr x1, copy + assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #slen*4 + assert( ptr[8] == 0xd61f0000); // br x0 } static void set_loadlp(u_int *loadl, void *lit) @@ -1631,17 +1698,6 @@ static void set_loadlp(u_int *loadl, void *lit) *loadl 
|= (ofs >> 2) << 5; } -// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr -static void do_dirty_stub_emit_args(u_int arg0) -{ - assert(slen <= MAXBLOCK); - emit_loadlp_ofs(0, 1); // ldr x1, source - emit_loadlp_ofs(0, 2); // ldr x2, copy - emit_movz(slen*4, 3); - emit_movz(arg0 & 0xffff, 0); - emit_movk_lsl16(arg0 >> 16, 0); -} - static void do_dirty_stub_emit_literals(u_int *loadlps) { set_loadlp(&loadlps[0], out); @@ -1654,8 +1710,7 @@ static void *do_dirty_stub(int i) { assem_debug("do_dirty_stub %x\n",start+i*4); u_int *loadlps = (void *)out; - do_dirty_stub_emit_args(start + i*4); - emit_call(verify_code); + do_dirty_stub_base(start + i*4); void *entry = out; load_regs_entry(i); if (entry == out) @@ -1665,38 +1720,259 @@ static void *do_dirty_stub(int i) return entry; } -static void do_dirty_stub_ds() +static void do_dirty_stub_ds(void) { - do_dirty_stub_emit_args(start + 1); u_int *loadlps = (void *)out; - emit_call(verify_code_ds); + do_dirty_stub_base(start + 1); + void *lit_jumpover = out; emit_jmp(out + 8*2); do_dirty_stub_emit_literals(loadlps); + set_jump_target(lit_jumpover, out); +} + +static uint64_t get_from_ldr_literal(const u_int *i) +{ + signed int ofs; + assert((i[0] & 0xff000000) == 0x58000000); + ofs = i[0] << 8; + ofs >>= 5+8; + return *(uint64_t *)(i + ofs); +} + +static uint64_t get_from_movz(const u_int *i) +{ + assert((i[0] & 0x7fe00000) == 0x52800000); + return (i[0] >> 5) & 0xffff; +} + +// Find the "clean" entry point from a "dirty" entry point +// by skipping past the call to verify_code +static void *get_clean_addr(u_int *addr) +{ + assert_dirty_stub(addr); + return addr + 9; +} + +static int verify_dirty(const u_int *ptr) +{ + const void *source, *copy; + u_int len; + assert_dirty_stub(ptr); + source = (void *)get_from_ldr_literal(&ptr[0]); // ldr x1, source + copy = (void *)get_from_ldr_literal(&ptr[1]); // ldr x1, copy + len = get_from_movz(&ptr[2]); // movz w3, #slen*4 + return !memcmp(source, copy, len); +} + +static int isclean(void *addr) +{ + const u_int *ptr = addr; + if ((*ptr >> 24) == 0x58) { // the only place ldr (literal) is used + assert_dirty_stub(ptr); + return 0; + } + return 1; +} + +// get source that block at addr was compiled from (host pointers) +static void get_bounds(void *addr, u_char **start, u_char **end) +{ + const u_int *ptr = addr; + assert_dirty_stub(ptr); + *start = (u_char *)get_from_ldr_literal(&ptr[0]); // ldr x1, source + *end = *start + get_from_movz(&ptr[2]); // movz w3, #slen*4 } /* Special assem */ -#define shift_assemble shift_assemble_arm64 +static void c2op_prologue(u_int op, int i, const struct regstat *i_regs, u_int reglist) +{ + save_load_regs_all(1, reglist); + cop2_call_stall_check(op, i, i_regs, 0); +#ifdef PCNT + emit_movimm(op, 0); + emit_far_call(pcnt_gte_start); +#endif + // pointer to cop2 regs + emit_addimm64(FP, (u_char *)&psxRegs.CP2D.r[0] - (u_char *)&dynarec_local, 0); +} -static void shift_assemble_arm64(int i,struct regstat *i_regs) +static void c2op_epilogue(u_int op,u_int reglist) { - assert(0); +#ifdef PCNT + emit_movimm(op, 0); + emit_far_call(pcnt_gte_end); +#endif + save_load_regs_all(0, reglist); } -#define loadlr_assemble loadlr_assemble_arm64 -static void loadlr_assemble_arm64(int i,struct regstat *i_regs) +static void c2op_assemble(int i, const struct regstat *i_regs) { - assert(0); + u_int c2op=source[i]&0x3f; + u_int hr,reglist_full=0,reglist; + int need_flags,need_ir; + for(hr=0;hrregmap[hr]>=0) reglist_full|=1<>63); // +1 because of how liveness detection 
works + need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00; + assem_debug("gte op %08x, unneeded %016lx, need_flags %d, need_ir %d\n", + source[i],gte_unneeded[i+1],need_flags,need_ir); + if(HACK_ENABLED(NDHACK_GTE_NO_FLAGS)) + need_flags=0; + //int shift = (source[i] >> 19) & 1; + //int lm = (source[i] >> 10) & 1; + switch(c2op) { + default: + (void)need_ir; + c2op_prologue(c2op, i, i_regs, reglist); + emit_movimm(source[i],1); // opcode + emit_writeword(1,&psxRegs.code); + emit_far_call(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]); + break; + } + c2op_epilogue(c2op,reglist); + } } -static void c2op_assemble(int i,struct regstat *i_regs) +static void c2op_ctc2_31_assemble(signed char sl, signed char temp) { - assert(0); + //value = value & 0x7ffff000; + //if (value & 0x7f87e000) value |= 0x80000000; + emit_andimm(sl, 0x7fffe000, temp); + emit_testimm(temp, 0xff87ffff); + emit_andimm(sl, 0x7ffff000, temp); + host_tempreg_acquire(); + emit_orimm(temp, 0x80000000, HOST_TEMPREG); + emit_cmovne_reg(HOST_TEMPREG, temp); + host_tempreg_release(); + assert(0); // testing needed +} + +static void do_mfc2_31_one(u_int copr,signed char temp) +{ + emit_readshword(®_cop2d[copr],temp); + emit_bicsar_imm(temp,31,temp); + emit_cmpimm(temp,0xf80); + emit_csinvle_reg(temp,WZR,temp); // if (temp > 0xf80) temp = ~0; + emit_andimm(temp,0xf80,temp); +} + +static void c2op_mfc2_29_assemble(signed char tl, signed char temp) +{ + if (temp < 0) { + host_tempreg_acquire(); + temp = HOST_TEMPREG; + } + do_mfc2_31_one(9,temp); + emit_shrimm(temp,7,tl); + do_mfc2_31_one(10,temp); + emit_orrshr_imm(temp,2,tl); + do_mfc2_31_one(11,temp); + emit_orrshl_imm(temp,3,tl); + emit_writeword(tl,®_cop2d[29]); + + if (temp == HOST_TEMPREG) + host_tempreg_release(); } static void multdiv_assemble_arm64(int i,struct regstat *i_regs) { - assert(0); + // case 0x18: MULT + // case 0x19: MULTU + // case 0x1A: DIV + // case 0x1B: DIVU + if(rs1[i]&&rs2[i]) + { + switch(opcode2[i]) + { + case 0x18: // MULT + case 0x19: // MULTU + { + signed char m1=get_reg(i_regs->regmap,rs1[i]); + signed char m2=get_reg(i_regs->regmap,rs2[i]); + signed char hi=get_reg(i_regs->regmap,HIREG); + signed char lo=get_reg(i_regs->regmap,LOREG); + assert(m1>=0); + assert(m2>=0); + assert(hi>=0); + assert(lo>=0); + + if(opcode2[i]==0x18) // MULT + emit_smull(m1,m2,hi); + else // MULTU + emit_umull(m1,m2,hi); + + emit_mov(hi,lo); + emit_shrimm64(hi,32,hi); + break; + } + case 0x1A: // DIV + case 0x1B: // DIVU + { + signed char numerator=get_reg(i_regs->regmap,rs1[i]); + signed char denominator=get_reg(i_regs->regmap,rs2[i]); + signed char quotient=get_reg(i_regs->regmap,LOREG); + signed char remainder=get_reg(i_regs->regmap,HIREG); + assert(numerator>=0); + assert(denominator>=0); + assert(quotient>=0); + assert(remainder>=0); + + if (opcode2[i] == 0x1A) // DIV + emit_sdiv(numerator,denominator,quotient); + else // DIVU + emit_udiv(numerator,denominator,quotient); + emit_msub(quotient,denominator,numerator,remainder); + + // div 0 quotient (remainder is already correct) + host_tempreg_acquire(); + if (opcode2[i] == 0x1A) // DIV + emit_sub_asrimm(0,numerator,31,HOST_TEMPREG); + else + emit_movimm(~0,HOST_TEMPREG); + emit_test(denominator,denominator); + emit_cmoveq_reg(HOST_TEMPREG,quotient); + host_tempreg_release(); + break; + } + default: + assert(0); + } + } + else + { + signed char hr=get_reg(i_regs->regmap,HIREG); + signed char lr=get_reg(i_regs->regmap,LOREG); + if ((opcode2[i]==0x1A || opcode2[i]==0x1B) && rs2[i]==0) // div 0 + { + if (rs1[i]) { + signed char 
numerator = get_reg(i_regs->regmap, rs1[i]); + assert(numerator >= 0); + if (hr >= 0) + emit_mov(numerator,hr); + if (lr >= 0) { + if (opcode2[i] == 0x1A) // DIV + emit_sub_asrimm(0,numerator,31,lr); + else + emit_movimm(~0,lr); + } + } + else { + if (hr >= 0) emit_zeroreg(hr); + if (lr >= 0) emit_movimm(~0,lr); + } + } + else + { + // Multiply by zero is zero. + if (hr >= 0) emit_zeroreg(hr); + if (lr >= 0) emit_zeroreg(lr); + } + } } #define multdiv_assemble multdiv_assemble_arm64 @@ -1704,7 +1980,7 @@ static void do_jump_vaddr(u_int rs) { if (rs != 0) emit_mov(rs, 0); - emit_call(get_addr_ht); + emit_far_call(get_addr_ht); emit_jmpreg(0); } @@ -1748,51 +2024,60 @@ static void do_miniht_insert(u_int return_address,u_int rt,int temp) { emit_writeword(rt,&mini_ht[(return_address&0xFF)>>3][0]); } -static void mark_clear_cache(void *target) +static void clear_cache_arm64(char *start, char *end) { - u_long offset = (u_char *)target - translation_cache; - u_int mask = 1u << ((offset >> 12) & 31); - if (!(needs_clear_cache[offset >> 17] & mask)) { - char *start = (char *)((u_long)target & ~4095ul); - start_tcache_write(start, start + 4096); - needs_clear_cache[offset >> 17] |= mask; + // Don't rely on GCC's __clear_cache implementation, as it caches + // icache/dcache cache line sizes, that can vary between cores on + // big.LITTLE architectures. + uint64_t addr, ctr_el0; + static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff; + size_t isize, dsize; + + __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0)); + isize = 4 << ((ctr_el0 >> 0) & 0xf); + dsize = 4 << ((ctr_el0 >> 16) & 0xf); + + // use the global minimum cache line size + icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize; + dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize; + + /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is + not required for instruction to data coherence. */ + if ((ctr_el0 & (1 << 28)) == 0x0) { + addr = (uint64_t)start & ~(uint64_t)(dsize - 1); + for (; addr < (uint64_t)end; addr += dsize) + // use "civac" instead of "cvau", as this is the suggested workaround for + // Cortex-A53 errata 819472, 826319, 827319 and 824069. + __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory"); } -} + __asm__ volatile("dsb ish" : : : "memory"); -// Clearing the cache is rather slow on ARM Linux, so mark the areas -// that need to be cleared, and then only clear these areas once. -static void do_clear_cache() -{ - int i,j; - for (i=0;i<(1<<(TARGET_SIZE_2-17));i++) - { - u_int bitmap=needs_clear_cache[i]; - if(bitmap) { - u_char *start, *end; - for(j=0;j<32;j++) - { - if(bitmap&(1<tramp.f - (u_char *)&ndrc->tramp.ops; + struct tramp_insns *ops = ndrc->tramp.ops; + size_t i; + assert(!(diff & 3)); + start_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops)); + for (i = 0; i < ARRAY_SIZE(ndrc->tramp.ops); i++) { + ops[i].ldr = 0x58000000 | imm19_rt(diff >> 2, 17); // ldr x17, [=val] + ops[i].br = 0xd61f0000 | rm_rn_rd(0, 17, 0); // br x17 + } + end_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops)); } // vim:shiftwidth=2:expandtab
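
Note on the MULT/DIV lowering in multdiv_assemble_arm64 above (not part of the patch): the plain-C model below is only a sketch of what the emitted smull/umull, lsr #32 and msub sequences compute; the helper names model_mult and model_divu_remainder are illustrative, not code from the tree. It also shows why the remainder path needs no zero-divisor fixup: ARM64 sdiv/udiv return 0 for a zero divisor instead of trapping, so msub leaves the numerator in HI, matching the "remainder is already correct" comment — only the quotient (LO) gets the extra csel fixup in the emitted code.

/* Plain-C model of the MULT/DIV lowering above -- sketch for clarity only. */
#include <stdint.h>

/* MULT: one smull yields the full 64-bit product; LO is its low half
 * (32-bit mov), HI its high half (lsr #32 on the 64-bit register). */
static void model_mult(int32_t rs, int32_t rt, uint32_t *hi, uint32_t *lo)
{
  int64_t prod = (int64_t)rs * (int64_t)rt;   /* smull x(hi), w(rs), w(rt) */
  *lo = (uint32_t)prod;                       /* mov  w(lo), w(hi)        */
  *hi = (uint32_t)((uint64_t)prod >> 32);     /* lsr  x(hi), x(hi), #32   */
}

/* DIVU remainder: msub computes numerator - quotient*denominator.
 * With a zero divisor the quotient from udiv is 0, so the remainder
 * evaluates to the numerator, which is exactly what HI must hold. */
static uint32_t model_divu_remainder(uint32_t n, uint32_t d)
{
  uint32_t q = d ? n / d : 0;                 /* udiv (yields 0 when d == 0) */
  return n - q * d;                           /* msub                        */
}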