* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include "pcnt.h"
#include "arm_features.h"
-#if defined(BASE_ADDR_FIXED)
-#elif defined(BASE_ADDR_DYNAMIC)
-u_char *translation_cache;
-#else
-u_char translation_cache[1 << TARGET_SIZE_2] __attribute__((aligned(4096)));
-#endif
-static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
-
-#define CALLER_SAVE_REGS 0x0007ffff
-
#define unused __attribute__((unused))
void do_memhandler_pre();
u_int *ptr = addr;
intptr_t offset = (u_char *)target - (u_char *)addr;
- if((*ptr&0xFC000000)==0x14000000) {
+ if ((*ptr&0xFC000000) == 0x14000000) { // b
assert(offset>=-134217728LL&&offset<134217728LL);
*ptr=(*ptr&0xFC000000)|((offset>>2)&0x3ffffff);
}
- else if((*ptr&0xff000000)==0x54000000) {
+ else if ((*ptr&0xff000000) == 0x54000000 // b.cond
+ || (*ptr&0x7e000000) == 0x34000000) { // cbz/cbnz
 // Conditional branches are limited to +/- 1MB
// block max size is 256k so branching beyond the +/- 1MB limit
- // should only happen when jumping to an already compiled block (see add_link)
+ // should only happen when jumping to an already compiled block (see add_jump_out)
// a workaround would be to do a trampoline jump via a stub at the end of the block
- assert(offset>=-1048576LL&&offset<1048576LL);
+ assert(-1048576 <= offset && offset < 1048576);
*ptr=(*ptr&0xFF00000F)|(((offset>>2)&0x7ffff)<<5);
}
- else if((*ptr&0x9f000000)==0x10000000) { //adr
+ else if((*ptr&0x9f000000)==0x10000000) { // adr
// generated by do_miniht_insert
assert(offset>=-1048576LL&&offset<1048576LL);
*ptr=(*ptr&0x9F00001F)|(offset&0x3)<<29|((offset>>2)&0x7ffff)<<5;
}
else
- assert(0); // should not happen
+ abort(); // should not happen
}
// from a pointer to external jump stub (which was produced by emit_extjump2)
}
// find where an external branch is linked to using the addr of its stub:
-// get address that insn one after stub loads (dyna_linker arg1),
+// get address that the stub loads (dyna_linker arg1),
// treat it as a pointer to branch insn,
// return addr where that branch jumps to
static void *get_pointer(void *stub)
{
int *i_ptr = find_extjump_insn(stub);
- assert((*i_ptr&0xfc000000) == 0x14000000); // b
- return (u_char *)i_ptr+(((signed int)(*i_ptr<<6)>>6)<<2);
-}
-
-// Find the "clean" entry point from a "dirty" entry point
-// by skipping past the call to verify_code
-static void *get_clean_addr(void *addr)
-{
+ if ((*i_ptr&0xfc000000) == 0x14000000) // b
+ return i_ptr + ((signed int)(*i_ptr<<6)>>6);
+ if ((*i_ptr&0xff000000) == 0x54000000 // b.cond
+ || (*i_ptr&0x7e000000) == 0x34000000) // cbz/cbnz
+ return i_ptr + ((signed int)(*i_ptr<<8)>>13);
assert(0);
return NULL;
}
-static int verify_dirty(u_int *ptr)
-{
- assert(0);
- return 0;
-}
-
-static int isclean(void *addr)
-{
- u_int *ptr = addr;
- return (*ptr >> 24) != 0x58; // the only place ldr (literal) is used
-}
-
-static uint64_t get_from_ldr_literal(const u_int *i)
-{
- signed int ofs;
- assert((i[0] & 0xff000000) == 0x58000000);
- ofs = i[0] << 8;
- ofs >>= 5+8;
- return *(uint64_t *)(i + ofs);
-}
-
-static uint64_t get_from_movz(const u_int *i)
-{
- assert((i[0] & 0x7fe00000) == 0x52800000);
- return (i[0] >> 5) & 0xffff;
-}
-
-// get source that block at addr was compiled from (host pointers)
-static void get_bounds(void *addr, u_char **start, u_char **end)
-{
- const u_int *ptr = addr;
- assert((ptr[0] & 0xff00001f) == 0x58000001); // ldr x1, source
- assert((ptr[1] & 0xff00001f) == 0x58000002); // ldr x2, copy
- assert((ptr[2] & 0xffe0001f) == 0x52800003); // movz w3, #slen*4
- *start = (u_char *)get_from_ldr_literal(&ptr[0]);
- *end = *start + get_from_movz(&ptr[2]);
-}
-
// Allocate a specific ARM register.
static void alloc_arm_reg(struct regstat *cur,int i,signed char reg,int hr)
{
}
*/
+static u_int rn_rd(u_int rn, u_int rd)
+{
+ assert(rn < 31);
+ assert(rd < 31);
+ return (rn << 5) | rd;
+}
+
static u_int rm_rn_rd(u_int rm, u_int rn, u_int rd)
{
assert(rm < 32);
return (rm << 16) | (rn << 5) | rd;
}
+static u_int rm_ra_rn_rd(u_int rm, u_int ra, u_int rn, u_int rd)
+{
+ assert(ra < 32);
+ return rm_rn_rd(rm, rn, rd) | (ra << 10);
+}
+
static u_int imm7_rt2_rn_rt(u_int imm7, u_int rt2, u_int rn, u_int rt)
{
assert(imm7 < 0x80);
// non-empty sequence of ones (possibly rotated) with the remainder zero.
static uint32_t is_rotated_mask(u_int value)
{
- if (value == 0)
+ if (value == 0 || value == ~0)
return 0;
if (is_mask((value - 1) | value))
return 1;
lzeros = __builtin_clz(value);
tzeros = __builtin_ctz(value);
ones = 32 - lzeros - tzeros;
- *immr = 31 - tzeros;
+ *immr = lzeros;
*imms = 31 - ones;
return;
}
- assert(0);
+ abort();
}
static void emit_mov(u_int rs, u_int rt)
output_w32(0xaa000000 | rm_rn_rd(rs, WZR, rt));
}
-static void emit_movs(u_int rs, u_int rt)
-{
- assert(0); // misleading
- assem_debug("movs %s,%s\n", regname[rt], regname[rs]);
- output_w32(0x31000000 | imm12_rn_rd(0, rs, rt));
-}
-
static void emit_add(u_int rs1, u_int rs2, u_int rt)
{
assem_debug("add %s,%s,%s\n", regname[rt], regname[rs1], regname[rs2]);
output_w32(0x8b000000 | rm_rn_rd(rs2, rs1, rt));
}
-#pragma GCC diagnostic ignored "-Wunused-function"
-static void emit_adds(u_int rs1, u_int rs2, u_int rt)
-{
- assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
- output_w32(0x2b000000 | rm_rn_rd(rs2, rs1, rt));
-}
-
static void emit_adds64(u_int rs1, u_int rs2, u_int rt)
{
- assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ assem_debug("adds %s,%s,%s\n",regname64[rt],regname64[rs1],regname64[rs2]);
output_w32(0xab000000 | rm_rn_rd(rs2, rs1, rt));
}
+#define emit_adds_ptr emit_adds64
static void emit_neg(u_int rs, u_int rt)
{
output_w32(0x4b000000 | rm_imm6_rn_rd(rs2, 0, rs1, rt));
}
+static void emit_sub_asrimm(u_int rs1, u_int rs2, u_int shift, u_int rt)
+{
+ assem_debug("sub %s,%s,%s,asr #%u\n",regname[rt],regname[rs1],regname[rs2],shift);
+ output_w32(0x4b800000 | rm_imm6_rn_rd(rs2, shift, rs1, rt));
+}
+
static void emit_movz(u_int imm, u_int rt)
{
assem_debug("movz %s,#%#x\n", regname[rt], imm);
static void emit_movk_lsl16(u_int imm,u_int rt)
{
assert(imm<65536);
- assem_debug("movk %s, #%#x, lsl #16\n", regname[rt], imm);
+ assem_debug("movk %s,#%#x,lsl #16\n", regname[rt], imm);
output_w32(0x72a00000 | imm16_rd(imm, rt));
}
output_w32(0xb9400000 | imm12_rn_rd(offset >> 2, FP, rt));
}
else
- assert(0);
+ abort();
}
static void emit_readdword(void *addr, u_int rt)
assem_debug("ldr %s,[x%d+%#lx]\n", regname64[rt], FP, offset);
output_w32(0xf9400000 | imm12_rn_rd(offset >> 3, FP, rt));
}
+ else
+ abort();
+}
+#define emit_readptr emit_readdword
+
+static void emit_readshword(void *addr, u_int rt)
+{
+ uintptr_t offset = (u_char *)addr - (u_char *)&dynarec_local;
+ if (!(offset & 1) && offset <= 8190) {
+ assem_debug("ldrsh %s,[x%d+%#lx]\n", regname[rt], FP, offset);
+ output_w32(0x79c00000 | imm12_rn_rd(offset >> 1, FP, rt));
+ }
else
assert(0);
}
case CCREG: addr = &cycle_count; break;
case CSREG: addr = &Status; break;
case INVCP: addr = &invc_ptr; is64 = 1; break;
+ case ROREG: addr = &ram_offset; is64 = 1; break;
default: assert(r < 34); break;
}
if (is64)
uintptr_t offset = (u_char *)addr - (u_char *)&dynarec_local;
if (!(offset & 7) && offset <= 32760) {
assem_debug("str %s,[x%d+%#lx]\n", regname64[rt], FP, offset);
- output_w32(0xf9000000 | imm12_rn_rd(offset >> 2, FP, rt));
+ output_w32(0xf9000000 | imm12_rn_rd(offset >> 3, FP, rt));
}
else
- assert(0);
+ abort();
}
static void emit_storereg(u_int r, u_int hr)
assem_debug("tst %s,#%#x\n", regname[rs], imm);
assert(is_rotated_mask(imm)); // good enough for PCSX
gen_logical_imm(imm, &immr, &imms);
- output_w32(0xb9000000 | n_immr_imms_rn_rd(0, immr, imms, rs, WZR));
-}
-
-static void emit_testeqimm(u_int rs,int imm)
-{
- assem_debug("tsteq %s,$%d\n",regname[rs],imm);
- assert(0); // TODO eliminate emit_testeqimm
+ output_w32(0x72000000 | n_immr_imms_rn_rd(0, immr, imms, rs, WZR));
}
static void emit_not(u_int rs,u_int rt)
output_w32(0x2a200000 | rm_rn_rd(rs, WZR, rt));
}
-static void emit_mvnmi(u_int rs,u_int rt)
-{
- assem_debug("mvnmi %s,%s\n",regname[rt],regname[rs]);
- assert(0); // eliminate
-}
-
static void emit_and(u_int rs1,u_int rs2,u_int rt)
{
assem_debug("and %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
output_w32(0x2a000000 | rm_rn_rd(rs2, rs1, rt));
}
+static void emit_bic(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("bic %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x0a200000 | rm_rn_rd(rs2, rs1, rt));
+}
+
static void emit_orrshl_imm(u_int rs,u_int imm,u_int rt)
{
assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs],imm);
output_w32(0x2a400000 | rm_imm6_rn_rd(rs, imm, rt, rt));
}
+static void emit_bicsar_imm(u_int rs,u_int imm,u_int rt)
+{
+ assem_debug("bic %s,%s,%s,asr #%d\n",regname[rt],regname[rt],regname[rs],imm);
+ output_w32(0x0aa00000 | rm_imm6_rn_rd(rs, imm, rt, rt));
+}
+
static void emit_xor(u_int rs1,u_int rs2,u_int rt)
{
assem_debug("eor %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
output_w32(0x4a000000 | rm_rn_rd(rs2, rs1, rt));
}
+static void emit_xorsar_imm(u_int rs1, u_int rs2, u_int imm, u_int rt)
+{
+ assem_debug("eor %s,%s,%s,asr #%d\n",regname[rt],regname[rs1],regname[rs2],imm);
+ output_w32(0x4a800000 | rm_imm6_rn_rd(rs2, imm, rs1, rt));
+}
+
static void emit_addimm_s(u_int s, u_int is64, u_int rs, uintptr_t imm, u_int rt)
{
unused const char *st = s ? "s" : "";
output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm, rs, rt));
}
else if (-imm < 4096) {
- assem_debug("sub%s %s,%s,%#lx\n", st, regname[rt], regname[rs], imm);
+ assem_debug("sub%s %s,%s,%#lx\n", st, regname[rt], regname[rs], -imm);
output_w32(0x51000000 | is64 | s | imm12_rn_rd(-imm, rs, rt));
}
else if (imm < 16777216) {
output_w32(0x11400000 | is64 | imm12_rn_rd(imm >> 12, rs, rt));
if ((imm & 0xfff) || s) {
assem_debug("add%s %s,%s,#%#lx\n",st,regname[rt],regname[rs],imm&0xfff);
- output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm, rt, rt));
+ output_w32(0x11000000 | is64 | s | imm12_rn_rd(imm & 0xfff, rt, rt));
}
}
else if (-imm < 16777216) {
}
}
else
- assert(0);
+ abort();
}
static void emit_addimm(u_int rs, uintptr_t imm, u_int rt)
emit_addimm_s(1, 0, rt, imm, rt);
}
-static void emit_addimm_no_flags(u_int imm,u_int rt)
-{
- emit_addimm(rt,imm,rt);
-}
-
static void emit_logicop_imm(u_int op, u_int rs, u_int imm, u_int rt)
{
const char *names[] = { "and", "orr", "eor", "ands" };
output_w32(0x53000000 | n_immr_imms_rn_rd(0, (31-imm)+1, 31-imm, rs, rt));
}
-static unused void emit_lslpls_imm(u_int rs,int imm,u_int rt)
+static void emit_shrimm(u_int rs,u_int imm,u_int rt)
{
- assert(0); // eliminate
+ assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x53000000 | n_immr_imms_rn_rd(0, imm, 31, rs, rt));
}
-static void emit_shrimm(u_int rs,u_int imm,u_int rt)
+static void emit_shrimm64(u_int rs,u_int imm,u_int rt)
{
assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm);
- output_w32(0x53000000 | n_immr_imms_rn_rd(0, imm, 31, rs, rt));
+ output_w32(0xd3400000 | n_immr_imms_rn_rd(0, imm, 63, rs, rt));
}
static void emit_sarimm(u_int rs,u_int imm,u_int rt)
static void emit_rorimm(u_int rs,u_int imm,u_int rt)
{
- assem_debug("ror %s,%s,#%d",regname[rt],regname[rs],imm);
+ assem_debug("ror %s,%s,#%d\n",regname[rt],regname[rs],imm);
output_w32(0x13800000 | rm_imm6_rn_rd(rs, imm, rs, rt));
}
static void emit_shl(u_int rs,u_int rshift,u_int rt)
{
- assem_debug("lsl %s,%s,%s",regname[rt],regname[rs],regname[rshift]);
+ assem_debug("lsl %s,%s,%s\n",regname[rt],regname[rs],regname[rshift]);
output_w32(0x1ac02000 | rm_rn_rd(rshift, rs, rt));
}
output_w32(0x31000000 | imm12_rn_rd(-imm, rs, WZR));
}
else if (imm < 16777216 && !(imm & 0xfff)) {
- assem_debug("cmp %s,#%#x,lsl #12\n", regname[rs], imm >> 12);
+ assem_debug("cmp %s,#%#x\n", regname[rs], imm);
output_w32(0x71400000 | imm12_rn_rd(imm >> 12, rs, WZR));
}
else {
emit_cmov_imm(COND_CC, COND_CS, imm, rt);
}
-static void emit_cmovs_imm(int imm,u_int rt)
+static void emit_cmoveq_reg(u_int rs,u_int rt)
{
- emit_cmov_imm(COND_MI, COND_PL, imm, rt);
+ assem_debug("csel %s,%s,%s,eq\n",regname[rt],regname[rs],regname[rt]);
+ output_w32(0x1a800000 | (COND_EQ << 12) | rm_rn_rd(rt, rs, rt));
}
static void emit_cmovne_reg(u_int rs,u_int rt)
output_w32(0x1a800000 | (COND_LT << 12) | rm_rn_rd(rt, rs, rt));
}
+static void emit_cmovb_reg(u_int rs,u_int rt)
+{
+ assem_debug("csel %s,%s,%s,cc\n",regname[rt],regname[rs],regname[rt]);
+ output_w32(0x1a800000 | (COND_CC << 12) | rm_rn_rd(rt, rs, rt));
+}
+
static void emit_cmovs_reg(u_int rs,u_int rt)
{
assem_debug("csel %s,%s,%s,mi\n",regname[rt],regname[rs],regname[rt]);
output_w32(0x1a800000 | (COND_MI << 12) | rm_rn_rd(rt, rs, rt));
}
+static void emit_csinvle_reg(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("csinv %s,%s,%s,le\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x5a800000 | (COND_LE << 12) | rm_rn_rd(rs2, rs1, rt));
+}
+
static void emit_slti32(u_int rs,int imm,u_int rt)
{
if(rs!=rt) emit_zeroreg(rt);
emit_cmovb_imm(1,rt);
}
+static int can_jump_or_call(const void *a)
+{
+ intptr_t diff = (u_char *)a - out;
+ return (-134217728 <= diff && diff <= 134217727);
+}
+
static void emit_call(const void *a)
{
intptr_t diff = (u_char *)a - out;
if (-134217728 <= diff && diff <= 134217727)
output_w32(0x94000000 | ((diff >> 2) & 0x03ffffff));
else
- assert(0);
+ abort();
}
static void emit_jmp(const void *a)
output_w32(0x54000000 | (offset << 5) | COND_CS);
}
-static void emit_jcc(const void *a)
+static void emit_cb(u_int isnz, u_int is64, const void *a, u_int r)
{
- assem_debug("bcc %p\n", a);
+ assem_debug("cb%sz %s,%p\n", isnz?"n":"", is64?regname64[r]:regname[r], a);
u_int offset = genjmpcc(a);
- output_w32(0x54000000 | (offset << 5) | COND_CC);
+ is64 = is64 ? 0x80000000 : 0;
+ isnz = isnz ? 0x01000000 : 0;
+ output_w32(0x34000000 | is64 | isnz | imm19_rt(offset, r));
+}
+
+static void emit_cbz(const void *a, u_int r)
+{
+ emit_cb(0, 0, a, r);
}
static void emit_jmpreg(u_int r)
{
- assem_debug("br %s", regname64[r]);
+ assem_debug("br %s\n", regname64[r]);
output_w32(0xd61f0000 | rm_rn_rd(0, r, 0));
}
{
intptr_t offset = (u_char *)addr - out;
assert(-1048576 <= offset && offset < 1048576);
+ assert(rt < 31);
assem_debug("adr x%d,#%#lx\n", rt, offset);
output_w32(0x10000000 | ((offset&0x3) << 29) | (((offset>>2)&0x7ffff) << 5) | rt);
}
+static void emit_adrp(void *addr, u_int rt)
+{
+ intptr_t offset = ((intptr_t)addr & ~0xfffl) - ((intptr_t)out & ~0xfffl);
+ assert(-4294967296l <= offset && offset < 4294967296l);
+ assert(rt < 31);
+ offset >>= 12;
+ assem_debug("adrp %s,#%#lx(000)\n",regname64[rt],offset);
+ output_w32(0x90000000 | ((offset&0x3)<<29) | (((offset>>2)&0x7ffff)<<5) | rt);
+}
+
static void emit_readword_indexed(int offset, u_int rs, u_int rt)
{
assem_debug("ldur %s,[%s+%#x]\n",regname[rt],regname64[rs],offset);
assem_debug("ldr %s, [%s,%s, uxtw #3]\n",regname64[rt],regname64[rs1],regname[rs2]);
output_w32(0xf8605800 | rm_rn_rd(rs2, rs1, rt));
}
+#define emit_readptr_dualindexedx_ptrlen emit_readdword_dualindexedx8
static void emit_ldrb_dualindexed(u_int rs1, u_int rs2, u_int rt)
{
static void emit_writeword_indexed(u_int rt, int offset, u_int rs)
{
- assem_debug("str %s,[%s+%#x]\n", regname[rt], regname[rs], offset);
- if (!(offset & 3) && offset <= 16380)
+ if (!(offset & 3) && (u_int)offset <= 16380) {
+ assem_debug("str %s,[%s+%#x]\n", regname[rt], regname[rs], offset);
output_w32(0xb9000000 | imm12_rn_rd(offset >> 2, rs, rt));
+ }
+ else if (-256 <= offset && offset < 256) {
+ assem_debug("stur %s,[%s+%#x]\n", regname[rt], regname[rs], offset);
+ output_w32(0xb8000000 | imm9_rn_rt(offset & 0x1ff, rs, rt));
+ }
else
assert(0);
}
static void emit_writehword_indexed(u_int rt, int offset, u_int rs)
{
- assem_debug("strh %s,[%s+%#x]\n", regname[rt], regname[rs], offset);
- if (!(offset & 1) && offset <= 8190)
+ if (!(offset & 1) && (u_int)offset <= 8190) {
+ assem_debug("strh %s,[%s+%#x]\n", regname[rt], regname64[rs], offset);
output_w32(0x79000000 | imm12_rn_rd(offset >> 1, rs, rt));
+ }
+ else if (-256 <= offset && offset < 256) {
+ assem_debug("sturh %s,[%s+%#x]\n", regname[rt], regname64[rs], offset);
+ output_w32(0x78000000 | imm9_rn_rt(offset & 0x1ff, rs, rt));
+ }
else
assert(0);
}
static void emit_writebyte_indexed(u_int rt, int offset, u_int rs)
{
- assem_debug("strb %s,[%s+%#x]\n", regname[rt], regname[rs], offset);
- if ((u_int)offset < 4096)
+ if ((u_int)offset < 4096) {
+ assem_debug("strb %s,[%s+%#x]\n", regname[rt], regname64[rs], offset);
output_w32(0x39000000 | imm12_rn_rd(offset, rs, rt));
+ }
+ else if (-256 <= offset && offset < 256) {
+ assem_debug("sturb %s,[%s+%#x]\n", regname[rt], regname64[rs], offset);
+ output_w32(0x38000000 | imm9_rn_rt(offset & 0x1ff, rs, rt));
+ }
else
assert(0);
}
-static void emit_umull(u_int rs1, u_int rs2, u_int hi, u_int lo)
+static void emit_umull(u_int rs1, u_int rs2, u_int rt)
{
- assem_debug("umull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]);
- assert(rs1<16);
- assert(rs2<16);
- assert(hi<16);
- assert(lo<16);
- assert(0);
+ assem_debug("umull %s,%s,%s\n",regname64[rt],regname[rs1],regname[rs2]);
+ output_w32(0x9ba00000 | rm_ra_rn_rd(rs2, WZR, rs1, rt));
}
-static void emit_smull(u_int rs1, u_int rs2, u_int hi, u_int lo)
+static void emit_smull(u_int rs1, u_int rs2, u_int rt)
{
- assem_debug("smull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]);
- assert(rs1<16);
- assert(rs2<16);
- assert(hi<16);
- assert(lo<16);
- assert(0);
+ assem_debug("smull %s,%s,%s\n",regname64[rt],regname[rs1],regname[rs2]);
+ output_w32(0x9b200000 | rm_ra_rn_rd(rs2, WZR, rs1, rt));
+}
+
+static void emit_msub(u_int rs1, u_int rs2, u_int rs3, u_int rt)
+{
+ assem_debug("msub %s,%s,%s,%s\n",regname[rt],regname[rs1],regname[rs2],regname[rs3]);
+ output_w32(0x1b008000 | rm_ra_rn_rd(rs2, rs3, rs1, rt));
+}
+
+static void emit_sdiv(u_int rs1, u_int rs2, u_int rt)
+{
+ assem_debug("sdiv %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x1ac00c00 | rm_rn_rd(rs2, rs1, rt));
}
-static void emit_clz(u_int rs,u_int rt)
+static void emit_udiv(u_int rs1, u_int rs2, u_int rt)
+{
+ assem_debug("udiv %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x1ac00800 | rm_rn_rd(rs2, rs1, rt));
+}
+
+static void emit_clz(u_int rs, u_int rt)
{
assem_debug("clz %s,%s\n",regname[rt],regname[rs]);
- assert(0);
+ output_w32(0x5ac01000 | rn_rd(rs, rt));
}
// special case for checking invalid_code
{
host_tempreg_acquire();
emit_shrimm(r, 12, HOST_TEMPREG);
- assem_debug("ldrb %s,[%s,%s]",regname[HOST_TEMPREG],regname64[rbase],regname64[HOST_TEMPREG]);
- output_w32(0x38606800 | rm_rn_rd(HOST_TEMPREG, rbase, HOST_TEMPREG));
+ assem_debug("ldrb %s,[%s,%s,uxtw]\n",regname[HOST_TEMPREG],regname64[rbase],regname[HOST_TEMPREG]);
+ output_w32(0x38604800 | rm_rn_rd(HOST_TEMPREG, rbase, HOST_TEMPREG));
emit_cmpimm(HOST_TEMPREG, imm);
host_tempreg_release();
}
-static void emit_orrne_imm(u_int rs,int imm,u_int rt)
-{
- assem_debug("orrne %s,%s,#%#x\n",regname[rt],regname[rs],imm);
- assert(0);
-}
-
-static void emit_andne_imm(u_int rs,int imm,u_int rt)
+// special for loadlr_assemble, rs2 is destroyed
+static void emit_bic_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt)
{
- assem_debug("andne %s,%s,#%#x\n",regname[rt],regname[rs],imm);
- assert(0);
+ emit_shl(rs2, shift, rs2);
+ emit_bic(rs1, rs2, rt);
}
-static unused void emit_addpl_imm(u_int rs,int imm,u_int rt)
+static void emit_bic_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt)
{
- assem_debug("addpl %s,%s,#%#x\n",regname[rt],regname[rs],imm);
- assert(0);
+ emit_shr(rs2, shift, rs2);
+ emit_bic(rs1, rs2, rt);
}
static void emit_loadlp_ofs(u_int ofs, u_int rt)
// addr is in the current recompiled block (max 256k)
// offset shouldn't exceed +/-1MB
emit_adr(addr, 1);
- emit_jmp(linker);
+ emit_far_jump(linker);
}
static void check_extjump2(void *src)
static void emit_movimm_from(u_int rs_val, u_int rs, u_int rt_val, u_int rt)
{
int diff = rt_val - rs_val;
- if ((-4096 <= diff && diff < 4096)
- || (-16777216 <= diff && diff < 16777216 && !(diff & 0xfff)))
+ if ((-4096 < diff && diff < 4096)
+ || (-16777216 < diff && diff < 16777216 && !(diff & 0xfff)))
emit_addimm(rs, diff, rt);
+ else if (rt_val == ~rs_val)
+ emit_not(rs, rt);
else if (is_rotated_mask(rs_val ^ rt_val))
emit_xorimm(rs, rs_val ^ rt_val, rt);
else
static int is_similar_value(u_int v1, u_int v2)
{
int diff = v1 - v2;
- return (-4096 <= diff && diff < 4096)
- || (-16777216 <= diff && diff < 16777216 && !(diff & 0xfff))
+ return (-4096 < diff && diff < 4096)
+ || (-16777216 < diff && diff < 16777216 && !(diff & 0xfff))
+ || v1 == ~v2
|| is_rotated_mask(v1 ^ v2);
}
-// trashes r2
+static void emit_movimm_from64(u_int rs_val, u_int rs, uintptr_t rt_val, u_int rt)
+{
+ if (rt_val < 0x100000000ull) {
+ emit_movimm_from(rs_val, rs, rt_val, rt);
+ return;
+ }
+ // just move the whole thing. At least on Linux all addresses
+ // seem to be 48bit, so 3 insns - not great not terrible
+ assem_debug("movz %s,#%#lx\n", regname64[rt], rt_val & 0xffff);
+ output_w32(0xd2800000 | imm16_rd(rt_val & 0xffff, rt));
+ assem_debug("movk %s,#%#lx,lsl #16\n", regname64[rt], (rt_val >> 16) & 0xffff);
+ output_w32(0xf2a00000 | imm16_rd((rt_val >> 16) & 0xffff, rt));
+ assem_debug("movk %s,#%#lx,lsl #32\n", regname64[rt], (rt_val >> 32) & 0xffff);
+ output_w32(0xf2c00000 | imm16_rd((rt_val >> 32) & 0xffff, rt));
+ if (rt_val >> 48) {
+ assem_debug("movk %s,#%#lx,lsl #48\n", regname64[rt], (rt_val >> 48) & 0xffff);
+ output_w32(0xf2e00000 | imm16_rd((rt_val >> 48) & 0xffff, rt));
+ }
+}
+
+// trashes x2
static void pass_args64(u_int a0, u_int a1)
{
if(a0==1&&a1==0) {
case STOREH_STUB: emit_ubfm(rs, 15, rt); break;
case LOADW_STUB:
case STOREW_STUB: if (rs != rt) emit_mov(rs, rt); break;
- default: assert(0);
+ default: assert(0);
}
}
u_int reglist = stubs[n].e;
const signed char *i_regmap = i_regs->regmap;
int rt;
- if(itype[i]==C1LS||itype[i]==C2LS||itype[i]==LOADLR) {
+ if(dops[i].itype==C1LS||dops[i].itype==C2LS||dops[i].itype==LOADLR) {
rt=get_reg(i_regmap,FTEMP);
}else{
- rt=get_reg(i_regmap,rt1[i]);
+ rt=get_reg(i_regmap,dops[i].rt1);
}
assert(rs>=0);
int r,temp=-1,temp2=HOST_TEMPREG,regs_saved=0;
break;
}
}
- if(rt>=0&&rt1[i]!=0)
+ if(rt>=0&&dops[i].rt1!=0)
reglist&=~(1<<rt);
if(temp==-1) {
save_regs(reglist);
emit_adds64(temp2,temp2,temp2);
handler_jump=out;
emit_jc(0);
- if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
+ if(dops[i].itype==C1LS||dops[i].itype==C2LS||(rt>=0&&dops[i].rt1!=0)) {
switch(type) {
case LOADB_STUB: emit_ldrsb_dualindexed(temp2,rs,rt); break;
case LOADBU_STUB: emit_ldrb_dualindexed(temp2,rs,rt); break;
case LOADH_STUB: emit_ldrsh_dualindexed(temp2,rs,rt); break;
case LOADHU_STUB: emit_ldrh_dualindexed(temp2,rs,rt); break;
case LOADW_STUB: emit_ldr_dualindexed(temp2,rs,rt); break;
- default: assert(0);
+ default: assert(0);
}
}
if(regs_saved) {
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
- emit_call(handler);
+ emit_addimm(cc<0?2:cc,(int)stubs[n].d,2);
+ emit_far_call(handler);
// (no cycle reload after read)
- if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
+ if(dops[i].itype==C1LS||dops[i].itype==C2LS||(rt>=0&&dops[i].rt1!=0)) {
loadstore_extend(type,0,rt);
}
if(restore_jump)
emit_jmp(stubs[n].retaddr);
}
-static void inline_readstub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_readstub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs=get_reg(regmap,target);
int rt=get_reg(regmap,target);
uintptr_t host_addr = 0;
void *handler;
int cc=get_reg(regmap,CCREG);
- //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt))
+ //if(pcsx_direct_read(type,addr,adj,cc,target?rs:-1,rt))
// return;
handler = get_direct_memhandler(mem_rtab, addr, type, &host_addr);
if (handler == NULL) {
- if(rt<0||rt1[i]==0)
+ if(rt<0||dops[i].rt1==0)
return;
- if (addr != host_addr) {
- if (host_addr >= 0x100000000ull)
- abort(); // ROREG not implemented
- emit_movimm_from(addr, rs, host_addr, rs);
- }
+ if (addr != host_addr)
+ emit_movimm_from64(addr, rs, host_addr, rs);
switch(type) {
case LOADB_STUB: emit_movsbl_indexed(0,rs,rt); break;
case LOADBU_STUB: emit_movzbl_indexed(0,rs,rt); break;
}
return;
}
- is_dynamic=pcsxmem_is_handler_dynamic(addr);
- if(is_dynamic) {
+ is_dynamic = pcsxmem_is_handler_dynamic(addr);
+ if (is_dynamic) {
if(type==LOADB_STUB||type==LOADBU_STUB)
handler=jump_handler_read8;
if(type==LOADH_STUB||type==LOADHU_STUB)
}
// call a memhandler
- if(rt>=0&&rt1[i]!=0)
+ if(rt>=0&&dops[i].rt1!=0)
reglist&=~(1<<rt);
save_regs(reglist);
if(target==0)
emit_mov(rs,0);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
- if(is_dynamic)
- emit_readdword(&mem_rtab,1);
+ emit_addimm(cc<0?2:cc,adj,2);
+ if(is_dynamic) {
+ uintptr_t l1 = ((uintptr_t *)mem_rtab)[addr>>12] << 1;
+ emit_adrp((void *)l1, 1);
+ emit_addimm64(1, l1 & 0xfff, 1);
+ }
else
- emit_call(do_memhandler_pre);
+ emit_far_call(do_memhandler_pre);
- emit_call(handler);
+ emit_far_call(handler);
// (no cycle reload after read)
- if(rt>=0&&rt1[i]!=0)
+ if(rt>=0&&dops[i].rt1!=0)
loadstore_extend(type, 0, rt);
restore_regs(reglist);
}
u_int reglist=stubs[n].e;
signed char *i_regmap=i_regs->regmap;
int rt,r;
- if(itype[i]==C1LS||itype[i]==C2LS) {
+ if(dops[i].itype==C1LS||dops[i].itype==C2LS) {
rt=get_reg(i_regmap,r=FTEMP);
}else{
- rt=get_reg(i_regmap,r=rs2[i]);
+ rt=get_reg(i_regmap,r=dops[i].rs2);
}
assert(rs>=0);
assert(rt>=0);
emit_jmp(stubs[n].retaddr); // return address (invcode check)
set_jump_target(handler_jump, out);
- // TODO FIXME: regalloc should prefer callee-saved regs
if(!regs_saved)
save_regs(reglist);
void *handler=NULL;
case STOREB_STUB: handler=jump_handler_write8; break;
case STOREH_STUB: handler=jump_handler_write16; break;
case STOREW_STUB: handler=jump_handler_write32; break;
- default: assert(0);
+ default: assert(0);
}
assert(handler);
pass_args(rs,rt);
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,(int)stubs[n].d,2);
// returns new cycle_count
- emit_call(handler);
- emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+ emit_far_call(handler);
+ emit_addimm(0,-(int)stubs[n].d,cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
if(restore_jump)
emit_jmp(stubs[n].retaddr);
}
-static void inline_writestub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_writestub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs = get_reg(regmap,-1);
int rt = get_reg(regmap,target);
uintptr_t host_addr = 0;
void *handler = get_direct_memhandler(mem_wtab, addr, type, &host_addr);
if (handler == NULL) {
- if (addr != host_addr) {
- if (host_addr >= 0x100000000ull)
- abort(); // ROREG not implemented
- emit_movimm_from(addr, rs, host_addr, rs);
- }
+ if (addr != host_addr)
+ emit_movimm_from64(addr, rs, host_addr, rs);
switch (type) {
case STOREB_STUB: emit_writebyte_indexed(rt, 0, rs); break;
case STOREH_STUB: emit_writehword_indexed(rt, 0, rs); break;
cc = cc_use = get_reg(regmap, CCREG);
if (cc < 0)
emit_loadreg(CCREG, (cc_use = 2));
- emit_addimm(cc_use, CLOCK_ADJUST(adj+1), 2);
+ emit_addimm(cc_use, adj, 2);
- emit_call(do_memhandler_pre);
- emit_call(handler);
- emit_call(do_memhandler_post);
- emit_addimm(0, -CLOCK_ADJUST(adj+1), cc_use);
+ emit_far_call(do_memhandler_pre);
+ emit_far_call(handler);
+ emit_far_call(do_memhandler_post);
+ emit_addimm(0, -adj, cc_use);
if (cc < 0)
emit_storereg(CCREG, cc_use);
restore_regs(reglist);
}
-static void do_unalignedwritestub(int n)
+static int verify_code_arm64(const void *source, const void *copy, u_int size)
{
- assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
- assert(0);
+ int ret = memcmp(source, copy, size);
+ //printf("%s %p,%#x = %d\n", __func__, source, size, ret);
+ return ret;
+}
+
+// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
+static void do_dirty_stub_base(u_int vaddr, u_int source_len)
+{
+ assert(source_len <= MAXBLOCK*4);
+  emit_loadlp_ofs(0, 0); // ldr x0, source
+  emit_loadlp_ofs(0, 1); // ldr x1, copy
+ emit_movz(source_len, 2);
+ emit_far_call(verify_code_arm64);
+ void *jmp = out;
+ emit_cbz(0, 0);
+ emit_movz(vaddr & 0xffff, 0);
+ emit_movk_lsl16(vaddr >> 16, 0);
+ emit_far_call(get_addr);
+ emit_jmpreg(0);
+ set_jump_target(jmp, out);
+}
+
+static void assert_dirty_stub(const u_int *ptr)
+{
+ assert((ptr[0] & 0xff00001f) == 0x58000000); // ldr x0, source
+ assert((ptr[1] & 0xff00001f) == 0x58000001); // ldr x1, copy
+ assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #source_len
+ assert( ptr[8] == 0xd61f0000); // br x0
}
static void set_loadlp(u_int *loadl, void *lit)
*loadl |= (ofs >> 2) << 5;
}
-// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
-static void do_dirty_stub_emit_args(u_int arg0)
-{
- assert(slen <= MAXBLOCK);
- emit_loadlp_ofs(0, 1); // ldr x1, source
- emit_loadlp_ofs(0, 2); // ldr x2, copy
- emit_movz(slen*4, 3);
- emit_movz(arg0 & 0xffff, 0);
- emit_movk_lsl16(arg0 >> 16, 0);
-}
-
static void do_dirty_stub_emit_literals(u_int *loadlps)
{
set_loadlp(&loadlps[0], out);
output_w64((uintptr_t)copy);
}
-static void *do_dirty_stub(int i)
+static void *do_dirty_stub(int i, u_int source_len)
{
assem_debug("do_dirty_stub %x\n",start+i*4);
u_int *loadlps = (void *)out;
- do_dirty_stub_emit_args(start + i*4);
- emit_call(verify_code);
+ do_dirty_stub_base(start + i*4, source_len);
void *entry = out;
load_regs_entry(i);
if (entry == out)
return entry;
}
-static void do_dirty_stub_ds()
+static void do_dirty_stub_ds(u_int source_len)
{
- do_dirty_stub_emit_args(start + 1);
u_int *loadlps = (void *)out;
- emit_call(verify_code_ds);
+ do_dirty_stub_base(start + 1, source_len);
+ void *lit_jumpover = out;
emit_jmp(out + 8*2);
do_dirty_stub_emit_literals(loadlps);
+ set_jump_target(lit_jumpover, out);
+}
+
+static uint64_t get_from_ldr_literal(const u_int *i)
+{
+ signed int ofs;
+ assert((i[0] & 0xff000000) == 0x58000000);
+ ofs = i[0] << 8;
+ ofs >>= 5+8;
+ return *(uint64_t *)(i + ofs);
+}
+
+static uint64_t get_from_movz(const u_int *i)
+{
+ assert((i[0] & 0x7fe00000) == 0x52800000);
+ return (i[0] >> 5) & 0xffff;
+}
+
+// Find the "clean" entry point from a "dirty" entry point
+// by skipping past the call to verify_code
+static void *get_clean_addr(u_int *addr)
+{
+ assert_dirty_stub(addr);
+ return addr + 9;
+}
+
+static int verify_dirty(const u_int *ptr)
+{
+ const void *source, *copy;
+ u_int len;
+ assert_dirty_stub(ptr);
+ source = (void *)get_from_ldr_literal(&ptr[0]); // ldr x0, source
+ copy = (void *)get_from_ldr_literal(&ptr[1]); // ldr x1, copy
+ len = get_from_movz(&ptr[2]); // movz w2, #source_len
+ return !memcmp(source, copy, len);
+}
+
+static int isclean(void *addr)
+{
+ const u_int *ptr = addr;
+ if ((*ptr >> 24) == 0x58) { // the only place ldr (literal) is used
+ assert_dirty_stub(ptr);
+ return 0;
+ }
+ return 1;
+}
+
+// get source that block at addr was compiled from (host pointers)
+static void get_bounds(void *addr, u_char **start, u_char **end)
+{
+ const u_int *ptr = addr;
+ assert_dirty_stub(ptr);
+ *start = (u_char *)get_from_ldr_literal(&ptr[0]); // ldr x0, source
+ *end = *start + get_from_movz(&ptr[2]); // movz w2, #source_len
}
/* Special assem */
-#define shift_assemble shift_assemble_arm64
+static void c2op_prologue(u_int op, int i, const struct regstat *i_regs, u_int reglist)
+{
+ save_load_regs_all(1, reglist);
+ cop2_do_stall_check(op, i, i_regs, 0);
+#ifdef PCNT
+ emit_movimm(op, 0);
+ emit_far_call(pcnt_gte_start);
+#endif
+ // pointer to cop2 regs
+ emit_addimm64(FP, (u_char *)&psxRegs.CP2D.r[0] - (u_char *)&dynarec_local, 0);
+}
-static void shift_assemble_arm64(int i,struct regstat *i_regs)
+static void c2op_epilogue(u_int op,u_int reglist)
{
- assert(0);
+#ifdef PCNT
+ emit_movimm(op, 0);
+ emit_far_call(pcnt_gte_end);
+#endif
+ save_load_regs_all(0, reglist);
}
-#define loadlr_assemble loadlr_assemble_arm64
-static void loadlr_assemble_arm64(int i,struct regstat *i_regs)
+static void c2op_assemble(int i, const struct regstat *i_regs)
{
- assert(0);
+ u_int c2op=source[i]&0x3f;
+ u_int hr,reglist_full=0,reglist;
+ int need_flags,need_ir;
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(i_regs->regmap[hr]>=0) reglist_full|=1<<hr;
+ }
+ reglist=reglist_full&CALLER_SAVE_REGS;
+
+ if (gte_handlers[c2op]!=NULL) {
+ need_flags=!(gte_unneeded[i+1]>>63); // +1 because of how liveness detection works
+ need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00;
+ assem_debug("gte op %08x, unneeded %016lx, need_flags %d, need_ir %d\n",
+ source[i],gte_unneeded[i+1],need_flags,need_ir);
+ if(HACK_ENABLED(NDHACK_GTE_NO_FLAGS))
+ need_flags=0;
+ //int shift = (source[i] >> 19) & 1;
+ //int lm = (source[i] >> 10) & 1;
+ switch(c2op) {
+ default:
+ (void)need_ir;
+ c2op_prologue(c2op, i, i_regs, reglist);
+ emit_movimm(source[i],1); // opcode
+ emit_writeword(1,&psxRegs.code);
+ emit_far_call(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]);
+ break;
+ }
+ c2op_epilogue(c2op,reglist);
+ }
}
-static void c2op_assemble(int i,struct regstat *i_regs)
+static void c2op_ctc2_31_assemble(signed char sl, signed char temp)
{
- assert(0);
+ //value = value & 0x7ffff000;
+ //if (value & 0x7f87e000) value |= 0x80000000;
+ emit_andimm(sl, 0x7fffe000, temp);
+ emit_testimm(temp, 0xff87ffff);
+ emit_andimm(sl, 0x7ffff000, temp);
+ host_tempreg_acquire();
+ emit_orimm(temp, 0x80000000, HOST_TEMPREG);
+ emit_cmovne_reg(HOST_TEMPREG, temp);
+ host_tempreg_release();
+ assert(0); // testing needed
}
-static void multdiv_assemble_arm64(int i,struct regstat *i_regs)
+static void do_mfc2_31_one(u_int copr,signed char temp)
{
- assert(0);
+ emit_readshword(&reg_cop2d[copr],temp);
+ emit_bicsar_imm(temp,31,temp);
+ emit_cmpimm(temp,0xf80);
+ emit_csinvle_reg(temp,WZR,temp); // if (temp > 0xf80) temp = ~0;
+ emit_andimm(temp,0xf80,temp);
+}
+
+static void c2op_mfc2_29_assemble(signed char tl, signed char temp)
+{
+ if (temp < 0) {
+ host_tempreg_acquire();
+ temp = HOST_TEMPREG;
+ }
+ do_mfc2_31_one(9,temp);
+ emit_shrimm(temp,7,tl);
+ do_mfc2_31_one(10,temp);
+ emit_orrshr_imm(temp,2,tl);
+ do_mfc2_31_one(11,temp);
+ emit_orrshl_imm(temp,3,tl);
+ emit_writeword(tl,&reg_cop2d[29]);
+
+ if (temp == HOST_TEMPREG)
+ host_tempreg_release();
+}
+
+static void multdiv_assemble_arm64(int i, const struct regstat *i_regs)
+{
+ // case 0x18: MULT
+ // case 0x19: MULTU
+ // case 0x1A: DIV
+ // case 0x1B: DIVU
+ if(dops[i].rs1&&dops[i].rs2)
+ {
+ switch(dops[i].opcode2)
+ {
+ case 0x18: // MULT
+ case 0x19: // MULTU
+ {
+ signed char m1=get_reg(i_regs->regmap,dops[i].rs1);
+ signed char m2=get_reg(i_regs->regmap,dops[i].rs2);
+ signed char hi=get_reg(i_regs->regmap,HIREG);
+ signed char lo=get_reg(i_regs->regmap,LOREG);
+ assert(m1>=0);
+ assert(m2>=0);
+ assert(hi>=0);
+ assert(lo>=0);
+
+ if(dops[i].opcode2==0x18) // MULT
+ emit_smull(m1,m2,hi);
+ else // MULTU
+ emit_umull(m1,m2,hi);
+
+ emit_mov(hi,lo);
+ emit_shrimm64(hi,32,hi);
+ break;
+ }
+ case 0x1A: // DIV
+ case 0x1B: // DIVU
+ {
+ signed char numerator=get_reg(i_regs->regmap,dops[i].rs1);
+ signed char denominator=get_reg(i_regs->regmap,dops[i].rs2);
+ signed char quotient=get_reg(i_regs->regmap,LOREG);
+ signed char remainder=get_reg(i_regs->regmap,HIREG);
+ assert(numerator>=0);
+ assert(denominator>=0);
+ assert(quotient>=0);
+ assert(remainder>=0);
+
+ if (dops[i].opcode2 == 0x1A) // DIV
+ emit_sdiv(numerator,denominator,quotient);
+ else // DIVU
+ emit_udiv(numerator,denominator,quotient);
+ emit_msub(quotient,denominator,numerator,remainder);
+
+ // div 0 quotient (remainder is already correct)
+ host_tempreg_acquire();
+ if (dops[i].opcode2 == 0x1A) // DIV
+ emit_sub_asrimm(0,numerator,31,HOST_TEMPREG);
+ else
+ emit_movimm(~0,HOST_TEMPREG);
+ emit_test(denominator,denominator);
+ emit_cmoveq_reg(HOST_TEMPREG,quotient);
+ host_tempreg_release();
+ break;
+ }
+ default:
+ assert(0);
+ }
+ }
+ else
+ {
+ signed char hr=get_reg(i_regs->regmap,HIREG);
+ signed char lr=get_reg(i_regs->regmap,LOREG);
+ if ((dops[i].opcode2==0x1A || dops[i].opcode2==0x1B) && dops[i].rs2==0) // div 0
+ {
+ if (dops[i].rs1) {
+ signed char numerator = get_reg(i_regs->regmap, dops[i].rs1);
+ assert(numerator >= 0);
+ if (hr >= 0)
+ emit_mov(numerator,hr);
+ if (lr >= 0) {
+ if (dops[i].opcode2 == 0x1A) // DIV
+ emit_sub_asrimm(0,numerator,31,lr);
+ else
+ emit_movimm(~0,lr);
+ }
+ }
+ else {
+ if (hr >= 0) emit_zeroreg(hr);
+ if (lr >= 0) emit_movimm(~0,lr);
+ }
+ }
+ else
+ {
+ // Multiply by zero is zero.
+ if (hr >= 0) emit_zeroreg(hr);
+ if (lr >= 0) emit_zeroreg(lr);
+ }
+ }
}
#define multdiv_assemble multdiv_assemble_arm64
{
if (rs != 0)
emit_mov(rs, 0);
- emit_call(get_addr_ht);
+ emit_far_call(get_addr_ht);
emit_jmpreg(0);
}
emit_writeword(rt,&mini_ht[(return_address&0xFF)>>3][0]);
}
-static void mark_clear_cache(void *target)
+static void clear_cache_arm64(char *start, char *end)
{
- u_long offset = (u_char *)target - translation_cache;
- u_int mask = 1u << ((offset >> 12) & 31);
- if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((u_long)target & ~4095ul);
- start_tcache_write(start, start + 4096);
- needs_clear_cache[offset >> 17] |= mask;
+ // Don't rely on GCC's __clear_cache implementation, as it caches
+ // icache/dcache cache line sizes, that can vary between cores on
+ // big.LITTLE architectures.
+ uint64_t addr, ctr_el0;
+ static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff;
+ size_t isize, dsize;
+
+ __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+ isize = 4 << ((ctr_el0 >> 0) & 0xf);
+ dsize = 4 << ((ctr_el0 >> 16) & 0xf);
+
+ // use the global minimum cache line size
+ icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize;
+ dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize;
+
+ /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is
+ not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 28)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(dsize - 1);
+ for (; addr < (uint64_t)end; addr += dsize)
+ // use "civac" instead of "cvau", as this is the suggested workaround for
+ // Cortex-A53 errata 819472, 826319, 827319 and 824069.
+ __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory");
}
-}
+ __asm__ volatile("dsb ish" : : : "memory");
-// Clearing the cache is rather slow on ARM Linux, so mark the areas
-// that need to be cleared, and then only clear these areas once.
-static void do_clear_cache()
-{
- int i,j;
- for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
- {
- u_int bitmap=needs_clear_cache[i];
- if(bitmap) {
- u_char *start, *end;
- for(j=0;j<32;j++)
- {
- if(bitmap&(1<<j)) {
- start=translation_cache+i*131072+j*4096;
- end=start+4095;
- j++;
- while(j<32) {
- if(bitmap&(1<<j)) {
- end+=4096;
- j++;
- }else{
- end_tcache_write(start, end);
- break;
- }
- }
- }
- }
- needs_clear_cache[i]=0;
- }
+ /* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point of
+ Unification is not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 29)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(isize - 1);
+ for (; addr < (uint64_t)end; addr += isize)
+ __asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory");
+
+ __asm__ volatile("dsb ish" : : : "memory");
}
+
+ __asm__ volatile("isb" : : : "memory");
}
// CPU-architecture-specific initialization
-static void arch_init() {
+static void arch_init(void)
+{
+ uintptr_t diff = (u_char *)&ndrc->tramp.f - (u_char *)&ndrc->tramp.ops;
+ struct tramp_insns *ops = ndrc->tramp.ops;
+ size_t i;
+ assert(!(diff & 3));
+ start_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops));
+ for (i = 0; i < ARRAY_SIZE(ndrc->tramp.ops); i++) {
+ ops[i].ldr = 0x58000000 | imm19_rt(diff >> 2, 17); // ldr x17, [=val]
+ ops[i].br = 0xd61f0000 | rm_rn_rd(0, 17, 0); // br x17
+ }
+ end_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops));
}
// vim:shiftwidth=2:expandtab