#include "pcnt.h"
#include "arm_features.h"
-#if defined(BASE_ADDR_FIXED)
-#elif defined(BASE_ADDR_DYNAMIC)
-u_char *translation_cache;
-#else
-u_char translation_cache[1 << TARGET_SIZE_2] __attribute__((aligned(4096)));
-#endif
-static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
-
#define CALLER_SAVE_REGS 0x0007ffff
#define unused __attribute__((unused))
output_w32(0x1a800000 | (COND_LT << 12) | rm_rn_rd(rt, rs, rt));
}
+// "Conditional move if below" (unsigned <): rt = rs when the CC
+// (carry-clear) condition holds, else rt is unchanged.
+// Encodes AArch64 "csel rt, rs, rt, cc" (base opcode 0x1a800000,
+// condition in bits [15:12] via COND_CC).
+static void emit_cmovb_reg(u_int rs,u_int rt)
+{
+ assem_debug("csel %s,%s,%s,cc\n",regname[rt],regname[rs],regname[rt]);
+ output_w32(0x1a800000 | (COND_CC << 12) | rm_rn_rd(rt, rs, rt));
+}
+
// Conditional move on "mi" (N flag set / negative result): rt = rs if minus.
// NOTE(review): the body below appears truncated by this diff chunk — a csel
// for "mi" is followed directly by emit_cmovb_imm(1,rt) with no output_w32;
// verify against the full file before relying on this excerpt.
static void emit_cmovs_reg(u_int rs,u_int rt)
{
assem_debug("csel %s,%s,%s,mi\n",regname[rt],regname[rs],regname[rt]);
emit_cmovb_imm(1,rt);
}
+// Return nonzero when target 'a' is reachable from the current output
+// pointer 'out' with a direct AArch64 B/BL instruction. Those branches
+// encode a signed 26-bit word offset, i.e. +/-128MB:
+// -134217728 .. 134217727 bytes (2^27 range).
+static int can_jump_or_call(const void *a)
+{
+ intptr_t diff = (u_char *)a - out;
+ return (-134217728 <= diff && diff <= 134217727);
+}
+
static void emit_call(const void *a)
{
intptr_t diff = (u_char *)a - out;
// addr is in the current recompiled block (max 256k)
// offset shouldn't exceed +/-1MB
emit_adr(addr, 1);
- emit_jmp(linker);
+ emit_far_jump(linker);
}
static void check_extjump2(void *src)
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
- emit_call(handler);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2);
+ emit_far_call(handler);
// (no cycle reload after read)
if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
loadstore_extend(type,0,rt);
emit_jmp(stubs[n].retaddr);
}
-static void inline_readstub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_readstub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs=get_reg(regmap,target);
int rt=get_reg(regmap,target);
uintptr_t host_addr = 0;
void *handler;
int cc=get_reg(regmap,CCREG);
- //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt))
+ //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj),cc,target?rs:-1,rt))
// return;
handler = get_direct_memhandler(mem_rtab, addr, type, &host_addr);
if (handler == NULL) {
emit_mov(rs,0);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj),2);
if(is_dynamic) {
uintptr_t l1 = ((uintptr_t *)mem_rtab)[addr>>12] << 1;
emit_adrp((void *)l1, 1);
emit_addimm64(1, l1 & 0xfff, 1);
}
else
- emit_call(do_memhandler_pre);
+ emit_far_call(do_memhandler_pre);
- emit_call(handler);
+ emit_far_call(handler);
// (no cycle reload after read)
if(rt>=0&&rt1[i]!=0)
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2);
// returns new cycle_count
- emit_call(handler);
- emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+ emit_far_call(handler);
+ emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d),cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
if(restore_jump)
emit_jmp(stubs[n].retaddr);
}
-static void inline_writestub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_writestub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs = get_reg(regmap,-1);
int rt = get_reg(regmap,target);
cc = cc_use = get_reg(regmap, CCREG);
if (cc < 0)
emit_loadreg(CCREG, (cc_use = 2));
- emit_addimm(cc_use, CLOCK_ADJUST(adj+1), 2);
+ emit_addimm(cc_use, CLOCK_ADJUST(adj), 2);
- emit_call(do_memhandler_pre);
- emit_call(handler);
- emit_call(do_memhandler_post);
- emit_addimm(0, -CLOCK_ADJUST(adj+1), cc_use);
+ emit_far_call(do_memhandler_pre);
+ emit_far_call(handler);
+ emit_far_call(do_memhandler_post);
+ emit_addimm(0, -CLOCK_ADJUST(adj), cc_use);
if (cc < 0)
emit_storereg(CCREG, cc_use);
restore_regs(reglist);
emit_loadlp_ofs(0, 0); // ldr x1, source
emit_loadlp_ofs(0, 1); // ldr x2, copy
emit_movz(slen*4, 2);
- emit_call(verify_code_arm64);
+ emit_far_call(verify_code_arm64);
void *jmp = out;
emit_cbz(0, 0);
emit_movz(vaddr & 0xffff, 0);
emit_movk_lsl16(vaddr >> 16, 0);
- emit_call(get_addr);
+ emit_far_call(get_addr);
emit_jmpreg(0);
set_jump_target(jmp, out);
}
/* Special assem */
-static void c2op_prologue(u_int op,u_int reglist)
+static void c2op_prologue(u_int op, int i, const struct regstat *i_regs, u_int reglist)
{
save_load_regs_all(1, reglist);
+ cop2_do_stall_check(op, i, i_regs, 0);
#ifdef PCNT
emit_movimm(op, 0);
- emit_call(pcnt_gte_start);
+ emit_far_call(pcnt_gte_start);
#endif
// pointer to cop2 regs
emit_addimm64(FP, (u_char *)&psxRegs.CP2D.r[0] - (u_char *)&dynarec_local, 0);
{
#ifdef PCNT
emit_movimm(op, 0);
- emit_call(pcnt_gte_end);
+ emit_far_call(pcnt_gte_end);
#endif
save_load_regs_all(0, reglist);
}
-static void c2op_assemble(int i,struct regstat *i_regs)
+static void c2op_assemble(int i, const struct regstat *i_regs)
{
u_int c2op=source[i]&0x3f;
u_int hr,reglist_full=0,reglist;
need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00;
assem_debug("gte op %08x, unneeded %016lx, need_flags %d, need_ir %d\n",
source[i],gte_unneeded[i+1],need_flags,need_ir);
- if(new_dynarec_hacks&NDHACK_GTE_NO_FLAGS)
+ if(HACK_ENABLED(NDHACK_GTE_NO_FLAGS))
need_flags=0;
//int shift = (source[i] >> 19) & 1;
//int lm = (source[i] >> 10) & 1;
switch(c2op) {
default:
(void)need_ir;
- c2op_prologue(c2op,reglist);
+ c2op_prologue(c2op, i, i_regs, reglist);
emit_movimm(source[i],1); // opcode
emit_writeword(1,&psxRegs.code);
- emit_call(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]);
+ emit_far_call(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]);
break;
}
c2op_epilogue(c2op,reglist);
{
if (rs != 0)
emit_mov(rs, 0);
- emit_call(get_addr_ht);
+ emit_far_call(get_addr_ht);
emit_jmpreg(0);
}
emit_writeword(rt,&mini_ht[(return_address&0xFF)>>3][0]);
}
-static void mark_clear_cache(void *target)
+// Make newly written code in [start, end) visible to instruction fetch:
+// clean the data cache and invalidate the instruction cache by hand,
+// reading line sizes from CTR_EL0 on every call (see comment below for why
+// GCC's __clear_cache is not used). Honors the IDC/DIC bits of CTR_EL0,
+// which make the respective maintenance pass unnecessary.
+static void clear_cache_arm64(char *start, char *end)
{
- u_long offset = (u_char *)target - translation_cache;
- u_int mask = 1u << ((offset >> 12) & 31);
- if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((u_long)target & ~4095ul);
- start_tcache_write(start, start + 4096);
- needs_clear_cache[offset >> 17] |= mask;
+ // Don't rely on GCC's __clear_cache implementation, as it caches
+ // icache/dcache cache line sizes, that can vary between cores on
+ // big.LITTLE architectures.
+ uint64_t addr, ctr_el0;
+ static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff;
+ size_t isize, dsize;
+
+ __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+ isize = 4 << ((ctr_el0 >> 0) & 0xf);
+ dsize = 4 << ((ctr_el0 >> 16) & 0xf);
+
+ // use the global minimum cache line size
+ icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize;
+ dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize;
+
+ /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is
+ not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 28)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(dsize - 1);
+ for (; addr < (uint64_t)end; addr += dsize)
+ // use "civac" instead of "cvau", as this is the suggested workaround for
+ // Cortex-A53 errata 819472, 826319, 827319 and 824069.
+ __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory");
}
-}
+ __asm__ volatile("dsb ish" : : : "memory");
-// Clearing the cache is rather slow on ARM Linux, so mark the areas
-// that need to be cleared, and then only clear these areas once.
-static void do_clear_cache()
-{
- int i,j;
- for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
- {
- u_int bitmap=needs_clear_cache[i];
- if(bitmap) {
- u_char *start, *end;
- for(j=0;j<32;j++)
- {
- if(bitmap&(1<<j)) {
- start=translation_cache+i*131072+j*4096;
- end=start+4095;
- j++;
- while(j<32) {
- if(bitmap&(1<<j)) {
- end+=4096;
- j++;
- }else{
- end_tcache_write(start, end);
- break;
- }
- }
- }
- }
- needs_clear_cache[i]=0;
- }
+ /* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point of
+ Unification is not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 29)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(isize - 1);
+ for (; addr < (uint64_t)end; addr += isize)
+ __asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory");
+
+ __asm__ volatile("dsb ish" : : : "memory");
}
+
+ __asm__ volatile("isb" : : : "memory");
}
// CPU-architecture-specific initialization
-static void arch_init() {
+// Fill every trampoline slot with an "ldr x17, <literal>; br x17" pair.
+// 'diff' is the constant byte distance from a slot in ndrc->tramp.ops to
+// its target-address literal in ndrc->tramp.f; it feeds the ldr-literal
+// imm19 word offset, hence the word-alignment assert.
+// NOTE(review): start/end_tcache_write bracket the writes — presumably they
+// make the region writable and flush caches afterwards; confirm in their
+// definitions.
+static void arch_init(void)
+{
+ uintptr_t diff = (u_char *)&ndrc->tramp.f - (u_char *)&ndrc->tramp.ops;
+ struct tramp_insns *ops = ndrc->tramp.ops;
+ size_t i;
+ assert(!(diff & 3));
+ start_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops));
+ for (i = 0; i < ARRAY_SIZE(ndrc->tramp.ops); i++) {
+ ops[i].ldr = 0x58000000 | imm19_rt(diff >> 2, 17); // ldr x17, [=val]
+ ops[i].br = 0xd61f0000 | rm_rn_rd(0, 17, 0); // br x17
+ }
+ end_tcache_write(ops, (u_char *)ops + sizeof(ndrc->tramp.ops));
+}
// vim:shiftwidth=2:expandtab