output_w32(0x1a800000 | (COND_LT << 12) | rm_rn_rd(rt, rs, rt));
}
+// Conditional move on "below" (unsigned <): rt = rs when the carry flag is
+// clear (AArch64 condition cc), otherwise rt keeps its value. Emits
+// CSEL rt, rs, rt, cc.
+static void emit_cmovb_reg(u_int rs,u_int rt)
+{
+ assem_debug("csel %s,%s,%s,cc\n",regname[rt],regname[rs],regname[rt]);
+ output_w32(0x1a800000 | (COND_CC << 12) | rm_rn_rd(rt, rs, rt));
+}
+
static void emit_cmovs_reg(u_int rs,u_int rt)
{
assem_debug("csel %s,%s,%s,mi\n",regname[rt],regname[rs],regname[rt]);
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2);
emit_far_call(handler);
// (no cycle reload after read)
if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
emit_jmp(stubs[n].retaddr);
}
-static void inline_readstub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_readstub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs=get_reg(regmap,target);
int rt=get_reg(regmap,target);
uintptr_t host_addr = 0;
void *handler;
int cc=get_reg(regmap,CCREG);
- //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt))
+ //if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj),cc,target?rs:-1,rt))
// return;
handler = get_direct_memhandler(mem_rtab, addr, type, &host_addr);
if (handler == NULL) {
emit_mov(rs,0);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj),2);
if(is_dynamic) {
uintptr_t l1 = ((uintptr_t *)mem_rtab)[addr>>12] << 1;
emit_adrp((void *)l1, 1);
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d),2);
// returns new cycle_count
emit_far_call(handler);
- emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+ emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d),cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
if(restore_jump)
emit_jmp(stubs[n].retaddr);
}
-static void inline_writestub(enum stub_type type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+static void inline_writestub(enum stub_type type, int i, u_int addr,
+ const signed char regmap[], int target, int adj, u_int reglist)
{
int rs = get_reg(regmap,-1);
int rt = get_reg(regmap,target);
cc = cc_use = get_reg(regmap, CCREG);
if (cc < 0)
emit_loadreg(CCREG, (cc_use = 2));
- emit_addimm(cc_use, CLOCK_ADJUST(adj+1), 2);
+ emit_addimm(cc_use, CLOCK_ADJUST(adj), 2);
emit_far_call(do_memhandler_pre);
emit_far_call(handler);
emit_far_call(do_memhandler_post);
- emit_addimm(0, -CLOCK_ADJUST(adj+1), cc_use);
+ emit_addimm(0, -CLOCK_ADJUST(adj), cc_use);
if (cc < 0)
emit_storereg(CCREG, cc_use);
restore_regs(reglist);
/* Special assem */
-static void c2op_prologue(u_int op,u_int reglist)
+static void c2op_prologue(u_int op, int i, const struct regstat *i_regs, u_int reglist)
{
save_load_regs_all(1, reglist);
+ cop2_do_stall_check(op, i, i_regs, 0);
#ifdef PCNT
emit_movimm(op, 0);
emit_far_call(pcnt_gte_start);
save_load_regs_all(0, reglist);
}
-static void c2op_assemble(int i,struct regstat *i_regs)
+static void c2op_assemble(int i, const struct regstat *i_regs)
{
u_int c2op=source[i]&0x3f;
u_int hr,reglist_full=0,reglist;
need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00;
assem_debug("gte op %08x, unneeded %016lx, need_flags %d, need_ir %d\n",
source[i],gte_unneeded[i+1],need_flags,need_ir);
- if(new_dynarec_hacks&NDHACK_GTE_NO_FLAGS)
+ if(HACK_ENABLED(NDHACK_GTE_NO_FLAGS))
need_flags=0;
//int shift = (source[i] >> 19) & 1;
//int lm = (source[i] >> 10) & 1;
switch(c2op) {
default:
(void)need_ir;
- c2op_prologue(c2op,reglist);
+ c2op_prologue(c2op, i, i_regs, reglist);
emit_movimm(source[i],1); // opcode
emit_writeword(1,&psxRegs.code);
emit_far_call(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]);
emit_writeword(rt,&mini_ht[(return_address&0xFF)>>3][0]);
}
-static void mark_clear_cache(void *target)
+// Flush the data cache and invalidate the instruction cache for the byte
+// range [start, end) so freshly emitted code becomes executable.
+static void clear_cache_arm64(char *start, char *end)
{
- u_long offset = (u_char *)target - translation_cache;
- u_int mask = 1u << ((offset >> 12) & 31);
- if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((u_long)target & ~4095ul);
- start_tcache_write(start, start + 4096);
- needs_clear_cache[offset >> 17] |= mask;
+ // Don't rely on GCC's __clear_cache implementation, as it caches
+ // icache/dcache cache line sizes, that can vary between cores on
+ // big.LITTLE architectures.
+ uint64_t addr, ctr_el0;
+ static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff;
+ size_t isize, dsize;
+
+ // CTR_EL0 encodes the minimum i/d cache line sizes as log2 of the size
+ // in 4-byte words, hence the "4 <<" below.
+ __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+ isize = 4 << ((ctr_el0 >> 0) & 0xf);
+ dsize = 4 << ((ctr_el0 >> 16) & 0xf);
+
+ // use the global minimum cache line size
+ // (remembered across calls in the static locals, so a migration to a
+ // core with smaller lines permanently lowers the stride)
+ icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize;
+ dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize;
+
+ /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is
+ not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 28)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(dsize - 1);
+ for (; addr < (uint64_t)end; addr += dsize)
+ // use "civac" instead of "cvau", as this is the suggested workaround for
+ // Cortex-A53 errata 819472, 826319, 827319 and 824069.
+ __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory");
}
-}
+ // make the d-cache maintenance visible before touching the i-cache
+ __asm__ volatile("dsb ish" : : : "memory");
-// Clearing the cache is rather slow on ARM Linux, so mark the areas
-// that need to be cleared, and then only clear these areas once.
-static void do_clear_cache()
-{
- int i,j;
- for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
- {
- u_int bitmap=needs_clear_cache[i];
- if(bitmap) {
- u_char *start, *end;
- for(j=0;j<32;j++)
- {
- if(bitmap&(1<<j)) {
- start=translation_cache+i*131072+j*4096;
- end=start+4095;
- j++;
- while(j<32) {
- if(bitmap&(1<<j)) {
- end+=4096;
- j++;
- }else{
- end_tcache_write(start, end);
- break;
- }
- }
- }
- }
- needs_clear_cache[i]=0;
- }
+ /* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point of
+ Unification is not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 29)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(isize - 1);
+ for (; addr < (uint64_t)end; addr += isize)
+ __asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory");
+
+ __asm__ volatile("dsb ish" : : : "memory");
}
+
+ // flush this core's pipeline so the new instructions are fetched
+ __asm__ volatile("isb" : : : "memory");
}
// CPU-architecture-specific initialization