static uint64_t unneeded_reg[MAXBLOCK];
static uint64_t branch_unneeded_reg[MAXBLOCK];
static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
- static uint64_t current_constmap[HOST_REGS];
- static uint64_t constmap[MAXBLOCK][HOST_REGS];
+ // contains the 'real' const at insn [i], which may differ from what's actually
+ // loaded in the host reg, since the 'final' value is always loaded; see get_final_value()
+ static uint32_t current_constmap[HOST_REGS];
+ static uint32_t constmap[MAXBLOCK][HOST_REGS];
static struct regstat regs[MAXBLOCK];
static struct regstat branch_regs[MAXBLOCK];
static signed char minimum_free_regs[MAXBLOCK];
static void end_tcache_write(void *start, void *end)
{
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
size_t len = (char *)end - (char *)start;
#if defined(__BLACKBERRY_QNX__)
  msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
#elif defined(VITA)
  sceKernelSyncVMDomain(sceBlock, start, len);
#elif defined(_3DS)
ctr_flush_invalidate_cache();
+ #elif defined(__aarch64__)
+ // as of 2021, __clear_cache() is still broken on arm64
+ // so here is a custom one :(
+ clear_cache_arm64(start, end);
#else
__clear_cache(start, end);
#endif
(void)len;
-#else
- __clear_cache(start, end);
#endif
mprotect_w_x(start, end, 1);
}

static void end_block(void *start)
{
  end_tcache_write(start, out);
}
+// also takes care of w^x mappings when patching code
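+// one u_int per 128k (1<<17) chunk of the translation cache, one bit per 4k page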
+static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
+
+static void mark_clear_cache(void *target)
+{
+ uintptr_t offset = (u_char *)target - ndrc->translation_cache;
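+  // (offset >> 12) & 31: the 4k page's bit within its 128k chunk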
+ u_int mask = 1u << ((offset >> 12) & 31);
+ if (!(needs_clear_cache[offset >> 17] & mask)) {
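+    // first write to this page since the last flush:
+    // make it writable again (w^x) and queue it for flushing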
+    char *start = (char *)((uintptr_t)target & ~4095L);
+ start_tcache_write(start, start + 4095);
+ needs_clear_cache[offset >> 17] |= mask;
+ }
+}
+
+// Clearing the cache is rather slow on ARM Linux, so mark the areas
+// that need to be cleared, and then only clear these areas once.
+static void do_clear_cache(void)
+{
+ int i, j;
+ for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
+ {
+ u_int bitmap = needs_clear_cache[i];
+ if (!bitmap)
+ continue;
+ for (j = 0; j < 32; j++)
+ {
+ u_char *start, *end;
+ if (!(bitmap & (1<<j)))
+ continue;
+
+ start = ndrc->translation_cache + i*131072 + j*4096;
+ end = start + 4095;
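+      // merge consecutive dirty pages into a single flush range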
+ for (j++; j < 32; j++) {
+ if (!(bitmap & (1<<j)))
+ break;
+ end += 4096;
+ }
+ end_tcache_write(start, end);
+ }
+ needs_clear_cache[i] = 0;
+ }
+}
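+
+// typical patching sequence (as used by the invalidation code below):
+//   mark_clear_cache(addr);  // reopens addr's page for writing
+//   ... patch the code ...
+//   do_clear_cache();        // flushes and re-protects everything touched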
+
//#define DEBUG_CYCLE_COUNT 1
#define NO_CYCLE_PENALTY_THR 12
int cycle_multiplier; // 100 for 1.0
+int cycle_multiplier_override; // per-region override (see get_source_start()), 0 = none
static int CLOCK_ADJUST(int x)
{
+ int m = cycle_multiplier_override
+ ? cycle_multiplier_override : cycle_multiplier;
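+  // s is the sign of x (-1 or 1) so the division rounds to nearest,
+  // e.g. x=7, m=150 -> (7*150+50)/100 = 11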
int s=(x>>31)|1;
- return (x * cycle_multiplier + s * 50) / 100;
+ return (x * m + s * 50) / 100;
}
static u_int get_page(u_int vaddr)
}
}
-void set_const(struct regstat *cur,signed char reg,uint64_t value)
+static void set_const(struct regstat *cur, signed char reg, uint32_t value)
{
int hr;
if(!reg) return;
}
}
-void clear_const(struct regstat *cur,signed char reg)
+static void clear_const(struct regstat *cur, signed char reg)
{
int hr;
if(!reg) return;
}
}
-int is_const(struct regstat *cur,signed char reg)
+static int is_const(struct regstat *cur, signed char reg)
{
int hr;
if(reg<0) return 0;
}
return 0;
}
-uint64_t get_const(struct regstat *cur,signed char reg)
+
+static uint32_t get_const(struct regstat *cur, signed char reg)
{
int hr;
if(!reg) return 0;
{
inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
}
head=head->next;
while(head!=NULL) {
inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
next=head->next;
free(head);
for(first=page+1;first<last;first++) {
invalidate_page(first);
}
- #if defined(__arm__) || defined(__aarch64__)
- do_clear_cache();
- #endif
+ do_clear_cache();
// Don't trap writes
invalid_code[block]=1;
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
-void invalidate_all_pages()
+void invalidate_all_pages(void)
{
u_int page;
for(page=0;page<4096;page++)
#ifdef USE_MINI_HT
memset(mini_ht,-1,sizeof(mini_ht));
#endif
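+  // flush any ranges accumulated via mark_clear_cache()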
+ do_clear_cache();
}
static void do_invstub(int n)
else clear_const(current,rt1[i]);
}
else {
- set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
+ set_const(current,rt1[i],imm[i]<<16); // LUI
}
dirty_reg(current,rt1[i]);
}
s2l=get_reg(i_regs->regmap,rs2[i]);
if(rs2[i]==0) // rx<r0
{
- assert(s1l>=0);
- if(opcode2[i]==0x2a) // SLT
+ if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT
+ assert(s1l>=0);
emit_shrimm(s1l,31,t);
- else // SLTU (unsigned can not be less than zero)
+ }
+    else // SLTU (unsigned cannot be less than zero, 0<0)
emit_zeroreg(t);
}
else if(rs1[i]==0) // r0<rx
//extern int cycle;
u_int hr,reglist=0;
- for(hr=0;hr<HOST_REGS;hr++)
+ assem_debug("//do_insn_cmp %08x\n", start+i*4);
+ for (hr = 0; hr < HOST_REGS; hr++)
if(regs[i].regmap[hr]>=0) reglist|=1<<hr;
save_regs(reglist);
+ // write out changed consts to match the interpreter
+ if (i > 0 && !bt[i]) {
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ int reg = regs[i-1].regmap[hr];
+ if (hr == EXCLUDE_REG || reg < 0)
+ continue;
+ if (!((regs[i-1].isconst >> hr) & 1))
+ continue;
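+      // same const already written out for the previous insn? skip it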
+ if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
+ continue;
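+      // materialize the const in temp reg 0 and write it back to the guest reg file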
+ emit_movimm(constmap[i-1][hr],0);
+ emit_storereg(reg, 0);
+ }
+ }
emit_movimm(start+i*4,0);
emit_writeword(0,&pcaddr);
emit_far_call(do_insn_cmp);
//emit_writeword(0,&cycle);
(void)get_reg2;
restore_regs(reglist);
+ assem_debug("\\\\do_insn_cmp\n");
}
#else
#define drc_dbg_emit_do_cmp(x)
else if(*adj==0||invert) {
int cycles=CLOCK_ADJUST(count+2);
// faster loop HACK
+#if 0
if (t&&*adj) {
int rel=t-i;
if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
}
+#endif
emit_addimm_and_set_flags(cycles,HOST_CCREG);
jaddr=out;
emit_jns(0);
// clear the state completely, instead of just marking
// things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full()
+void new_dynarec_clear_full(void)
{
int n;
out = ndrc->translation_cache;
for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
}
-void new_dynarec_init()
+void new_dynarec_init(void)
{
SysPrintf("Init new dynarec\n");
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
}
-void new_dynarec_cleanup()
+void new_dynarec_cleanup(void)
{
int n;
#ifdef BASE_ADDR_DYNAMIC
static u_int *get_source_start(u_int addr, u_int *limit)
{
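+  // drop any per-region cycle_multiplier override from the previous block,
+  // unless the frontend forced one (NDHACK_OVERRIDE_CYCLE_M)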
+ if (!(new_dynarec_hacks & NDHACK_OVERRIDE_CYCLE_M))
+ cycle_multiplier_override = 0;
+
if (addr < 0x00200000 ||
- (0xa0000000 <= addr && addr < 0xa0200000)) {
+ (0xa0000000 <= addr && addr < 0xa0200000))
+ {
// used for BIOS calls mostly?
*limit = (addr&0xa0000000)|0x00200000;
return (u_int *)(rdram + (addr&0x1fffff));
}
else if (!Config.HLE && (
/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
- (0xbfc00000 <= addr && addr < 0xbfc80000))) {
- // BIOS
+ (0xbfc00000 <= addr && addr < 0xbfc80000)))
+ {
+    // BIOS. The multiplier should be much higher as it's uncached 8-bit mem,
+ // but timings in PCSX are too tied to the interpreter's BIAS
+ if (!(new_dynarec_hacks & NDHACK_OVERRIDE_CYCLE_M))
+ cycle_multiplier_override = 200;
+
*limit = (addr & 0xfff00000) | 0x80000;
return (u_int *)((u_char *)psxR + (addr&0x7ffff));
}
dirty_reg(&branch_regs[i-1],31);
}
memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
break;
case RJUMP:
memcpy(&branch_regs[i-1],&current,sizeof(current));
}
#endif
memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
break;
case CJUMP:
if((opcode[i-1]&0x3E)==4) // BEQ/BNE
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
// Alloc the delay slot in case the branch is taken
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
// Alloc the delay slot in case the branch is taken
if(!is_ds[i]) {
regs[i].dirty=current.dirty;
regs[i].isconst=current.isconst;
- memcpy(constmap[i],current_constmap,sizeof(current_constmap));
+ memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
}
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
break;
case 3:
// Clear jump_out
- #if defined(__arm__) || defined(__aarch64__)
if((expirep&2047)==0)
do_clear_cache();
- #endif
ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
break;