#include "new_dynarec_config.h"
#include "../psxhle.h"
#include "../psxinterpreter.h"
-#include "emu_if.h" //emulator interface
+#include "../gte.h"
+#include "emu_if.h" // emulator interface
#define noinline __attribute__((noinline,noclone))
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
+#ifndef min
+#define min(a, b) ((b) < (a) ? (b) : (a))
+#endif
//#define DISASM
//#define assem_debug printf
#include "assem_arm64.h"
#endif
+#define RAM_SIZE 0x200000
#define MAXBLOCK 4096
#define MAX_OUTPUT_BLOCK_SIZE 262144
+struct ndrc_mem
+{
+ u_char translation_cache[1 << TARGET_SIZE_2];
+ struct
+ {
+ struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
+ const void *f[2048 / sizeof(void *)];
+ } tramp;
+};
+
+#ifdef BASE_ADDR_DYNAMIC
+static struct ndrc_mem *ndrc;
+#else
+static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
+static struct ndrc_mem *ndrc = &ndrc_;
+#endif
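+// With BASE_ADDR_DYNAMIC the whole ndrc block is mmap()'d executable in
+// new_dynarec_init(); otherwise it lives in the data segment and, unless
+// NO_WRITE_EXEC is set, is made executable there with mprotect().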
+
// stubs
enum stub_type {
CC_STUB = 1,
static uint64_t unneeded_reg[MAXBLOCK];
static uint64_t branch_unneeded_reg[MAXBLOCK];
static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
- static uint64_t current_constmap[HOST_REGS];
- static uint64_t constmap[MAXBLOCK][HOST_REGS];
+ // contains 'real' consts at [i] insn, but may differ from what's actually
+ // loaded in host reg as 'final' value is always loaded, see get_final_value()
+ static uint32_t current_constmap[HOST_REGS];
+ static uint32_t constmap[MAXBLOCK][HOST_REGS];
static struct regstat regs[MAXBLOCK];
static struct regstat branch_regs[MAXBLOCK];
static signed char minimum_free_regs[MAXBLOCK];
#endif
int new_dynarec_hacks;
+ int new_dynarec_hacks_pergame;
int new_dynarec_did_compile;
+ #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
+
extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
extern int last_count; // last absolute target, often = next_interupt
extern int pcaddr;
void fp_exception();
void fp_exception_ds();
void jump_to_new_pc();
+void call_gteStall();
void new_dyna_leave();
// Needed by assembler
static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
static void load_regs_entry(int t);
static void load_all_consts(signed char regmap[],u_int dirty,int i);
+static u_int get_host_reglist(const signed char *regmap);
static int verify_dirty(const u_int *ptr);
static int get_final_value(int hr, int i, int *value);
static void add_stub(enum stub_type type, void *addr, void *retaddr,
u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
- int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist);
+ int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
static void add_to_linker(void *addr, u_int target, int ext);
static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
static void *get_direct_memhandler(void *table, u_int addr,
enum stub_type type, uintptr_t *addr_host);
+static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
static void pass_args(int a0, int a1);
+static void emit_far_jump(const void *f);
+static void emit_far_call(const void *f);
static void mprotect_w_x(void *start, void *end, int is_x)
{
static void end_tcache_write(void *start, void *end)
{
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
size_t len = (char *)end - (char *)start;
#if defined(__BLACKBERRY_QNX__)
msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
sceKernelSyncVMDomain(sceBlock, start, len);
#elif defined(_3DS)
ctr_flush_invalidate_cache();
+ #elif defined(__aarch64__)
+ // as of 2021, __clear_cache() is still broken on arm64
+ // so here is a custom one :(
+ clear_cache_arm64(start, end);
#else
__clear_cache(start, end);
#endif
(void)len;
-#else
- __clear_cache(start, end);
#endif
mprotect_w_x(start, end, 1);
static void *start_block(void)
{
u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
- if (end > translation_cache + (1<<TARGET_SIZE_2))
- end = translation_cache + (1<<TARGET_SIZE_2);
+ if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
+ end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
start_tcache_write(out, end);
return out;
}
end_tcache_write(start, out);
}
+// also takes care of w^x mappings when patching code
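+// Each needs_clear_cache[] entry covers 128KB of the translation cache
+// (offset >> 17); each of its 32 bits marks one 4KB page within that chunk.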
+static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
+
+static void mark_clear_cache(void *target)
+{
+ uintptr_t offset = (u_char *)target - ndrc->translation_cache;
+ u_int mask = 1u << ((offset >> 12) & 31);
+ if (!(needs_clear_cache[offset >> 17] & mask)) {
+ char *start = (char *)((uintptr_t)target & ~4095l);
+ start_tcache_write(start, start + 4095);
+ needs_clear_cache[offset >> 17] |= mask;
+ }
+}
+
+// Clearing the cache is rather slow on ARM Linux, so mark the areas
+// that need to be cleared, and then only clear these areas once.
+static void do_clear_cache(void)
+{
+ int i, j;
+ for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
+ {
+ u_int bitmap = needs_clear_cache[i];
+ if (!bitmap)
+ continue;
+ for (j = 0; j < 32; j++)
+ {
+ u_char *start, *end;
+ if (!(bitmap & (1<<j)))
+ continue;
+
+ start = ndrc->translation_cache + i*131072 + j*4096;
+ end = start + 4095;
+ for (j++; j < 32; j++) {
+ if (!(bitmap & (1<<j)))
+ break;
+ end += 4096;
+ }
+ end_tcache_write(start, end);
+ }
+ needs_clear_cache[i] = 0;
+ }
+}
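+// Typical use (sketch): the invalidate paths below call mark_clear_cache()
+// once per patched jump, then a single do_clear_cache() flushes all the
+// dirty pages in one pass.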
+
//#define DEBUG_CYCLE_COUNT 1
#define NO_CYCLE_PENALTY_THR 12
int cycle_multiplier; // 100 for 1.0
+int cycle_multiplier_override;
static int CLOCK_ADJUST(int x)
{
+ int m = cycle_multiplier_override
+ ? cycle_multiplier_override : cycle_multiplier;
int s=(x>>31)|1;
- return (x * cycle_multiplier + s * 50) / 100;
+ return (x * m + s * 50) / 100;
+}
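+// e.g. with the default cycle_multiplier of 200 (2.0x) set in new_dynarec_init()
+// and no override, CLOCK_ADJUST(3) = (3*200 + 50)/100 = 6; the s*50 term rounds
+// to nearest, away from zero for negative x.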
+
+// is the op an unconditional jump?
+static int is_ujump(int i)
+{
+ return itype[i] == UJUMP || itype[i] == RJUMP
+ || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset
+}
+
+static int is_jump(int i)
+{
+ return itype[i] == RJUMP || itype[i] == UJUMP || itype[i] == CJUMP || itype[i] == SJUMP;
}
static u_int get_page(u_int vaddr)
}
}
-void set_const(struct regstat *cur,signed char reg,uint64_t value)
+static void set_const(struct regstat *cur, signed char reg, uint32_t value)
{
int hr;
if(!reg) return;
}
}
-void clear_const(struct regstat *cur,signed char reg)
+static void clear_const(struct regstat *cur, signed char reg)
{
int hr;
if(!reg) return;
}
}
-int is_const(struct regstat *cur,signed char reg)
+static int is_const(struct regstat *cur, signed char reg)
{
int hr;
if(reg<0) return 0;
}
return 0;
}
-uint64_t get_const(struct regstat *cur,signed char reg)
+
+static uint32_t get_const(struct regstat *cur, signed char reg)
{
int hr;
if(!reg) return 0;
j=slen-i-1;
break;
}
- if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+ if (is_ujump(i+j))
{
// Don't go past an unconditional jump
j++;
// TODO: preferred register based on backward branch
}
// Delay slot should preferably not overwrite branch conditions or cycle count
- if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
+ if (i > 0 && is_jump(i-1)) {
if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
hsn[CCREG]=1;
int b=-1;
int rn=10;
- if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
+ if (i > 0 && is_ujump(i-1))
{
if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
return 0; // Don't need any registers if exiting the block
j=slen-i-1;
break;
}
- if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+ if (is_ujump(i+j))
{
// Don't go past an unconditional jump
j++;
j=slen-i-1;
break;
}
- if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+ if (is_ujump(i+j))
{
// Don't go past an unconditional jump
j++;
FUNCNAME(jump_handler_write32),
FUNCNAME(invalidate_addr),
FUNCNAME(jump_to_new_pc),
+ FUNCNAME(call_gteStall),
FUNCNAME(new_dyna_leave),
FUNCNAME(pcsx_mtc0),
FUNCNAME(pcsx_mtc0_ds),
#include "assem_arm64.c"
#endif
+static void *get_trampoline(const void *f)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
+ if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
+ break;
+ }
+ if (i == ARRAY_SIZE(ndrc->tramp.f)) {
+ SysPrintf("trampoline table is full, last func %p\n", f);
+ abort();
+ }
+ if (ndrc->tramp.f[i] == NULL) {
+ start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
+ ndrc->tramp.f[i] = f;
+ end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
+ }
+ return &ndrc->tramp.ops[i];
+}
+
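+// A direct branch/call may not reach emulator functions that are linked far
+// from the translation cache (e.g. arm64 B/BL only covers +/-128MB), so when
+// can_jump_or_call() rejects the target, the jump/call is routed through a
+// small trampoline slot in ndrc->tramp placed right after the cache.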
+static void emit_far_jump(const void *f)
+{
+ if (can_jump_or_call(f)) {
+ emit_jmp(f);
+ return;
+ }
+
+ f = get_trampoline(f);
+ emit_jmp(f);
+}
+
+static void emit_far_call(const void *f)
+{
+ if (can_jump_or_call(f)) {
+ emit_call(f);
+ return;
+ }
+
+ f = get_trampoline(f);
+ emit_call(f);
+}
+
// Add virtual address mapping to linked list
void ll_add(struct ll_entry **head,int vaddr,void *addr)
{
{
inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
}
head=head->next;
while(head!=NULL) {
inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
next=head->next;
free(head);
for(first=page+1;first<last;first++) {
invalidate_page(first);
}
- #if defined(__arm__) || defined(__aarch64__)
- do_clear_cache();
- #endif
+ do_clear_cache();
// Don't trap writes
invalid_code[block]=1;
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
-void invalidate_all_pages()
+void invalidate_all_pages(void)
{
u_int page;
for(page=0;page<4096;page++)
#ifdef USE_MINI_HT
memset(mini_ht,-1,sizeof(mini_ht));
#endif
+ do_clear_cache();
}
static void do_invstub(int n)
set_jump_target(stubs[n].addr, out);
save_regs(reglist);
if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
- emit_call(invalidate_addr);
+ emit_far_call(invalidate_addr);
restore_regs(reglist);
emit_jmp(stubs[n].retaddr); // return address
}
else clear_const(current,rt1[i]);
}
else {
- set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
+ set_const(current,rt1[i],imm[i]<<16); // LUI
}
dirty_reg(current,rt1[i]);
}
minimum_free_regs[i]=HOST_REGS;
}
-static void cop12_alloc(struct regstat *current,int i)
+static void cop2_alloc(struct regstat *current,int i)
{
- alloc_reg(current,i,CSREG); // Load status
- if(opcode2[i]<3) // MFC1/CFC1
+ if (opcode2[i] < 3) // MFC2/CFC2
{
+ alloc_cc(current,i); // for stalls
+ dirty_reg(current,CCREG);
if(rt1[i]){
clear_const(current,rt1[i]);
alloc_reg(current,i,rt1[i]);
dirty_reg(current,rt1[i]);
}
- alloc_reg_temp(current,i,-1);
}
- else if(opcode2[i]>3) // MTC1/CTC1
+ else if (opcode2[i] > 3) // MTC2/CTC2
{
if(rs1[i]){
clear_const(current,rs1[i]);
current->u&=~1LL;
alloc_reg(current,i,0);
}
- alloc_reg_temp(current,i,-1);
}
+ alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
}
void c2op_alloc(struct regstat *current,int i)
{
+ alloc_cc(current,i); // for stalls
+ dirty_reg(current,CCREG);
alloc_reg_temp(current,i,-1);
}
cop0_alloc(current,i);
break;
case COP1:
+ break;
case COP2:
- cop12_alloc(current,i);
+ cop2_alloc(current,i);
break;
case C1LS:
c1ls_alloc(current,i);
}
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
- int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist)
+ int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
{
add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
}
s2l=get_reg(i_regs->regmap,rs2[i]);
if(rs2[i]==0) // rx<r0
{
- assert(s1l>=0);
- if(opcode2[i]==0x2a) // SLT
+ if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT
+ assert(s1l>=0);
emit_shrimm(s1l,31,t);
- else // SLTU (unsigned can not be less than zero)
+ }
+ else // SLTU (unsigned can not be less than zero, 0<0)
emit_zeroreg(t);
}
else if(rs1[i]==0) // r0<rx
}
}
-static void load_assemble(int i,struct regstat *i_regs)
+static u_int get_host_reglist(const signed char *regmap)
+{
+ u_int reglist = 0, hr;
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ if (hr != EXCLUDE_REG && regmap[hr] >= 0)
+ reglist |= 1 << hr;
+ }
+ return reglist;
+}
+
+static u_int reglist_exclude(u_int reglist, int r1, int r2)
+{
+ if (r1 >= 0)
+ reglist &= ~(1u << r1);
+ if (r2 >= 0)
+ reglist &= ~(1u << r2);
+ return reglist;
+}
+
+// find a temp caller-saved register not in reglist (so assumed to be free)
+static int reglist_find_free(u_int reglist)
+{
+ u_int free_regs = ~reglist & CALLER_SAVE_REGS;
+ if (free_regs == 0)
+ return -1;
+ return __builtin_ctz(free_regs);
+}
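+// Illustrative example only: with CALLER_SAVE_REGS = 0x000f and reglist = 0x0005
+// (host regs 0 and 2 live), free_regs is 0x000a and __builtin_ctz() picks reg 1.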
+
+static void load_assemble(int i, const struct regstat *i_regs)
{
int s,tl,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
int fastio_reg_override=-1;
- u_int hr,reglist=0;
+ u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,rt1[i]);
s=get_reg(i_regs->regmap,rs1[i]);
offset=imm[i];
- for(hr=0;hr<HOST_REGS;hr++) {
- if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
- }
if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
if(s>=0) {
c=(i_regs->wasconst>>s)&1;
}
#ifndef loadlr_assemble
-static void loadlr_assemble(int i,struct regstat *i_regs)
+static void loadlr_assemble(int i, const struct regstat *i_regs)
{
int s,tl,temp,temp2,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
int fastio_reg_override=-1;
- u_int hr,reglist=0;
+ u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,rt1[i]);
s=get_reg(i_regs->regmap,rs1[i]);
temp=get_reg(i_regs->regmap,-1);
addr=get_reg(i_regs->regmap,AGEN1+(i&1));
assert(addr<0);
offset=imm[i];
- for(hr=0;hr<HOST_REGS;hr++) {
- if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
- }
reglist|=1<<temp;
if(offset||s<0||c) addr=temp2;
else addr=s;
}
#endif
-void store_assemble(int i,struct regstat *i_regs)
+void store_assemble(int i, const struct regstat *i_regs)
{
int s,tl;
int addr,temp;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
int fastio_reg_override=-1;
- u_int hr,reglist=0;
+ u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,rs2[i]);
s=get_reg(i_regs->regmap,rs1[i]);
temp=get_reg(i_regs->regmap,agr);
}
assert(tl>=0);
assert(temp>=0);
- for(hr=0;hr<HOST_REGS;hr++) {
- if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
- }
if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
if(offset||s<0||c) addr=temp;
else addr=s;
add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
jaddr=0;
}
- if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+ if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
if(!c||memtarget) {
#ifdef DESTRUCTIVE_SHIFT
// The x86 shift operation is 'destructive'; it overwrites the
emit_movimm(start+i*4+4,0);
emit_writeword(0,&pcaddr);
emit_addimm(HOST_CCREG,2,HOST_CCREG);
- emit_call(get_addr_ht);
+ emit_far_call(get_addr_ht);
emit_jmpreg(0);
}
}
}
-static void storelr_assemble(int i,struct regstat *i_regs)
+static void storelr_assemble(int i, const struct regstat *i_regs)
{
int s,tl;
int temp;
void *done0, *done1, *done2;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
- u_int hr,reglist=0;
+ u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,rs2[i]);
s=get_reg(i_regs->regmap,rs1[i]);
temp=get_reg(i_regs->regmap,agr);
}
}
assert(tl>=0);
- for(hr=0;hr<HOST_REGS;hr++) {
- if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
- }
assert(temp>=0);
if(!c) {
emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
set_jump_target(done2, out);
if(!c||!memtarget)
add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
- if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+ if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
emit_addimm_no_flags(-ram_offset,temp);
#if defined(HOST_IMM8)
int ir=get_reg(i_regs->regmap,INVCP);
emit_storereg(CCREG,HOST_CCREG);
emit_loadreg(rs1[i],1);
emit_movimm(copr,0);
- emit_call(pcsx_mtc0_ds);
+ emit_far_call(pcsx_mtc0_ds);
emit_loadreg(rs1[i],s);
return;
}
emit_movimm(0,HOST_TEMPREG);
emit_writeword(HOST_TEMPREG,&pending_exception);
}
- //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12);
- //else
if(s==HOST_CCREG)
emit_loadreg(rs1[i],1);
else if(s!=1)
emit_mov(s,1);
emit_movimm(copr,0);
- emit_call(pcsx_mtc0);
+ emit_far_call(pcsx_mtc0);
if(copr==9||copr==11||copr==12||copr==13) {
emit_readword(&Count,HOST_CCREG);
emit_readword(&next_interupt,HOST_TEMPREG);
emit_jeq(0);
emit_readword(&pcaddr, 0);
emit_addimm(HOST_CCREG,2,HOST_CCREG);
- emit_call(get_addr_ht);
+ emit_far_call(get_addr_ht);
emit_jmpreg(0);
set_jump_target(jaddr, out);
}
if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
emit_movimm(start+(i-ds)*4,EAX); // Get PC
emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
- emit_jmp(ds?fp_exception_ds:fp_exception);
+ emit_far_jump(ds?fp_exception_ds:fp_exception);
+}
+
+static int cop2_is_stalling_op(int i, int *cycles)
+{
+ if (opcode[i] == 0x3a) { // SWC2
+ *cycles = 0;
+ return 1;
+ }
+ if (itype[i] == COP2 && (opcode2[i] == 0 || opcode2[i] == 2)) { // MFC2/CFC2
+ *cycles = 0;
+ return 1;
+ }
+ if (itype[i] == C2OP) {
+ *cycles = gte_cycletab[source[i] & 0x3f];
+ return 1;
+ }
+ // ... what about MTC2/CTC2/LWC2?
+ return 0;
+}
+
+#if 0
+static void log_gte_stall(int stall, u_int cycle)
+{
+ if ((u_int)stall <= 44)
+ printf("x stall %2d %u\n", stall, cycle + last_count);
+ if (cycle + last_count > 1215348544) exit(1);
+}
+
+static void emit_log_gte_stall(int i, int stall, u_int reglist)
+{
+ save_regs(reglist);
+ if (stall > 0)
+ emit_movimm(stall, 0);
+ else
+ emit_mov(HOST_TEMPREG, 0);
+ emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_far_call(log_gte_stall);
+ restore_regs(reglist);
+}
+#endif
+
+static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
+{
+ int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
+ int rtmp = reglist_find_free(reglist);
+
+ if (HACK_ENABLED(NDHACK_GTE_NO_STALL))
+ return;
+ //assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
+ if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
+ // happens occasionally... cc evicted? Don't bother then
+ //printf("no cc %08x\n", start + i*4);
+ return;
+ }
+ if (!bt[i]) {
+ for (j = i - 1; j >= 0; j--) {
+ //if (is_ds[j]) break;
+ if (cop2_is_stalling_op(j, &other_gte_op_cycles) || bt[j])
+ break;
+ }
+ }
+ cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+ if (other_gte_op_cycles >= 0)
+ stall = other_gte_op_cycles - cycles_passed;
+ else if (cycles_passed >= 44)
+ stall = 0; // can't stall
+ if (stall == -MAXBLOCK && rtmp >= 0) {
+ // unknown stall, do the expensive runtime check
+ assem_debug("; cop2_call_stall_check\n");
+#if 0 // too slow
+ save_regs(reglist);
+ emit_movimm(gte_cycletab[op], 0);
+ emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_far_call(call_gteStall);
+ restore_regs(reglist);
+#else
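+ // psxRegs.gteBusyCycle holds the cc value at which the last GTE op finishes
+ // (see "save gteBusyCycle" below); HOST_TEMPREG becomes busy_end - current cc,
+ // and if that is below 44 (apparently the longest GTE op) the GTE is still
+ // busy, so cc is advanced to the finish time via the conditional move.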
+ host_tempreg_acquire();
+ emit_readword(&psxRegs.gteBusyCycle, rtmp);
+ emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+ emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
+ emit_cmpimm(HOST_TEMPREG, 44);
+ emit_cmovb_reg(rtmp, HOST_CCREG);
+ //emit_log_gte_stall(i, 0, reglist);
+ host_tempreg_release();
+#endif
+ }
+ else if (stall > 0) {
+ //emit_log_gte_stall(i, stall, reglist);
+ emit_addimm(HOST_CCREG, stall, HOST_CCREG);
+ }
+
+ // save gteBusyCycle, if needed
+ if (gte_cycletab[op] == 0)
+ return;
+ other_gte_op_cycles = -1;
+ for (j = i + 1; j < slen; j++) {
+ if (cop2_is_stalling_op(j, &other_gte_op_cycles))
+ break;
+ if (is_jump(j)) {
+ // check ds
+ if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
+ j++;
+ break;
+ }
+ }
+ if (other_gte_op_cycles >= 0)
+ // will handle stall when assembling that op
+ return;
+ cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen - 1)] - ccadj[i]);
+ if (cycles_passed >= 44)
+ return;
+ assem_debug("; save gteBusyCycle\n");
+ host_tempreg_acquire();
+#if 0
+ emit_readword(&last_count, HOST_TEMPREG);
+ emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
+ emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG);
+ emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
+ emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
+#else
+ emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG);
+ emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
+#endif
+ host_tempreg_release();
}
static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
}
}
-static void c2ls_assemble(int i,struct regstat *i_regs)
+static void c2ls_assemble(int i, const struct regstat *i_regs)
{
int s,tl;
int ar;
enum stub_type type;
int agr=AGEN1+(i&1);
int fastio_reg_override=-1;
- u_int hr,reglist=0;
+ u_int reglist=get_host_reglist(i_regs->regmap);
u_int copr=(source[i]>>16)&0x1f;
s=get_reg(i_regs->regmap,rs1[i]);
tl=get_reg(i_regs->regmap,FTEMP);
assert(rs1[i]>0);
assert(tl>=0);
- for(hr=0;hr<HOST_REGS;hr++) {
- if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
- }
if(i_regs->regmap[HOST_CCREG]==CCREG)
reglist&=~(1<<HOST_CCREG);
assert(ar>=0);
if (opcode[i]==0x3a) { // SWC2
+ cop2_call_stall_check(0, i, i_regs, reglist_exclude(reglist, tl, -1));
cop2_get_dreg(copr,tl,-1);
type=STOREW_STUB;
}
if(jaddr2)
add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
if(opcode[i]==0x3a) // SWC2
- if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+ if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
#if defined(HOST_IMM8)
int ir=get_reg(i_regs->regmap,INVCP);
assert(ir>=0);
}
}
-static void cop2_assemble(int i,struct regstat *i_regs)
+static void cop2_assemble(int i, const struct regstat *i_regs)
{
- u_int copr=(source[i]>>11)&0x1f;
- signed char temp=get_reg(i_regs->regmap,-1);
+ u_int copr = (source[i]>>11) & 0x1f;
+ signed char temp = get_reg(i_regs->regmap, -1);
+
+ if (opcode2[i] == 0 || opcode2[i] == 2) { // MFC2/CFC2
+ if (!HACK_ENABLED(NDHACK_GTE_NO_STALL)) {
+ signed char tl = get_reg(i_regs->regmap, rt1[i]);
+ u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), tl, temp);
+ cop2_call_stall_check(0, i, i_regs, reglist);
+ }
+ }
if (opcode2[i]==0) { // MFC2
signed char tl=get_reg(i_regs->regmap,rt1[i]);
if(tl>=0&&rt1[i]!=0)
if(cc<0)
emit_loadreg(CCREG,2);
emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
- emit_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr));
+ emit_far_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr));
emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
emit_add(2,HOST_CCREG,2);
emit_writeword(2,&psxRegs.cycle);
- emit_call(func);
- emit_jmp(jump_to_new_pc);
+ emit_far_call(func);
+ emit_far_jump(jump_to_new_pc);
}
static void syscall_assemble(int i,struct regstat *i_regs)
{
extern void do_insn_cmp();
//extern int cycle;
- u_int hr,reglist=0;
+ u_int hr, reglist = get_host_reglist(regs[i].regmap);
- for(hr=0;hr<HOST_REGS;hr++)
- if(regs[i].regmap[hr]>=0) reglist|=1<<hr;
+ assem_debug("//do_insn_cmp %08x\n", start+i*4);
save_regs(reglist);
+ // write out changed consts to match the interpreter
+ if (i > 0 && !bt[i]) {
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ int reg = regs[i-1].regmap[hr];
+ if (hr == EXCLUDE_REG || reg < 0)
+ continue;
+ if (!((regs[i-1].isconst >> hr) & 1))
+ continue;
+ if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
+ continue;
+ emit_movimm(constmap[i-1][hr],0);
+ emit_storereg(reg, 0);
+ }
+ }
emit_movimm(start+i*4,0);
emit_writeword(0,&pcaddr);
- emit_call(do_insn_cmp);
+ emit_far_call(do_insn_cmp);
//emit_readword(&cycle,0);
//emit_addimm(0,2,0);
//emit_writeword(0,&cycle);
(void)get_reg2;
restore_regs(reglist);
+ assem_debug("\\\\do_insn_cmp\n");
}
#else
#define drc_dbg_emit_do_cmp(x)
else if(*adj==0||invert) {
int cycles=CLOCK_ADJUST(count+2);
// faster loop HACK
+#if 0
if (t&&*adj) {
int rel=t-i;
if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
}
+#endif
emit_addimm_and_set_flags(cycles,HOST_CCREG);
jaddr=out;
emit_jns(0);
// Update cycle count
assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
- emit_call(cc_interrupt);
+ emit_far_call(cc_interrupt);
if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
if(stubs[n].d==TAKEN) {
if(internal_branch(ba[i]))
uint64_t u,gte_u,b,gte_b;
uint64_t temp_u,temp_gte_u=0;
uint64_t gte_u_unknown=0;
- if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
+ if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
gte_u_unknown=~0ll;
if(iend==slen-1) {
u=1;
bt[(ba[i]-start)>>2]=1;
if(ba[i]<=start+i*4) {
// Backward branch
- if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+ if(is_ujump(i))
{
// Unconditional branch
temp_u=1;
gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
}
} /*else*/ if(1) {
- if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+ if (is_ujump(i))
{
// Unconditional branch
u=unneeded_reg[(ba[i]-start)>>2];
if(ba[i]<start || ba[i]>=(start+slen*4))
{
// Branch out of this block, flush all regs
- if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+ if (is_ujump(i))
{
// Unconditional branch
will_dirty_i=0;
// Internal branch
if(ba[i]<=start+i*4) {
// Backward branch
- if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+ if (is_ujump(i))
{
// Unconditional branch
temp_will_dirty=0;
}
/*else*/ if(1)
{
- if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+ if (is_ujump(i))
{
// Unconditional branch
will_dirty_i=0;
regs[i].dirty&=wont_dirty_i;
if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
{
- if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
+ if (i < iend-1 && !is_ujump(i)) {
for(r=0;r<HOST_REGS;r++) {
if(r!=EXCLUDE_REG) {
if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
((volatile u_int *)out)[0]++; // make cache dirty
for (i = 0; i < ARRAY_SIZE(ret); i++) {
- out = translation_cache;
+ out = ndrc->translation_cache;
beginning = start_block();
emit_movimm(DRC_TEST_VAL + i, 0); // test
emit_ret();
SysPrintf("test passed.\n");
else
SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
- out = translation_cache;
+ out = ndrc->translation_cache;
}
// clear the state completely, instead of just marking
// things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full()
+void new_dynarec_clear_full(void)
{
int n;
- out = translation_cache;
+ out = ndrc->translation_cache;
memset(invalid_code,1,sizeof(invalid_code));
memset(hash_table,0xff,sizeof(hash_table));
memset(mini_ht,-1,sizeof(mini_ht));
for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
}
-void new_dynarec_init()
+void new_dynarec_init(void)
{
SysPrintf("Init new dynarec\n");
- // allocate/prepare a buffer for translation cache
- // see assem_arm.h for some explanation
-#if defined(BASE_ADDR_FIXED)
- if (mmap(translation_cache, 1 << TARGET_SIZE_2,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0) != translation_cache) {
- SysPrintf("mmap() failed: %s\n", strerror(errno));
- SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
- abort();
- }
-#elif defined(BASE_ADDR_DYNAMIC)
+#ifdef BASE_ADDR_DYNAMIC
#ifdef VITA
sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
if (sceBlock < 0)
SysPrintf("sceKernelAllocMemBlockForVM failed\n");
- int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
+ int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
if (ret < 0)
SysPrintf("sceKernelGetMemBlockBase failed\n");
#else
- translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
+ uintptr_t desired_addr = 0;
+ #ifdef __ELF__
+ extern char _end;
+ desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
+ #endif
+ ndrc = mmap((void *)desired_addr, sizeof(*ndrc),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (translation_cache == MAP_FAILED) {
+ if (ndrc == MAP_FAILED) {
SysPrintf("mmap() failed: %s\n", strerror(errno));
abort();
}
#else
#ifndef NO_WRITE_EXEC
// not all systems allow execute in data segment by default
- if (mprotect(translation_cache, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
+ if (mprotect(ndrc, sizeof(ndrc->translation_cache) + sizeof(ndrc->tramp.ops),
+ PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
SysPrintf("mprotect() failed: %s\n", strerror(errno));
#endif
#endif
- out = translation_cache;
+ out = ndrc->translation_cache;
cycle_multiplier=200;
new_dynarec_clear_full();
#ifdef HOST_IMM8
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
}
-void new_dynarec_cleanup()
+void new_dynarec_cleanup(void)
{
int n;
-#if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
+#ifdef BASE_ADDR_DYNAMIC
#ifdef VITA
sceKernelFreeMemBlock(sceBlock);
sceBlock = -1;
#else
- if (munmap(translation_cache, 1<<TARGET_SIZE_2) < 0)
+ if (munmap(ndrc, sizeof(*ndrc)) < 0)
SysPrintf("munmap() failed\n");
#endif
#endif
static u_int *get_source_start(u_int addr, u_int *limit)
{
+ if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
+ cycle_multiplier_override = 0;
+
if (addr < 0x00200000 ||
- (0xa0000000 <= addr && addr < 0xa0200000)) {
+ (0xa0000000 <= addr && addr < 0xa0200000))
+ {
// used for BIOS calls mostly?
*limit = (addr&0xa0000000)|0x00200000;
return (u_int *)(rdram + (addr&0x1fffff));
}
else if (!Config.HLE && (
/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
- (0xbfc00000 <= addr && addr < 0xbfc80000))) {
- // BIOS
+ (0xbfc00000 <= addr && addr < 0xbfc80000)))
+ {
+ // BIOS. The multiplier should be much higher as it's uncached 8bit mem,
+ // but timings in PCSX are too tied to the interpreter's BIAS
+ if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
+ cycle_multiplier_override = 200;
+
*limit = (addr & 0xfff00000) | 0x80000;
return (u_int *)((u_char *)psxR + (addr&0x7ffff));
}
invalid_code[start>>12]=0;
emit_movimm(start,0);
emit_writeword(0,&pcaddr);
- emit_jmp(new_dyna_leave);
+ emit_far_jump(new_dyna_leave);
literal_pool(0);
end_block(beginning);
ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
else if(type==CJUMP||type==SJUMP)
ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
else ba[i]=-1;
- if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
+ if (i > 0 && is_jump(i-1)) {
int do_in_intrp=0;
// branch in delay slot?
if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) {
bt[t+1]=1; // expected return from interpreter
}
else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
- !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
+ !(i>=3&&is_jump(i-3))) {
// v0 overwrite like this is a sign of trouble, bail out
SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
do_in_intrp=1;
}
}
/* Is this the end of the block? */
- if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
+ if (i > 0 && is_ujump(i-1)) {
if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
done=2;
}
cop0_alloc(&current,i);
break;
case COP1:
+ break;
case COP2:
- cop12_alloc(&current,i);
+ cop2_alloc(&current,i);
break;
case C1LS:
c1ls_alloc(&current,i);
dirty_reg(&branch_regs[i-1],31);
}
memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
break;
case RJUMP:
memcpy(&branch_regs[i-1],&current,sizeof(current));
}
#endif
memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
break;
case CJUMP:
if((opcode[i-1]&0x3E)==4) // BEQ/BNE
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
// Alloc the delay slot in case the branch is taken
branch_regs[i-1].isconst=0;
branch_regs[i-1].wasconst=0;
memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
- memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+ memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
else
// Alloc the delay slot in case the branch is taken
break;
}
- if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
+ if (is_ujump(i-1))
{
if(rt1[i-1]==31) // JAL/JALR
{
#if !defined(DRC_DBG)
else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
{
- // GTE runs in parallel until accessed, divide by 2 for a rough guess
- cc+=gte_cycletab[source[i]&0x3f]/2;
- }
- else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues
- {
- cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
+ // this should really be removed since the real stalls have been implemented,
+ // but doing so causes sizeable perf regression against the older version
+ u_int gtec = gte_cycletab[source[i] & 0x3f];
+ cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? gtec/2 : 2;
}
else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
{
}
else if(itype[i]==C2LS)
{
- cc+=4;
+ // same as with C2OP
+ cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? 4 : 2;
}
#endif
else
if(!is_ds[i]) {
regs[i].dirty=current.dirty;
regs[i].isconst=current.isconst;
- memcpy(constmap[i],current_constmap,sizeof(current_constmap));
+ memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
}
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
}
}
// Conditional branch may need registers for following instructions
- if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+ if (!is_ujump(i))
{
if(i<slen-2) {
nr|=needed_reg[i+2];
(regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
(regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
{
- if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+ if (!is_ujump(i))
{
if(likely[i]) {
regs[i].regmap[hr]=-1;
{
branch_regs[i].regmap[hr]=-1;
branch_regs[i].regmap_entry[hr]=-1;
- if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+ if (!is_ujump(i))
{
if(!likely[i]&&i<slen-2) {
regmap_pre[i+2][hr]=-1;
branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
branch_regs[i].wasconst&=~(1<<hr);
branch_regs[i].isconst&=~(1<<hr);
- if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
+ if (!is_ujump(i)) {
regmap_pre[i+2][hr]=f_regmap[hr];
regs[i+2].wasdirty&=~(1<<hr);
regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
branch_regs[k].dirty&=~(1<<hr);
branch_regs[k].wasconst&=~(1<<hr);
branch_regs[k].isconst&=~(1<<hr);
- if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
+ if (!is_ujump(k)) {
regmap_pre[k+2][hr]=f_regmap[hr];
regs[k+2].wasdirty&=~(1<<hr);
}
//printf("no-match due to different register\n");
break;
}
- if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
+ if (is_ujump(j))
{
// Stop on unconditional branch
break;
emit_cmp(0,1);
#ifdef __aarch64__
emit_jeq(out + 4*2);
- emit_jmp(new_dyna_leave);
+ emit_far_jump(new_dyna_leave);
#else
emit_jne(new_dyna_leave);
#endif
} else {
speculate_register_values(i);
#ifndef DESTRUCTIVE_WRITEBACK
- if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
+ if (i < 2 || !is_ujump(i-2))
{
wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
}
}
#endif
// write back
- if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
+ if (i < 2 || !is_ujump(i-2))
{
wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
loop_preload(regmap_pre[i],regs[i].regmap_entry);
case SPAN:
pagespan_assemble(i,®s[i]);break;
}
- if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
+ if (is_ujump(i))
literal_pool(1024);
else
literal_pool_jumpover(256);
}
}
- //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
+ //assert(is_ujump(i-2));
// If the block did not end with an unconditional branch,
// add a jump to the next instruction.
if(i>1) {
- if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
+ if(!is_ujump(i-2)&&itype[i-1]!=SPAN) {
assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
assert(i==slen);
if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) {
// If we're within 256K of the end of the buffer,
// start over from the beginning. (Is 256K enough?)
- if (out > translation_cache+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE)
- out = translation_cache;
+ if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
+ out = ndrc->translation_cache;
// Trap writes to any of the pages we compiled
for(i=start>>12;i<=(start+slen*4)>>12;i++) {
/* Pass 10 - Free memory by expiring oldest blocks */
- int end=(((out-translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
+ int end=(((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
while(expirep!=end)
{
int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
- uintptr_t base=(uintptr_t)translation_cache+((expirep>>13)<<shift); // Base address of this block
+ uintptr_t base=(uintptr_t)ndrc->translation_cache+((expirep>>13)<<shift); // Base address of this block
inv_debug("EXP: Phase %d\n",expirep);
switch((expirep>>11)&3)
{
break;
case 3:
// Clear jump_out
- #if defined(__arm__) || defined(__aarch64__)
if((expirep&2047)==0)
do_clear_cache();
- #endif
ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
break;