#endif
#define RAM_SIZE 0x200000
-#define MAXBLOCK 4096
+#define MAXBLOCK 2048
#define MAX_OUTPUT_BLOCK_SIZE 262144
#define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
#define PAGE_COUNT 1024
{
signed char regmap_entry[HOST_REGS];
signed char regmap[HOST_REGS];
- uint64_t wasdirty;
- uint64_t dirty;
- uint64_t u;
+ u_int wasdirty;
+ u_int dirty;
u_int wasconst; // before; for example 'lw r2, (r2)' wasconst is true
u_int isconst; // ... but isconst is false when r2 is known (hr)
u_int loadedconst; // host regs that have constants loaded
+ u_int noevict; // can't evict this hr (alloced by current op)
//u_int waswritten; // MIPS regs that were used as store base before
+ uint64_t u;
};
struct ht_entry
u_char is_delay_load:1; // is_load + MFC/CFC
u_char is_exception:1; // unconditional, also interp. fallback
u_char may_except:1; // might generate an exception
+ u_char ls_type:2; // load/store type (ls_width_type)
} dops[MAXBLOCK];
+// Access width of a load/store op, stored in dops[].ls_type (2 bits):
+// byte, halfword, word, or the unaligned LWL/LWR/SWL/SWR family.
+enum ls_width_type {
+  LS_8 = 0, LS_16, LS_32, LS_LR
+};
+
static struct compile_info
{
int imm;
#define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
- extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
+ extern int cycle_count; // ... until end of the timeslice, counts -N -> 0 (CCREG)
extern int last_count; // last absolute target, often = next_interupt
extern int pcaddr;
extern int pending_exception;
#define LOREG 32 // lo
#define HIREG 33 // hi
//#define FSREG 34 // FPU status (FCSR)
-#define CSREG 35 // Coprocessor status
+//#define CSREG 35 // Coprocessor status
#define CCREG 36 // Cycle count
#define INVCP 37 // Pointer to invalid_code
//#define MMREG 38 // Pointer to memory_map
-#define ROREG 39 // ram offset (if rdram!=0x80000000)
+#define ROREG 39 // ram offset (if psxM != 0x80000000)
#define TEMPREG 40
-#define FTEMP 40 // FPU temporary register
+#define FTEMP 40 // Load/store temporary register (was fpu)
#define PTEMP 41 // Prefetch temporary register
//#define TLREG 42 // TLB mapping offset
#define RHASH 43 // Return address hash
#define MAXREG 45
#define AGEN1 46 // Address generation temporary register (pass5b_preallocate2)
//#define AGEN2 47 // Address generation temporary register
-#define BTREG 50 // Branch target temporary register
/* instruction types */
#define NOP 0 // No operation
/* branch codes */
#define TAKEN 1
#define NOTTAKEN 2
-#define NULLDS 3
#define DJT_1 (void *)1l // no function, just a label in assem_debug log
#define DJT_2 (void *)2l
static void exception_assemble(int i, const struct regstat *i_regs, int ccadj_);
// Needed by assembler
-static void wb_register(signed char r, const signed char regmap[], uint64_t dirty);
-static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty);
-static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr);
+static void wb_register(signed char r, const signed char regmap[], u_int dirty);
+static void wb_dirtys(const signed char i_regmap[], u_int i_dirty);
+static void wb_needed_dirtys(const signed char i_regmap[], u_int i_dirty, int addr);
static void load_all_regs(const signed char i_regmap[]);
static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
static void load_regs_entry(int t);
static void load_all_consts(const signed char regmap[], u_int dirty, int i);
static u_int get_host_reglist(const signed char *regmap);
-static int get_final_value(int hr, int i, int *value);
+static int get_final_value(int hr, int i, u_int *value);
static void add_stub(enum stub_type type, void *addr, void *retaddr,
u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
return NULL;
}
+// Fault path for a pc the recompiler refused: records the exception in
+// CP0 (AdEL for a misaligned pc, IBE bus error otherwise) and redirects
+// execution to the general exception vector. Returns the new pc.
+// this doesn't normally happen
+static noinline u_int generate_exception(u_int pc)
+{
+ //if (execBreakCheck(&psxRegs, pc))
+ // return psxRegs.pc;
+
+ // generate an address or bus error
+ psxRegs.CP0.n.Cause &= 0x300; // keep only the pending-interrupt bits
+ psxRegs.CP0.n.EPC = pc;
+ if (pc & 3) {
+ psxRegs.CP0.n.Cause |= R3000E_AdEL << 2; // address error on load/fetch
+ psxRegs.CP0.n.BadVAddr = pc;
+#ifdef DRC_DBG
+ last_count -= 2;
+#endif
+ } else
+ psxRegs.CP0.n.Cause |= R3000E_IBE << 2; // instruction bus error
+ return (psxRegs.pc = 0x80000080); // R3000 general exception vector
+}
+
// Get address from virtual address
// This is called from the recompiled JR/JALR instructions
static void noinline *get_addr(u_int vaddr, int can_compile)
return NULL;
int r = new_recompile_block(vaddr);
- if (r == 0)
+ if (likely(r == 0))
return ndrc_get_addr_ht(vaddr);
- // generate an address error
-#ifdef DRC_DBG
- last_count -= 2;
-#endif
- psxRegs.CP0.n.Cause &= 0x300;
- psxRegs.CP0.n.EPC = vaddr;
- if (vaddr & 3) {
- psxRegs.CP0.n.Cause |= R3000E_AdEL << 2;
- psxRegs.CP0.n.BadVAddr = vaddr;
- } else
- psxRegs.CP0.n.Cause |= R3000E_IBE << 2;
- psxRegs.pc = 0x80000080;
- return ndrc_get_addr_ht(0x80000080);
+ return ndrc_get_addr_ht(generate_exception(vaddr));
}
// Look up address in hash table first
// Least soon needed registers
// Look at the next ten instructions and see which registers
// will be used. Try not to reallocate these.
-static void lsn(u_char hsn[], int i, int *preferred_reg)
+static void lsn(u_char hsn[], int i)
{
int j;
int b=-1;
if(dops[i].itype==C2LS) {
hsn[FTEMP]=0;
}
- // Load L/R also uses FTEMP as a temporary register
- if(dops[i].itype==LOADLR) {
- hsn[FTEMP]=0;
- }
- // Also SWL/SWR/SDL/SDR
- if(dops[i].opcode==0x2a||dops[i].opcode==0x2e||dops[i].opcode==0x2c||dops[i].opcode==0x2d) {
+ // Load/store L/R also uses FTEMP as a temporary register
+ if (dops[i].itype == LOADLR || dops[i].itype == STORELR) {
hsn[FTEMP]=0;
}
// Don't remove the miniht registers
FUNCNAME(do_memhandler_post),
#endif
#ifdef DRC_DBG
+# ifdef __aarch64__
+ FUNCNAME(do_insn_cmp_arm64),
+# else
FUNCNAME(do_insn_cmp),
+# endif
#endif
};
switch (ofs) {
#define ofscase(x) case LO_##x: return " ; " #x
ofscase(next_interupt);
+ ofscase(cycle_count);
ofscase(last_count);
ofscase(pending_exception);
ofscase(stop);
/* Register allocation */
+// Bind guest register 'reg' to host register 'hr': the new mapping starts
+// clean and non-const, and 'hr' is protected from eviction for the rest of
+// the current op (noevict is presumably cleared per-op by the caller —
+// NOTE(review): confirm against the allocation loop, not visible here).
+static void alloc_set(struct regstat *cur, int reg, int hr)
+{
+ cur->regmap[hr] = reg;
+ cur->dirty &= ~(1u << hr);
+ cur->isconst &= ~(1u << hr);
+ cur->noevict |= 1u << hr;
+}
+
+// All host regs are taken: evict one and allocate 'reg' into it.
+// hsn[] ("hopefully soon needed") holds per-guest-reg distances from lsn();
+// larger j = needed later, so scan from j=10 down to prefer evicting the
+// register needed furthest in the future. Regs marked in cur->noevict
+// (allocated by the current op) are never victims. Aborts if nothing can
+// be evicted, which should be prevented by min_free_regs bookkeeping.
+static void evict_alloc_reg(struct regstat *cur, int i, int reg, int preferred_hr)
+{
+ u_char hsn[MAXREG+1];
+ int j, r, hr;
+ memset(hsn, 10, sizeof(hsn)); // 10 = "not needed in the next ten insns"
+ lsn(hsn, i);
+ //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+ if(i>0) {
+ // Don't evict the cycle count at entry points, otherwise the entry
+ // stub will have to write it.
+ if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
+ if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
+ // First pass: only consider victims not needed for >=3 insns, and
+ // avoid regs used by the previous instruction (may still be live).
+ for(j=10;j>=3;j--)
+ {
+ // Alloc preferred register if available
+ if (!((cur->noevict >> preferred_hr) & 1)
+ && hsn[cur->regmap[preferred_hr]] == j)
+ {
+ alloc_set(cur, reg, preferred_hr);
+ return;
+ }
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
+ continue;
+ // only take HOST_CCREG if the cycle count is needed even later
+ if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+ if(cur->regmap[hr]==r) {
+ alloc_set(cur, reg, hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ // Fallback pass: evict anything evictable, soonest-needed last.
+ for(j=10;j>=0;j--)
+ {
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
+ continue;
+ if(cur->regmap[hr]==r) {
+ alloc_set(cur, reg, hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ SysPrintf("This shouldn't happen (evict_alloc_reg)\n");
+ abort();
+}
+
// Note: registers are allocated clean (unmodified state)
// if you intend to modify the register, you must call dirty_reg().
static void alloc_reg(struct regstat *cur,int i,signed char reg)
if((cur->u>>reg)&1) return;
// see if it's already allocated
- if (get_reg(cur->regmap, reg) >= 0)
+ if ((hr = get_reg(cur->regmap, reg)) >= 0) {
+ cur->noevict |= 1u << hr;
return;
+ }
// Keep the same mapping if the register was already allocated in a loop
preferred_reg = loop_reg(i,reg,preferred_reg);
// Try to allocate the preferred register
- if(cur->regmap[preferred_reg]==-1) {
- cur->regmap[preferred_reg]=reg;
- cur->dirty&=~(1<<preferred_reg);
- cur->isconst&=~(1<<preferred_reg);
+ if (cur->regmap[preferred_reg] == -1) {
+ alloc_set(cur, reg, preferred_reg);
return;
}
r=cur->regmap[preferred_reg];
assert(r < 64);
if((cur->u>>r)&1) {
- cur->regmap[preferred_reg]=reg;
- cur->dirty&=~(1<<preferred_reg);
- cur->isconst&=~(1<<preferred_reg);
+ alloc_set(cur, reg, preferred_reg);
return;
}
if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
&& oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
{
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
+ alloc_set(cur, reg, hr);
return;
}
}
// Try to allocate any available register
for (hr = PREFERRED_REG_FIRST; ; ) {
if (cur->regmap[hr] < 0) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
+ alloc_set(cur, reg, hr);
return;
}
hr++;
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
- u_char hsn[MAXREG+1];
- memset(hsn,10,sizeof(hsn));
- int j;
- lsn(hsn,i,&preferred_reg);
- //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
- //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
- if(i>0) {
- // Don't evict the cycle count at entry points, otherwise the entry
- // stub will have to write it.
- if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
- if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
- for(j=10;j>=3;j--)
- {
- // Alloc preferred register if available
- if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
- for(hr=0;hr<HOST_REGS;hr++) {
- // Evict both parts of a 64-bit register
- if(cur->regmap[hr]==r) {
- cur->regmap[hr]=-1;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
- }
- }
- cur->regmap[preferred_reg]=reg;
- return;
- }
- for(r=1;r<=MAXREG;r++)
- {
- if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=HOST_CCREG||j<hsn[CCREG]) {
- if(cur->regmap[hr]==r) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
- return;
- }
- }
- }
- }
- }
- }
- }
- for(j=10;j>=0;j--)
- {
- for(r=1;r<=MAXREG;r++)
- {
- if(hsn[r]==j) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(cur->regmap[hr]==r) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
- return;
- }
- }
- }
- }
- }
- SysPrintf("This shouldn't happen (alloc_reg)");abort();
+ evict_alloc_reg(cur, i, reg, preferred_reg);
}
// Allocate a temporary register. This is done without regard to
static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
{
int r,hr;
- int preferred_reg = -1;
// see if it's already allocated
- for(hr=0;hr<HOST_REGS;hr++)
+ for (hr = 0; hr < HOST_REGS; hr++)
{
- if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
+ if (hr != EXCLUDE_REG && cur->regmap[hr] == reg) {
+ cur->noevict |= 1u << hr;
+ return;
+ }
}
// Try to allocate any available register
for(hr=HOST_REGS-1;hr>=0;hr--) {
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
+ alloc_set(cur, reg, hr);
return;
}
}
assert(r < 64);
if((cur->u>>r)&1) {
if(i==0||((unneeded_reg[i-1]>>r)&1)) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
+ alloc_set(cur, reg, hr);
return;
}
}
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
- // TODO: we might want to follow unconditional jumps here
- // TODO: get rid of dupe code and make this into a function
- u_char hsn[MAXREG+1];
- memset(hsn,10,sizeof(hsn));
- int j;
- lsn(hsn,i,&preferred_reg);
- //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
- if(i>0) {
- // Don't evict the cycle count at entry points, otherwise the entry
- // stub will have to write it.
- if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
- if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
- for(j=10;j>=3;j--)
- {
- for(r=1;r<=MAXREG;r++)
- {
- if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=HOST_CCREG||hsn[CCREG]>2) {
- if(cur->regmap[hr]==r) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
- return;
- }
- }
- }
- }
- }
- }
- }
- for(j=10;j>=0;j--)
- {
- for(r=1;r<=MAXREG;r++)
- {
- if(hsn[r]==j) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(cur->regmap[hr]==r) {
- cur->regmap[hr]=reg;
- cur->dirty&=~(1<<hr);
- cur->isconst&=~(1<<hr);
- return;
- }
- }
- }
- }
- }
- SysPrintf("This shouldn't happen");abort();
+ evict_alloc_reg(cur, i, reg, 0);
}
static void mov_alloc(struct regstat *current,int i)
static void shift_alloc(struct regstat *current,int i)
{
if(dops[i].rt1) {
- if(dops[i].opcode2<=0x07) // SLLV/SRLV/SRAV
- {
if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
if(dops[i].rs2) alloc_reg(current,i,dops[i].rs2);
alloc_reg(current,i,dops[i].rt1);
alloc_reg_temp(current,i,-1);
cinfo[i].min_free_regs=1;
}
- } else { // DSLLV/DSRLV/DSRAV
- assert(0);
- }
clear_const(current,dops[i].rs1);
clear_const(current,dops[i].rs2);
clear_const(current,dops[i].rt1);
alloc_reg(current,i,dops[i].rt1);
}
if (dops[i].may_except) {
- alloc_cc(current, i); // for exceptions
+ alloc_cc_optional(current, i); // for exceptions
alloc_reg_temp(current, i, -1);
cinfo[i].min_free_regs = 1;
}
}
else clear_const(current,dops[i].rt1);
if (dops[i].may_except) {
- alloc_cc(current, i); // for exceptions
+ alloc_cc_optional(current, i); // for exceptions
alloc_reg_temp(current, i, -1);
cinfo[i].min_free_regs = 1;
}
if (ram_offset)
alloc_reg(current, i, ROREG);
if (dops[i].may_except) {
- alloc_cc(current, i); // for exceptions
- dirty_reg(current, CCREG);
+ alloc_cc_optional(current, i); // for exceptions
need_temp = 1;
}
if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
}
}
-static void store_alloc(struct regstat *current,int i)
+// this may eat up to 7 registers
+static void store_alloc(struct regstat *current, int i)
{
clear_const(current,dops[i].rs2);
if(!(dops[i].rs2)) current->u&=~1LL; // Allow allocating r0 if necessary
if (dops[i].opcode == 0x2a || dops[i].opcode == 0x2e) { // SWL/SWR
alloc_reg(current,i,FTEMP);
}
- if (dops[i].may_except) {
- alloc_cc(current, i); // for exceptions
- dirty_reg(current, CCREG);
- }
+ if (dops[i].may_except)
+ alloc_cc_optional(current, i); // for exceptions
// We need a temporary register for address generation
alloc_reg_temp(current,i,-1);
cinfo[i].min_free_regs=1;
}
-static void c2ls_alloc(struct regstat *current,int i)
+static void c2ls_alloc(struct regstat *current, int i)
{
clear_const(current,dops[i].rt1);
if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
if (dops[i].opcode == 0x3a) // SWC2
alloc_reg(current,i,INVCP);
#endif
- if (dops[i].may_except) {
- alloc_cc(current, i); // for exceptions
- dirty_reg(current, CCREG);
- }
+ if (dops[i].may_except)
+ alloc_cc_optional(current, i); // for exceptions
// We need a temporary register for address generation
alloc_reg_temp(current,i,-1);
cinfo[i].min_free_regs=1;
// case 0x19: MULTU
// case 0x1A: DIV
// case 0x1B: DIVU
- // case 0x1C: DMULT
- // case 0x1D: DMULTU
- // case 0x1E: DDIV
- // case 0x1F: DDIVU
clear_const(current,dops[i].rs1);
clear_const(current,dops[i].rs2);
alloc_cc(current,i); // for stalls
+ dirty_reg(current,CCREG);
if(dops[i].rs1&&dops[i].rs2)
{
- if((dops[i].opcode2&4)==0) // 32-bit
- {
current->u&=~(1LL<<HIREG);
current->u&=~(1LL<<LOREG);
alloc_reg(current,i,HIREG);
alloc_reg(current,i,dops[i].rs2);
dirty_reg(current,HIREG);
dirty_reg(current,LOREG);
- }
- else // 64-bit
- {
- assert(0);
- }
}
else
{
// Multiply by zero is zero.
// MIPS does not have a divide by zero exception.
- // The result is undefined, we return zero.
alloc_reg(current,i,HIREG);
alloc_reg(current,i,LOREG);
dirty_reg(current,HIREG);
dirty_reg(current,LOREG);
+ if (dops[i].rs1 && ((dops[i].opcode2 & 0x3e) == 0x1a)) // div(u) 0
+ alloc_reg(current, i, dops[i].rs1);
}
}
#endif
}
else if(dops[i].opcode2==4) // MTC0
{
+ if (((source[i]>>11)&0x1e) == 12) {
+ alloc_cc(current, i);
+ dirty_reg(current, CCREG);
+ }
if(dops[i].rs1){
clear_const(current,dops[i].rs1);
alloc_reg(current,i,dops[i].rs1);
}
// Write out a single register
-static void wb_register(signed char r, const signed char regmap[], uint64_t dirty)
+static void wb_register(signed char r, const signed char regmap[], u_int dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
assert(regmap[hr]<64);
emit_storereg(r,hr);
}
+ break;
}
}
}
tmp = get_reg_temp(i_regs->regmap);
if (do_oflow)
assert(tmp >= 0);
- //if (t < 0 && do_oflow) // broken s2
- // t = tmp;
+ if (t < 0 && do_oflow)
+ t = tmp;
if (t >= 0) {
s1 = get_reg(i_regs->regmap, dops[i].rs1);
s2 = get_reg(i_regs->regmap, dops[i].rs2);
// alignment check
u_int op = dops[i].opcode;
int mask = ((op & 0x37) == 0x21 || op == 0x25) ? 1 : 3; // LH/SH/LHU
- void *jaddr;
+ void *jaddr2;
emit_testimm(addr, mask);
- jaddr = out;
+ jaddr2 = out;
emit_jne(0);
- add_stub_r(ALIGNMENT_STUB, jaddr, out, i, addr, i_regs, ccadj_, 0);
+ add_stub_r(ALIGNMENT_STUB, jaddr2, out, i, addr, i_regs, ccadj_, 0);
}
if(type==MTYPE_8020) { // RAM 80200000+ mirror
static void do_invstub(int n)
{
literal_pool(20);
- assem_debug("do_invstub\n");
+ assem_debug("do_invstub %x\n", start + stubs[n].e*4);
u_int reglist = stubs[n].a;
u_int addrr = stubs[n].b;
int ofs_start = stubs[n].c;
imm_min -= cinfo[i].imm;
imm_max -= cinfo[i].imm;
add_stub(INVCODE_STUB, jaddr, out, reglist|(1<<HOST_CCREG),
- addr, imm_min, imm_max, 0);
+ addr, imm_min, imm_max, i);
}
+// determines if code overwrite checking is needed only
+// (also true for the non-existent 0x20000000 mirror, which shouldn't matter)
+#define is_ram_addr(a) !((a) & 0x5f800000)
+
static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int memtarget=0,c=0;
int offset_reg = -1;
int fastio_reg_override = -1;
+ u_int addr_const = ~0;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
offset=cinfo[i].imm;
if(s>=0) {
c=(i_regs->wasconst>>s)&1;
- if(c) {
- memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+ if (c) {
+ addr_const = constmap[i][s] + offset;
+ memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
}
}
assert(tl>=0);
assert(addr >= 0);
if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
+ reglist |= 1u << addr;
if (!c) {
jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
&offset_reg, &fastio_reg_override, ccadj_);
}
if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
- if(jaddr) {
+ if (jaddr) {
// PCSX store handlers don't check invcode again
- reglist|=1<<addr;
- add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
- jaddr=0;
- }
- {
- if(!c||memtarget) {
- do_store_smc_check(i, i_regs, reglist, addr);
- }
- }
- u_int addr_val=constmap[i][s]+offset;
- if(jaddr) {
add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
- } else if(c&&!memtarget) {
- inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj_,reglist);
}
+ if (!c || is_ram_addr(addr_const))
+ do_store_smc_check(i, i_regs, reglist, addr);
+ if (c && !memtarget)
+ inline_writestub(type, i, addr_const, i_regs->regmap, dops[i].rs2, ccadj_, reglist);
// basic current block modification detection..
// not looking back as that should be in mips cache already
// (see Spyro2 title->attract mode)
- if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
- SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
+ if (start + i*4 < addr_const && addr_const < start + slen*4) {
+ SysPrintf("write to %08x hits block %08x, pc=%08x\n", addr_const, start, start+i*4);
assert(i_regs->regmap==regs[i].regmap); // not delay slot
if(i_regs->regmap==regs[i].regmap) {
load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
void *done0, *done1, *done2;
int memtarget=0,c=0;
int offset_reg = -1;
- u_int reglist=get_host_reglist(i_regs->regmap);
+ u_int addr_const = ~0;
+ u_int reglist = get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
offset=cinfo[i].imm;
if(s>=0) {
- c=(i_regs->isconst>>s)&1;
- if(c) {
- memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+ c = (i_regs->isconst >> s) & 1;
+ if (c) {
+ addr_const = constmap[i][s] + offset;
+ memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
}
}
assert(tl>=0);
assert(addr >= 0);
+ reglist |= 1u << addr;
if(!c) {
emit_cmpimm(addr, RAM_SIZE);
- if (!offset && s != addr) emit_mov(s, addr);
jaddr=out;
emit_jno(0);
}
if (ram_offset)
offset_reg = get_ro_reg(i_regs, 0);
- if (dops[i].opcode==0x2C||dops[i].opcode==0x2D) { // SDL/SDR
- assert(0);
- }
-
emit_testimm(addr,2);
case23=out;
emit_jne(0);
if (dops[i].opcode == 0x2A) { // SWL
// Write two msb into two least significant bytes
if (dops[i].rs2) emit_rorimm(tl, 16, tl);
- do_store_hword(addr, -1, tl, offset_reg, 0);
+ do_store_hword(addr, -1, tl, offset_reg, 1);
if (dops[i].rs2) emit_rorimm(tl, 16, tl);
}
else if (dops[i].opcode == 0x2E) { // SWR
// Write 3 lsb into three most significant bytes
do_store_byte(addr, tl, offset_reg);
if (dops[i].rs2) emit_rorimm(tl, 8, tl);
- do_store_hword(addr, 1, tl, offset_reg, 0);
+ do_store_hword(addr, 1, tl, offset_reg, 1);
if (dops[i].rs2) emit_rorimm(tl, 24, tl);
}
done1=out;
// 3
set_jump_target(case3, out);
if (dops[i].opcode == 0x2A) { // SWL
- do_store_word(addr, -3, tl, offset_reg, 0);
+ do_store_word(addr, -3, tl, offset_reg, 1);
}
else if (dops[i].opcode == 0x2E) { // SWR
do_store_byte(addr, tl, offset_reg);
set_jump_target(done2, out);
if (offset_reg == HOST_TEMPREG)
host_tempreg_release();
- if(!c||!memtarget)
+ if (!c || !memtarget)
add_stub_r(STORELR_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
- do_store_smc_check(i, i_regs, reglist, addr);
+ if (!c || is_ram_addr(addr_const))
+ do_store_smc_check(i, i_regs, reglist, addr);
}
static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
}
else if(dops[i].opcode2==4) // MTC0
{
- signed char s=get_reg(i_regs->regmap,dops[i].rs1);
+ int s = get_reg(i_regs->regmap, dops[i].rs1);
+ int cc = get_reg(i_regs->regmap, CCREG);
char copr=(source[i]>>11)&0x1f;
assert(s>=0);
wb_register(dops[i].rs1,i_regs->regmap,i_regs->dirty);
- if(copr==9||copr==11||copr==12||copr==13) {
+ if (copr == 12 || copr == 13) {
emit_readword(&last_count,HOST_TEMPREG);
- emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
- emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
- emit_writeword(HOST_CCREG,&psxRegs.cycle);
- }
- // What a mess. The status register (12) can enable interrupts,
- // so needs a special case to handle a pending interrupt.
- // The interrupt must be taken immediately, because a subsequent
- // instruction might disable interrupts again.
- if(copr==12||copr==13) {
+ if (cc != HOST_CCREG)
+ emit_loadreg(CCREG, HOST_CCREG);
+ emit_add(HOST_CCREG, HOST_TEMPREG, HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj_ + 2, HOST_CCREG);
+ emit_writeword(HOST_CCREG, &psxRegs.cycle);
if (is_delayslot) {
// burn cycles to cause cc_interrupt, which will
// reschedule next_interupt. Relies on CCREG from above.
emit_movimm(0,HOST_TEMPREG);
emit_writeword(HOST_TEMPREG,&pending_exception);
}
- if(s==HOST_CCREG)
- emit_loadreg(dops[i].rs1,1);
- else if(s!=1)
- emit_mov(s,1);
- emit_movimm(copr,0);
+ if( s != 1)
+ emit_mov(s, 1);
+ emit_movimm(copr, 0);
emit_far_call(pcsx_mtc0);
- if(copr==9||copr==11||copr==12||copr==13) {
+ if (copr == 12 || copr == 13) {
emit_readword(&psxRegs.cycle,HOST_CCREG);
- emit_readword(&next_interupt,HOST_TEMPREG);
- emit_addimm(HOST_CCREG,-ccadj_,HOST_CCREG);
+ emit_readword(&last_count,HOST_TEMPREG);
emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
- emit_writeword(HOST_TEMPREG,&last_count);
- emit_storereg(CCREG,HOST_CCREG);
- }
- if(copr==12||copr==13) {
+ //emit_writeword(HOST_TEMPREG,&last_count);
assert(!is_delayslot);
emit_readword(&pending_exception,HOST_TEMPREG);
emit_test(HOST_TEMPREG,HOST_TEMPREG);
void *jaddr = out;
emit_jeq(0);
emit_readword(&pcaddr, 0);
- emit_addimm(HOST_CCREG,2,HOST_CCREG);
emit_far_call(ndrc_get_addr_ht);
emit_jmpreg(0);
set_jump_target(jaddr, out);
+ emit_addimm(HOST_CCREG, -ccadj_ - 2, HOST_CCREG);
+ if (cc != HOST_CCREG)
+ emit_storereg(CCREG, HOST_CCREG);
}
emit_loadreg(dops[i].rs1,s);
}
enum stub_type type;
int offset_reg = -1;
int fastio_reg_override = -1;
+ u_int addr_const = ~0;
u_int reglist=get_host_reglist(i_regs->regmap);
u_int copr=(source[i]>>16)&0x1f;
s=get_reg(i_regs->regmap,dops[i].rs1);
tl=get_reg(i_regs->regmap,FTEMP);
offset=cinfo[i].imm;
- assert(dops[i].rs1>0);
assert(tl>=0);
if(i_regs->regmap[HOST_CCREG]==CCREG)
if (dops[i].opcode==0x3a) { // SWC2
reglist |= 1<<ar;
}
- if(s>=0) c=(i_regs->wasconst>>s)&1;
- memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
+ if (s >= 0) {
+ c = (i_regs->isconst >> s) & 1;
+ if (c) {
+ addr_const = constmap[i][s] + offset;
+ memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
+ }
+ }
cop2_do_stall_check(0, i, i_regs, reglist);
host_tempreg_release();
if(jaddr2)
add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
- if(dops[i].opcode==0x3a) // SWC2
+ if (dops[i].opcode == 0x3a && (!c || is_ram_addr(addr_const))) // SWC2
do_store_smc_check(i, i_regs, reglist, ar);
- if (dops[i].opcode==0x32) { // LWC2
+ if (dops[i].opcode == 0x32) { // LWC2
host_tempreg_acquire();
cop2_put_dreg(copr,tl,HOST_TEMPREG);
host_tempreg_release();
if(cc<0)
emit_loadreg(CCREG,2);
emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
+ emit_movimm(start + i*4,3);
+ emit_writeword(3,&psxRegs.pc);
emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
if(cc<0)
// fallthrough
case IMM16:
if(dops[i].rt1&&is_const(®s[i],dops[i].rt1)) {
- int value,hr=get_reg_w(regs[i].regmap, dops[i].rt1);
+ int hr = get_reg_w(regs[i].regmap, dops[i].rt1);
+ u_int value;
if(hr>=0) {
if(get_final_value(hr,i,&value))
smrv[dops[i].rt1]=value;
{
int offset = cinfo[i].imm;
int add_offset = offset != 0;
- int c=(i_regs->wasconst>>rs)&1;
+ int c = rs >= 0 && ((i_regs->wasconst >> rs) & 1);
if(dops[i].rs1==0) {
// Using r0 as a base address
assert(ra >= 0);
cinfo[i].addr = rs;
add_offset = 0;
}
- else if (dops[i].itype == STORELR) { // overwrites addr
- assert(ra >= 0);
- assert(rs != ra);
- emit_mov(rs, ra);
- cinfo[i].addr = ra;
- }
else
cinfo[i].addr = rs;
if (add_offset) {
}
}
-static int get_final_value(int hr, int i, int *value)
+static int get_final_value(int hr, int i, u_int *value)
{
int reg=regs[i].regmap[hr];
while(i<slen-1) {
if(i==0||dops[i].bt)
regs[i].loadedconst=0;
else {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&®map[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
- &®map[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ if (hr == EXCLUDE_REG || regmap[hr] < 0 || pre[hr] != regmap[hr])
+ continue;
+ if ((((regs[i-1].isconst & regs[i-1].loadedconst) >> hr) & 1)
+ && regmap[hr] == regs[i-1].regmap[hr])
{
- regs[i].loadedconst|=1<<hr;
+ regs[i].loadedconst |= 1u << hr;
}
}
}
if(!((regs[i].loadedconst>>hr)&1)) {
assert(regmap[hr]<64);
if(((regs[i].isconst>>hr)&1)&®map[hr]>0) {
- int value,similar=0;
+ u_int value, similar=0;
if(get_final_value(hr,i,&value)) {
// see if some other register has similar value
for(hr2=0;hr2<HOST_REGS;hr2++) {
}
}
if(similar) {
- int value2;
+ u_int value2;
if(get_final_value(hr2,i,&value2)) // is this needed?
emit_movimm_from(value2,hr2,value,hr);
else
}
// Write out all dirty registers (except cycle count)
-static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty)
+#ifndef wb_dirtys
+static void wb_dirtys(const signed char i_regmap[], u_int i_dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
}
}
}
+#endif
// Write out dirty registers that we need to reload (pair with load_needed_regs)
// This writes the registers not written by store_regs_bt
-static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr)
+static void wb_needed_dirtys(const signed char i_regmap[], u_int i_dirty, int addr)
{
int hr;
int t=(addr-start)>>2;
}
// Load all registers (except cycle count)
+#ifndef load_all_regs
static void load_all_regs(const signed char i_regmap[])
{
int hr;
}
}
}
+#endif
// Load all current registers also needed by next instruction
static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
{
+ signed char regmap_sel[HOST_REGS];
int hr;
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG) {
- if(get_reg(next_regmap,i_regmap[hr])>=0) {
- if(i_regmap[hr]==0) {
- emit_zeroreg(hr);
- }
- else
- if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
- {
- emit_loadreg(i_regmap[hr],hr);
- }
- }
- }
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ regmap_sel[hr] = -1;
+ if (hr != EXCLUDE_REG)
+ if (next_regmap[hr] == i_regmap[hr] || get_reg(next_regmap, i_regmap[hr]) >= 0)
+ regmap_sel[hr] = i_regmap[hr];
}
+ load_all_regs(regmap_sel);
}
// Load all regs, storing cycle count if necessary
static void load_regs_entry(int t)
{
- int hr;
if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
else if(cinfo[t].ccadj) emit_addimm(HOST_CCREG,-cinfo[t].ccadj,HOST_CCREG);
if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
emit_storereg(CCREG,HOST_CCREG);
}
- // Load 32-bit regs
- for(hr=0;hr<HOST_REGS;hr++) {
- if(regs[t].regmap_entry[hr]>=0&®s[t].regmap_entry[hr]<TEMPREG) {
- if(regs[t].regmap_entry[hr]==0) {
- emit_zeroreg(hr);
- }
- else if(regs[t].regmap_entry[hr]!=CCREG)
- {
- emit_loadreg(regs[t].regmap_entry[hr],hr);
- }
- }
- }
+ load_all_regs(regs[t].regmap_entry);
}
// Store dirty registers prior to branch
extern void do_insn_cmp();
//extern int cycle;
u_int hr, reglist = get_host_reglist(regs[i].regmap);
+ reglist |= get_host_reglist(regs[i].regmap_entry);
+ reglist &= DRC_DBG_REGMASK;
assem_debug("//do_insn_cmp %08x\n", start+i*4);
save_regs(reglist);
assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
set_jump_target(stubs[n].addr, out);
int i=stubs[n].b;
- if(stubs[n].d==NULLDS) {
- // Delay slot instruction is nullified ("likely" branch)
- wb_dirtys(regs[i].regmap,regs[i].dirty);
- }
- else if(stubs[n].d!=TAKEN) {
+ if (stubs[n].d != TAKEN) {
wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
}
else {
{
//emit_movimm(cinfo[i].ba,alt);
//emit_movimm(start+i*4+8,addr);
- emit_mov2imm_compact(cinfo[i].ba,
- (dops[i].opcode2 & 1) ? addr : alt, start + i*4 + 8,
- (dops[i].opcode2 & 1) ? alt : addr);
- emit_test(s1l,s1l);
- emit_cmovs_reg(alt,addr);
+ if (dops[i].rs1) {
+ emit_mov2imm_compact(cinfo[i].ba,
+ (dops[i].opcode2 & 1) ? addr : alt, start + i*4 + 8,
+ (dops[i].opcode2 & 1) ? alt : addr);
+ emit_test(s1l,s1l);
+ emit_cmovs_reg(alt,addr);
+ }
+ else
+ emit_movimm((dops[i].opcode2 & 1) ? cinfo[i].ba : start + i*4 + 8, addr);
}
emit_writeword(addr, &pcaddr);
}
}else if(stubs[n].d==NOTTAKEN) {
if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
else load_all_regs(branch_regs[i].regmap);
- }else if(stubs[n].d==NULLDS) {
- // Delay slot instruction is nullified ("likely" branch)
- if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
- else load_all_regs(regs[i].regmap);
}else{
load_all_regs(branch_regs[i].regmap);
}
int rt;
unsigned int return_address;
rt=get_reg(branch_regs[i].regmap,31);
- assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+ //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
//assert(rt>=0);
return_address=start+i*4+8;
if(rt>=0) {
if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
}
#endif
- emit_movimm(return_address,rt); // PC into link register
+ if (!((regs[i].loadedconst >> rt) & 1))
+ emit_movimm(return_address, rt); // PC into link register
#ifdef IMM_PREFETCH
emit_prefetch(hash_table_get(return_address));
#endif
static void ujump_assemble(int i, const struct regstat *i_regs)
{
- int ra_done=0;
if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
address_generation(i+1,i_regs,regs[i].regmap_entry);
#ifdef REG_PREFETCH
if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
}
#endif
- if(dops[i].rt1==31&&(dops[i].rt1==dops[i+1].rs1||dops[i].rt1==dops[i+1].rs2)) {
+ if (dops[i].rt1 == 31)
ujump_assemble_write_ra(i); // writeback ra for DS
- ra_done=1;
- }
ds_assemble(i+1,i_regs);
uint64_t bc_unneeded=branch_regs[i].u;
bc_unneeded|=1|(1LL<<dops[i].rt1);
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
- if(!ra_done&&dops[i].rt1==31)
- ujump_assemble_write_ra(i);
int cc,adj;
cc=get_reg(branch_regs[i].regmap,CCREG);
assert(cc==HOST_CCREG);
static void rjump_assemble_write_ra(int i)
{
int rt,return_address;
- assert(dops[i+1].rt1!=dops[i].rt1);
- assert(dops[i+1].rt2!=dops[i].rt1);
rt=get_reg_w(branch_regs[i].regmap, dops[i].rt1);
- assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+ //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
assert(rt>=0);
return_address=start+i*4+8;
#ifdef REG_PREFETCH
if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
}
#endif
- emit_movimm(return_address,rt); // PC into link register
+ if (!((regs[i].loadedconst >> rt) & 1))
+ emit_movimm(return_address, rt); // PC into link register
#ifdef IMM_PREFETCH
emit_prefetch(hash_table_get(return_address));
#endif
{
int temp;
int rs,cc;
- int ra_done=0;
rs=get_reg(branch_regs[i].regmap,dops[i].rs1);
assert(rs>=0);
if (ds_writes_rjump_rs(i)) {
if(rh>=0) do_preload_rhash(rh);
}
#endif
- if(dops[i].rt1!=0&&(dops[i].rt1==dops[i+1].rs1||dops[i].rt1==dops[i+1].rs2)) {
+ if (dops[i].rt1 != 0)
rjump_assemble_write_ra(i);
- ra_done=1;
- }
ds_assemble(i+1,i_regs);
uint64_t bc_unneeded=branch_regs[i].u;
bc_unneeded|=1|(1LL<<dops[i].rt1);
bc_unneeded&=~(1LL<<dops[i].rs1);
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,CCREG);
- if(!ra_done&&dops[i].rt1!=0)
- rjump_assemble_write_ra(i);
cc=get_reg(branch_regs[i].regmap,CCREG);
assert(cc==HOST_CCREG);
(void)cc;
if(dops[i].rt1==31) {
int rt,return_address;
rt=get_reg(branch_regs[i].regmap,31);
- assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+ //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
if(rt>=0) {
// Save the PC even if the branch is not taken
return_address=start+i*4+8;
// In-order execution (branch first)
//printf("IOE\n");
void *nottaken = NULL;
- if (!unconditional) {
+ if (!unconditional && !nevertaken) {
assert(s1l >= 0);
emit_test(s1l, s1l);
}
#endif
}
}
- if (!unconditional) {
+ if (!unconditional && !nevertaken) {
nottaken = out;
if (!(dops[i].opcode2 & 1)) // BLTZ/BLTZAL
emit_jns(DJT_1);
}
// branch not taken
if(!unconditional) {
- set_jump_target(nottaken, out);
+ if (!nevertaken) {
+ assert(nottaken);
+ set_jump_target(nottaken, out);
+ }
assem_debug("1:\n");
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
#ifndef REGMAP_PRINT
return;
#endif
- printf("D: %"PRIx64" WD: %"PRIx64" U: %"PRIx64" hC: %x hWC: %x hLC: %x\n",
+ printf("D: %x WD: %x U: %"PRIx64" hC: %x hWC: %x hLC: %x\n",
regs[i].dirty, regs[i].wasdirty, unneeded_reg[i],
regs[i].isconst, regs[i].wasconst, regs[i].loadedconst);
print_regmap("pre: ", regmap_pre[i]);
#endif
arch_init();
new_dynarec_test();
- ram_offset=(uintptr_t)rdram-0x80000000;
+ ram_offset = (uintptr_t)psxM - 0x80000000;
if (ram_offset!=0)
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
SysPrintf("Mapped (RAM/scrp/ROM/LUTs/TC):\n");
static u_int *get_source_start(u_int addr, u_int *limit)
{
- if (addr < 0x00200000 ||
- (0xa0000000 <= addr && addr < 0xa0200000))
+ if (addr < 0x00800000
+ || (0x80000000 <= addr && addr < 0x80800000)
+ || (0xa0000000 <= addr && addr < 0xa0800000))
{
// used for BIOS calls mostly?
- *limit = (addr&0xa0000000)|0x00200000;
- return (u_int *)(rdram + (addr&0x1fffff));
+ *limit = (addr & 0xa0600000) + 0x00200000;
+ return (u_int *)(psxM + (addr & 0x1fffff));
}
else if (!Config.HLE && (
/* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
*limit = (addr & 0xfff00000) | 0x80000;
return (u_int *)((u_char *)psxR + (addr&0x7ffff));
}
- else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
- *limit = (addr & 0x80600000) + 0x00200000;
- return (u_int *)(rdram + (addr&0x1fffff));
- }
return NULL;
}
return 0;
}
-static noinline void pass1_disassemble(u_int pagelimit)
+// Load delay hazard check: returns nonzero when insn 'op' reads the GPR
+// 'ld_rt' that the immediately preceding (delay-slot) load writes, i.e.
+// the program depends on the pre-load value being visible one insn later.
+// LWL/LWR and compare/branch itypes are excluded - presumably those
+// tolerate the delayed value here; TODO confirm against the interpreter.
+static int is_ld_use_hazard(int ld_rt, const struct decoded_insn *op)
{
- int i, j, done = 0, ni_count = 0;
- unsigned int type,op,op2,op3;
+ return ld_rt != 0 && (ld_rt == op->rs1 || ld_rt == op->rs2)
+ && op->itype != LOADLR && op->itype != CJUMP && op->itype != SJUMP;
+}
- for (i = 0; !done; i++)
- {
- int force_j_to_interpreter = 0;
+// Replace decoded insn i with a forced interpreter fallback: wipe its
+// decode state and mark it as an unconditional INTCALL exception op
+// (reads CCREG, no valid branch target).
+static void force_intcall(int i)
+{
+ struct decoded_insn *d = &dops[i];
+ memset(d, 0, sizeof(*d));
+ d->itype = INTCALL;
+ d->rs1 = CCREG;
+ d->is_exception = 1;
+ cinfo[i].ba = -1;
+}
+
+static void disassemble_one(int i, u_int src)
+{
+ unsigned int type, op, op2, op3;
+ enum ls_width_type ls_type = LS_32;
memset(&dops[i], 0, sizeof(dops[i]));
memset(&cinfo[i], 0, sizeof(cinfo[i]));
cinfo[i].ba = -1;
cinfo[i].addr = -1;
- dops[i].opcode = op = source[i] >> 26;
+ dops[i].opcode = op = src >> 26;
op2 = 0;
type = INTCALL;
set_mnemonic(i, "???");
switch(op)
{
case 0x00: set_mnemonic(i, "special");
- op2=source[i]&0x3f;
+ op2 = src & 0x3f;
switch(op2)
{
case 0x00: set_mnemonic(i, "SLL"); type=SHIFTIMM; break;
case 0x09: set_mnemonic(i, "JALR"); type=RJUMP; break;
case 0x0C: set_mnemonic(i, "SYSCALL"); type=SYSCALL; break;
case 0x0D: set_mnemonic(i, "BREAK"); type=SYSCALL; break;
- case 0x0F: set_mnemonic(i, "SYNC"); type=OTHER; break;
case 0x10: set_mnemonic(i, "MFHI"); type=MOV; break;
case 0x11: set_mnemonic(i, "MTHI"); type=MOV; break;
case 0x12: set_mnemonic(i, "MFLO"); type=MOV; break;
break;
case 0x01: set_mnemonic(i, "regimm");
type = SJUMP;
- op2 = (source[i] >> 16) & 0x1f;
+ op2 = (src >> 16) & 0x1f;
switch(op2)
{
case 0x10: set_mnemonic(i, "BLTZAL"); break;
case 0x0E: set_mnemonic(i, "XORI"); type=IMM16; break;
case 0x0F: set_mnemonic(i, "LUI"); type=IMM16; break;
case 0x10: set_mnemonic(i, "COP0");
- op2 = (source[i]>>21) & 0x1f;
+ op2 = (src >> 21) & 0x1f;
if (op2 & 0x10) {
- op3 = source[i] & 0x1f;
+ op3 = src & 0x1f;
switch (op3)
{
case 0x01: case 0x02: case 0x06: case 0x08: type = INTCALL; break;
u32 rd;
case 0x00:
set_mnemonic(i, "MFC0");
- rd = (source[i] >> 11) & 0x1F;
+ rd = (src >> 11) & 0x1F;
if (!(0x00000417u & (1u << rd)))
type = COP0;
break;
}
break;
case 0x11: set_mnemonic(i, "COP1");
- op2=(source[i]>>21)&0x1f;
+ op2 = (src >> 21) & 0x1f;
break;
case 0x12: set_mnemonic(i, "COP2");
- op2=(source[i]>>21)&0x1f;
+ op2 = (src >> 21) & 0x1f;
if (op2 & 0x10) {
type = OTHER;
- if (gte_handlers[source[i]&0x3f]!=NULL) {
+ if (gte_handlers[src & 0x3f] != NULL) {
#ifdef DISASM
- if (gte_regnames[source[i]&0x3f]!=NULL)
- strcpy(insn[i],gte_regnames[source[i]&0x3f]);
+ if (gte_regnames[src & 0x3f] != NULL)
+ strcpy(insn[i], gte_regnames[src & 0x3f]);
else
- snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
+ snprintf(insn[i], sizeof(insn[i]), "COP2 %x", src & 0x3f);
#endif
type = C2OP;
}
}
break;
case 0x13: set_mnemonic(i, "COP3");
- op2=(source[i]>>21)&0x1f;
+ op2 = (src >> 21) & 0x1f;
break;
- case 0x20: set_mnemonic(i, "LB"); type=LOAD; break;
- case 0x21: set_mnemonic(i, "LH"); type=LOAD; break;
- case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; break;
- case 0x23: set_mnemonic(i, "LW"); type=LOAD; break;
- case 0x24: set_mnemonic(i, "LBU"); type=LOAD; break;
- case 0x25: set_mnemonic(i, "LHU"); type=LOAD; break;
- case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; break;
- case 0x28: set_mnemonic(i, "SB"); type=STORE; break;
- case 0x29: set_mnemonic(i, "SH"); type=STORE; break;
- case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; break;
- case 0x2B: set_mnemonic(i, "SW"); type=STORE; break;
- case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; break;
- case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; break;
- case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; break;
+ case 0x20: set_mnemonic(i, "LB"); type=LOAD; ls_type = LS_8; break;
+ case 0x21: set_mnemonic(i, "LH"); type=LOAD; ls_type = LS_16; break;
+ case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; ls_type = LS_LR; break;
+ case 0x23: set_mnemonic(i, "LW"); type=LOAD; ls_type = LS_32; break;
+ case 0x24: set_mnemonic(i, "LBU"); type=LOAD; ls_type = LS_8; break;
+ case 0x25: set_mnemonic(i, "LHU"); type=LOAD; ls_type = LS_16; break;
+ case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; ls_type = LS_LR; break;
+ case 0x28: set_mnemonic(i, "SB"); type=STORE; ls_type = LS_8; break;
+ case 0x29: set_mnemonic(i, "SH"); type=STORE; ls_type = LS_16; break;
+ case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; ls_type = LS_LR; break;
+ case 0x2B: set_mnemonic(i, "SW"); type=STORE; ls_type = LS_32; break;
+ case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; ls_type = LS_LR; break;
+ case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; ls_type = LS_32; break;
+ case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; ls_type = LS_32; break;
case 0x3B:
- if (Config.HLE && (source[i] & 0x03ffffff) < ARRAY_SIZE(psxHLEt)) {
+ if (Config.HLE && (src & 0x03ffffff) < ARRAY_SIZE(psxHLEt)) {
set_mnemonic(i, "HLECALL");
type = HLECALL;
}
break;
}
if (type == INTCALL)
- SysPrintf("NI %08x @%08x (%08x)\n", source[i], start + i*4, start);
- dops[i].itype=type;
- dops[i].opcode2=op2;
+ SysPrintf("NI %08x @%08x (%08x)\n", src, start + i*4, start);
+ dops[i].itype = type;
+ dops[i].opcode2 = op2;
+ dops[i].ls_type = ls_type;
/* Get registers/immediates */
dops[i].use_lt1=0;
gte_rs[i]=gte_rt[i]=0;
dops[i].rt2 = 0;
switch(type) {
case LOAD:
- dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rt1=(source[i]>>16)&0x1f;
- cinfo[i].imm=(short)source[i];
+ dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rt1 = (src >> 16) & 0x1f;
+ cinfo[i].imm = (short)src;
break;
case STORE:
case STORELR:
- dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rs2=(source[i]>>16)&0x1f;
- cinfo[i].imm=(short)source[i];
+ dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rs2 = (src >> 16) & 0x1f;
+ cinfo[i].imm = (short)src;
break;
case LOADLR:
// LWL/LWR only load part of the register,
// therefore the target register must be treated as a source too
- dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rs2=(source[i]>>16)&0x1f;
- dops[i].rt1=(source[i]>>16)&0x1f;
- cinfo[i].imm=(short)source[i];
+ dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rs2 = (src >> 16) & 0x1f;
+ dops[i].rt1 = (src >> 16) & 0x1f;
+ cinfo[i].imm = (short)src;
break;
case IMM16:
if (op==0x0f) dops[i].rs1=0; // LUI instruction has no source register
- else dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rs2=0;
- dops[i].rt1=(source[i]>>16)&0x1f;
+ else dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rs2 = 0;
+ dops[i].rt1 = (src >> 16) & 0x1f;
if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
- cinfo[i].imm=(unsigned short)source[i];
+ cinfo[i].imm = (unsigned short)src;
}else{
- cinfo[i].imm=(short)source[i];
+ cinfo[i].imm = (short)src;
}
break;
case UJUMP:
dops[i].rs2=CCREG;
break;
case RJUMP:
- dops[i].rs1=(source[i]>>21)&0x1f;
+ dops[i].rs1 = (src >> 21) & 0x1f;
// The JALR instruction writes to rd.
if (op2&1) {
- dops[i].rt1=(source[i]>>11)&0x1f;
+ dops[i].rt1 = (src >> 11) & 0x1f;
}
dops[i].rs2=CCREG;
break;
case CJUMP:
- dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rs2=(source[i]>>16)&0x1f;
+ dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rs2 = (src >> 16) & 0x1f;
if(op&2) { // BGTZ/BLEZ
dops[i].rs2=0;
}
break;
case SJUMP:
- dops[i].rs1=(source[i]>>21)&0x1f;
- dops[i].rs2=CCREG;
+ dops[i].rs1 = (src >> 21) & 0x1f;
+ dops[i].rs2 = CCREG;
if (op2 == 0x10 || op2 == 0x11) { // BxxAL
dops[i].rt1 = 31;
// NOTE: If the branch is not taken, r31 is still overwritten
}
break;
case ALU:
- dops[i].rs1=(source[i]>>21)&0x1f; // source
- dops[i].rs2=(source[i]>>16)&0x1f; // subtract amount
- dops[i].rt1=(source[i]>>11)&0x1f; // destination
+ dops[i].rs1=(src>>21)&0x1f; // source
+ dops[i].rs2=(src>>16)&0x1f; // subtract amount
+ dops[i].rt1=(src>>11)&0x1f; // destination
break;
case MULTDIV:
- dops[i].rs1=(source[i]>>21)&0x1f; // source
- dops[i].rs2=(source[i]>>16)&0x1f; // divisor
+ dops[i].rs1=(src>>21)&0x1f; // source
+ dops[i].rs2=(src>>16)&0x1f; // divisor
dops[i].rt1=HIREG;
dops[i].rt2=LOREG;
break;
if(op2==0x11) dops[i].rt1=HIREG; // MTHI
if(op2==0x12) dops[i].rs1=LOREG; // MFLO
if(op2==0x13) dops[i].rt1=LOREG; // MTLO
- if((op2&0x1d)==0x10) dops[i].rt1=(source[i]>>11)&0x1f; // MFxx
- if((op2&0x1d)==0x11) dops[i].rs1=(source[i]>>21)&0x1f; // MTxx
+ if((op2&0x1d)==0x10) dops[i].rt1=(src>>11)&0x1f; // MFxx
+ if((op2&0x1d)==0x11) dops[i].rs1=(src>>21)&0x1f; // MTxx
break;
case SHIFT:
- dops[i].rs1=(source[i]>>16)&0x1f; // target of shift
- dops[i].rs2=(source[i]>>21)&0x1f; // shift amount
- dops[i].rt1=(source[i]>>11)&0x1f; // destination
+ dops[i].rs1=(src>>16)&0x1f; // target of shift
+ dops[i].rs2=(src>>21)&0x1f; // shift amount
+ dops[i].rt1=(src>>11)&0x1f; // destination
break;
case SHIFTIMM:
- dops[i].rs1=(source[i]>>16)&0x1f;
+ dops[i].rs1=(src>>16)&0x1f;
dops[i].rs2=0;
- dops[i].rt1=(source[i]>>11)&0x1f;
- cinfo[i].imm=(source[i]>>6)&0x1f;
+ dops[i].rt1=(src>>11)&0x1f;
+ cinfo[i].imm=(src>>6)&0x1f;
break;
case COP0:
- if(op2==0) dops[i].rt1=(source[i]>>16)&0x1F; // MFC0
- if(op2==4) dops[i].rs1=(source[i]>>16)&0x1F; // MTC0
- if(op2==4&&((source[i]>>11)&0x1f)==12) dops[i].rt2=CSREG; // Status
+ if(op2==0) dops[i].rt1=(src>>16)&0x1F; // MFC0
+ if(op2==4) dops[i].rs1=(src>>16)&0x1F; // MTC0
+ if(op2==4&&((src>>11)&0x1e)==12) dops[i].rs2=CCREG;
break;
case COP2:
- if(op2<3) dops[i].rt1=(source[i]>>16)&0x1F; // MFC2/CFC2
- if(op2>3) dops[i].rs1=(source[i]>>16)&0x1F; // MTC2/CTC2
- dops[i].rs2=CSREG;
- int gr=(source[i]>>11)&0x1F;
+ if(op2<3) dops[i].rt1=(src>>16)&0x1F; // MFC2/CFC2
+ if(op2>3) dops[i].rs1=(src>>16)&0x1F; // MTC2/CTC2
+ int gr=(src>>11)&0x1F;
switch(op2)
{
case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
}
break;
case C2LS:
- dops[i].rs1=(source[i]>>21)&0x1F;
- cinfo[i].imm=(short)source[i];
- if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
- else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
+ dops[i].rs1=(src>>21)&0x1F;
+ cinfo[i].imm=(short)src;
+ if(op==0x32) gte_rt[i]=1ll<<((src>>16)&0x1F); // LWC2
+ else gte_rs[i]=1ll<<((src>>16)&0x1F); // SWC2
break;
case C2OP:
- gte_rs[i]=gte_reg_reads[source[i]&0x3f];
- gte_rt[i]=gte_reg_writes[source[i]&0x3f];
+ gte_rs[i]=gte_reg_reads[src&0x3f];
+ gte_rt[i]=gte_reg_writes[src&0x3f];
gte_rt[i]|=1ll<<63; // every op changes flags
- if((source[i]&0x3f)==GTE_MVMVA) {
- int v = (source[i] >> 15) & 3;
+ if((src&0x3f)==GTE_MVMVA) {
+ int v = (src >> 15) & 3;
gte_rs[i]&=~0xe3fll;
if(v==3) gte_rs[i]|=0xe00ll;
else gte_rs[i]|=3ll<<(v*2);
default:
break;
}
+}
+
+static noinline void pass1_disassemble(u_int pagelimit)
+{
+ int i, j, done = 0, ni_count = 0;
+ int ds_next = 0;
+
+ for (i = 0; !done; i++)
+ {
+ int force_j_to_interpreter = 0;
+ unsigned int type, op, op2;
+
+ disassemble_one(i, source[i]);
+ dops[i].is_ds = ds_next; ds_next = 0;
+ type = dops[i].itype;
+ op = dops[i].opcode;
+ op2 = dops[i].opcode2;
+
/* Calculate branch target addresses */
if(type==UJUMP)
cinfo[i].ba=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
dops[i].is_store = type == STORE || type == STORELR || op == 0x3a; // SWC2
dops[i].is_exception = type == SYSCALL || type == HLECALL || type == INTCALL;
dops[i].may_except = dops[i].is_exception || (type == ALU && (op2 == 0x20 || op2 == 0x22)) || op == 8;
+ ds_next = dops[i].is_jump;
if (((op & 0x37) == 0x21 || op == 0x25) // LH/SH/LHU
&& ((cinfo[i].imm & 1) || Config.PreciseExceptions))
SysPrintf("branch in DS @%08x (%08x)\n", start + i*4, start);
force_j_to_interpreter = 1;
}
- // basic load delay detection through a branch
+ // load delay detection through a branch
else if (dops[i].is_delay_load && dops[i].rt1 != 0) {
- int t=(cinfo[i-1].ba-start)/4;
- if(0 <= t && t < i &&(dops[i].rt1==dops[t].rs1||dops[i].rt1==dops[t].rs2)&&dops[t].itype!=CJUMP&&dops[t].itype!=SJUMP) {
+ const struct decoded_insn *dop = NULL;
+ int t = -1;
+ if (cinfo[i-1].ba != -1) {
+ t = (cinfo[i-1].ba - start) / 4;
+ if (t < 0 || t > i) {
+ u_int limit = 0;
+ u_int *mem = get_source_start(cinfo[i-1].ba, &limit);
+ if (mem != NULL) {
+ disassemble_one(MAXBLOCK - 1, mem[0]);
+ dop = &dops[MAXBLOCK - 1];
+ }
+ }
+ else
+ dop = &dops[t];
+ }
+ if ((dop && is_ld_use_hazard(dops[i].rt1, dop))
+ || (!dop && Config.PreciseExceptions)) {
// jump target wants DS result - potential load delay effect
SysPrintf("load delay in DS @%08x (%08x)\n", start + i*4, start);
force_j_to_interpreter = 1;
- dops[t+1].bt=1; // expected return from interpreter
+ if (0 <= t && t < i)
+ dops[t + 1].bt = 1; // expected return from interpreter
}
else if(i>=2&&dops[i-2].rt1==2&&dops[i].rt1==2&&dops[i].rs1!=2&&dops[i].rs2!=2&&dops[i-1].rs1!=2&&dops[i-1].rs2!=2&&
!(i>=3&&dops[i-3].is_jump)) {
}
}
}
- else if (i > 0 && dops[i-1].is_delay_load && dops[i-1].rt1 != 0
- && (dops[i].rs1 == dops[i-1].rt1 || dops[i].rs2 == dops[i-1].rt1)) {
+ else if (i > 0 && dops[i-1].is_delay_load
+ && is_ld_use_hazard(dops[i-1].rt1, &dops[i])
+ && (i < 2 || !dops[i-2].is_ujump)) {
SysPrintf("load delay @%08x (%08x)\n", start + i*4, start);
for (j = i - 1; j > 0 && dops[j-1].is_delay_load; j--)
if (dops[j-1].rt1 != dops[i-1].rt1)
force_j_to_interpreter = 1;
}
if (force_j_to_interpreter) {
- memset(&dops[j], 0, sizeof(dops[j]));
- dops[j].itype = INTCALL;
- dops[j].rs1 = CCREG;
- cinfo[j].ba = -1;
+ force_intcall(j);
done = 2;
i = j; // don't compile the problematic branch/load/etc
}
+ if (dops[i].is_exception && i > 0 && dops[i-1].is_jump) {
+ SysPrintf("exception in DS @%08x (%08x)\n", start + i*4, start);
+ i--;
+ force_intcall(i);
+ done = 2;
+ }
+ if (i >= 2 && (source[i-2] & 0xffe0f800) == 0x40806000) // MTC0 $12
+ dops[i].bt = 1;
+ if (i >= 1 && (source[i-1] & 0xffe0f800) == 0x40806800) // MTC0 $13
+ dops[i].bt = 1;
/* Is this the end of the block? */
if (i > 0 && dops[i-1].is_ujump) {
// Don't recompile stuff that's already compiled
if(check_addr(start+i*4+4)) done=1;
// Don't get too close to the limit
- if(i>MAXBLOCK/2) done=1;
+ if (i > MAXBLOCK - 64)
+ done = 1;
}
if (dops[i].itype == HLECALL)
stop = 1;
//assert(i<MAXBLOCK-1);
if(start+i*4==pagelimit-4) done=1;
assert(start+i*4<pagelimit);
- if (i==MAXBLOCK-1) done=1;
+ if (i == MAXBLOCK - 2)
+ done = 1;
// Stop if we're compiling junk
if (dops[i].itype == INTCALL && (++ni_count > 8 || dops[i].opcode == 0x11)) {
done=stop_after_jal=1;
}
}
}
- else if(dops[i].may_except)
- {
- // SYSCALL instruction, etc or conditional exception
- u=1;
- }
- else if (dops[i].itype == RFE)
- {
- u=1;
- }
//u=1; // DEBUG
// Written registers are unneeded
u|=1LL<<dops[i].rt1;
gte_u&=~gte_rs[i];
if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
gte_u|=gte_rs[i]>e_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
+ if (dops[i].may_except || dops[i].itype == RFE)
+ {
+ // SYSCALL instruction, etc or conditional exception
+ u=1;
+ }
// Source-target dependencies
// R0 is always unneeded
u|=1;
}
}
+// Pass 2a: remove redundant alignment checks.
+// If insn i is a load/store that may raise an address-error exception,
+// then any following load/store of the same width through the same
+// (unmodified) base register with an equally-aligned offset is just as
+// aligned, so its may_except flag can be cleared. The scan stops at a
+// branch target / jump or when the base register gets overwritten.
+static noinline void pass2a_unneeded_other(void)
+{
+ int i, k;
+ for (i = 0; i < slen; i++)
+ {
+ if (!dops[i].may_except || !(dops[i].is_load || dops[i].is_store))
+ continue;
+ // skip delay slots and loads that clobber their own base reg
+ if (dops[i].rt1 == dops[i].rs1 || dops[i].is_ds)
+ continue;
+ {
+ const int base = dops[i].rs1;
+ const int width = dops[i].ls_type;
+ const int mask = (width == LS_32) ? 3 : 1;
+ const int align = cinfo[i].imm & mask;
+ for (k = i + 1; k < slen; k++) {
+ if (dops[k].bt || dops[k].is_jump)
+ break; // control flow merge/transfer ends the window
+ if ((dops[k].is_load || dops[k].is_store) && dops[k].rs1 == base
+ && dops[k].ls_type == width && (cinfo[k].imm & mask) == align)
+ dops[k].may_except = 0;
+ if (dops[k].rt1 == base)
+ break; // base register overwritten
+ }
+ }
+ }
+}
+
static noinline void pass3_register_alloc(u_int addr)
{
struct regstat current; // Current register allocations/status
current.wasconst = 0;
current.isconst = 0;
current.loadedconst = 0;
+ current.noevict = 0;
//current.waswritten = 0;
int ds=0;
int cc=0;
dops[1].bt=1;
ds=1;
unneeded_reg[0]=1;
- current.regmap[HOST_BTREG]=BTREG;
}
for(i=0;i<slen;i++)
abort();
}
}
- dops[i].is_ds=ds;
+ assert(dops[i].is_ds == ds);
if(ds) {
ds=0; // Skip delay slot, already allocated as part of branch
// ...but we need to alloc it in case something jumps here
}
}
else { // Not delay slot
+ current.noevict = 0;
switch(dops[i].itype) {
case UJUMP:
//current.isconst=0; // DEBUG
if (dops[i].rt1!=0) {
alloc_reg(¤t,i,dops[i].rt1);
dirty_reg(¤t,dops[i].rt1);
- assert(dops[i+1].rs1!=dops[i].rt1&&dops[i+1].rs2!=dops[i].rt1);
- assert(dops[i+1].rt1!=dops[i].rt1);
#ifdef REG_PREFETCH
alloc_reg(¤t,i,PTEMP);
#endif
memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap));
memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
- else
- // Alloc the delay slot in case the branch is taken
- if((dops[i-1].opcode&0x3E)==0x14) // BEQL/BNEL
- {
- memcpy(&branch_regs[i-1],¤t,sizeof(current));
- branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2)|(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2)))|1;
- alloc_cc(&branch_regs[i-1],i);
- dirty_reg(&branch_regs[i-1],CCREG);
- delayslot_alloc(&branch_regs[i-1],i);
- branch_regs[i-1].isconst=0;
- alloc_reg(¤t,i,CCREG); // Not taken path
- dirty_reg(¤t,CCREG);
- memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- }
- else
- if((dops[i-1].opcode&0x3E)==0x16) // BLEZL/BGTZL
- {
- memcpy(&branch_regs[i-1],¤t,sizeof(current));
- branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2)|(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2)))|1;
- alloc_cc(&branch_regs[i-1],i);
- dirty_reg(&branch_regs[i-1],CCREG);
- delayslot_alloc(&branch_regs[i-1],i);
- branch_regs[i-1].isconst=0;
- alloc_reg(¤t,i,CCREG); // Not taken path
- dirty_reg(¤t,CCREG);
- memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
- }
break;
case SJUMP:
{
memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap));
memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
}
- // FIXME: BLTZAL/BGEZAL
- if ((dops[i-1].opcode2 & 0x1e) == 0x10) { // BxxZAL
- alloc_reg(&branch_regs[i-1],i-1,31);
- dirty_reg(&branch_regs[i-1],31);
- }
break;
}
}
}
}
- if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
//regs[i].waswritten=current.waswritten;
}
}
}
}
// Cycle count is needed at branches. Assume it is needed at the target too.
- if(i==0||dops[i].bt||dops[i].itype==CJUMP) {
+ if (i == 0 || dops[i].bt || dops[i].may_except || dops[i].itype == CJUMP) {
if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
}
regs[i+2].wasdirty&=~(1<<hr);
}
assert(hr>=0);
+ #if 0 // what is this for? double allocs $0 in ps1_rom.bin
if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
{
regs[i].regmap[hr]=dops[i+1].rs1;
regs[i+1].wasdirty&=~(1<<hr);
regs[i].dirty&=~(1<<hr);
}
+ #endif
}
}
if (dops[i+1].itype == LOADLR || dops[i+1].opcode == 0x32) { // LWC2
pass2_unneeded_regs(0,slen-1,0);
+ pass2a_unneeded_other();
+
/* Pass 3 - Register allocation */
pass3_register_alloc(addr);
/* Pass 6 - Optimize clean/dirty state */
pass6_clean_registers(0, slen-1, 1);
- /* Pass 7 - Identify 32-bit registers */
+ /* Pass 7 */
for (i=slen-1;i>=0;i--)
{
if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)