struct regstat
{
- signed char regmap_entry[HOST_REGS];
+ signed char regmap_entry[HOST_REGS]; // pre-insn + loop preloaded regs?
signed char regmap[HOST_REGS]; // guest reg currently held by each host reg, -1 = free
uint64_t wasdirty; // dirty bits on entry to this insn (per host reg)
uint64_t dirty; // host regs holding values not yet written back
u_char is_ds:1; // NOTE(review): presumably "is delay slot" (used as dops[j].is_ds) — confirm
u_char is_jump:1;
u_char is_ujump:1;
+ u_char is_load:1; // insn is a memory load (used for ROREG preload decisions)
+ u_char is_store:1; // insn is a memory store (also gates INVCP alloc on HOST_IMM8)
} dops[MAXBLOCK]; // NOTE(review): diff chunk appears to fuse regstat fields with the dops insn array — verify against full file
// used by asm:
static u_int ba[MAXBLOCK];
static uint64_t unneeded_reg[MAXBLOCK];
static uint64_t branch_unneeded_reg[MAXBLOCK];
- static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
+ // pre-instruction [i], excluding loop-preload regs?
+ static signed char regmap_pre[MAXBLOCK][HOST_REGS];
// contains 'real' consts at [i] insn, but may differ from what's actually
// loaded in host reg as 'final' value is always loaded, see get_final_value()
static uint32_t current_constmap[HOST_REGS];
static int expirep;
static u_int stop_after_jal;
static u_int f1_hack; // 0 - off, ~0 - capture address, else addr
-#ifndef RAM_FIXED
- static uintptr_t ram_offset;
-#else
- static const uintptr_t ram_offset=0;
-#endif
int new_dynarec_hacks;
int new_dynarec_hacks_pergame;
extern int pcaddr;
extern int pending_exception;
extern int branch_target;
+ extern uintptr_t ram_offset;
extern uintptr_t mini_ht[32][2];
extern u_char restore_candidate[512];
#define CCREG 36 // Cycle count
#define INVCP 37 // Pointer to invalid_code
//#define MMREG 38 // Pointer to memory_map
-//#define ROREG 39 // ram offset (if rdram!=0x80000000)
+#define ROREG 39 // ram offset (if rdram!=0x80000000)
#define TEMPREG 40
#define FTEMP 40 // FPU temporary register
#define PTEMP 41 // Prefetch temporary register
void new_dyna_leave();
// Needed by assembler
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
-static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
-static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
-static void load_all_regs(signed char i_regmap[]);
-static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
+static void wb_register(signed char r, const signed char regmap[], uint64_t dirty);
+static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty);
+static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr);
+static void load_all_regs(const signed char i_regmap[]);
+static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
static void load_regs_entry(int t);
-static void load_all_consts(signed char regmap[],u_int dirty,int i);
+static void load_all_consts(const signed char regmap[], u_int dirty, int i);
static u_int get_host_reglist(const signed char *regmap);
static int verify_dirty(const u_int *ptr);
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
static void add_to_linker(void *addr, u_int target, int ext);
-static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
+static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
+ int addr, int *offset_reg, int *addr_reg_override);
static void *get_direct_memhandler(void *table, u_int addr,
enum stub_type type, uintptr_t *addr_host);
static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
hsn[dops[i+j].rs1]=j;
hsn[dops[i+j].rs2]=j;
}
+ if (ram_offset && (dops[i+j].is_load || dops[i+j].is_store))
+ hsn[ROREG] = j;
// On some architectures stores need invc_ptr
#if defined(HOST_IMM8)
- if(dops[i+j].itype==STORE || dops[i+j].itype==STORELR || (dops[i+j].opcode&0x3b)==0x39 || (dops[i+j].opcode&0x3b)==0x3a) {
- hsn[INVCP]=j;
- }
+ if (dops[i+j].is_store)
+ hsn[INVCP] = j;
#endif
if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
{
hsn[RHTBL]=1;
}
// Coprocessor load/store needs FTEMP, even if not declared
- if(dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if(dops[i].itype==C2LS) {
hsn[FTEMP]=0;
}
// Load L/R also uses FTEMP as a temporary register
static void alloc_reg(struct regstat *cur,int i,signed char reg)
{
int r,hr;
- int preferred_reg = (reg&7);
- if(reg==CCREG) preferred_reg=HOST_CCREG;
- if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
+ int preferred_reg = PREFERRED_REG_FIRST
+ + reg % (PREFERRED_REG_LAST - PREFERRED_REG_FIRST + 1);
+ if (reg == CCREG) preferred_reg = HOST_CCREG;
+ if (reg == PTEMP || reg == FTEMP) preferred_reg = 12;
+ assert(PREFERRED_REG_FIRST != EXCLUDE_REG && EXCLUDE_REG != HOST_REGS);
// Don't allocate unused registers
if((cur->u>>reg)&1) return;
if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
}
}
+
// Try to allocate any available register, but prefer
// registers that have not been used recently.
- if(i>0) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
- if(regs[i-1].regmap[hr]!=dops[i-1].rs1&®s[i-1].regmap[hr]!=dops[i-1].rs2&®s[i-1].regmap[hr]!=dops[i-1].rt1&®s[i-1].regmap[hr]!=dops[i-1].rt2) {
+ if (i > 0) {
+ for (hr = PREFERRED_REG_FIRST; ; ) {
+ if (cur->regmap[hr] < 0) {
+ int oldreg = regs[i-1].regmap[hr];
+ if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
+ && oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
+ {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
return;
}
}
+ hr++;
+ if (hr == EXCLUDE_REG)
+ hr++;
+ if (hr == HOST_REGS)
+ hr = 0;
+ if (hr == PREFERRED_REG_FIRST)
+ break;
}
}
+
// Try to allocate any available register
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ for (hr = PREFERRED_REG_FIRST; ; ) {
+ if (cur->regmap[hr] < 0) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
return;
}
+ hr++;
+ if (hr == EXCLUDE_REG)
+ hr++;
+ if (hr == HOST_REGS)
+ hr = 0;
+ if (hr == PREFERRED_REG_FIRST)
+ break;
}
// Ok, now we have to evict someone
clear_const(current,dops[i].rt1);
//if(dops[i].rs1!=dops[i].rt1&&needed_again(dops[i].rs1,i)) clear_const(current,dops[i].rs1); // Does this help or hurt?
if(!dops[i].rs1) current->u&=~1LL; // Allow allocating r0 if it's the source register
- if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
+ if (needed_again(dops[i].rs1, i))
+ alloc_reg(current, i, dops[i].rs1);
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
alloc_reg(current,i,dops[i].rt1);
assert(get_reg(current->regmap,dops[i].rt1)>=0);
if(dops[i].opcode==0x2c||dops[i].opcode==0x2d||dops[i].opcode==0x3f) { // 64-bit SDL/SDR/SD
assert(0);
}
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
- else alloc_reg(current,i,INVCP);
+ alloc_reg(current, i, INVCP);
#endif
if(dops[i].opcode==0x2a||dops[i].opcode==0x2e||dops[i].opcode==0x2c||dops[i].opcode==0x2d) { // SWL/SWL/SDL/SDR
alloc_reg(current,i,FTEMP);
// Register allocation for COP1 load/store (LWC1/SWC1).
// The PSX CPU has no FPU: c1ls_assemble() just calls cop1_unusable(),
// so only the coprocessor Status register is needed for the check and
// the old FTEMP/INVCP/address-temp allocations are dropped here.
void c1ls_alloc(struct regstat *current,int i)
{
- //clear_const(current,dops[i].rs1); // FIXME
clear_const(current,dops[i].rt1);
- if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
alloc_reg(current,i,CSREG); // Status
- alloc_reg(current,i,FTEMP);
- if(dops[i].opcode==0x35||dops[i].opcode==0x3d) { // 64-bit LDC1/SDC1
- assert(0);
- }
- #if defined(HOST_IMM8)
- // On CPUs without 32-bit immediates we need a pointer to invalid_code
- else if((dops[i].opcode&0x3b)==0x39) // SWC1/SDC1
- alloc_reg(current,i,INVCP);
- #endif
- // We need a temporary register for address generation
- alloc_reg_temp(current,i,-1);
}
void c2ls_alloc(struct regstat *current,int i)
clear_const(current,dops[i].rt1);
if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
alloc_reg(current,i,FTEMP);
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
- if((dops[i].opcode&0x3b)==0x3a) // SWC2/SDC2
+ if (dops[i].opcode == 0x3a) // SWC2
alloc_reg(current,i,INVCP);
#endif
// We need a temporary register for address generation
}
// Write out a single register
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
+static void wb_register(signed char r, const signed char regmap[], uint64_t dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
}
}
-static void alu_assemble(int i,struct regstat *i_regs)
+static void alu_assemble(int i, const struct regstat *i_regs)
{
if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
if(dops[i].rt1) {
}
}
-void imm16_assemble(int i,struct regstat *i_regs)
+static void imm16_assemble(int i, const struct regstat *i_regs)
{
if (dops[i].opcode==0x0f) { // LUI
if(dops[i].rt1) {
}
}
-void shiftimm_assemble(int i,struct regstat *i_regs)
+static void shiftimm_assemble(int i, const struct regstat *i_regs)
{
if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
{
}
#ifndef shift_assemble
-static void shift_assemble(int i,struct regstat *i_regs)
+static void shift_assemble(int i, const struct regstat *i_regs)
{
signed char s,t,shift;
if (dops[i].rt1 == 0)
return MTYPE_8000;
}
-static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
+// Return a host register that holds ram_offset (guest ROREG).
+// Normally ROREG was statically allocated (alloc paths request it whenever
+// ram_offset != 0); if it is not mapped and the caller owns the temp reg
+// (host_tempreg_free != 0), ROREG is loaded into HOST_TEMPREG instead —
+// the caller is then responsible for host_tempreg_release() (callers do
+// this by checking offset_reg == HOST_TEMPREG). Aborts if neither source
+// can supply the register.
+static int get_ro_reg(const struct regstat *i_regs, int host_tempreg_free)
+{
+  int r = get_reg(i_regs->regmap, ROREG);
+  if (r < 0 && host_tempreg_free) {
+    host_tempreg_acquire();
+    emit_loadreg(ROREG, r = HOST_TEMPREG);
+  }
+  if (r < 0)
+    abort();
+  return r;
+}
+
+static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
+ int addr, int *offset_reg, int *addr_reg_override)
{
void *jaddr = NULL;
- int type=0;
- int mr=dops[i].rs1;
+ int type = 0;
+ int mr = dops[i].rs1;
+ *offset_reg = -1;
if(((smrv_strong|smrv_weak)>>mr)&1) {
type=get_ptr_mem_type(smrv[mr]);
//printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
}
}
- if(type==0)
+ if (type == 0) // need ram check
{
emit_cmpimm(addr,RAM_SIZE);
- jaddr=out;
+ jaddr = out;
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
// Hint to branch predictor that the branch is unlikely to be taken
- if(dops[i].rs1>=28)
+ if (dops[i].rs1 >= 28)
emit_jno_unlikely(0);
else
#endif
emit_jno(0);
- if(ram_offset!=0) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- addr=*addr_reg_override=HOST_TEMPREG;
- }
+ if (ram_offset != 0)
+ *offset_reg = get_ro_reg(i_regs, 0);
}
return jaddr;
return __builtin_ctz(free_regs);
}
-static void load_assemble(int i, const struct regstat *i_regs)
+// Load a 32-bit word into host reg rt from [a] (offset_reg < 0) or from
+// [a + offset_reg] via a dual-indexed load; offset_reg, when valid, holds
+// ram_offset (see get_ro_reg).
+static void do_load_word(int a, int rt, int offset_reg)
+{
+  if (offset_reg >= 0)
+    emit_ldr_dualindexed(offset_reg, a, rt);
+  else
+    emit_readword_indexed(0, a, rt);
+}
+
+// Store 32-bit word rt to [a + ofs] (offset_reg < 0) or to
+// [a + ofs + offset_reg]. The dual-indexed form has no immediate offset,
+// so ofs is temporarily folded into a; when preseve_a is set, a is
+// restored afterwards so the caller can keep using it.
+// NOTE(review): "preseve_a" is a typo for "preserve_a" (also in
+// do_store_hword) — harmless but worth fixing in both at once.
+static void do_store_word(int a, int ofs, int rt, int offset_reg, int preseve_a)
+{
+  if (offset_reg < 0) {
+    emit_writeword_indexed(rt, ofs, a);
+    return;
+  }
+  if (ofs != 0)
+    emit_addimm(a, ofs, a);
+  emit_str_dualindexed(offset_reg, a, rt);
+  if (ofs != 0 && preseve_a)
+    emit_addimm(a, -ofs, a);
+}
+
+// Store 16-bit halfword rt to [a + ofs] (offset_reg < 0) or to
+// [a + ofs + offset_reg]; same ofs fold/restore scheme as do_store_word
+// (a is restored only when preseve_a [sic] is set).
+static void do_store_hword(int a, int ofs, int rt, int offset_reg, int preseve_a)
+{
+  if (offset_reg < 0) {
+    emit_writehword_indexed(rt, ofs, a);
+    return;
+  }
+  if (ofs != 0)
+    emit_addimm(a, ofs, a);
+  emit_strh_dualindexed(offset_reg, a, rt);
+  if (ofs != 0 && preseve_a)
+    emit_addimm(a, -ofs, a);
+}
+
+// Store byte rt to [a] (offset_reg < 0) or [a + offset_reg]; no ofs
+// parameter since all byte-store call sites address the exact byte.
+static void do_store_byte(int a, int rt, int offset_reg)
+{
+  if (offset_reg >= 0)
+    emit_strb_dualindexed(offset_reg, a, rt);
+  else
+    emit_writebyte_indexed(rt, 0, a);
+}
+
+static void load_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rt1);
s=get_reg(i_regs->regmap,dops[i].rs1);
if(dops[i].rs1!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
#endif
{
- jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
+ &offset_reg, &fastio_reg_override);
}
}
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
int dummy=(dops[i].rt1==0)||(tl!=get_reg(i_regs->regmap,dops[i].rt1)); // ignore loads to r0 and unneeded reg
- if (dops[i].opcode==0x20) { // LB
+ switch (dops[i].opcode) {
+ case 0x20: // LB
if(!c||memtarget) {
if(!dummy) {
- {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
- emit_movsbl_indexed(x,a,tl);
- }
+ if (offset_reg >= 0)
+ emit_ldrsb_dualindexed(offset_reg, a, tl);
+ else
+ emit_movsbl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x21) { // LH
+ inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x21: // LH
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_movswl_indexed(x,a,tl);
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ if (offset_reg >= 0)
+ emit_ldrsh_dualindexed(offset_reg, a, tl);
+ else
+ emit_movswl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x23) { // LW
+ inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x23: // LW
if(!c||memtarget) {
if(!dummy) {
- int a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,tl);
+ int a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, tl, offset_reg);
}
if(jaddr)
- add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x24) { // LBU
+ inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x24: // LBU
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
- emit_movzbl_indexed(x,a,tl);
+ if (offset_reg >= 0)
+ emit_ldrb_dualindexed(offset_reg, a, tl);
+ else
+ emit_movzbl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x25) { // LHU
+ inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x25: // LHU
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_movzwl_indexed(x,a,tl);
+ int a = tl;
+ if(!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ if (offset_reg >= 0)
+ emit_ldrh_dualindexed(offset_reg, a, tl);
+ else
+ emit_movzwl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x27) { // LWU
- assert(0);
- }
- if (dops[i].opcode==0x37) { // LD
+ inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x27: // LWU
+ case 0x37: // LD
+ default:
assert(0);
}
}
- if (fastio_reg_override == HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
}
#ifndef loadlr_assemble
-static void loadlr_assemble(int i, const struct regstat *i_regs)
+static void loadlr_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl,temp,temp2,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rt1);
s=get_reg(i_regs->regmap,dops[i].rs1);
}else{
emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
}
- jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, temp2,
+ &offset_reg, &fastio_reg_override);
}
else {
- if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(temp2,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
}
if (dops[i].opcode==0x22||dops[i].opcode==0x26) { // LWL/LWR
if(!c||memtarget) {
- int a=temp2;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,temp2);
- if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
- if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
+ int a = temp2;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, temp2, offset_reg);
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
+ host_tempreg_release();
+ if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
+ inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj_,reglist);
if(dops[i].rt1) {
assert(tl>=0);
emit_andimm(temp,24,temp);
}
#endif
-void store_assemble(int i, const struct regstat *i_regs)
+static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int addr,temp;
int offset;
void *jaddr=0;
- enum stub_type type;
+ enum stub_type type=0;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
if(offset||s<0||c) addr=temp;
else addr=s;
- if(!c) {
- jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
+ if (!c) {
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
+ &offset_reg, &fastio_reg_override);
}
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
- if (dops[i].opcode==0x28) { // SB
+ switch (dops[i].opcode) {
+ case 0x28: // SB
if(!c||memtarget) {
- int x=0,a=temp;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writebyte_indexed(tl,x,a);
- }
- type=STOREB_STUB;
- }
- if (dops[i].opcode==0x29) { // SH
+ int a = temp;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_byte(a, tl, offset_reg);
+ }
+ type = STOREB_STUB;
+ break;
+ case 0x29: // SH
if(!c||memtarget) {
- int x=0,a=temp;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writehword_indexed(tl,x,a);
- }
- type=STOREH_STUB;
- }
- if (dops[i].opcode==0x2B) { // SW
+ int a = temp;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_hword(a, 0, tl, offset_reg, 1);
+ }
+ type = STOREH_STUB;
+ break;
+ case 0x2B: // SW
if(!c||memtarget) {
- int a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writeword_indexed(tl,0,a);
- }
- type=STOREW_STUB;
- }
- if (dops[i].opcode==0x3F) { // SD
+ int a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_word(a, 0, tl, offset_reg, 1);
+ }
+ type = STOREW_STUB;
+ break;
+ case 0x3F: // SD
+ default:
assert(0);
- type=STORED_STUB;
}
- if(fastio_reg_override==HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
if(jaddr) {
// PCSX store handlers don't check invcode again
reglist|=1<<addr;
- add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
jaddr=0;
}
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
}
u_int addr_val=constmap[i][s]+offset;
if(jaddr) {
- add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
} else if(c&&!memtarget) {
- inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj[i],reglist);
+ inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj_,reglist);
}
// basic current block modification detection..
// not looking back as that should be in mips cache already
}
}
-static void storelr_assemble(int i, const struct regstat *i_regs)
+static void storelr_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int temp;
int offset;
void *jaddr=0;
- void *case1, *case2, *case3;
+ void *case1, *case23, *case3;
void *done0, *done1, *done2;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
+ int offset_reg = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
emit_jmp(0);
}
}
- if(ram_offset)
- emit_addimm_no_flags(ram_offset,temp);
+ if (ram_offset)
+ offset_reg = get_ro_reg(i_regs, 0);
if (dops[i].opcode==0x2C||dops[i].opcode==0x2D) { // SDL/SDR
assert(0);
}
- emit_xorimm(temp,3,temp);
emit_testimm(temp,2);
- case2=out;
+ case23=out;
emit_jne(0);
emit_testimm(temp,1);
case1=out;
emit_jne(0);
// 0
- if (dops[i].opcode==0x2A) { // SWL
- emit_writeword_indexed(tl,0,temp);
+ if (dops[i].opcode == 0x2A) { // SWL
+ // Write msb into least significant byte
+ if (dops[i].rs2) emit_rorimm(tl, 24, tl);
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- emit_writebyte_indexed(tl,3,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write entire word
+ do_store_word(temp, 0, tl, offset_reg, 1);
}
- else
- assert(0);
- done0=out;
+ done0 = out;
emit_jmp(0);
// 1
set_jump_target(case1, out);
- if (dops[i].opcode==0x2A) { // SWL
- // Write 3 msb into three least significant bytes
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
- emit_writehword_indexed(tl,-1,temp);
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
- emit_writebyte_indexed(tl,1,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
+ if (dops[i].opcode == 0x2A) { // SWL
+ // Write two msb into two least significant bytes
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
+ do_store_hword(temp, -1, tl, offset_reg, 0);
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write two lsb into two most significant bytes
- emit_writehword_indexed(tl,1,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write 3 lsb into three most significant bytes
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
+ do_store_hword(temp, 1, tl, offset_reg, 0);
+ if (dops[i].rs2) emit_rorimm(tl, 24, tl);
}
done1=out;
emit_jmp(0);
- // 2
- set_jump_target(case2, out);
+ // 2,3
+ set_jump_target(case23, out);
emit_testimm(temp,1);
- case3=out;
+ case3 = out;
emit_jne(0);
+ // 2
if (dops[i].opcode==0x2A) { // SWL
- // Write two msb into two least significant bytes
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
- emit_writehword_indexed(tl,-2,temp);
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
+ // Write 3 msb into three least significant bytes
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
+ do_store_hword(temp, -2, tl, offset_reg, 1);
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write 3 lsb into three most significant bytes
- emit_writebyte_indexed(tl,-1,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
- emit_writehword_indexed(tl,0,temp);
- if(dops[i].rs2) emit_rorimm(tl,24,tl);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write two lsb into two most significant bytes
+ do_store_hword(temp, 0, tl, offset_reg, 1);
}
- done2=out;
+ done2 = out;
emit_jmp(0);
// 3
set_jump_target(case3, out);
- if (dops[i].opcode==0x2A) { // SWL
- // Write msb into least significant byte
- if(dops[i].rs2) emit_rorimm(tl,24,tl);
- emit_writebyte_indexed(tl,-3,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
+ if (dops[i].opcode == 0x2A) { // SWL
+ do_store_word(temp, -3, tl, offset_reg, 0);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write entire word
- emit_writeword_indexed(tl,-3,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ do_store_byte(temp, tl, offset_reg);
}
set_jump_target(done0, out);
set_jump_target(done1, out);
set_jump_target(done2, out);
+ if (offset_reg == HOST_TEMPREG)
+ host_tempreg_release();
if(!c||!memtarget)
- add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
+ add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj_,reglist);
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
- emit_addimm_no_flags(-ram_offset,temp);
#if defined(HOST_IMM8)
int ir=get_reg(i_regs->regmap,INVCP);
assert(ir>=0);
}
}
-static void cop0_assemble(int i,struct regstat *i_regs)
+static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
if(dops[i].opcode2==0) // MFC0
{
emit_readword(&last_count,HOST_TEMPREG);
emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
emit_writeword(HOST_CCREG,&Count);
}
// What a mess. The status register (12) can enable interrupts,
if(copr==9||copr==11||copr==12||copr==13) {
emit_readword(&Count,HOST_CCREG);
emit_readword(&next_interupt,HOST_TEMPREG);
- emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_addimm(HOST_CCREG,-ccadj_,HOST_CCREG);
emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
emit_writeword(HOST_TEMPREG,&last_count);
emit_storereg(CCREG,HOST_CCREG);
}
}
-static void cop1_unusable(int i,struct regstat *i_regs)
+static void cop1_unusable(int i, const struct regstat *i_regs)
{
// XXX: should just just do the exception instead
//if(!cop1_usable)
}
}
-static void cop1_assemble(int i,struct regstat *i_regs)
+static void cop1_assemble(int i, const struct regstat *i_regs)
{
cop1_unusable(i, i_regs);
}
-static void c1ls_assemble(int i,struct regstat *i_regs)
+static void c1ls_assemble(int i, const struct regstat *i_regs)
{
cop1_unusable(i, i_regs);
}
wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
emit_movimm(start+(i-ds)*4,EAX); // Get PC
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
+ emit_addimm(HOST_CCREG,ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
emit_far_jump(ds?fp_exception_ds:fp_exception);
}
emit_movimm(stall, 0);
else
emit_mov(HOST_TEMPREG, 0);
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_addimm(HOST_CCREG, ccadj[i], 1);
emit_far_call(log_gte_stall);
restore_regs(reglist);
}
//if (dops[j].is_ds) break;
if (cop2_is_stalling_op(j, &other_gte_op_cycles) || dops[j].bt)
break;
+ if (j > 0 && ccadj[j - 1] > ccadj[j])
+ break;
}
j = max(j, 0);
}
- cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+ cycles_passed = ccadj[i] - ccadj[j];
if (other_gte_op_cycles >= 0)
stall = other_gte_op_cycles - cycles_passed;
else if (cycles_passed >= 44)
#if 0 // too slow
save_regs(reglist);
emit_movimm(gte_cycletab[op], 0);
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_addimm(HOST_CCREG, ccadj[i], 1);
emit_far_call(call_gteStall);
restore_regs(reglist);
#else
host_tempreg_acquire();
emit_readword(&psxRegs.gteBusyCycle, rtmp);
- emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+ emit_addimm(rtmp, -ccadj[i], rtmp);
emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
emit_cmpimm(HOST_TEMPREG, 44);
emit_cmovb_reg(rtmp, HOST_CCREG);
if (other_gte_op_cycles >= 0)
// will handle stall when assembling that op
return;
- cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen -1)] - ccadj[i]);
+ cycles_passed = ccadj[min(j, slen -1)] - ccadj[i];
if (cycles_passed >= 44)
return;
assem_debug("; save gteBusyCycle\n");
#if 0
emit_readword(&last_count, HOST_TEMPREG);
emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
- emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG);
+ emit_addimm(HOST_TEMPREG, ccadj[i], HOST_TEMPREG);
emit_addimm(HOST_TEMPREG, gte_cycletab[op]), HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
#else
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG);
+ emit_addimm(HOST_CCREG, ccadj[i] + gte_cycletab[op], HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
#endif
host_tempreg_release();
return 1;
}
-static void multdiv_prepare_stall(int i, const struct regstat *i_regs)
+static void multdiv_prepare_stall(int i, const struct regstat *i_regs, int ccadj_)
{
int j, found = 0, c = 0;
if (HACK_ENABLED(NDHACK_NO_STALLS))
assert(c > 0);
assem_debug("; muldiv prepare stall %d\n", c);
host_tempreg_acquire();
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + c, HOST_TEMPREG);
+ emit_addimm(HOST_CCREG, ccadj_ + c, HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.muldivBusyCycle);
host_tempreg_release();
}
if (!dops[i].bt) {
for (j = i - 1; j >= 0; j--) {
if (dops[j].is_ds) break;
- if (check_multdiv(j, &known_cycles) || dops[j].bt)
+ if (check_multdiv(j, &known_cycles))
break;
if (is_mflohi(j))
// already handled by this op
return;
+ if (dops[j].bt || (j > 0 && ccadj[j - 1] > ccadj[j]))
+ break;
}
j = max(j, 0);
}
if (known_cycles > 0) {
- known_cycles -= CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+ known_cycles -= ccadj[i] - ccadj[j];
assem_debug("; muldiv stall resolved %d\n", known_cycles);
if (known_cycles > 0)
emit_addimm(HOST_CCREG, known_cycles, HOST_CCREG);
assem_debug("; muldiv stall unresolved\n");
host_tempreg_acquire();
emit_readword(&psxRegs.muldivBusyCycle, rtmp);
- emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+ emit_addimm(rtmp, -ccadj[i], rtmp);
emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
emit_cmpimm(HOST_TEMPREG, 37);
emit_cmovb_reg(rtmp, HOST_CCREG);
}
}
-static void c2ls_assemble(int i, const struct regstat *i_regs)
+static void c2ls_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int ar;
void *jaddr2=NULL;
enum stub_type type;
int agr=AGEN1+(i&1);
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
u_int copr=(source[i]>>16)&0x1f;
s=get_reg(i_regs->regmap,dops[i].rs1);
}
else {
if(!c) {
- jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
- }
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(ar,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
- }
- if (dops[i].opcode==0x32) { // LWC2
- int a=ar;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,tl);
+ jaddr2 = emit_fastpath_cmp_jump(i, i_regs, ar,
+ &offset_reg, &fastio_reg_override);
+ }
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
+ }
+ switch (dops[i].opcode) {
+ case 0x32: { // LWC2
+ int a = ar;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, tl, offset_reg);
+ break;
}
- if (dops[i].opcode==0x3a) { // SWC2
+ case 0x3a: { // SWC2
#ifdef DESTRUCTIVE_SHIFT
if(!offset&&!c&&s>=0) emit_mov(s,ar);
#endif
- int a=ar;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writeword_indexed(tl,0,a);
+ int a = ar;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_word(a, 0, tl, offset_reg, 1);
+ break;
+ }
+ default:
+ assert(0);
}
}
- if(fastio_reg_override==HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
if(jaddr2)
- add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
if(dops[i].opcode==0x3a) // SWC2
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
#if defined(HOST_IMM8)
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
- emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+ emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
restore_regs(reglist);
}
#endif
-static void mov_assemble(int i,struct regstat *i_regs)
+static void mov_assemble(int i, const struct regstat *i_regs)
{
//if(dops[i].opcode2==0x10||dops[i].opcode2==0x12) { // MFHI/MFLO
//if(dops[i].opcode2==0x11||dops[i].opcode2==0x13) { // MTHI/MTLO
}
// call interpreter, exception handler, things that change pc/regs/cycles ...
-static void call_c_cpu_handler(int i, const struct regstat *i_regs, u_int pc, void *func)
+// Emit a call to a C handler (exception, HLE, interpreter fallback) that may
+// change pc/regs/cycles.  Flushes the given pc and the running cycle count to
+// psxRegs before the call, then jumps to jump_to_new_pc to resume execution
+// from whatever pc the handler set.
+// ccadj_ is this instruction's cycle adjustment, already scaled by
+// CLOCK_ADJUST by the caller -- unlike the old code, no further CLOCK_ADJUST
+// is applied here.
+static void call_c_cpu_handler(int i, const struct regstat *i_regs, int ccadj_, u_int pc, void *func)
{
signed char ccreg=get_reg(i_regs->regmap,CCREG);
assert(ccreg==HOST_CCREG);
emit_movimm(pc,3); // Get PC
emit_readword(&last_count,2);
emit_writeword(3,&psxRegs.pc);
-emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
+emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
+// cycle = last_count + adjusted CCREG; handler sees an up-to-date count
emit_add(2,HOST_CCREG,2);
emit_writeword(2,&psxRegs.cycle);
emit_far_call(func);
emit_far_jump(jump_to_new_pc);
}
-static void syscall_assemble(int i,struct regstat *i_regs)
+// SYSCALL: raise exception 0x20 via psxException (not in a delay slot).
+static void syscall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
emit_movimm(0x20,0); // cause code
emit_movimm(0,1); // not in delay slot
-call_c_cpu_handler(i,i_regs,start+i*4,psxException);
+call_c_cpu_handler(i, i_regs, ccadj_, start+i*4, psxException);
}
-static void hlecall_assemble(int i,struct regstat *i_regs)
+// HLECALL: dispatch to an HLE BIOS handler selected by the low 26 bits of the
+// opcode; out-of-range codes fall through to psxNULL.  Resumes at pc+4.
+static void hlecall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
void *hlefunc = psxNULL;
uint32_t hleCode = source[i] & 0x03ffffff;
if (hleCode < ARRAY_SIZE(psxHLEt))
hlefunc = psxHLEt[hleCode];
-call_c_cpu_handler(i,i_regs,start+i*4+4,hlefunc);
+call_c_cpu_handler(i, i_regs, ccadj_, start + i*4+4, hlefunc);
}
-static void intcall_assemble(int i,struct regstat *i_regs)
+// INTCALL: punt this instruction to the interpreter (execI) at its own pc.
+static void intcall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
-call_c_cpu_handler(i,i_regs,start+i*4,execI);
+call_c_cpu_handler(i, i_regs, ccadj_, start + i*4, execI);
}
static void speculate_mov(int rs,int rt)
#endif
}
-static void ds_assemble(int i,struct regstat *i_regs)
+static void ujump_assemble(int i, const struct regstat *i_regs);
+static void rjump_assemble(int i, const struct regstat *i_regs);
+static void cjump_assemble(int i, const struct regstat *i_regs);
+static void sjump_assemble(int i, const struct regstat *i_regs);
+static void pagespan_assemble(int i, const struct regstat *i_regs);
+
+// Assemble one decoded instruction at index i, dispatching on dops[i].itype.
+// Centralizes the per-itype switch that was previously duplicated in
+// ds_assemble / ds_assemble_entry / pagespan and the main assembly loop.
+// ccadj_ is the cycle adjustment for this instruction (pre-scaled by
+// CLOCK_ADJUST, see how ccadj[] is filled) and is forwarded only to the
+// handlers that emit cycle accounting or stubs.
+// Returns 1 when the instruction is a branch whose delay slot the caller
+// must assemble next (UJUMP/RJUMP/CJUMP/SJUMP), 0 otherwise.
+static int assemble(int i, const struct regstat *i_regs, int ccadj_)
+{
+  int ds = 0;
+  switch (dops[i].itype) {
+    case ALU:
+      alu_assemble(i, i_regs);
+      break;
+    case IMM16:
+      imm16_assemble(i, i_regs);
+      break;
+    case SHIFT:
+      shift_assemble(i, i_regs);
+      break;
+    case SHIFTIMM:
+      shiftimm_assemble(i, i_regs);
+      break;
+    case LOAD:
+      load_assemble(i, i_regs, ccadj_);
+      break;
+    case LOADLR:
+      loadlr_assemble(i, i_regs, ccadj_);
+      break;
+    case STORE:
+      store_assemble(i, i_regs, ccadj_);
+      break;
+    case STORELR:
+      storelr_assemble(i, i_regs, ccadj_);
+      break;
+    case COP0:
+      cop0_assemble(i, i_regs, ccadj_);
+      break;
+    case COP1:
+      cop1_assemble(i, i_regs);
+      break;
+    case C1LS:
+      c1ls_assemble(i, i_regs);
+      break;
+    case COP2:
+      cop2_assemble(i, i_regs);
+      break;
+    case C2LS:
+      c2ls_assemble(i, i_regs, ccadj_);
+      break;
+    case C2OP:
+      c2op_assemble(i, i_regs);
+      break;
+    case MULTDIV:
+      multdiv_assemble(i, i_regs);
+      // may emit code to resolve a mul/div busy stall at this point
+      multdiv_prepare_stall(i, i_regs, ccadj_);
+      break;
+    case MOV:
+      mov_assemble(i, i_regs);
+      break;
+    case SYSCALL:
+      syscall_assemble(i, i_regs, ccadj_);
+      break;
+    case HLECALL:
+      hlecall_assemble(i, i_regs, ccadj_);
+      break;
+    case INTCALL:
+      intcall_assemble(i, i_regs, ccadj_);
+      break;
+    case UJUMP:
+      ujump_assemble(i, i_regs);
+      ds = 1;
+      break;
+    case RJUMP:
+      rjump_assemble(i, i_regs);
+      ds = 1;
+      break;
+    case CJUMP:
+      cjump_assemble(i, i_regs);
+      ds = 1;
+      break;
+    case SJUMP:
+      sjump_assemble(i, i_regs);
+      ds = 1;
+      break;
+    case SPAN:
+      // note: does not set ds -- pagespan_assemble presumably deals with the
+      // delay slot itself (its body is not visible here; confirm)
+      pagespan_assemble(i, i_regs);
+      break;
+    case OTHER:
+    case NI:
+      // not handled, just skip
+      break;
+    default:
+      assert(0);
+  }
+  return ds;
+}
+
+// Assemble the instruction at index i as a branch delay slot.  Sets the
+// global is_delayslot flag around the work (presumably consulted by the
+// per-itype assemblers -- not visible here).  Instructions that are illegal
+// in a delay slot (syscalls, HLE/int calls, branches) only get a warning and
+// emit nothing.
+static void ds_assemble(int i, const struct regstat *i_regs)
+{
+  speculate_register_values(i);
+  is_delayslot = 1;
+  switch (dops[i].itype) {
case SYSCALL:
case HLECALL:
case INTCALL:
case CJUMP:
case SJUMP:
SysPrintf("Jump in the delay slot. This is probably a bug.\n");
+      break;
+    default:
+      assemble(i, i_regs, ccadj[i]);
}
-is_delayslot=0;
+  is_delayslot = 0;
}
// Is the branch target a valid internal jump?
// Generate address for load/store instruction
// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
-void address_generation(int i,struct regstat *i_regs,signed char entry[])
+void address_generation(int i, const struct regstat *i_regs, signed char entry[])
{
- if(dops[i].itype==LOAD||dops[i].itype==LOADLR||dops[i].itype==STORE||dops[i].itype==STORELR||dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if (dops[i].is_load || dops[i].is_store) {
int ra=-1;
int agr=AGEN1+(i&1);
if(dops[i].itype==LOAD) {
ra=get_reg(i_regs->regmap,agr);
if(ra<0) ra=get_reg(i_regs->regmap,-1);
}
- if(dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if(dops[i].itype==C2LS) {
if ((dops[i].opcode&0x3b)==0x31||(dops[i].opcode&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
ra=get_reg(i_regs->regmap,FTEMP);
else { // SWC1/SDC1/SWC2/SDC2
}
}
// Preload constants for next instruction
- if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR||dops[i+1].itype==C1LS||dops[i+1].itype==C2LS) {
+ if (dops[i+1].is_load || dops[i+1].is_store) {
int agr,ra;
// Actual address
agr=AGEN1+((i+1)&1);
}
}
-void load_all_consts(signed char regmap[], u_int dirty, int i)
+static void load_all_consts(const signed char regmap[], u_int dirty, int i)
{
int hr;
// Load 32-bit regs
}
// Write out all dirty registers (except cycle count)
-static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty)
+static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
// Write out dirty registers that we need to reload (pair with load_needed_regs)
// This writes the registers not written by store_regs_bt
-void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr)
+static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr)
{
int hr;
int t=(addr-start)>>2;
}
// Load all registers (except cycle count)
-void load_all_regs(signed char i_regmap[])
+static void load_all_regs(const signed char i_regmap[])
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
}
// Load all current registers also needed by next instruction
-void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
+static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
}
// Load all regs, storing cycle count if necessary
-void load_regs_entry(int t)
+static void load_regs_entry(int t)
{
int hr;
if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
- else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
+ else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t],HOST_CCREG);
if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
emit_storereg(CCREG,HOST_CCREG);
}
}
#ifdef DRC_DBG
-static void drc_dbg_emit_do_cmp(int i)
+static void drc_dbg_emit_do_cmp(int i, int ccadj_)
{
extern void do_insn_cmp();
//extern int cycle;
// write out changed consts to match the interpreter
if (i > 0 && !dops[i].bt) {
for (hr = 0; hr < HOST_REGS; hr++) {
- int reg = regs[i-1].regmap[hr];
+ int reg = regs[i].regmap_entry[hr]; // regs[i-1].regmap[hr];
if (hr == EXCLUDE_REG || reg < 0)
continue;
if (!((regs[i-1].isconst >> hr) & 1))
}
emit_movimm(start+i*4,0);
emit_writeword(0,&pcaddr);
+ int cc = get_reg(regs[i].regmap_entry, CCREG);
+ if (cc < 0)
+ emit_loadreg(CCREG, cc = 0);
+ emit_addimm(cc, ccadj_, 0);
+ emit_writeword(0, &psxRegs.cycle);
emit_far_call(do_insn_cmp);
//emit_readword(&cycle,0);
//emit_addimm(0,2,0);
assem_debug("\\\\do_insn_cmp\n");
}
#else
-#define drc_dbg_emit_do_cmp(x)
+#define drc_dbg_emit_do_cmp(x,y)
#endif
// Used when a branch jumps into the delay slot of another branch
static void ds_assemble_entry(int i)
{
- int t=(ba[i]-start)>>2;
+ int t = (ba[i] - start) >> 2;
+ int ccadj_ = -CLOCK_ADJUST(1);
if (!instr_addr[t])
instr_addr[t] = out;
assem_debug("Assemble delay slot at %x\n",ba[i]);
assem_debug("<->\n");
- drc_dbg_emit_do_cmp(t);
+ drc_dbg_emit_do_cmp(t, ccadj_);
if(regs[t].regmap_entry[HOST_CCREG]==CCREG&®s[t].regmap[HOST_CCREG]!=CCREG)
wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
load_regs(regs[t].regmap_entry,regs[t].regmap,dops[t].rs1,dops[t].rs2);
address_generation(t,®s[t],regs[t].regmap_entry);
- if(dops[t].itype==STORE||dops[t].itype==STORELR||(dops[t].opcode&0x3b)==0x39||(dops[t].opcode&0x3b)==0x3a)
+ if (ram_offset && (dops[t].is_load || dops[t].is_store))
+ load_regs(regs[t].regmap_entry,regs[t].regmap,ROREG,ROREG);
+ if (dops[t].is_store)
load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP);
is_delayslot=0;
- switch(dops[t].itype) {
- case ALU:
- alu_assemble(t,®s[t]);break;
- case IMM16:
- imm16_assemble(t,®s[t]);break;
- case SHIFT:
- shift_assemble(t,®s[t]);break;
- case SHIFTIMM:
- shiftimm_assemble(t,®s[t]);break;
- case LOAD:
- load_assemble(t,®s[t]);break;
- case LOADLR:
- loadlr_assemble(t,®s[t]);break;
- case STORE:
- store_assemble(t,®s[t]);break;
- case STORELR:
- storelr_assemble(t,®s[t]);break;
- case COP0:
- cop0_assemble(t,®s[t]);break;
- case COP1:
- cop1_assemble(t,®s[t]);break;
- case C1LS:
- c1ls_assemble(t,®s[t]);break;
- case COP2:
- cop2_assemble(t,®s[t]);break;
- case C2LS:
- c2ls_assemble(t,®s[t]);break;
- case C2OP:
- c2op_assemble(t,®s[t]);break;
- case MULTDIV:
- multdiv_assemble(t,®s[t]);
- multdiv_prepare_stall(i,®s[t]);
- break;
- case MOV:
- mov_assemble(t,®s[t]);break;
+ switch (dops[t].itype) {
case SYSCALL:
case HLECALL:
case INTCALL:
case CJUMP:
case SJUMP:
SysPrintf("Jump in the delay slot. This is probably a bug.\n");
+ break;
+ default:
+ assemble(t, ®s[t], ccadj_);
}
store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
emit_movimm_from(imm1,rt1,imm2,rt2);
}
-void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
+static void do_cc(int i, const signed char i_regmap[], int *adj,
+ int addr, int taken, int invert)
{
- int count;
+ int count, count_plus2;
void *jaddr;
void *idle=NULL;
int t=0;
if(internal_branch(ba[i]))
{
t=(ba[i]-start)>>2;
- if(dops[t].is_ds) *adj=-1; // Branch into delay slot adds an extra cycle
+ if(dops[t].is_ds) *adj=-CLOCK_ADJUST(1); // Branch into delay slot adds an extra cycle
else *adj=ccadj[t];
}
else
{
*adj=0;
}
- count=ccadj[i];
+ count = ccadj[i];
+ count_plus2 = count + CLOCK_ADJUST(2);
if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
// Idle loop
if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
emit_jmp(0);
}
else if(*adj==0||invert) {
- int cycles=CLOCK_ADJUST(count+2);
+ int cycles = count_plus2;
// faster loop HACK
#if 0
if (t&&*adj) {
int rel=t-i;
if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
- cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
+ cycles=*adj+count+2-*adj;
}
#endif
- emit_addimm_and_set_flags(cycles,HOST_CCREG);
- jaddr=out;
+ emit_addimm_and_set_flags(cycles, HOST_CCREG);
+ jaddr = out;
emit_jns(0);
}
else
{
- emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
- jaddr=out;
+ emit_cmpimm(HOST_CCREG, -count_plus2);
+ jaddr = out;
emit_jns(0);
}
- add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
+ add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:count_plus2,i,addr,taken,0);
}
static void do_ccstub(int n)
}
// Update cycle count
assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
- if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
+ if(stubs[n].a) emit_addimm(HOST_CCREG,(int)stubs[n].a,HOST_CCREG);
emit_far_call(cc_interrupt);
- if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
+ if(stubs[n].a) emit_addimm(HOST_CCREG,-(int)stubs[n].a,HOST_CCREG);
if(stubs[n].d==TAKEN) {
if(internal_branch(ba[i]))
load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
}
}
-static void ujump_assemble(int i,struct regstat *i_regs)
+static void ujump_assemble(int i, const struct regstat *i_regs)
{
int ra_done=0;
if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
#endif
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal_branch(ba[i]))
assem_debug("branch: internal\n");
#endif
}
-static void rjump_assemble(int i,struct regstat *i_regs)
+static void rjump_assemble(int i, const struct regstat *i_regs)
{
int temp;
int rs,cc;
//do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
//if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
//assert(adj==0);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
if(dops[i+1].itype==COP0&&(source[i+1]&0x3f)==0x10)
// special case for RFE
#endif
}
-static void cjump_assemble(int i,struct regstat *i_regs)
+static void cjump_assemble(int i, const struct regstat *i_regs)
{
- signed char *i_regmap=i_regs->regmap;
+ const signed char *i_regmap = i_regs->regmap;
int cc;
int match;
match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(unconditional) {
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
}
}
else if(nop) {
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else {
void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
- if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
//printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
assert(s1l>=0);
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if (match && (!internal || !dops[(ba[i]-start)>>2].is_ds)) {
if(adj) {
- emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ emit_addimm(cc,-adj,cc);
add_to_linker(out,ba[i],internal);
}else{
emit_addnop(13);
}else
#endif
{
- if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ if(adj) emit_addimm(cc,-adj,cc);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
if(nottaken1) set_jump_target(nottaken1, out);
if(adj) {
- if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
+ if(!invert) emit_addimm(cc,adj,cc);
}
} // (!unconditional)
} // if(ooo)
// load regs
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_regs(regs[i].regmap,branch_regs[i].regmap,ROREG,ROREG);
load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
assem_debug("cycle count (adj)\n");
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
set_jump_target(nottaken, out);
assem_debug("2:\n");
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+ // load regs
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
address_generation(i+1,&branch_regs[i],0);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
+ if (ram_offset)
+ load_regs(regs[i].regmap,branch_regs[i].regmap,ROREG,ROREG);
+ load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
if (cc == -1) {
// Cycle count isn't in a register, temporarily load it then write it out
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else{
cc=get_reg(i_regmap,CCREG);
assert(cc==HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
}
}
-static void sjump_assemble(int i,struct regstat *i_regs)
+static void sjump_assemble(int i, const struct regstat *i_regs)
{
- signed char *i_regmap=i_regs->regmap;
+ const signed char *i_regmap = i_regs->regmap;
int cc;
int match;
match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(unconditional) {
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
}
}
else if(nevertaken) {
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else {
void *nottaken = NULL;
do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
- if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
{
assert(s1l>=0);
if((dops[i].opcode2&0xf)==0) // BLTZ/BLTZAL
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if (match && (!internal || !dops[(ba[i] - start) >> 2].is_ds)) {
if(adj) {
- emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ emit_addimm(cc,-adj,cc);
add_to_linker(out,ba[i],internal);
}else{
emit_addnop(13);
}else
#endif
{
- if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ if(adj) emit_addimm(cc,-adj,cc);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
}
if(adj) {
- if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
+ if(!invert) emit_addimm(cc,adj,cc);
}
} // (!unconditional)
} // if(ooo)
// load regs
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_regs(regs[i].regmap,branch_regs[i].regmap,ROREG,ROREG);
load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
assem_debug("cycle count (adj)\n");
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
if (cc == -1) {
// Cycle count isn't in a register, temporarily load it then write it out
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else{
cc=get_reg(i_regmap,CCREG);
assert(cc==HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
}
}
-static void pagespan_assemble(int i,struct regstat *i_regs)
+static void pagespan_assemble(int i, const struct regstat *i_regs)
{
int s1l=get_reg(i_regs->regmap,dops[i].rs1);
int s2l=get_reg(i_regs->regmap,dops[i].rs2);
if((dops[i].opcode&0x2e)==4||dops[i].opcode==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
}
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
if(dops[i].opcode==2) // J
{
unconditional=1;
emit_writeword(HOST_BTREG,&branch_target);
load_regs(regs[0].regmap_entry,regs[0].regmap,dops[0].rs1,dops[0].rs2);
address_generation(0,®s[0],regs[0].regmap_entry);
- if(dops[0].itype==STORE||dops[0].itype==STORELR||(dops[0].opcode&0x3b)==0x39||(dops[0].opcode&0x3b)==0x3a)
+ if (ram_offset && (dops[0].is_load || dops[0].is_store))
+ load_regs(regs[0].regmap_entry,regs[0].regmap,ROREG,ROREG);
+ if (dops[0].is_store)
load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
is_delayslot=0;
- switch(dops[0].itype) {
- case ALU:
- alu_assemble(0,®s[0]);break;
- case IMM16:
- imm16_assemble(0,®s[0]);break;
- case SHIFT:
- shift_assemble(0,®s[0]);break;
- case SHIFTIMM:
- shiftimm_assemble(0,®s[0]);break;
- case LOAD:
- load_assemble(0,®s[0]);break;
- case LOADLR:
- loadlr_assemble(0,®s[0]);break;
- case STORE:
- store_assemble(0,®s[0]);break;
- case STORELR:
- storelr_assemble(0,®s[0]);break;
- case COP0:
- cop0_assemble(0,®s[0]);break;
- case COP1:
- cop1_assemble(0,®s[0]);break;
- case C1LS:
- c1ls_assemble(0,®s[0]);break;
- case COP2:
- cop2_assemble(0,®s[0]);break;
- case C2LS:
- c2ls_assemble(0,®s[0]);break;
- case C2OP:
- c2op_assemble(0,®s[0]);break;
- case MULTDIV:
- multdiv_assemble(0,®s[0]);
- multdiv_prepare_stall(0,®s[0]);
- break;
- case MOV:
- mov_assemble(0,®s[0]);break;
+ switch (dops[0].itype) {
case SYSCALL:
case HLECALL:
case INTCALL:
case CJUMP:
case SJUMP:
SysPrintf("Jump in the delay slot. This is probably a bug.\n");
+ break;
+ default:
+ assemble(0, ®s[0], 0);
}
int btaddr=get_reg(regs[0].regmap,BTREG);
if(btaddr<0) {
#endif
arch_init();
new_dynarec_test();
-#ifndef RAM_FIXED
ram_offset=(uintptr_t)rdram-0x80000000;
-#endif
if (ram_offset!=0)
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
}
dops[i].is_jump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP || dops[i].itype == CJUMP || dops[i].itype == SJUMP);
dops[i].is_ujump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP); // || (source[i] >> 16) == 0x1000 // beq r0,r0
+ dops[i].is_load = (dops[i].itype == LOAD || dops[i].itype == LOADLR || op == 0x32); // LWC2
+ dops[i].is_store = (dops[i].itype == STORE || dops[i].itype == STORELR || op == 0x3a); // SWC2
/* messy cases to just pass over to the interpreter */
if (i > 0 && dops[i-1].is_jump) {
}
// Count cycles in between branches
- ccadj[i]=cc;
+ ccadj[i] = CLOCK_ADJUST(cc);
if (i > 0 && (dops[i-1].is_jump || dops[i].itype == SYSCALL || dops[i].itype == HLECALL))
{
cc=0;
if(dops[i+1].rs2==regmap_pre[i][hr]) nr|=1<<hr;
if(dops[i+1].rs1==regs[i].regmap_entry[hr]) nr|=1<<hr;
if(dops[i+1].rs2==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i+1].itype==STORE || dops[i+1].itype==STORELR || (dops[i+1].opcode&0x3b)==0x39 || (dops[i+1].opcode&0x3b)==0x3a) {
+ if(ram_offset && (dops[i+1].is_load || dops[i+1].is_store)) {
+ if(regmap_pre[i][hr]==ROREG) nr|=1<<hr;
+ if(regs[i].regmap_entry[hr]==ROREG) nr|=1<<hr;
+ }
+ if(dops[i+1].is_store) {
if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
}
if(dops[i].rs2==regmap_pre[i][hr]) nr|=1<<hr;
if(dops[i].rs1==regs[i].regmap_entry[hr]) nr|=1<<hr;
if(dops[i].rs2==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i].itype==STORE || dops[i].itype==STORELR || (dops[i].opcode&0x3b)==0x39 || (dops[i].opcode&0x3b)==0x3a) {
+ if(ram_offset && (dops[i].is_load || dops[i].is_store)) {
+ if(regmap_pre[i][hr]==ROREG) nr|=1<<hr;
+ if(regs[i].regmap_entry[hr]==ROREG) nr|=1<<hr;
+ }
+ if(dops[i].is_store) {
if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
}
if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
if(dops[i].is_jump)
{
- int map=0,temp=0;
- if(dops[i+1].itype==STORE || dops[i+1].itype==STORELR ||
- (dops[i+1].opcode&0x3b)==0x39 || (dops[i+1].opcode&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
- map=INVCP;
- }
- if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR ||
- dops[i+1].itype==C1LS || dops[i+1].itype==C2LS)
- temp=FTEMP;
+ int map1 = 0, map2 = 0, temp = 0; // or -1 ??
+ if (dops[i+1].is_load || dops[i+1].is_store)
+ map1 = ROREG;
+ if (dops[i+1].is_store)
+ map2 = INVCP;
+ if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR || dops[i+1].itype==C2LS)
+ temp = FTEMP;
if((regs[i].regmap[hr]&63)!=dops[i].rs1 && (regs[i].regmap[hr]&63)!=dops[i].rs2 &&
(regs[i].regmap[hr]&63)!=dops[i].rt1 && (regs[i].regmap[hr]&63)!=dops[i].rt2 &&
(regs[i].regmap[hr]&63)!=dops[i+1].rt1 && (regs[i].regmap[hr]&63)!=dops[i+1].rt2 &&
(regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
- regs[i].regmap[hr]!=map )
+ regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2)
{
regs[i].regmap[hr]=-1;
regs[i].isconst&=~(1<<hr);
(branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
- branch_regs[i].regmap[hr]!=map)
+ branch_regs[i].regmap[hr]!=map1 && branch_regs[i].regmap[hr]!=map2)
{
branch_regs[i].regmap[hr]=-1;
branch_regs[i].regmap_entry[hr]=-1;
// Non-branch
if(i>0)
{
- int map=-1,temp=-1;
- if(dops[i].itype==STORE || dops[i].itype==STORELR ||
- (dops[i].opcode&0x3b)==0x39 || (dops[i].opcode&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
- map=INVCP;
- }
- if(dops[i].itype==LOADLR || dops[i].itype==STORELR ||
- dops[i].itype==C1LS || dops[i].itype==C2LS)
- temp=FTEMP;
+ int map1 = -1, map2 = -1, temp=-1;
+ if (dops[i].is_load || dops[i].is_store)
+ map1 = ROREG;
+ if (dops[i].is_store)
+ map2 = INVCP;
+ if (dops[i].itype==LOADLR || dops[i].itype==STORELR || dops[i].itype==C2LS)
+ temp = FTEMP;
if((regs[i].regmap[hr]&63)!=dops[i].rt1 && (regs[i].regmap[hr]&63)!=dops[i].rt2 &&
regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
- (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
- (dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG))
+ (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2 &&
+ //(dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG)
+ regs[i].regmap[hr] != CCREG)
{
if(i<slen-1&&!dops[i].is_ds) {
assert(regs[i].regmap[hr]<64);
// branch target entry point
instr_addr[i] = out;
assem_debug("<->\n");
- drc_dbg_emit_do_cmp(i);
+ drc_dbg_emit_do_cmp(i, ccadj[i]);
// load regs
if(regs[i].regmap_entry[HOST_CCREG]==CCREG&®s[i].regmap[HOST_CCREG]!=CCREG)
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2&&(dops[i+1].rs2!=dops[i].rt1||dops[i].rt1==0))
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
- if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a)
+ if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store))
+ load_regs(regs[i].regmap_entry,regs[i].regmap,ROREG,ROREG);
+ if (dops[i+1].is_store)
load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
}
else if(i+1<slen)
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
}
// TODO: if(is_ooo(i)) address_generation(i+1);
- if(dops[i].itype==CJUMP)
+ if (dops[i].itype == CJUMP)
load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
- if(dops[i].itype==STORE||dops[i].itype==STORELR||(dops[i].opcode&0x3b)==0x39||(dops[i].opcode&0x3b)==0x3a)
+ if (ram_offset && (dops[i].is_load || dops[i].is_store))
+ load_regs(regs[i].regmap_entry,regs[i].regmap,ROREG,ROREG);
+ if (dops[i].is_store)
load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
- // assemble
- switch(dops[i].itype) {
- case ALU:
- alu_assemble(i,®s[i]);break;
- case IMM16:
- imm16_assemble(i,®s[i]);break;
- case SHIFT:
- shift_assemble(i,®s[i]);break;
- case SHIFTIMM:
- shiftimm_assemble(i,®s[i]);break;
- case LOAD:
- load_assemble(i,®s[i]);break;
- case LOADLR:
- loadlr_assemble(i,®s[i]);break;
- case STORE:
- store_assemble(i,®s[i]);break;
- case STORELR:
- storelr_assemble(i,®s[i]);break;
- case COP0:
- cop0_assemble(i,®s[i]);break;
- case COP1:
- cop1_assemble(i,®s[i]);break;
- case C1LS:
- c1ls_assemble(i,®s[i]);break;
- case COP2:
- cop2_assemble(i,®s[i]);break;
- case C2LS:
- c2ls_assemble(i,®s[i]);break;
- case C2OP:
- c2op_assemble(i,®s[i]);break;
- case MULTDIV:
- multdiv_assemble(i,®s[i]);
- multdiv_prepare_stall(i,®s[i]);
- break;
- case MOV:
- mov_assemble(i,®s[i]);break;
- case SYSCALL:
- syscall_assemble(i,®s[i]);break;
- case HLECALL:
- hlecall_assemble(i,®s[i]);break;
- case INTCALL:
- intcall_assemble(i,®s[i]);break;
- case UJUMP:
- ujump_assemble(i,®s[i]);ds=1;break;
- case RJUMP:
- rjump_assemble(i,®s[i]);ds=1;break;
- case CJUMP:
- cjump_assemble(i,®s[i]);ds=1;break;
- case SJUMP:
- sjump_assemble(i,®s[i]);ds=1;break;
- case SPAN:
- pagespan_assemble(i,®s[i]);break;
- }
+
+ ds = assemble(i, ®s[i], ccadj[i]);
+
if (dops[i].is_ujump)
literal_pool(1024);
else
store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
}
else
{
store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
add_to_linker(out,start+i*4,0);
emit_jmp(0);
}
}
expirep=(expirep+1)&65535;
}
+#ifdef ASSEM_PRINT
+ fflush(stdout);
+#endif
return 0;
}