void add_literal(int addr,int val)
{
assert(literalcount<sizeof(literals)/sizeof(literals[0]));
literals[literalcount][0]=addr;
literals[literalcount][1]=val;
literalcount++;
}
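/* add_literal queues (instruction address, constant) pairs so large constants
   can be emitted in a pool after the code that loads them.  A minimal sketch
   of how such a queue is typically drained at pool-flush time -- assuming an
   emit pointer `out` and ARM-style PC-relative loads; illustrative, not the
   file's exact routine: */
void flush_literals_sketch(void)
{
  int i;
  for(i=0;i<literalcount;i++) {
    u_int *ptr=(u_int *)literals[i][0];   // the load waiting for its constant
    u_int offset=(u_int)out-(u_int)ptr-8; // PC-relative distance (ARM PC reads ahead by 8)
    *ptr|=offset;                         // patch the offset field of the load
    output_w32(literals[i][1]);           // emit the constant into the pool
  }
  literalcount=0;                         // the queue is empty again
}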
void alloc_reg(struct regstat *cur,int i,signed char reg)
{
int r,hr;
int preferred_reg = (reg&7);
if(reg==CCREG) preferred_reg=HOST_CCREG;
if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
// Don't allocate unused registers
if((cur->u>>reg)&1) return;
// see if it's already allocated
for(hr=0;hr<HOST_REGS;hr++)
{
if(cur->regmap[hr]==reg) return;
}
// Keep the same mapping if the register was already allocated in a loop
preferred_reg = loop_reg(i,reg,preferred_reg);
// Try to allocate the preferred register
if(cur->regmap[preferred_reg]==-1) {
cur->regmap[preferred_reg]=reg;
cur->isconst&=~(1<<preferred_reg);
return;
}
// Clear any unneeded registers
// We try to keep the mapping consistent, if possible, because it
// makes branches easier (especially loops). So we try to allocate
return;
}
}
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
u_char hsn[MAXREG+1];
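/* hsn[] holds a per-guest-register "hot spot number" -- roughly how soon the
   register will be needed again, filled in by a lookahead pass.  A hedged
   sketch of the eviction choice that follows: spill the host register whose
   guest register is needed furthest in the future (illustrative helper, not
   the file's exact code): */
int pick_victim_sketch(struct regstat *cur,u_char *hsn)
{
  int hr,best=-1,best_hsn=-1;
  for(hr=0;hr<HOST_REGS;hr++) {
    signed char r=cur->regmap[hr];
    if(hr==EXCLUDE_REG||r<0) continue;        // skip reserved and unmapped slots
    if(hsn[r&63]>best_hsn) {best_hsn=hsn[r&63];best=hr;}
  }
  return best; // host register to evict
}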
void alloc_reg64(struct regstat *cur,int i,signed char reg)
{
int preferred_reg = 8+(reg&1);
int r,hr;
// allocate the lower 32 bits
alloc_reg(cur,i,reg);
// Don't allocate unused registers
if((cur->uu>>reg)&1) return;
// see if the upper half is already allocated
for(hr=0;hr<HOST_REGS;hr++)
{
if(cur->regmap[hr]==reg+64) return;
}
// Keep the same mapping if the register was already allocated in a loop
preferred_reg = loop_reg(i,reg,preferred_reg);
// Try to allocate the preferred register
if(cur->regmap[preferred_reg]==-1) {
cur->regmap[preferred_reg]=reg|64;
cur->isconst&=~(1<<preferred_reg);
return;
}
// Clear any unneeded registers
// We try to keep the mapping consistent, if possible, because it
// makes branches easier (especially loops). So we try to allocate
return;
}
}
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
u_char hsn[MAXREG+1];
void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
{
int r,hr;
int preferred_reg = -1;
// see if it's already allocated
for(hr=0;hr<HOST_REGS;hr++)
{
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
}
// Try to allocate any available register
for(hr=HOST_REGS-1;hr>=0;hr--) {
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
return;
}
}
// Find an unneeded register
for(hr=HOST_REGS-1;hr>=0;hr--)
{
}
}
}
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
// TODO: we might want to follow unconditional jumps here
void alloc_arm_reg(struct regstat *cur,int i,signed char reg,char hr)
{
int n;
int dirty=0;
// see if it's already allocated (and dealloc it)
for(n=0;n<HOST_REGS;n++)
{
if(n!=EXCLUDE_REG&&cur->regmap[n]==reg) {dirty=(cur->dirty>>n)&1;cur->regmap[n]=-1;}
}
}
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->dirty|=dirty<<hr;
u_int bitmap=needs_clear_cache[i];
if(bitmap) {
u_int start,end;
for(j=0;j<32;j++)
{
if(bitmap&(1<<j)) {
start=(u_int)BASE_ADDR+i*131072+j*4096;
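/* A hedged sketch of how this scan typically continues: adjacent set bits are
   merged so each run of dirty 4K pages costs one icache flush instead of one
   flush per page: */
end=start+4095;
while(j+1<32&&(bitmap&(1<<(j+1)))) {end+=4096;j++;} // extend over contiguous dirty pages
__clear_cache((void *)start,(void *)end);           // flush the whole run at once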
#define STORE 2 // Store
#define LOADLR 3 // Unaligned load
#define STORELR 4 // Unaligned store
#define MOV 5 // Move
#define ALU 6 // Arithmetic/logic
#define MULTDIV 7 // Multiply/divide
#define SHIFT 8 // Shift by register
for (hr=0;hr<HOST_REGS;hr++) {
if((cur->dirty>>hr)&1) {
reg=cur->regmap[hr];
if(reg>=64)
if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
}
}
int j;
int b=-1;
int rn=10;
if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
{
if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
void alloc_all(struct regstat *cur,int i)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
{
struct ll_entry *next;
while(*head) {
if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
{
inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
#ifdef __arm__
do_clear_cache();
#endif
// Don't trap writes
invalid_code[block]=1;
int map=get_reg(i_regs->regmap,ROREG);
if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
#else
if((u_int)rdram!=0x80000000)
emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
#endif
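/* The addimm above rebases guest addresses when the RDRAM buffer is not
   mapped at the emulated base 0x80000000.  A hedged one-line illustration of
   the mapping (helper name is hypothetical): */
static inline u_int guest_to_host_sketch(u_int guest_addr)
{
  return guest_addr+((u_int)rdram-(u_int)0x80000000); // host address for a KSEG0 guest address
}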
int agr=AGEN1+(i&1);
if(itype[i]==LOAD) {
ra=get_reg(i_regs->regmap,rt1[i]);
if(ra<0) ra=get_reg(i_regs->regmap,-1);
assert(ra>=0);
}
if(itype[i]==LOADLR) {
{
return 0;
}
else
if((i_dirty>>hr)&1)
{
if(i_regmap[hr]<TEMPREG)
if(rs1[i]) {
if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
emit_loadreg(rs1[i],s1l);
}
else {
if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
emit_loadreg(rs2[i],s1l);
load_all_regs(branch_regs[i].regmap);
}
emit_jmp(stubs[n][2]); // return address
/* This works but uses a lot of memory...
emit_readword((int)&last_count,ECX);
emit_add(HOST_CCREG,ECX,EAX);
{
link_addr[linkcount][0]=addr;
link_addr[linkcount][1]=target;
link_addr[linkcount][2]=ext;
linkcount++;
}
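/* link_addr queues branches whose targets lie outside the block being
   compiled.  A hedged sketch of how the queue is resolved once the block is
   finished: patch directly if the target is already compiled, otherwise point
   the branch at a stub that re-enters the compiler (loop is illustrative, not
   verbatim): */
void resolve_links_sketch(void)
{
  int i;
  for(i=0;i<linkcount;i++) {
    void *stub=out;
    emit_extjump(link_addr[i][0],link_addr[i][1]);       // fallback: jump out to the compiler
    void *addr=check_addr(link_addr[i][1]);              // already-compiled target?
    if(addr) set_jump_target(link_addr[i][0],(int)addr); // yes: link the blocks directly
    else set_jump_target(link_addr[i][0],(int)stub);     // no: go through the stub
  }
  linkcount=0;
}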
#endif
{
#ifdef REG_PREFETCH
if(temp>=0)
{
if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
}
address_generation(i+1,i_regs,regs[i].regmap_entry);
#ifdef REG_PREFETCH
int temp=get_reg(branch_regs[i].regmap,PTEMP);
if(rt1[i]==31&&temp>=0)
{
int return_address=start+i*4+8;
if(get_reg(branch_regs[i].regmap,31)>0)
if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
}
#endif
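/* The prefetch above warms the hash bucket that the eventual jr $ra lookup
   will hit: the 16-bit index folds the high and low halves of the return
   address together.  The same hash as a hedged helper: */
static inline u_int hash_index_sketch(u_int vaddr)
{
  return ((vaddr>>16)^vaddr)&0xFFFF; // 65536 buckets; cheap and well spread for PC values
}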
assert(rt>=0);
return_address=start+i*4+8;
#ifdef REG_PREFETCH
if(temp>=0)
{
if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
}
}
address_generation(i+1,i_regs,regs[i].regmap_entry);
#ifdef REG_PREFETCH
if(rt1[i]==31)
{
if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
int return_address=start+i*4+8;
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if(i>(ba[i]-start)>>2) invert=1;
#endif
if(ooo[i]) {
s1l=get_reg(branch_regs[i].regmap,rs1[i]);
s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
cc=get_reg(branch_regs[i].regmap,CCREG);
assert(cc==HOST_CCREG);
if(unconditional)
store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
//do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
//assem_debug("cycle count (adj)\n");
emit_jne(0);
}
} // if(!only32)
//printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
assert(s1l>=0);
if(opcode[i]==4) // BEQ
emit_jne(1);
}
} // if(!only32)
//printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
assert(s1l>=0);
if((opcode[i]&0x2f)==4) // BEQ
}
cc=get_reg(branch_regs[i].regmap,CCREG);
assert(cc==HOST_CCREG);
if(unconditional)
store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
//do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
assem_debug("cycle count (adj)\n");
}
}
} // if(!only32)
if(invert) {
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
{
}
} // if(!only32)
if(invert) {
if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
{
// If subroutine call, flag return address as a possible branch target
if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
if(ba[i]<start || ba[i]>=(start+slen*4))
{
// Branch out of this block, flush all regs
u=1;
uu=1;
gte_u=gte_u_unknown;
/* Hexagon hack
if(itype[i]==UJUMP&&rt1[i]==31)
{
uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
if(i>istart) {
if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
{
// Don't store a register immediately after writing it,
// may prevent dual-issue.
assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
//printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
//printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
//if(debug)
//printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
//printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
/*if(Count>=312978186) {
unsigned int type,op,op2;
//printf("addr = %x source = %x %x\n", addr,source,source[0]);
/* Pass 1 disassembly */
for(i=0;!done;i++) {
/* Pass 2 - Register dependencies and branch targets */
unneeded_registers(0,slen-1,0);
/* Pass 3 - Register allocation */
struct regstat current; // Current register allocations/status
unneeded_reg_upper[0]=1;
current.regmap[HOST_BTREG]=BTREG;
}
for(i=0;i<slen;i++)
{
if(bt[i])
}
} else {
// First instruction expects CCREG to be allocated
if(i==0&&hr==HOST_CCREG)
regs[i].regmap_entry[hr]=CCREG;
else
regs[i].regmap_entry[hr]=-1;
pagespan_alloc(&current,i);
break;
}
// Drop the upper half of registers that have become 32-bit
current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
}
} else {
// Branches expect CCREG to be allocated at the target
if(regmap_pre[i][hr]==CCREG)
regs[i].regmap_entry[hr]=CCREG;
else
regs[i].regmap_entry[hr]=-1;
if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
regs[i].waswritten=current.waswritten;
}
/* Pass 4 - Cull unused host registers */
uint64_t nr=0;
for (i=slen-1;i>=0;i--)
{
int hr;
}
// Save it
needed_reg[i]=nr;
// Deallocate unneeded registers
for(hr=0;hr<HOST_REGS;hr++)
{
}
}
}
/* Pass 5 - Pre-allocate registers */
// If a register is allocated during a loop, try to allocate it for the
// entire loop, if possible. This avoids loading/storing registers
// inside of the loop.
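// For example (hedged illustration), in a tight copy loop such as
//     loop: lw    t0,0(a0)
//           sw    t0,0(a1)
//           addiu a0,a0,4
//           addiu a1,a1,4
//           bne   a0,a2,loop
// pinning a0/a1/a2 to the same host registers across the backward branch
// means the branch needs no register shuffling; spilling any of them would
// add a store before the branch and a reload at the loop head.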
signed char f_regmap[HOST_REGS];
clear_all_regs(f_regmap);
for(i=0;i<slen-1;i++)
{
if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
{
if(ba[i]>=start && ba[i]<(start+i*4))
if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
}
}
if(ooo[i]) {
if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
f_regmap[hr]=branch_regs[i].regmap[hr];
}else{
if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
f_regmap[hr]=branch_regs[i].regmap[hr];
}
// Avoid dirty->clean transition
if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
{
if(ooo[j]) {
if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
break;
}else{
if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
break;
}
if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
regs[k].isconst&=~(1<<HOST_CCREG);
k++;
}
regs[j].regmap_entry[HOST_CCREG]=CCREG;
}
// Work backwards from the branch target
if(j>i&&f_regmap[HOST_CCREG]==CCREG)
}
}
}
// Cache memory offset or tlb map pointer if a register is available
#ifndef HOST_IMM_ADDR32
#ifndef RAM_OFFSET
}
}
#endif
// This allocates registers (if possible) one instruction prior
// to use, which can avoid a load-use penalty on certain CPUs.
for(i=0;i<slen-1;i++)
}
}
}
// Load source into target register
if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
{
}
}
if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
if(itype[i+1]==LOAD)
hr=get_reg(regs[i+1].regmap,rt1[i+1]);
if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
hr=get_reg(regs[i+1].regmap,FTEMP);
}
}
}
/* Pass 6 - Optimize clean/dirty state */
clean_registers(0,slen-1,1);
/* Pass 7 - Identify 32-bit registers */
for (i=slen-1;i>=0;i--)
{
//printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
memcpy(copy,source,slen*4);
copy+=slen*4;
#ifdef __arm__
__clear_cache((void *)beginning,out);
#endif
// If we're within 256K of the end of the buffer,
// start over from the beginning. (Is 256K enough?)
if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
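// The output buffer behaves as a ring: when the emit pointer nears the end,
// it wraps back to BASE_ADDR, and the MAX_OUTPUT_BLOCK_SIZE headroom ensures
// the block currently being emitted cannot run off the end before this check
// fires again (hence the "Is 256K enough?" question above).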
// Trap writes to any of the pages we compiled
for(i=start>>12;i<=(start+slen*4)>>12;i++) {
invalid_code[i]=0;
invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
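// All three mirrors must be re-marked: 0x00000000 (KUSEG), 0x80000000 (KSEG0,
// cached) and 0xa0000000 (KSEG1, uncached) alias the same physical RAM, so a
// freshly compiled page has to be valid in every view of it at once.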
/* Pass 10 - Free memory by expiring oldest blocks */
int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
while(expirep!=end)
{
case 3:
// Clear jump_out
#ifdef __arm__
if((expirep&2047)==0)
do_clear_cache();
#endif
ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
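// Hedged reading of the expiry walk: the cache is divided into 65536 slices,
// `end` is the slice just ahead of the current write position, and expirep
// advances one slice per iteration -- e.g. expirep=(expirep+1)&65535; -- so
// the oldest blocks are unlinked just before the emitter would overwrite them.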