int new_dynarec_did_compile;
int new_dynarec_hacks;
u_int stop_after_jal;
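+// ram_offset is added to guest RAM addresses to reach the host rdram buffer.
+// With RAM_FIXED the mapping is assumed to be 1:1 so it stays 0; otherwise it
+// is computed in new_dynarec_init() below.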
+#ifndef RAM_FIXED
+ static u_int ram_offset;
+#else
+ static const u_int ram_offset=0;
+#endif
extern u_char restore_candidate[512];
extern int cycle_count;
if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
get_bounds((int)head->addr,&start,&end);
//printf("start: %x end: %x\n",start,end);
- if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
+ if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
if(page<2048) { // RAM
struct ll_entry *head;
u_int addr_min=~0, addr_max=0;
- int mask=RAM_SIZE-1;
+ u_int mask=RAM_SIZE-1;
+ u_int addr_main=0x80000000|(addr&mask);
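+ // work with the 0x80000000-based form of addr, whichever RAM mirror it came from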
int pg1;
- inv_code_start=addr&~0xfff;
- inv_code_end=addr|0xfff;
+ inv_code_start=addr_main&~0xfff;
+ inv_code_end=addr_main|0xfff;
pg1=page;
if (pg1>0) {
// must check previous page too because of spans..
for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
u_int start,end;
get_bounds((int)head->addr,&start,&end);
- if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
+ if(ram_offset) {
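+ // translate the block bounds from host pointers back into guest addresses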
+ start-=ram_offset;
+ end-=ram_offset;
+ }
+ if(start<=addr_main&&addr_main<end) {
if(start<addr_min) addr_min=start;
if(end>addr_max) addr_max=end;
}
- else if(addr<start) {
+ else if(addr_main<start) {
if(start<inv_code_end)
inv_code_end=start-1;
}
return;
}
else {
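+ // nothing to invalidate; move the computed range back into the mirror addr was given in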
+ inv_code_start=(addr&~mask)|(inv_code_start&mask);
+ inv_code_end=(addr&~mask)|(inv_code_end&mask);
inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
return;
}
jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
}
}
+ else if(ram_offset&&memtarget) {
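+ // address is known to hit RAM but rdram is not at the guest base:
+ // pre-add ram_offset and do the fast load through HOST_TEMPREG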
+ emit_addimm(addr,ram_offset,HOST_TEMPREG);
+ fastload_reg_override=HOST_TEMPREG;
+ }
}else{ // using tlb
int x=0;
if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
gen_tlb_addr_r(a,map);
emit_movswl_indexed(x,a,tl);
}else{
- #ifdef RAM_OFFSET
+ #if 1 //def RAM_OFFSET
emit_movswl_indexed(x,a,tl);
#else
emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
gen_tlb_addr_r(a,map);
emit_movzwl_indexed(x,a,tl);
}else{
- #ifdef RAM_OFFSET
+ #if 1 //def RAM_OFFSET
emit_movzwl_indexed(x,a,tl);
#else
emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
#endif
}
+ else if(ram_offset&&memtarget) {
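+ // same fix for the store fast path: apply ram_offset via HOST_TEMPREG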
+ emit_addimm(addr,ram_offset,HOST_TEMPREG);
+ faststore_reg_override=HOST_TEMPREG;
+ }
}else{ // using tlb
int x=0;
if (opcode[i]==0x28) x=3; // SB
gen_tlb_addr_w(a,map);
emit_writehword_indexed(tl,x,a);
}else
- emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
+ //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
+ emit_writehword_indexed(tl,x,a);
}
type=STOREH_STUB;
}
if(!c) {
jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
}
+ else if(ram_offset&&memtarget) {
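+ // same ram_offset handling for this fast path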
+ emit_addimm(ar,ram_offset,HOST_TEMPREG);
+ fastio_reg_override=HOST_TEMPREG;
+ }
if (opcode[i]==0x32) { // LWC2
#ifdef HOST_IMM_ADDR32
if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
{
printf("Init new dynarec\n");
out=(u_char *)BASE_ADDR;
-#ifdef BASE_ADDR_FIXED
+#if BASE_ADDR_FIXED
if (mmap (out, 1<<TARGET_SIZE_2,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
#endif
tlb_hacks();
arch_init();
+#ifndef RAM_FIXED
+ ram_offset=(u_int)rdram-0x80000000;
+#endif
+ if (ram_offset!=0)
+ printf("warning: RAM is not directly mapped, performance will suffer\n");
}
void new_dynarec_cleanup()
{
int n;
- #ifdef BASE_ADDR_FIXED
+ #if BASE_ADDR_FIXED
if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
#endif
for(n=0;n<4096;n++) ll_clear(jump_in+n);