X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=libpcsxcore%2Fnew_dynarec%2Fnew_dynarec.c;h=bb6ff0b3264524566478da63e7fd1d8ded0ee369;hb=58e485d6efcecfe0fb7d072d705cf658e6162b4e;hp=b0bfb239efa472ca53cc243efef239148e92c236;hpb=186935dccdeb09590c0858b7510c769f5ccb06de;p=pcsx_rearmed.git

diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index b0bfb239..bb6ff0b3 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -26,9 +26,17 @@
 #ifdef __MACH__
 #include
 #endif
+#ifdef _3DS
+#include <3ds_utils.h>
+#endif
+#ifdef VITA
+#include
+static int sceBlock;
+int getVMBlock();
+#endif

 #include "new_dynarec_config.h"
-#include "emu_if.h" //emulator interface
+#include "backends/psx/emu_if.h" //emulator interface

 //#define DISASM
 //#define assem_debug printf
@@ -37,13 +45,17 @@
 #define inv_debug(...)

 #ifdef __i386__
-#include "assem_x86.h"
+#include "x86/assem_x86.h"
 #endif
 #ifdef __x86_64__
-#include "assem_x64.h"
+#include "x64/assem_x64.h"
 #endif
 #ifdef __arm__
-#include "assem_arm.h"
+#include "arm/assem_arm.h"
+#endif
+
+#ifdef VITA
+int _newlib_vm_size_user = 1 << TARGET_SIZE_2;
 #endif

 #define MAXBLOCK 4096
@@ -265,11 +277,20 @@ static int tracedebug=0;
 static void mprotect_w_x(void *start, void *end, int is_x)
 {
 #ifdef NO_WRITE_EXEC
+  #if defined(VITA)
+  // *Open* enables write on all memory that was
+  // allocated by sceKernelAllocMemBlockForVM()?
+  if (is_x)
+    sceKernelCloseVMDomain();
+  else
+    sceKernelOpenVMDomain();
+  #else
   u_long mstart = (u_long)start & ~4095ul;
   u_long mend = (u_long)end;
   if (mprotect((void *)mstart, mend - mstart,
                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
+  #endif
 #endif
 }

@@ -287,8 +308,9 @@ static void end_tcache_write(void *start, void *end)
 #elif defined(__MACH__)
   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
 #elif defined(VITA)
-  int block = sceKernelFindMemBlockByAddr(start, len);
-  sceKernelSyncVMDomain(block, start, len);
+  sceKernelSyncVMDomain(sceBlock, start, len);
+  #elif defined(_3DS)
+  ctr_flush_invalidate_cache();
 #else
   __clear_cache(start, end);
 #endif
@@ -344,14 +366,16 @@ static u_int get_vpage(u_int vaddr)
 // This is called from the recompiled JR/JALR instructions
 void *get_addr(u_int vaddr)
 {
-  u_int page=get_page(vaddr);
-  u_int vpage=get_vpage(vaddr);
-  struct ll_entry *head;
+  struct ll_entry *head = NULL;
+  u_int page = get_page(vaddr);
+  u_int vpage = get_vpage(vaddr);
   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
   head=jump_in[page];
-  while(head!=NULL) {
-    if(head->vaddr==vaddr) {
-      //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
+  while(head!=NULL)
+  {
+    if(head->vaddr==vaddr)
+    {
+      //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
       u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
       ht_bin[3]=ht_bin[1];
       ht_bin[2]=ht_bin[0];
@@ -362,39 +386,47 @@ void *get_addr(u_int vaddr)
     head=head->next;
   }
   head=jump_dirty[vpage];
-  while(head!=NULL) {
-    if(head->vaddr==vaddr) {
+  while(head!=NULL)
+  {
+    if(head->vaddr==vaddr)
+    {
      //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
      // Don't restore blocks which are about to expire from the cache
      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
-      if(verify_dirty(head->addr)) {
-        //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
-        invalid_code[vaddr>>12]=0;
-        inv_code_start=inv_code_end=~0;
-        if(vpage<2048) {
-          restore_candidate[vpage>>3]|=1<<(vpage&7);
-        }
-        else restore_candidate[page>>3]|=1<<(page&7);
-        u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
-        if(ht_bin[0]==vaddr) {
-          ht_bin[1]=(u_int)head->addr; // Replace existing entry
-        }
-        else
+      if(verify_dirty(head->addr))
       {
-          ht_bin[3]=ht_bin[1];
-          ht_bin[2]=ht_bin[0];
-          ht_bin[1]=(int)head->addr;
-          ht_bin[0]=vaddr;
+        //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
+        invalid_code[vaddr>>12]=0;
+        inv_code_start=inv_code_end=~0;
+        if(vpage<2048)
+        {
+          restore_candidate[vpage>>3]|=1<<(vpage&7);
+        }
+        else
+        {
+          restore_candidate[page>>3]|=1<<(page&7);
+        }
+        u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
+
+        if(ht_bin[0]==vaddr)
+          ht_bin[1]=(u_int)head->addr; // Replace existing entry
+        else
+        {
+          ht_bin[3]=ht_bin[1];
+          ht_bin[2]=ht_bin[0];
+          ht_bin[1]=(int)head->addr;
+          ht_bin[0]=vaddr;
+        }
+        return head->addr;
       }
-        return head->addr;
-      }
     }
     head=head->next;
   }
   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
   int r=new_recompile_block(vaddr);
-  if(r==0) return get_addr(vaddr);
-  // Execute in unmapped page, generate pagefault execption
+  if(r==0)
+    return get_addr(vaddr);
+  // Execute in unmapped page, generate pagefault exception
   Status|=2;
   Cause=(vaddr<<31)|0x8;
   EPC=(vaddr&1)?vaddr-5:vaddr;
@@ -403,6 +435,7 @@ void *get_addr(u_int vaddr)
   EntryHi=BadVAddr&0xFFFFE000;
   return get_addr_ht(0x80000000);
 }
+
 // Look up address in hash table first
 void *get_addr_ht(u_int vaddr)
 {
@@ -746,13 +779,13 @@ void alloc_all(struct regstat *cur,int i)
 }

 #ifdef __i386__
-#include "assem_x86.c"
+#include "x86/assem_x86.c"
 #endif
 #ifdef __x86_64__
-#include "assem_x64.c"
+#include "x64/assem_x64.c"
 #endif
 #ifdef __arm__
-#include "assem_arm.c"
+#include "arm/assem_arm.c"
 #endif

 // Add virtual address mapping to linked list
@@ -926,23 +959,26 @@ static void invalidate_block_range(u_int block, u_int first, u_int last)
   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
   assert(last2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
+    if(vpage>2047||(head->vaddr>>12)==block)
+    { // Ignore vaddr hash collision
       get_bounds((int)head->addr,&start,&end);
       //printf("start: %x end: %x\n",start,end);
-      if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
-        if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
+      if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE)
+      {
+        if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page)
+        {
          if((((start-(u_int)rdram)>>12)&2047)>12)&2047;
          if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
        }
@@ -1033,19 +1072,23 @@ void invalidate_addr(u_int addr)

 // This is called when loading a save state.
 // Anything could have changed, so invalidate everything.
-void invalidate_all_pages()
+void invalidate_all_pages(void)
 {
   u_int page;
   for(page=0;page<4096;page++)
     invalidate_page(page);
   for(page=0;page<1048576;page++)
-    if(!invalid_code[page]) {
+  {
+    if(!invalid_code[page])
+    {
       restore_candidate[(page&2047)>>3]|=1<<(page&7);
       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
     }
-  #ifdef USE_MINI_HT
+  }
+
+#ifdef USE_MINI_HT
   memset(mini_ht,-1,sizeof(mini_ht));
-  #endif
+#endif
 }

 // Add an entry to jump_out after making a link
@@ -1070,37 +1113,48 @@ void clean_blocks(u_int page)
   struct ll_entry *head;
   inv_debug("INV: clean_blocks page=%d\n",page);
   head=jump_dirty[page];
-  while(head!=NULL) {
-    if(!invalid_code[head->vaddr>>12]) {
+  while(head!=NULL)
+  {
+    if(!invalid_code[head->vaddr>>12])
+    {
       // Don't restore blocks which are about to expire from the cache
-      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
+      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
+      {
         u_int start,end;
-        if(verify_dirty(head->addr)) {
+        if(verify_dirty(head->addr))
+        {
          //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
          u_int i;
          u_int inv=0;
          get_bounds((int)head->addr,&start,&end);
-          if(start-(u_int)rdram>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
+          if(start-(u_int)rdram>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++)
+          {
             inv|=invalid_code[i];
           }
         }
-        else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
+        else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE)
+        {
           inv=1;
         }
-        if(!inv) {
+        if(!inv)
+        {
           void * clean_addr=(void *)get_clean_addr((int)head->addr);
-          if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
+          if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
+          {
             u_int ppage=page;
             inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
             //printf("page=%x, addr=%x\n",page,head->vaddr);
             //assert(head->vaddr>>12==(page|0x80000));
             ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
             u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
-            if(ht_bin[0]==head->vaddr) {
+            if(ht_bin[0]==head->vaddr)
+            {
               ht_bin[1]=(u_int)clean_addr; // Replace existing entry
             }
-            if(ht_bin[2]==head->vaddr) {
+            if(ht_bin[2]==head->vaddr)
+            {
               ht_bin[3]=(u_int)clean_addr; // Replace existing entry
             }
           }
@@ -1112,15 +1166,17 @@ void clean_blocks(u_int page)
     }
   }

-
-void mov_alloc(struct regstat *current,int i)
+static void mov_alloc(struct regstat *current,int i)
 {
   // Note: Don't need to actually alloc the source registers
-  if((~current->is32>>rs1[i])&1) {
+  if((~current->is32>>rs1[i])&1)
+  {
     //alloc_reg64(current,i,rs1[i]);
     alloc_reg64(current,i,rt1[i]);
     current->is32&=~(1LL<is32|=(1LL<>32),(int)reg[LOREG]);
   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
@@ -1888,6 +1948,7 @@ void memdebug(int i)
   }
   //printf("TRACE: %x\n",(&i)[-1]);
 }
+#endif

 void alu_assemble(int i,struct regstat *i_regs)
 {
@@ -6999,7 +7060,7 @@ static int new_dynarec_test(void)

 // clear the state completely, instead of just marking
 // things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full()
+void new_dynarec_clear_full(void)
 {
   int n;
   out=(u_char *)BASE_ADDR;
@@ -7020,22 +7081,58 @@ void new_dynarec_clear_full()
   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
 }

-void new_dynarec_init()
+void new_dynarec_init(void)
 {
SysPrintf("Init new dynarec\n"); - out=(u_char *)BASE_ADDR; -#if BASE_ADDR_FIXED - if (mmap (out, 1<