#define RAM_SIZE 0x200000
#define MAXBLOCK 4096
#define MAX_OUTPUT_BLOCK_SIZE 262144
+#define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
+#define PAGE_COUNT 1024
+
+#if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+#define INVALIDATE_USE_COND_CALL
+#endif
#ifdef VITA
// apparently Vita has a 16MB limit, so either we cut tc in half,
u_int waswritten; // MIPS regs that were used as store base before
};
-// note: asm depends on this layout
-struct ll_entry
-{
- u_int vaddr;
- void *addr;
- struct ll_entry *next;
-};
-
struct ht_entry
{
u_int vaddr[2];
u_int tc_offs;
//u_int tc_len;
u_int reg_sv_flags;
- u_short is_dirty;
+ u_char is_dirty;
+ u_char inv_near_misses;
u_short jump_in_cnt;
struct {
u_int vaddr;
} jump_in[0];
};
+struct jump_info
+{
+ int alloc; // capacity of e[]
+ int count; // entries of e[] currently in use
+ struct {
+ u_int target_vaddr; // guest address this outgoing jump targets
+ void *stub; // dyna_linker stub the jump is re-pointed at when unlinking
+ } e[0]; // trailing array, grown via realloc (see ndrc_add_jump_out)
+};
+
static struct decoded_insn
{
u_char itype;
static u_char *out;
static struct ht_entry hash_table[65536];
- static struct block_info *blocks[4096];
- static struct ll_entry *jump_out[4096];
+ static struct block_info *blocks[PAGE_COUNT];
+ static struct jump_info *jumps[PAGE_COUNT];
static u_int start;
static u_int *source;
static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
static int is_delayslot;
static char shadow[1048576] __attribute__((aligned(16)));
static void *copy;
- static int expirep;
+ static u_int expirep;
static u_int stop_after_jal;
static u_int f1_hack;
#ifdef STAT_PRINT
for (j = 0; j < 32; j++)
{
u_char *start, *end;
- if (!(bitmap & (1<<j)))
+ if (!(bitmap & (1u << j)))
continue;
start = ndrc->translation_cache + i*131072 + j*4096;
end = start + 4095;
for (j++; j < 32; j++) {
- if (!(bitmap & (1<<j)))
+ if (!(bitmap & (1u << j)))
break;
end += 4096;
}
static u_int get_page(u_int vaddr) // map a vaddr to its blocks[]/jumps[] page index
{
u_int page = pmmask(vaddr) >> 12; // one page per 4KB of mirror-masked address space
-  if(page>2048) page=2048+(page&2047);
+ if (page >= PAGE_COUNT / 2) // fold high pages into the table's upper half
+ page = PAGE_COUNT / 2 + (page & (PAGE_COUNT / 2 - 1));
return page;
}
static void mark_invalid_code(u_int vaddr, u_int len, char invalid) // set/clear invalid_code[] over [vaddr, vaddr+len)
{
+ u_int vaddr_m = vaddr & 0x1fffffff; // masked copy; original vaddr kept for the range check below
u_int i, j;
- vaddr &= 0x1fffffff;
- for (i = vaddr & ~0xfff; i < vaddr + len; i += 0x1000) {
+ for (i = vaddr_m & ~0xfff; i < vaddr_m + len; i += 0x1000) {
// ram mirrors, but should not hurt bios
for (j = 0; j < 0x800000; j += 0x200000) {
invalid_code[(i|j) >> 12] =
invalid_code[(i|j|0xa0000000u) >> 12] = invalid;
}
}
- if (!invalid)
+ if (!invalid && vaddr + len > inv_code_start && vaddr <= inv_code_end)
inv_code_start = inv_code_end = ~0; // reset the cached no-code range only when the new code overlaps it
}
-// some messy ari64's code, seems to rely on unsigned 32bit overflow
-static int doesnt_expire_soon(void *tcaddr)
+static int doesnt_expire_soon(u_char *tcaddr) // true if tcaddr is not about to be reclaimed by the expirer
{
- u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
- return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
+ u_int diff = (u_int)(tcaddr - out) & ((1u << TARGET_SIZE_2) - 1u); // distance ahead of 'out', modulo tc size
+ return diff > EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE; // safe when beyond the expiry window trailing 'out'
}
static void *try_restore_block(u_int vaddr, u_int start_page, u_int end_page)
if (memcmp(block->source, block->copy, block->len))
continue;
- block->is_dirty = 0;
+ block->is_dirty = block->inv_near_misses = 0;
found_clean = block->jump_in[i].addr;
hash_table_add(vaddr, found_clean);
mark_invalid_code(block->start, block->len, 0);
emit_call(f);
}
-// Add virtual address mapping to linked list
-static void ll_add(struct ll_entry **head,int vaddr,void *addr)
-{
- struct ll_entry *new_entry;
- new_entry=malloc(sizeof(struct ll_entry));
- assert(new_entry!=NULL);
- new_entry->vaddr=vaddr;
- new_entry->addr=addr;
- new_entry->next=*head;
- *head=new_entry;
-}
-
// Check if an address is already compiled
// but don't return addresses which are about to expire from the cache
static void *check_addr(u_int vaddr)
size_t i;
for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
if (ht_bin->vaddr[i] == vaddr)
- if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
+ if (doesnt_expire_soon(ht_bin->tcaddr[i]))
return ht_bin->tcaddr[i];
}
return NULL;
}
-static void ll_remove_matching_addrs(struct ll_entry **head,
- uintptr_t base_offs_s, int shift)
-{
- struct ll_entry *next;
- while(*head) {
- uintptr_t o1 = (u_char *)(*head)->addr - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s)
- {
- inv_debug("EXP: rm pointer to %08x (%p)\n", (*head)->vaddr, (*head)->addr);
- hash_table_remove((*head)->vaddr);
- next=(*head)->next;
- free(*head);
- *head=next;
- stat_dec(stat_links);
- }
- else
- {
- head=&((*head)->next);
- }
- }
-}
-
-// Remove all entries from linked list
-static void ll_clear(struct ll_entry **head)
-{
- struct ll_entry *cur;
- struct ll_entry *next;
- if((cur=*head)) {
- *head=0;
- while(cur) {
- next=cur->next;
- free(cur);
- cur=next;
- }
- }
-}
-
-#if 0
-// Dereference the pointers and remove if it matches
-static void ll_kill_pointers(struct ll_entry *head,
- uintptr_t base_offs_s, int shift)
-{
- while(head) {
- u_char *ptr = get_pointer(head->addr);
- uintptr_t o1 = ptr - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- inv_debug("EXP: Lookup pointer to %p at %p (%x)\n",ptr,head->addr,head->vaddr);
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s)
- {
- inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
- void *host_addr=find_extjump_insn(head->addr);
- mark_clear_cache(host_addr);
- set_jump_target(host_addr, head->addr);
- }
- head=head->next;
- }
-}
-#endif
-
static void blocks_clear(struct block_info **head)
{
struct block_info *cur, *next;
}
}
-static void blocks_remove_matching_addrs(struct block_info **head,
- uintptr_t base_offs_s, int shift)
+// Remove (and invalidate) every block on this list whose translation-cache
+// offset lies in the aligned region [base_offs, base_offs + (1 << shift)).
+// Returns nonzero if anything was removed, so the caller can flush caches.
+static int blocks_remove_matching_addrs(struct block_info **head,
+ u_int base_offs, int shift)
{
struct block_info *next;
+ int hit = 0;
while (*head) {
- u_int o1 = (*head)->tc_offs;
- u_int o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s)
- {
- inv_debug("EXP: rm block %08x (tc_offs %u)\n", (*head)->start, o1);
+ if ((((*head)->tc_offs ^ base_offs) >> shift) == 0) {
+ // cast: tc_offs is u_int but %zx expects size_t (mismatch is UB, C11 7.21.6.1)
+ inv_debug("EXP: rm block %08x (tc_offs %zx)\n", (*head)->start, (size_t)(*head)->tc_offs);
invalidate_block(*head);
next = (*head)->next;
free(*head);
*head = next;
stat_dec(stat_blocks);
+ hit = 1;
}
else
{
head = &((*head)->next);
}
}
+ return hit;
}
// This is called when we write to a compiled block (see do_invstub)
-static void unlink_jumps_range(u_int start, u_int end)
+// Unlink every outgoing jump whose guest target lies in [start, end):
+// each patched call site is pointed back at its dyna_linker stub.
+static void unlink_jumps_vaddr_range(u_int start, u_int end)
{
u_int page, start_page = get_page(start), end_page = get_page(end - 1);
- struct ll_entry **head, *next;
+ int i;
for (page = start_page; page <= end_page; page++) {
- for (head = &jump_out[page]; *head; ) {
- if ((*head)->vaddr < start || (*head)->vaddr >= end) {
- head = &((*head)->next);
+ struct jump_info *ji = jumps[page];
+ if (ji == NULL)
+ continue;
+ for (i = 0; i < ji->count; ) {
+ if (ji->e[i].target_vaddr < start || ji->e[i].target_vaddr >= end) {
+ i++;
continue;
}
- inv_debug("INV: rm pointer to %08x (%p)\n", (*head)->vaddr, (*head)->addr);
- void *host_addr = find_extjump_insn((*head)->addr);
+
+ // cast: the pointer difference is ptrdiff_t but %zx expects size_t
+ inv_debug("INV: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr,
+ (size_t)((u_char *)ji->e[i].stub - ndrc->translation_cache));
+ void *host_addr = find_extjump_insn(ji->e[i].stub);
mark_clear_cache(host_addr);
- set_jump_target(host_addr, (*head)->addr); // point back to dyna_linker stub
+ set_jump_target(host_addr, ji->e[i].stub); // point back to dyna_linker stub
- next = (*head)->next;
- free(*head);
- *head = next;
stat_dec(stat_links);
+ ji->count--;
+ if (i < ji->count) {
+ ji->e[i] = ji->e[ji->count]; // swap-remove; re-examine the moved entry
+ continue;
+ }
+ i++;
+ }
+ }
+}
+
+// Drop link records whose stubs live in the expiring translation-cache
+// region [base_offs, base_offs + (1 << shift)). Call sites are not
+// re-patched here: the code containing them is being reclaimed anyway
+// (see pass10_expire_blocks).
+static void unlink_jumps_tc_range(struct jump_info *ji, u_int base_offs, int shift)
+{
+ int i;
+ if (ji == NULL)
+ return;
+ for (i = 0; i < ji->count; ) {
+ u_int tc_offs = (u_char *)ji->e[i].stub - ndrc->translation_cache;
+ if (((tc_offs ^ base_offs) >> shift) != 0) {
+ i++;
+ continue;
+ }
+
+ // cast: tc_offs is u_int but %zx expects size_t (mismatch is UB, C11 7.21.6.1)
+ inv_debug("EXP: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr, (size_t)tc_offs);
+ stat_dec(stat_links);
+ ji->count--;
+ if (i < ji->count) {
+ ji->e[i] = ji->e[ji->count]; // swap-remove; re-examine the moved entry
+ continue;
+ }
+ i++;
+ }
+}
u_int i;
block->is_dirty = 1;
- unlink_jumps_range(block->start, block->start + block->len);
+ unlink_jumps_vaddr_range(block->start, block->start + block->len);
for (i = 0; i < block->jump_in_cnt; i++)
hash_table_remove(block->jump_in[i].vaddr);
}
static int invalidate_range(u_int start, u_int end,
u32 *inv_start_ret, u32 *inv_end_ret)
{
+ struct block_info *last_block = NULL;
u_int start_page = get_page_prev(start);
u_int end_page = get_page(end - 1);
u_int start_m = pmmask(start);
- u_int end_m = pmmask(end);
+ u_int end_m = pmmask(end - 1);
u_int inv_start, inv_end;
u_int blk_start_m, blk_end_m;
u_int page;
for (block = blocks[page]; block != NULL; block = block->next) {
if (block->is_dirty)
continue;
+ last_block = block;
blk_end_m = pmmask(block->start + block->len);
if (blk_end_m <= start_m) {
inv_start = max(inv_start, blk_end_m);
}
}
+ if (!hit && last_block && last_block->source) {
+ // could be some leftover unused block, uselessly trapping writes
+ last_block->inv_near_misses++;
+ if (last_block->inv_near_misses > 128) {
+ invalidate_block(last_block);
+ stat_inc(stat_inv_hits);
+ hit++;
+ }
+ }
if (hit) {
do_clear_cache();
#ifdef USE_MINI_HT
memset(mini_ht, -1, sizeof(mini_ht));
#endif
}
+
if (inv_start <= (start_m & ~0xfff) && inv_end >= (start_m | 0xfff))
// the whole page is empty now
mark_invalid_code(start, 1, 1);
}
#ifdef USE_MINI_HT
- memset(mini_ht,-1,sizeof(mini_ht));
+ memset(mini_ht, -1, sizeof(mini_ht));
#endif
do_clear_cache();
}
static void do_invstub(int n) // emit the out-of-line invalidation stub for stubs[n]
{
literal_pool(20);
- u_int reglist=stubs[n].a;
+ u_int reglist = stubs[n].a;
set_jump_target(stubs[n].addr, out);
save_regs(reglist);
- if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
+ if (stubs[n].b != 0)
+ emit_mov(stubs[n].b, 0);
+ emit_readword(&inv_code_start, 1); // inline fast path: skip the far call when the address
+ emit_readword(&inv_code_end, 2); // (reg 0) falls in the cached [inv_code_start, inv_code_end] range
+ emit_cmp(0, 1);
+ emit_cmpcs(2, 0); // NOTE(review): conditional-compare chain looks arch-specific; verify flag semantics
+ void *jaddr = out;
+ emit_jc(0); // placeholder branch over the call, target patched below
emit_far_call(ndrc_invalidate_addr);
+ set_jump_target(jaddr, out);
restore_regs(reglist);
emit_jmp(stubs[n].retaddr); // return address
}
-// Add an entry to jump_out after making a link
-// src should point to code by emit_extjump()
+// Record an outgoing link in the per-page jumps[] table (replaces the old
+// jump_out lists) so it can be unlinked later.
+// src should point to code emitted by emit_extjump().
-void ndrc_add_jump_out(u_int vaddr,void *src)
+void ndrc_add_jump_out(u_int vaddr, void *src)
{
- u_int page=get_page(vaddr);
- inv_debug("ndrc_add_jump_out: %p -> %x (%d)\n",src,vaddr,page);
- check_extjump2(src);
- ll_add(jump_out+page,vaddr,src);
- //inv_debug("ndrc_add_jump_out: to %p\n",get_pointer(src));
+ inv_debug("ndrc_add_jump_out: %p -> %x\n", src, vaddr);
+ u_int page = get_page(vaddr);
+ struct jump_info *ji;
+
stat_inc(stat_links);
+ check_extjump2(src);
+ ji = jumps[page];
+ if (ji == NULL) {
+ ji = malloc(sizeof(*ji) + sizeof(ji->e[0]) * 16);
+ ji->alloc = 16;
+ ji->count = 0;
+ }
+ else if (ji->count >= ji->alloc) {
+ ji->alloc += 16;
+ ji = realloc(ji, sizeof(*ji) + sizeof(ji->e[0]) * ji->alloc);
+ }
+ assert(ji != NULL); // treat alloc failure as fatal, as the old ll_add() did
+ jumps[page] = ji;
+ ji->e[ji->count].target_vaddr = vaddr;
+ ji->e[ji->count].stub = src;
+ ji->count++;
}
/* Register allocation */
}
else
{
- // TLBR/TLBWI/TLBWR/TLBP/ERET
+ // RFE
assert(dops[i].opcode2==0x10);
alloc_all(current,i);
}
#else
emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[addr]);
#else
void *jaddr2 = out;
#else
emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[temp]);
#else
void *jaddr2 = out;
#else
emit_cmpmem_indexedsr12_imm(invalid_code,ar,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[ar]);
#else
void *jaddr3 = out;
//assert(adj==0);
emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
- if(dops[i+1].itype==COP0&&(source[i+1]&0x3f)==0x10)
+ if(dops[i+1].itype==COP0 && dops[i+1].opcode2==0x10)
// special case for RFE
emit_jmp(0);
else
memset(mini_ht,-1,sizeof(mini_ht));
memset(shadow,0,sizeof(shadow));
copy=shadow;
- expirep=16384; // Expiry pointer, +2 blocks
+ expirep = EXPIRITY_OFFSET;
pending_exception=0;
literalcount=0;
stop_after_jal=0;
inv_code_start=inv_code_end=~0;
hack_addr=0;
f1_hack=0;
- // TLB
- for(n=0;n<4096;n++) blocks_clear(&blocks[n]);
- for(n=0;n<4096;n++) ll_clear(jump_out+n);
+ for (n = 0; n < ARRAY_SIZE(blocks); n++)
+ blocks_clear(&blocks[n]);
+ for (n = 0; n < ARRAY_SIZE(jumps); n++) {
+ free(jumps[n]);
+ jumps[n] = NULL;
+ }
stat_clear(stat_blocks);
stat_clear(stat_links);
SysPrintf("munmap() failed\n");
#endif
#endif
- for(n=0;n<4096;n++) blocks_clear(&blocks[n]);
- for(n=0;n<4096;n++) ll_clear(jump_out+n);
+ for (n = 0; n < ARRAY_SIZE(blocks); n++)
+ blocks_clear(&blocks[n]);
+ for (n = 0; n < ARRAY_SIZE(jumps); n++) {
+ free(jumps[n]);
+ jumps[n] = NULL;
+ }
stat_clear(stat_blocks);
stat_clear(stat_links);
#ifdef ROM_COPY
// SYSCALL instruction (software interrupt)
u=1;
}
- else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
+ else if(dops[i].itype==COP0 && dops[i].opcode2==0x10)
{
- // ERET instruction (return from interrupt)
+ // RFE
u=1;
}
//u=1; // DEBUG
+// Incrementally reclaim translation-cache space in a window trailing 'out',
+// alternating between expiring blocks and dropping their stale link records.
static noinline void pass10_expire_blocks(void)
{
- int i, end;
- end = (((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16)) + 16384) & 65535;
- while (expirep != end)
+ u_int step = MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2;
+ // not sizeof(ndrc->translation_cache) due to vita hack
+ u_int step_mask = ((1u << TARGET_SIZE_2) - 1u) & ~(step - 1u);
+ u_int end = (out - ndrc->translation_cache + EXPIRITY_OFFSET) & step_mask;
+ u_int base_shift = __builtin_ctz(MAX_OUTPUT_BLOCK_SIZE);
+ int hit;
+
+ for (; expirep != end; expirep = ((expirep + step) & step_mask))
{
- int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
- uintptr_t base_offs = ((uintptr_t)(expirep >> 13) << shift); // Base offset of this block
- uintptr_t base_offs_s = base_offs >> shift;
- if (!(expirep & ((1 << 13) - 1)))
- inv_debug("EXP: base_offs %x\n", base_offs);
- switch((expirep>>11)&3)
- {
- case 0:
- // Clear blocks
- blocks_remove_matching_addrs(&blocks[expirep & 2047], base_offs_s, shift);
- blocks_remove_matching_addrs(&blocks[2048 + (expirep & 2047)], base_offs_s, shift);
- break;
- case 1:
- // Clear pointers
- //ll_kill_pointers(jump_out[expirep&2047],base_offs_s,shift);
- //ll_kill_pointers(jump_out[(expirep&2047)+2048],base_offs_s,shift);
- break;
- case 2:
- // Clear hash table
- for(i=0;i<32;i++) {
- struct ht_entry *ht_bin = &hash_table[((expirep&2047)<<5)+i];
- uintptr_t o1 = (u_char *)ht_bin->tcaddr[1] - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s) {
- inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[1],ht_bin->tcaddr[1]);
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
- }
- o1 = (u_char *)ht_bin->tcaddr[0] - ndrc->translation_cache;
- o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s) {
- inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[0],ht_bin->tcaddr[0]);
- ht_bin->vaddr[0] = ht_bin->vaddr[1];
- ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
- }
- }
- break;
- case 3:
- // Clear jump_out
- if((expirep&2047)==0)
- do_clear_cache();
- ll_remove_matching_addrs(jump_out+(expirep&2047),base_offs_s,shift);
- ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base_offs_s,shift);
- break;
+ u_int base_offs = expirep & ~(MAX_OUTPUT_BLOCK_SIZE - 1); // tc region being expired
+ u_int block_i = expirep / step & (PAGE_COUNT - 1); // page visited this step
+ u_int phase = (expirep >> (base_shift - 1)) & 1u; // 0: expire blocks, 1: drop links
+ if (!(expirep & (MAX_OUTPUT_BLOCK_SIZE / 2 - 1))) {
+ // cast: the pointer difference is ptrdiff_t but %x expects unsigned int
+ inv_debug("EXP: base_offs %x/%x phase %u\n", base_offs,
+ (u_int)(out - ndrc->translation_cache), phase);
+ }
+
+ if (!phase) {
+ hit = blocks_remove_matching_addrs(&blocks[block_i], base_offs, base_shift);
+ if (hit) {
+ do_clear_cache();
+ #ifdef USE_MINI_HT
+ memset(mini_ht, -1, sizeof(mini_ht));
+ #endif
+ }
}
- expirep=(expirep+1)&65535;
+ else
+ unlink_jumps_tc_range(jumps[block_i], base_offs, base_shift);
}
}
block->tc_offs = beginning - ndrc->translation_cache;
//block->tc_len = out - beginning;
block->is_dirty = 0;
+ block->inv_near_misses = 0;
block->jump_in_cnt = jump_in_count;
- // insert sorted by start vaddr
+ // insert sorted by mirror-unmasked start vaddr
for (b_pptr = &blocks[page]; ; b_pptr = &((*b_pptr)->next)) {
if (*b_pptr == NULL || (*b_pptr)->start >= start) {
block->next = *b_pptr;
if (instr_addr0_override)
instr_addr[0] = instr_addr0_override;
+#if 0
+ /* check for improper expiration */
+ for (i = 0; i < ARRAY_SIZE(jumps); i++) {
+ int j;
+ if (!jumps[i])
+ continue;
+ for (j = 0; j < jumps[i]->count; j++)
+ assert(jumps[i]->e[j].stub < beginning || (u_char *)jumps[i]->e[j].stub > out);
+ }
+#endif
+
/* Pass 9 - Linker */
for(i=0;i<linkcount;i++)
{