#ifdef _3DS
#include <3ds_utils.h>
#endif
-#ifdef VITA
-#include <psp2/kernel/sysmem.h>
-static int sceBlock;
+#ifdef HAVE_LIBNX
+#include <switch.h>
+static Jit g_jit;
#endif
#include "new_dynarec_config.h"
#include "../psxinterpreter.h"
#include "../gte.h"
#include "emu_if.h" // emulator interface
+#include "arm_features.h"
+#ifdef __clang__
+#define noinline __attribute__((noinline))
+#else
#define noinline __attribute__((noinline,noclone))
+#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
//#define DISASM
//#define ASSEM_PRINT
+//#define STAT_PRINT
#ifdef ASSEM_PRINT
#define assem_debug printf
#define RAM_SIZE 0x200000
#define MAXBLOCK 4096
#define MAX_OUTPUT_BLOCK_SIZE 262144
+#define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
+#define PAGE_COUNT 1024
+
+#if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+#define INVALIDATE_USE_COND_CALL
+#endif
+
+#ifdef VITA
+// apparently Vita has a 16MB limit, so either we cut tc in half,
+// or use this hack (it's a hack because tc size was designed to be power-of-2)
+#define TC_REDUCE_BYTES 4096
+#else
+#define TC_REDUCE_BYTES 0
+#endif
+
+struct ndrc_tramp
+{
+ struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
+ const void *f[2048 / sizeof(void *)];
+};
struct ndrc_mem
{
- u_char translation_cache[1 << TARGET_SIZE_2];
- struct
- {
- struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
- const void *f[2048 / sizeof(void *)];
- } tramp;
+ u_char translation_cache[(1 << TARGET_SIZE_2) - TC_REDUCE_BYTES];
+ struct ndrc_tramp tramp;
};
#ifdef BASE_ADDR_DYNAMIC
static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
static struct ndrc_mem *ndrc = &ndrc_;
#endif
+#ifdef TC_WRITE_OFFSET
+# ifdef __GLIBC__
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <fcntl.h>
+# include <unistd.h>
+# endif
+static long ndrc_write_ofs;
+#define NDRC_WRITE_OFFSET(x) (void *)((char *)(x) + ndrc_write_ofs)
+#else
+#define NDRC_WRITE_OFFSET(x) (x)
+#endif
// stubs
enum stub_type {
INVCODE_STUB = 14,
};
+// regmap_pre[i] - regs before [i] insn starts; dirty things here that
+// don't match .regmap will be written back
+// [i].regmap_entry - regs that must be set up if someone jumps here
+// [i].regmap - regs [i] insn will read/(over)write
+// branch_regs[i].* - same as above but for branches, takes delay slot into account
struct regstat
{
signed char regmap_entry[HOST_REGS];
uint64_t wasdirty;
uint64_t dirty;
uint64_t u;
- u_int wasconst;
- u_int isconst;
+ u_int wasconst; // before; for example 'lw r2, (r2)' wasconst is true
+ u_int isconst; // ... but isconst is false when r2 is known
u_int loadedconst; // host regs that have constants loaded
u_int waswritten; // MIPS regs that were used as store base before
};
-// note: asm depends on this layout
-struct ll_entry
-{
- u_int vaddr;
- u_int reg_sv_flags;
- void *addr;
- struct ll_entry *next;
-};
-
struct ht_entry
{
u_int vaddr[2];
{
void *addr;
u_int target;
- u_int ext;
+ u_int internal;
+};
+
+struct block_info
+{
+ struct block_info *next;
+ const void *source;
+ const void *copy;
+ u_int start; // vaddr of the block start
+ u_int len; // of the whole block source
+ u_int tc_offs;
+ //u_int tc_len;
+ u_int reg_sv_flags;
+ u_char is_dirty;
+ u_char inv_near_misses;
+ u_short jump_in_cnt;
+ struct {
+ u_int vaddr;
+ void *addr;
+ } jump_in[0];
+};
+
+struct jump_info
+{
+ int alloc;
+ int count;
+ struct {
+ u_int target_vaddr;
+ void *stub;
+ } e[0];
};
static struct decoded_insn
u_char rs2;
u_char rt1;
u_char rt2;
- u_char lt1;
+ u_char use_lt1:1;
u_char bt:1;
- u_char likely:1;
u_char ooo:1;
u_char is_ds:1;
+ u_char is_jump:1;
+ u_char is_ujump:1;
+ u_char is_load:1;
+ u_char is_store:1;
} dops[MAXBLOCK];
- // used by asm:
- u_char *out;
- struct ht_entry hash_table[65536] __attribute__((aligned(16)));
- struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
- struct ll_entry *jump_dirty[4096];
-
- static struct ll_entry *jump_out[4096];
+ static u_char *out;
+ static struct ht_entry hash_table[65536];
+ static struct block_info *blocks[PAGE_COUNT];
+ static struct jump_info *jumps[PAGE_COUNT];
static u_int start;
static u_int *source;
- static char insn[MAXBLOCK][10];
static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
static uint64_t gte_rt[MAXBLOCK];
static uint64_t gte_unneeded[MAXBLOCK];
static u_int ba[MAXBLOCK];
static uint64_t unneeded_reg[MAXBLOCK];
static uint64_t branch_unneeded_reg[MAXBLOCK];
- static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
+ // see 'struct regstat' for a description
+ static signed char regmap_pre[MAXBLOCK][HOST_REGS];
// contains 'real' consts at [i] insn, but may differ from what's actually
// loaded in host reg as 'final' value is always loaded, see get_final_value()
static uint32_t current_constmap[HOST_REGS];
static struct regstat regs[MAXBLOCK];
static struct regstat branch_regs[MAXBLOCK];
static signed char minimum_free_regs[MAXBLOCK];
- static u_int needed_reg[MAXBLOCK];
- static u_int wont_dirty[MAXBLOCK];
- static u_int will_dirty[MAXBLOCK];
static int ccadj[MAXBLOCK];
static int slen;
static void *instr_addr[MAXBLOCK];
static int is_delayslot;
static char shadow[1048576] __attribute__((aligned(16)));
static void *copy;
- static int expirep;
+ static u_int expirep;
static u_int stop_after_jal;
-#ifndef RAM_FIXED
- static uintptr_t ram_offset;
+ static u_int f1_hack;
+#ifdef STAT_PRINT
+ static int stat_bc_direct;
+ static int stat_bc_pre;
+ static int stat_bc_restore;
+ static int stat_ht_lookups;
+ static int stat_jump_in_lookups;
+ static int stat_restore_tries;
+ static int stat_restore_compares;
+ static int stat_inv_addr_calls;
+ static int stat_inv_hits;
+ static int stat_blocks;
+ static int stat_links;
+ #define stat_inc(s) s++
+ #define stat_dec(s) s--
+ #define stat_clear(s) s = 0
#else
- static const uintptr_t ram_offset=0;
+ #define stat_inc(s)
+ #define stat_dec(s)
+ #define stat_clear(s)
#endif
int new_dynarec_hacks;
extern int pcaddr;
extern int pending_exception;
extern int branch_target;
+ extern uintptr_t ram_offset;
extern uintptr_t mini_ht[32][2];
- extern u_char restore_candidate[512];
/* registers that may be allocated */
/* 1-31 gpr */
#define CCREG 36 // Cycle count
#define INVCP 37 // Pointer to invalid_code
//#define MMREG 38 // Pointer to memory_map
-//#define ROREG 39 // ram offset (if rdram!=0x80000000)
+#define ROREG 39 // ram offset (if rdram!=0x80000000)
#define TEMPREG 40
#define FTEMP 40 // FPU temporary register
#define PTEMP 41 // Prefetch temporary register
//#define FLOAT 19 // Floating point unit
//#define FCONV 20 // Convert integer to float
//#define FCOMP 21 // Floating point compare (sets FSREG)
-#define SYSCALL 22// SYSCALL
+#define SYSCALL 22// SYSCALL,BREAK
#define OTHER 23 // Other
-#define SPAN 24 // Branch/delay slot spans 2 pages
+//#define SPAN 24 // Branch/delay slot spans 2 pages
#define NI 25 // Not implemented
#define HLECALL 26// PCSX fake opcodes for HLE
#define COP2 27 // Coprocessor 2 move
#define DJT_2 (void *)2l
// asm linkage
-int new_recompile_block(u_int addr);
-void *get_addr_ht(u_int vaddr);
-void invalidate_block(u_int block);
-void invalidate_addr(u_int addr);
-void remove_hash(int vaddr);
void dyna_linker();
-void dyna_linker_ds();
-void verify_code();
-void verify_code_ds();
void cc_interrupt();
void fp_exception();
void fp_exception_ds();
+void jump_syscall (u_int u0, u_int u1, u_int pc);
+void jump_syscall_ds(u_int u0, u_int u1, u_int pc);
+void jump_break (u_int u0, u_int u1, u_int pc);
+void jump_break_ds(u_int u0, u_int u1, u_int pc);
void jump_to_new_pc();
void call_gteStall();
void new_dyna_leave();
+void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile);
+void *ndrc_get_addr_ht(u_int vaddr);
+void ndrc_invalidate_addr(u_int addr);
+void ndrc_add_jump_out(u_int vaddr, void *src);
+
+static int new_recompile_block(u_int addr);
+static void invalidate_block(struct block_info *block);
+
// Needed by assembler
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
-static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
-static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
-static void load_all_regs(signed char i_regmap[]);
-static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
+static void wb_register(signed char r, const signed char regmap[], uint64_t dirty);
+static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty);
+static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr);
+static void load_all_regs(const signed char i_regmap[]);
+static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
static void load_regs_entry(int t);
-static void load_all_consts(signed char regmap[],u_int dirty,int i);
+static void load_all_consts(const signed char regmap[], u_int dirty, int i);
static u_int get_host_reglist(const signed char *regmap);
-static int verify_dirty(const u_int *ptr);
static int get_final_value(int hr, int i, int *value);
static void add_stub(enum stub_type type, void *addr, void *retaddr,
u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
static void add_to_linker(void *addr, u_int target, int ext);
-static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
+static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
+ int addr, int *offset_reg, int *addr_reg_override);
static void *get_direct_memhandler(void *table, u_int addr,
enum stub_type type, uintptr_t *addr_host);
static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
static void emit_far_jump(const void *f);
static void emit_far_call(const void *f);
+#ifdef VITA
+#include <psp2/kernel/sysmem.h>
+static int sceBlock;
+// note: this interacts with RetroArch's Vita bootstrap code: bootstrap/vita/sbrk.c
+extern int getVMBlock();
+int _newlib_vm_size_user = sizeof(*ndrc);
+#endif
+
static void mprotect_w_x(void *start, void *end, int is_x)
{
#ifdef NO_WRITE_EXEC
sceKernelCloseVMDomain();
else
sceKernelOpenVMDomain();
+ #elif defined(HAVE_LIBNX)
+ Result rc;
+ // check to avoid the full flush in jitTransitionToExecutable()
+ if (g_jit.type != JitType_CodeMemory) {
+ if (is_x)
+ rc = jitTransitionToExecutable(&g_jit);
+ else
+ rc = jitTransitionToWritable(&g_jit);
+ if (R_FAILED(rc))
+ ;//SysPrintf("jitTransition %d %08x\n", is_x, rc);
+ }
+ #elif defined(TC_WRITE_OFFSET)
+ // separated rx and rw areas are always available
#else
u_long mstart = (u_long)start & ~4095ul;
u_long mend = (u_long)end;
sceKernelSyncVMDomain(sceBlock, start, len);
#elif defined(_3DS)
ctr_flush_invalidate_cache();
+ #elif defined(HAVE_LIBNX)
+ if (g_jit.type == JitType_CodeMemory) {
+ armDCacheClean(start, len);
+ armICacheInvalidate((char *)start - ndrc_write_ofs, len);
+ }
#elif defined(__aarch64__)
// as of 2021, __clear_cache() is still broken on arm64
// so here is a custom one :(
u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
- start_tcache_write(out, end);
+ start_tcache_write(NDRC_WRITE_OFFSET(out), NDRC_WRITE_OFFSET(end));
return out;
}
static void end_block(void *start)
{
- end_tcache_write(start, out);
+ end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(out));
+}
+
+#ifdef NDRC_CACHE_FLUSH_ALL
+
+static int needs_clear_cache;
+
+static void mark_clear_cache(void *target)
+{
+ if (!needs_clear_cache) {
+ start_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
+ needs_clear_cache = 1;
+ }
+}
+
+static void do_clear_cache(void)
+{
+ if (needs_clear_cache) {
+ end_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
+ needs_clear_cache = 0;
+ }
}
+#else
+
// also takes care of w^x mappings when patching code
static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
uintptr_t offset = (u_char *)target - ndrc->translation_cache;
u_int mask = 1u << ((offset >> 12) & 31);
if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((uintptr_t)target & ~4095l);
+ char *start = (char *)NDRC_WRITE_OFFSET((uintptr_t)target & ~4095l);
start_tcache_write(start, start + 4095);
needs_clear_cache[offset >> 17] |= mask;
}
for (j = 0; j < 32; j++)
{
u_char *start, *end;
- if (!(bitmap & (1<<j)))
+ if (!(bitmap & (1u << j)))
continue;
start = ndrc->translation_cache + i*131072 + j*4096;
end = start + 4095;
for (j++; j < 32; j++) {
- if (!(bitmap & (1<<j)))
+ if (!(bitmap & (1u << j)))
break;
end += 4096;
}
- end_tcache_write(start, end);
+ end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(end));
}
needs_clear_cache[i] = 0;
}
}
-//#define DEBUG_CYCLE_COUNT 1
+#endif // NDRC_CACHE_FLUSH_ALL
#define NO_CYCLE_PENALTY_THR 12
-int cycle_multiplier; // 100 for 1.0
+int cycle_multiplier = CYCLE_MULT_DEFAULT; // 100 for 1.0
int cycle_multiplier_override;
int cycle_multiplier_old;
+static int cycle_multiplier_active;
static int CLOCK_ADJUST(int x)
{
- int m = cycle_multiplier_override
- ? cycle_multiplier_override : cycle_multiplier;
- int s=(x>>31)|1;
+ int m = cycle_multiplier_active;
+ int s = (x >> 31) | 1;
return (x * m + s * 50) / 100;
}
-// is the op an unconditional jump?
-static int is_ujump(int i)
-{
- return dops[i].itype == UJUMP || dops[i].itype == RJUMP
- || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset
-}
-
-static int is_jump(int i)
+static int ds_writes_rjump_rs(int i)
{
- return dops[i].itype == RJUMP || dops[i].itype == UJUMP || dops[i].itype == CJUMP || dops[i].itype == SJUMP;
+ return dops[i].rs1 != 0 && (dops[i].rs1 == dops[i+1].rt1 || dops[i].rs1 == dops[i+1].rt2);
}
-static int ds_writes_rjump_rs(int i)
+// psx addr mirror masking (for invalidation)
+static u_int pmmask(u_int vaddr)
{
- return dops[i].rs1 != 0 && (dops[i].rs1 == dops[i+1].rt1 || dops[i].rs1 == dops[i+1].rt2);
+ vaddr &= ~0xe0000000;
+ if (vaddr < 0x01000000)
+ vaddr &= ~0x00e00000; // RAM mirrors
+ return vaddr;
}
static u_int get_page(u_int vaddr)
{
- u_int page=vaddr&~0xe0000000;
- if (page < 0x1000000)
- page &= ~0x0e00000; // RAM mirrors
- page>>=12;
- if(page>2048) page=2048+(page&2047);
+ u_int page = pmmask(vaddr) >> 12;
+ if (page >= PAGE_COUNT / 2)
+ page = PAGE_COUNT / 2 + (page & (PAGE_COUNT / 2 - 1));
return page;
}
-// no virtual mem in PCSX
-static u_int get_vpage(u_int vaddr)
+// get a page for looking up a block that may contain vaddr
+// (needed because the block may start in the previous page)
+static u_int get_page_prev(u_int vaddr)
{
- return get_page(vaddr);
+ assert(MAXBLOCK <= (1 << 12));
+ u_int page = get_page(vaddr);
+ if (page & 511)
+ page--;
+ return page;
}
static struct ht_entry *hash_table_get(u_int vaddr)
return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
}
-static void hash_table_add(struct ht_entry *ht_bin, u_int vaddr, void *tcaddr)
+static void hash_table_add(u_int vaddr, void *tcaddr)
{
+ struct ht_entry *ht_bin = hash_table_get(vaddr);
+ assert(tcaddr);
ht_bin->vaddr[1] = ht_bin->vaddr[0];
ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
ht_bin->vaddr[0] = vaddr;
ht_bin->tcaddr[0] = tcaddr;
}
-// some messy ari64's code, seems to rely on unsigned 32bit overflow
-static int doesnt_expire_soon(void *tcaddr)
+static void hash_table_remove(int vaddr)
+{
+ //printf("remove hash: %x\n",vaddr);
+ struct ht_entry *ht_bin = hash_table_get(vaddr);
+ if (ht_bin->vaddr[1] == vaddr) {
+ ht_bin->vaddr[1] = -1;
+ ht_bin->tcaddr[1] = NULL;
+ }
+ if (ht_bin->vaddr[0] == vaddr) {
+ ht_bin->vaddr[0] = ht_bin->vaddr[1];
+ ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
+ ht_bin->vaddr[1] = -1;
+ ht_bin->tcaddr[1] = NULL;
+ }
+}
+
+static void mark_invalid_code(u_int vaddr, u_int len, char invalid)
+{
+ u_int vaddr_m = vaddr & 0x1fffffff;
+ u_int i, j;
+ for (i = vaddr_m & ~0xfff; i < vaddr_m + len; i += 0x1000) {
+ // ram mirrors, but should not hurt bios
+ for (j = 0; j < 0x800000; j += 0x200000) {
+ invalid_code[(i|j) >> 12] =
+ invalid_code[(i|j|0x80000000u) >> 12] =
+ invalid_code[(i|j|0xa0000000u) >> 12] = invalid;
+ }
+ }
+ if (!invalid && vaddr + len > inv_code_start && vaddr <= inv_code_end)
+ inv_code_start = inv_code_end = ~0;
+}
+
+static int doesnt_expire_soon(u_char *tcaddr)
+{
+ u_int diff = (u_int)(tcaddr - out) & ((1u << TARGET_SIZE_2) - 1u);
+ return diff > EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE;
+}
+
+static void *try_restore_block(u_int vaddr, u_int start_page, u_int end_page)
{
- u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
- return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
+ void *found_clean = NULL;
+ u_int i, page;
+
+ stat_inc(stat_restore_tries);
+ for (page = start_page; page <= end_page; page++) {
+ struct block_info *block;
+ for (block = blocks[page]; block != NULL; block = block->next) {
+ if (vaddr < block->start)
+ break;
+ if (!block->is_dirty || vaddr >= block->start + block->len)
+ continue;
+ for (i = 0; i < block->jump_in_cnt; i++)
+ if (block->jump_in[i].vaddr == vaddr)
+ break;
+ if (i == block->jump_in_cnt)
+ continue;
+ assert(block->source && block->copy);
+ stat_inc(stat_restore_compares);
+ if (memcmp(block->source, block->copy, block->len))
+ continue;
+
+ block->is_dirty = block->inv_near_misses = 0;
+ found_clean = block->jump_in[i].addr;
+ hash_table_add(vaddr, found_clean);
+ mark_invalid_code(block->start, block->len, 0);
+ stat_inc(stat_bc_restore);
+ inv_debug("INV: restored %08x %p (%d)\n", vaddr, found_clean, block->jump_in_cnt);
+ return found_clean;
+ }
+ }
+ return NULL;
}
// Get address from virtual address
// This is called from the recompiled JR/JALR instructions
-void noinline *get_addr(u_int vaddr)
-{
- u_int page=get_page(vaddr);
- u_int vpage=get_vpage(vaddr);
- struct ll_entry *head;
- //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
- head=jump_in[page];
- while(head!=NULL) {
- if(head->vaddr==vaddr) {
- //printf("TRACE: count=%d next=%d (get_addr match %x: %p)\n",Count,next_interupt,vaddr,head->addr);
- hash_table_add(hash_table_get(vaddr), vaddr, head->addr);
- return head->addr;
- }
- head=head->next;
- }
- head=jump_dirty[vpage];
- while(head!=NULL) {
- if(head->vaddr==vaddr) {
- //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %p)\n",Count,next_interupt,vaddr,head->addr);
- // Don't restore blocks which are about to expire from the cache
- if (doesnt_expire_soon(head->addr))
- if (verify_dirty(head->addr)) {
- //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
- invalid_code[vaddr>>12]=0;
- inv_code_start=inv_code_end=~0;
- if(vpage<2048) {
- restore_candidate[vpage>>3]|=1<<(vpage&7);
- }
- else restore_candidate[page>>3]|=1<<(page&7);
- struct ht_entry *ht_bin = hash_table_get(vaddr);
- if (ht_bin->vaddr[0] == vaddr)
- ht_bin->tcaddr[0] = head->addr; // Replace existing entry
- else
- hash_table_add(ht_bin, vaddr, head->addr);
+static void noinline *get_addr(u_int vaddr, int can_compile)
+{
+ u_int start_page = get_page_prev(vaddr);
+ u_int i, page, end_page = get_page(vaddr);
+ void *found_clean = NULL;
- return head->addr;
- }
+ stat_inc(stat_jump_in_lookups);
+ for (page = start_page; page <= end_page; page++) {
+ const struct block_info *block;
+ for (block = blocks[page]; block != NULL; block = block->next) {
+ if (vaddr < block->start)
+ break;
+ if (block->is_dirty || vaddr >= block->start + block->len)
+ continue;
+ for (i = 0; i < block->jump_in_cnt; i++)
+ if (block->jump_in[i].vaddr == vaddr)
+ break;
+ if (i == block->jump_in_cnt)
+ continue;
+ found_clean = block->jump_in[i].addr;
+ hash_table_add(vaddr, found_clean);
+ return found_clean;
}
- head=head->next;
}
- //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
- int r=new_recompile_block(vaddr);
- if(r==0) return get_addr(vaddr);
- // Execute in unmapped page, generate pagefault execption
+ found_clean = try_restore_block(vaddr, start_page, end_page);
+ if (found_clean)
+ return found_clean;
+
+ if (!can_compile)
+ return NULL;
+
+ int r = new_recompile_block(vaddr);
+ if (r == 0)
+ return ndrc_get_addr_ht(vaddr);
+
+ // generate an address error
Status|=2;
- Cause=(vaddr<<31)|0x8;
+ Cause=(vaddr<<31)|(4<<2);
EPC=(vaddr&1)?vaddr-5:vaddr;
BadVAddr=(vaddr&~1);
- Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
- EntryHi=BadVAddr&0xFFFFE000;
- return get_addr_ht(0x80000000);
+ return ndrc_get_addr_ht(0x80000080);
}
+
// Look up address in hash table first
-void *get_addr_ht(u_int vaddr)
+void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile)
{
- //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
const struct ht_entry *ht_bin = hash_table_get(vaddr);
+ stat_inc(stat_ht_lookups);
if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
- return get_addr(vaddr);
+ return get_addr(vaddr, can_compile);
}
-void clear_all_regs(signed char regmap[])
+void *ndrc_get_addr_ht(u_int vaddr)
+{
+ return ndrc_get_addr_ht_param(vaddr, 1);
+}
+
+static void clear_all_regs(signed char regmap[])
+{
+ memset(regmap, -1, sizeof(regmap[0]) * HOST_REGS);
+}
+
+// get_reg: get allocated host reg from mips reg
+// returns -1 if no such mips reg was allocated
+#if defined(__arm__) && defined(HAVE_ARMV6) && HOST_REGS == 13 && EXCLUDE_REG == 11
+
+extern signed char get_reg(const signed char regmap[], signed char r);
+
+#else
+
+static signed char get_reg(const signed char regmap[], signed char r)
{
int hr;
- for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ if (hr == EXCLUDE_REG)
+ continue;
+ if (regmap[hr] == r)
+ return hr;
+ }
+ return -1;
+}
+
+#endif
+
+// get reg as mask bit (1 << hr)
+static u_int get_regm(const signed char regmap[], signed char r)
+{
+ return (1u << (get_reg(regmap, r) & 31)) & ~(1u << 31);
}
-static signed char get_reg(const signed char regmap[],int r)
+static signed char get_reg_temp(const signed char regmap[])
{
int hr;
- for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&®map[hr]==r) return hr;
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ if (hr == EXCLUDE_REG)
+ continue;
+ if (regmap[hr] == (signed char)-1)
+ return hr;
+ }
return -1;
}
return -1;
}
-int count_free_regs(signed char regmap[])
+// reverse reg map: mips -> host
+#define RRMAP_SIZE 64
+static void make_rregs(const signed char regmap[], signed char rrmap[RRMAP_SIZE],
+ u_int *regs_can_change)
+{
+ u_int r, hr, hr_can_change = 0;
+ memset(rrmap, -1, RRMAP_SIZE);
+ for (hr = 0; hr < HOST_REGS; )
+ {
+ r = regmap[hr];
+ rrmap[r & (RRMAP_SIZE - 1)] = hr;
+ // only add mips $1-$31+$lo, others shifted out
+ hr_can_change |= (uint64_t)1 << (hr + ((r - 1) & 32));
+ hr++;
+ if (hr == EXCLUDE_REG)
+ hr++;
+ }
+ hr_can_change |= 1u << (rrmap[33] & 31);
+ hr_can_change |= 1u << (rrmap[CCREG] & 31);
+ hr_can_change &= ~(1u << 31);
+ *regs_can_change = hr_can_change;
+}
+
+// same as get_reg, but takes rrmap
+static signed char get_rreg(signed char rrmap[RRMAP_SIZE], signed char r)
+{
+ assert(0 <= r && r < RRMAP_SIZE);
+ return rrmap[r];
+}
+
+static int count_free_regs(const signed char regmap[])
{
int count=0;
int hr;
return count;
}
-void dirty_reg(struct regstat *cur,signed char reg)
+static void dirty_reg(struct regstat *cur, signed char reg)
{
int hr;
- if(!reg) return;
- for (hr=0;hr<HOST_REGS;hr++) {
- if((cur->regmap[hr]&63)==reg) {
- cur->dirty|=1<<hr;
- }
- }
+ if (!reg) return;
+ hr = get_reg(cur->regmap, reg);
+ if (hr >= 0)
+ cur->dirty |= 1<<hr;
}
static void set_const(struct regstat *cur, signed char reg, uint32_t value)
{
int hr;
- if(!reg) return;
- for (hr=0;hr<HOST_REGS;hr++) {
- if(cur->regmap[hr]==reg) {
- cur->isconst|=1<<hr;
- current_constmap[hr]=value;
- }
+ if (!reg) return;
+ hr = get_reg(cur->regmap, reg);
+ if (hr >= 0) {
+ cur->isconst |= 1<<hr;
+ current_constmap[hr] = value;
}
}
static void clear_const(struct regstat *cur, signed char reg)
{
int hr;
- if(!reg) return;
- for (hr=0;hr<HOST_REGS;hr++) {
- if((cur->regmap[hr]&63)==reg) {
- cur->isconst&=~(1<<hr);
- }
- }
+ if (!reg) return;
+ hr = get_reg(cur->regmap, reg);
+ if (hr >= 0)
+ cur->isconst &= ~(1<<hr);
}
-static int is_const(struct regstat *cur, signed char reg)
+static int is_const(const struct regstat *cur, signed char reg)
{
int hr;
- if(reg<0) return 0;
- if(!reg) return 1;
- for (hr=0;hr<HOST_REGS;hr++) {
- if((cur->regmap[hr]&63)==reg) {
- return (cur->isconst>>hr)&1;
- }
- }
+ if (reg < 0) return 0;
+ if (!reg) return 1;
+ hr = get_reg(cur->regmap, reg);
+ if (hr >= 0)
+ return (cur->isconst>>hr)&1;
return 0;
}
-static uint32_t get_const(struct regstat *cur, signed char reg)
+static uint32_t get_const(const struct regstat *cur, signed char reg)
{
int hr;
- if(!reg) return 0;
- for (hr=0;hr<HOST_REGS;hr++) {
- if(cur->regmap[hr]==reg) {
- return current_constmap[hr];
- }
- }
- SysPrintf("Unknown constant in r%d\n",reg);
+ if (!reg) return 0;
+ hr = get_reg(cur->regmap, reg);
+ if (hr >= 0)
+ return current_constmap[hr];
+
+ SysPrintf("Unknown constant in r%d\n", reg);
abort();
}
// Least soon needed registers
// Look at the next ten instructions and see which registers
// will be used. Try not to reallocate these.
-void lsn(u_char hsn[], int i, int *preferred_reg)
+static void lsn(u_char hsn[], int i, int *preferred_reg)
{
int j;
int b=-1;
j=slen-i-1;
break;
}
- if (is_ujump(i+j))
+ if (dops[i+j].is_ujump)
{
// Don't go past an unconditonal jump
j++;
hsn[dops[i+j].rs1]=j;
hsn[dops[i+j].rs2]=j;
}
+ if (ram_offset && (dops[i+j].is_load || dops[i+j].is_store))
+ hsn[ROREG] = j;
// On some architectures stores need invc_ptr
#if defined(HOST_IMM8)
- if(dops[i+j].itype==STORE || dops[i+j].itype==STORELR || (dops[i+j].opcode&0x3b)==0x39 || (dops[i+j].opcode&0x3b)==0x3a) {
- hsn[INVCP]=j;
- }
+ if (dops[i+j].is_store)
+ hsn[INVCP] = j;
#endif
if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
{
// TODO: preferred register based on backward branch
}
// Delay slot should preferably not overwrite branch conditions or cycle count
- if (i > 0 && is_jump(i-1)) {
+ if (i > 0 && dops[i-1].is_jump) {
if(dops[i-1].rs1) if(hsn[dops[i-1].rs1]>1) hsn[dops[i-1].rs1]=1;
if(dops[i-1].rs2) if(hsn[dops[i-1].rs2]>1) hsn[dops[i-1].rs2]=1;
hsn[CCREG]=1;
hsn[RHTBL]=1;
}
// Coprocessor load/store needs FTEMP, even if not declared
- if(dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if(dops[i].itype==C2LS) {
hsn[FTEMP]=0;
}
// Load L/R also uses FTEMP as a temporary register
}
// We only want to allocate registers if we're going to use them again soon
-int needed_again(int r, int i)
+static int needed_again(int r, int i)
{
int j;
int b=-1;
int rn=10;
- if (i > 0 && is_ujump(i-1))
+ if (i > 0 && dops[i-1].is_ujump)
{
if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
return 0; // Don't need any registers if exiting the block
j=slen-i-1;
break;
}
- if (is_ujump(i+j))
+ if (dops[i+j].is_ujump)
{
// Don't go past an unconditonal jump
j++;
// Try to match register allocations at the end of a loop with those
// at the beginning
-int loop_reg(int i, int r, int hr)
+static int loop_reg(int i, int r, int hr)
{
int j,k;
for(j=0;j<9;j++)
j=slen-i-1;
break;
}
- if (is_ujump(i+j))
+ if (dops[i+j].is_ujump)
{
// Don't go past an unconditonal jump
j++;
// Allocate every register, preserving source/target regs
-void alloc_all(struct regstat *cur,int i)
+static void alloc_all(struct regstat *cur,int i)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
- if(((cur->regmap[hr]&63)!=dops[i].rs1)&&((cur->regmap[hr]&63)!=dops[i].rs2)&&
- ((cur->regmap[hr]&63)!=dops[i].rt1)&&((cur->regmap[hr]&63)!=dops[i].rt2))
+ if((cur->regmap[hr]!=dops[i].rs1)&&(cur->regmap[hr]!=dops[i].rs2)&&
+ (cur->regmap[hr]!=dops[i].rt1)&&(cur->regmap[hr]!=dops[i].rt2))
{
cur->regmap[hr]=-1;
cur->dirty&=~(1<<hr);
}
// Don't need zeros
- if((cur->regmap[hr]&63)==0)
+ if(cur->regmap[hr]==0)
{
cur->regmap[hr]=-1;
cur->dirty&=~(1<<hr);
} function_names[] = {
FUNCNAME(cc_interrupt),
FUNCNAME(gen_interupt),
- FUNCNAME(get_addr_ht),
- FUNCNAME(get_addr),
+ FUNCNAME(ndrc_get_addr_ht),
FUNCNAME(jump_handler_read8),
FUNCNAME(jump_handler_read16),
FUNCNAME(jump_handler_read32),
FUNCNAME(jump_handler_write8),
FUNCNAME(jump_handler_write16),
FUNCNAME(jump_handler_write32),
- FUNCNAME(invalidate_addr),
+ FUNCNAME(ndrc_invalidate_addr),
FUNCNAME(jump_to_new_pc),
+ FUNCNAME(jump_break),
+ FUNCNAME(jump_break_ds),
+ FUNCNAME(jump_syscall),
+ FUNCNAME(jump_syscall_ds),
FUNCNAME(call_gteStall),
FUNCNAME(new_dyna_leave),
FUNCNAME(pcsx_mtc0),
#ifdef DRC_DBG
FUNCNAME(do_insn_cmp),
#endif
-#ifdef __arm__
- FUNCNAME(verify_code),
-#endif
};
static const char *func_name(const void *a)
static void *get_trampoline(const void *f)
{
+ struct ndrc_tramp *tramp = NDRC_WRITE_OFFSET(&ndrc->tramp);
size_t i;
- for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
- if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
+ for (i = 0; i < ARRAY_SIZE(tramp->f); i++) {
+ if (tramp->f[i] == f || tramp->f[i] == NULL)
break;
}
- if (i == ARRAY_SIZE(ndrc->tramp.f)) {
+ if (i == ARRAY_SIZE(tramp->f)) {
SysPrintf("trampoline table is full, last func %p\n", f);
abort();
}
- if (ndrc->tramp.f[i] == NULL) {
- start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
- ndrc->tramp.f[i] = f;
- end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
+ if (tramp->f[i] == NULL) {
+ start_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
+ tramp->f[i] = f;
+ end_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
+#ifdef HAVE_LIBNX
+ // invalidate the RX mirror (unsure if necessary, but just in case...)
+ armDCacheFlush(&ndrc->tramp.f[i], sizeof(ndrc->tramp.f[i]));
+#endif
}
return &ndrc->tramp.ops[i];
}
emit_call(f);
}
-// Add virtual address mapping to linked list
-void ll_add(struct ll_entry **head,int vaddr,void *addr)
-{
- struct ll_entry *new_entry;
- new_entry=malloc(sizeof(struct ll_entry));
- assert(new_entry!=NULL);
- new_entry->vaddr=vaddr;
- new_entry->reg_sv_flags=0;
- new_entry->addr=addr;
- new_entry->next=*head;
- *head=new_entry;
-}
-
-void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
-{
- ll_add(head,vaddr,addr);
- (*head)->reg_sv_flags=reg_sv_flags;
-}
-
// Check if an address is already compiled
// but don't return addresses which are about to expire from the cache
-void *check_addr(u_int vaddr)
+static void *check_addr(u_int vaddr)
{
struct ht_entry *ht_bin = hash_table_get(vaddr);
size_t i;
for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
if (ht_bin->vaddr[i] == vaddr)
- if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
- if (isclean(ht_bin->tcaddr[i]))
- return ht_bin->tcaddr[i];
- }
- u_int page=get_page(vaddr);
- struct ll_entry *head;
- head=jump_in[page];
- while (head != NULL) {
- if (head->vaddr == vaddr) {
- if (doesnt_expire_soon(head->addr)) {
- // Update existing entry with current address
- if (ht_bin->vaddr[0] == vaddr) {
- ht_bin->tcaddr[0] = head->addr;
- return head->addr;
- }
- if (ht_bin->vaddr[1] == vaddr) {
- ht_bin->tcaddr[1] = head->addr;
- return head->addr;
- }
- // Insert into hash table with low priority.
- // Don't evict existing entries, as they are probably
- // addresses that are being accessed frequently.
- if (ht_bin->vaddr[0] == -1) {
- ht_bin->vaddr[0] = vaddr;
- ht_bin->tcaddr[0] = head->addr;
- }
- else if (ht_bin->vaddr[1] == -1) {
- ht_bin->vaddr[1] = vaddr;
- ht_bin->tcaddr[1] = head->addr;
- }
- return head->addr;
+ if (doesnt_expire_soon(ht_bin->tcaddr[i]))
+ return ht_bin->tcaddr[i];
+ }
+
+ // refactor to get_addr_nocompile?
+ u_int start_page = get_page_prev(vaddr);
+ u_int page, end_page = get_page(vaddr);
+
+ stat_inc(stat_jump_in_lookups);
+ for (page = start_page; page <= end_page; page++) {
+ const struct block_info *block;
+ for (block = blocks[page]; block != NULL; block = block->next) {
+ if (vaddr < block->start)
+ break;
+ if (block->is_dirty || vaddr >= block->start + block->len)
+ continue;
+ if (!doesnt_expire_soon(ndrc->translation_cache + block->tc_offs))
+ continue;
+ for (i = 0; i < block->jump_in_cnt; i++)
+ if (block->jump_in[i].vaddr == vaddr)
+ break;
+ if (i == block->jump_in_cnt)
+ continue;
+
+ // Update existing entry with current address
+ void *addr = block->jump_in[i].addr;
+ if (ht_bin->vaddr[0] == vaddr) {
+ ht_bin->tcaddr[0] = addr;
+ return addr;
+ }
+ if (ht_bin->vaddr[1] == vaddr) {
+ ht_bin->tcaddr[1] = addr;
+ return addr;
+ }
+ // Insert into hash table with low priority.
+ // Don't evict existing entries, as they are probably
+ // addresses that are being accessed frequently.
+ if (ht_bin->vaddr[0] == -1) {
+ ht_bin->vaddr[0] = vaddr;
+ ht_bin->tcaddr[0] = addr;
}
+ else if (ht_bin->vaddr[1] == -1) {
+ ht_bin->vaddr[1] = vaddr;
+ ht_bin->tcaddr[1] = addr;
+ }
+ return addr;
}
- head=head->next;
}
- return 0;
+ return NULL;
}
-void remove_hash(int vaddr)
+static void blocks_clear(struct block_info **head)
{
- //printf("remove hash: %x\n",vaddr);
- struct ht_entry *ht_bin = hash_table_get(vaddr);
- if (ht_bin->vaddr[1] == vaddr) {
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
- }
- if (ht_bin->vaddr[0] == vaddr) {
- ht_bin->vaddr[0] = ht_bin->vaddr[1];
- ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
+ struct block_info *cur, *next;
+
+ if ((cur = *head)) {
+ *head = NULL;
+ while (cur) {
+ next = cur->next;
+ free(cur);
+ cur = next;
+ }
}
}
-static void ll_remove_matching_addrs(struct ll_entry **head,
- uintptr_t base_offs_s, int shift)
+static int blocks_remove_matching_addrs(struct block_info **head,
+ u_int base_offs, int shift)
{
- struct ll_entry *next;
- while(*head) {
- uintptr_t o1 = (u_char *)(*head)->addr - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s)
- {
- inv_debug("EXP: Remove pointer to %p (%x)\n",(*head)->addr,(*head)->vaddr);
- remove_hash((*head)->vaddr);
- next=(*head)->next;
+ struct block_info *next;
+ int hit = 0;
+ while (*head) {
+ if ((((*head)->tc_offs ^ base_offs) >> shift) == 0) {
+ inv_debug("EXP: rm block %08x (tc_offs %zx)\n", (*head)->start, (*head)->tc_offs);
+ invalidate_block(*head);
+ next = (*head)->next;
free(*head);
- *head=next;
+ *head = next;
+ stat_dec(stat_blocks);
+ hit = 1;
}
else
{
- head=&((*head)->next);
+ head = &((*head)->next);
}
}
+ return hit;
}
-// Remove all entries from linked list
-void ll_clear(struct ll_entry **head)
+// This is called when we write to a compiled block (see do_invstub)
+static void unlink_jumps_vaddr_range(u_int start, u_int end)
{
- struct ll_entry *cur;
- struct ll_entry *next;
- if((cur=*head)) {
- *head=0;
- while(cur) {
- next=cur->next;
- free(cur);
- cur=next;
+ u_int page, start_page = get_page(start), end_page = get_page(end - 1);
+ int i;
+
+ for (page = start_page; page <= end_page; page++) {
+ struct jump_info *ji = jumps[page];
+ if (ji == NULL)
+ continue;
+ for (i = 0; i < ji->count; ) {
+ if (ji->e[i].target_vaddr < start || ji->e[i].target_vaddr >= end) {
+ i++;
+ continue;
+ }
+
+ inv_debug("INV: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr,
+ (u_char *)ji->e[i].stub - ndrc->translation_cache);
+ void *host_addr = find_extjump_insn(ji->e[i].stub);
+ mark_clear_cache(host_addr);
+ set_jump_target(host_addr, ji->e[i].stub); // point back to dyna_linker stub
+
+ stat_dec(stat_links);
+ ji->count--;
+ if (i < ji->count) {
+ ji->e[i] = ji->e[ji->count];
+ continue;
+ }
+ i++;
}
}
}
-// Dereference the pointers and remove if it matches
-static void ll_kill_pointers(struct ll_entry *head,
- uintptr_t base_offs_s, int shift)
+static void unlink_jumps_tc_range(struct jump_info *ji, u_int base_offs, int shift)
{
- while(head) {
- u_char *ptr = get_pointer(head->addr);
- uintptr_t o1 = ptr - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- inv_debug("EXP: Lookup pointer to %p at %p (%x)\n",ptr,head->addr,head->vaddr);
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s)
- {
- inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
- void *host_addr=find_extjump_insn(head->addr);
- mark_clear_cache(host_addr);
- set_jump_target(host_addr, head->addr);
+ int i;
+ if (ji == NULL)
+ return;
+ for (i = 0; i < ji->count; ) {
+ u_int tc_offs = (u_char *)ji->e[i].stub - ndrc->translation_cache;
+ if (((tc_offs ^ base_offs) >> shift) != 0) {
+ i++;
+ continue;
}
- head=head->next;
- }
-}
-// This is called when we write to a compiled block (see do_invstub)
-static void invalidate_page(u_int page)
-{
- struct ll_entry *head;
- struct ll_entry *next;
- head=jump_in[page];
- jump_in[page]=0;
- while(head!=NULL) {
- inv_debug("INVALIDATE: %x\n",head->vaddr);
- remove_hash(head->vaddr);
- next=head->next;
- free(head);
- head=next;
- }
- head=jump_out[page];
- jump_out[page]=0;
- while(head!=NULL) {
- inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
- void *host_addr=find_extjump_insn(head->addr);
- mark_clear_cache(host_addr);
- set_jump_target(host_addr, head->addr); // point back to dyna_linker
- next=head->next;
- free(head);
- head=next;
- }
-}
-
-static void invalidate_block_range(u_int block, u_int first, u_int last)
-{
- u_int page=get_page(block<<12);
- //printf("first=%d last=%d\n",first,last);
- invalidate_page(page);
- assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
- assert(last<page+5);
- // Invalidate the adjacent pages if a block crosses a 4K boundary
- while(first<page) {
- invalidate_page(first);
- first++;
- }
- for(first=page+1;first<last;first++) {
- invalidate_page(first);
+ inv_debug("EXP: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr, tc_offs);
+ stat_dec(stat_links);
+ ji->count--;
+ if (i < ji->count) {
+ ji->e[i] = ji->e[ji->count];
+ continue;
+ }
+ i++;
}
- do_clear_cache();
+}
- // Don't trap writes
- invalid_code[block]=1;
+static void invalidate_block(struct block_info *block)
+{
+ u_int i;
- #ifdef USE_MINI_HT
- memset(mini_ht,-1,sizeof(mini_ht));
- #endif
+ block->is_dirty = 1;
+ unlink_jumps_vaddr_range(block->start, block->start + block->len);
+ for (i = 0; i < block->jump_in_cnt; i++)
+ hash_table_remove(block->jump_in[i].vaddr);
}
-void invalidate_block(u_int block)
+static int invalidate_range(u_int start, u_int end,
+ u32 *inv_start_ret, u32 *inv_end_ret)
{
- u_int page=get_page(block<<12);
- u_int vpage=get_vpage(block<<12);
- inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
- //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
- u_int first,last;
- first=last=page;
- struct ll_entry *head;
- head=jump_dirty[vpage];
- //printf("page=%d vpage=%d\n",page,vpage);
- while(head!=NULL) {
- if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
- u_char *start, *end;
- get_bounds(head->addr, &start, &end);
- //printf("start: %p end: %p\n", start, end);
- if (page < 2048 && start >= rdram && end < rdram+RAM_SIZE) {
- if (((start-rdram)>>12) <= page && ((end-1-rdram)>>12) >= page) {
- if ((((start-rdram)>>12)&2047) < first) first = ((start-rdram)>>12)&2047;
- if ((((end-1-rdram)>>12)&2047) > last) last = ((end-1-rdram)>>12)&2047;
- }
+ struct block_info *last_block = NULL;
+ u_int start_page = get_page_prev(start);
+ u_int end_page = get_page(end - 1);
+ u_int start_m = pmmask(start);
+ u_int end_m = pmmask(end - 1);
+ u_int inv_start, inv_end;
+ u_int blk_start_m, blk_end_m;
+ u_int page;
+ int hit = 0;
+
+ // additional area without code (to supplement invalid_code[]), [start, end)
+ // avoids excessive ndrc_invalidate_addr() calls
+ inv_start = start_m & ~0xfff;
+ inv_end = end_m | 0xfff;
+
+ for (page = start_page; page <= end_page; page++) {
+ struct block_info *block;
+ for (block = blocks[page]; block != NULL; block = block->next) {
+ if (block->is_dirty)
+ continue;
+ last_block = block;
+ blk_end_m = pmmask(block->start + block->len);
+ if (blk_end_m <= start_m) {
+ inv_start = max(inv_start, blk_end_m);
+ continue;
+ }
+ blk_start_m = pmmask(block->start);
+ if (end_m <= blk_start_m) {
+ inv_end = min(inv_end, blk_start_m - 1);
+ continue;
}
+ if (!block->source) // "hack" block - leave it alone
+ continue;
+
+ hit++;
+ invalidate_block(block);
+ stat_inc(stat_inv_hits);
}
- head=head->next;
}
- invalidate_block_range(block,first,last);
-}
-void invalidate_addr(u_int addr)
-{
- //static int rhits;
- // this check is done by the caller
- //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
- u_int page=get_vpage(addr);
- if(page<2048) { // RAM
- struct ll_entry *head;
- u_int addr_min=~0, addr_max=0;
- u_int mask=RAM_SIZE-1;
- u_int addr_main=0x80000000|(addr&mask);
- int pg1;
- inv_code_start=addr_main&~0xfff;
- inv_code_end=addr_main|0xfff;
- pg1=page;
- if (pg1>0) {
- // must check previous page too because of spans..
- pg1--;
- inv_code_start-=0x1000;
- }
- for(;pg1<=page;pg1++) {
- for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
- u_char *start_h, *end_h;
- u_int start, end;
- get_bounds(head->addr, &start_h, &end_h);
- start = (uintptr_t)start_h - ram_offset;
- end = (uintptr_t)end_h - ram_offset;
- if(start<=addr_main&&addr_main<end) {
- if(start<addr_min) addr_min=start;
- if(end>addr_max) addr_max=end;
- }
- else if(addr_main<start) {
- if(start<inv_code_end)
- inv_code_end=start-1;
- }
- else {
- if(end>inv_code_start)
- inv_code_start=end;
- }
- }
- }
- if (addr_min!=~0) {
- inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
- inv_code_start=inv_code_end=~0;
- invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
- return;
- }
- else {
- inv_code_start=(addr&~mask)|(inv_code_start&mask);
- inv_code_end=(addr&~mask)|(inv_code_end&mask);
- inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
- return;
+ if (!hit && last_block && last_block->source) {
+ // could be some leftover unused block, uselessly trapping writes
+ last_block->inv_near_misses++;
+ if (last_block->inv_near_misses > 128) {
+ invalidate_block(last_block);
+ stat_inc(stat_inv_hits);
+ hit++;
}
}
- invalidate_block(addr>>12);
+ if (hit) {
+ do_clear_cache();
+#ifdef USE_MINI_HT
+ memset(mini_ht, -1, sizeof(mini_ht));
+#endif
+ }
+
+ if (inv_start <= (start_m & ~0xfff) && inv_end >= (start_m | 0xfff))
+ // the whole page is empty now
+ mark_invalid_code(start, 1, 1);
+
+ if (inv_start_ret) *inv_start_ret = inv_start | (start & 0xe0000000);
+ if (inv_end_ret) *inv_end_ret = inv_end | (end & 0xe0000000);
+ return hit;
+}
+
+void new_dynarec_invalidate_range(unsigned int start, unsigned int end)
+{
+ invalidate_range(start, end, NULL, NULL);
+}
+
+void ndrc_invalidate_addr(u_int addr)
+{
+  // this check is done by the caller
+  //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
+  int hits = invalidate_range(addr, addr + 4, &inv_code_start, &inv_code_end);
+  if (hits == 0)
+    inv_debug("INV ADDR: %08x miss, inv %08x-%08x\n", addr, inv_code_start, inv_code_end);
+  else
+    inv_debug("INV ADDR: %08x hit %d blocks\n", addr, hits);
+  stat_inc(stat_inv_addr_calls);
+}
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
-void invalidate_all_pages(void)
+void new_dynarec_invalidate_all_pages(void)
{
+ struct block_info *block;
u_int page;
- for(page=0;page<4096;page++)
- invalidate_page(page);
- for(page=0;page<1048576;page++)
- if(!invalid_code[page]) {
- restore_candidate[(page&2047)>>3]|=1<<(page&7);
- restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
+ for (page = 0; page < ARRAY_SIZE(blocks); page++) {
+ for (block = blocks[page]; block != NULL; block = block->next) {
+ if (block->is_dirty)
+ continue;
+ if (!block->source) // hack block?
+ continue;
+ invalidate_block(block);
}
+ }
+
#ifdef USE_MINI_HT
- memset(mini_ht,-1,sizeof(mini_ht));
+ memset(mini_ht, -1, sizeof(mini_ht));
#endif
do_clear_cache();
}
static void do_invstub(int n)
{
literal_pool(20);
- u_int reglist=stubs[n].a;
+ u_int reglist = stubs[n].a;
set_jump_target(stubs[n].addr, out);
save_regs(reglist);
- if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
- emit_far_call(invalidate_addr);
+ if (stubs[n].b != 0)
+ emit_mov(stubs[n].b, 0);
+ emit_readword(&inv_code_start, 1);
+ emit_readword(&inv_code_end, 2);
+ emit_cmp(0, 1);
+ emit_cmpcs(2, 0);
+ void *jaddr = out;
+ emit_jc(0);
+ emit_far_call(ndrc_invalidate_addr);
+ set_jump_target(jaddr, out);
restore_regs(reglist);
emit_jmp(stubs[n].retaddr); // return address
}
// Add an entry to jump_out after making a link
-// src should point to code by emit_extjump2()
-void add_jump_out(u_int vaddr,void *src)
+// src should point to code by emit_extjump()
+void ndrc_add_jump_out(u_int vaddr, void *src)
{
- u_int page=get_page(vaddr);
- inv_debug("add_jump_out: %p -> %x (%d)\n",src,vaddr,page);
+ inv_debug("ndrc_add_jump_out: %p -> %x\n", src, vaddr);
+ u_int page = get_page(vaddr);
+ struct jump_info *ji;
+
+ stat_inc(stat_links);
check_extjump2(src);
- ll_add(jump_out+page,vaddr,src);
- //inv_debug("add_jump_out: to %p\n",get_pointer(src));
-}
-
-// If a code block was found to be unmodified (bit was set in
-// restore_candidate) and it remains unmodified (bit is clear
-// in invalid_code) then move the entries for that 4K page from
-// the dirty list to the clean list.
-void clean_blocks(u_int page)
-{
- struct ll_entry *head;
- inv_debug("INV: clean_blocks page=%d\n",page);
- head=jump_dirty[page];
- while(head!=NULL) {
- if(!invalid_code[head->vaddr>>12]) {
- // Don't restore blocks which are about to expire from the cache
- if (doesnt_expire_soon(head->addr)) {
- if(verify_dirty(head->addr)) {
- u_char *start, *end;
- //printf("Possibly Restore %x (%p)\n",head->vaddr, head->addr);
- u_int i;
- u_int inv=0;
- get_bounds(head->addr, &start, &end);
- if (start - rdram < RAM_SIZE) {
- for (i = (start-rdram+0x80000000)>>12; i <= (end-1-rdram+0x80000000)>>12; i++) {
- inv|=invalid_code[i];
- }
- }
- else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
- inv=1;
- }
- if(!inv) {
- void *clean_addr = get_clean_addr(head->addr);
- if (doesnt_expire_soon(clean_addr)) {
- u_int ppage=page;
- inv_debug("INV: Restored %x (%p/%p)\n",head->vaddr, head->addr, clean_addr);
- //printf("page=%x, addr=%x\n",page,head->vaddr);
- //assert(head->vaddr>>12==(page|0x80000));
- ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
- struct ht_entry *ht_bin = hash_table_get(head->vaddr);
- if (ht_bin->vaddr[0] == head->vaddr)
- ht_bin->tcaddr[0] = clean_addr; // Replace existing entry
- if (ht_bin->vaddr[1] == head->vaddr)
- ht_bin->tcaddr[1] = clean_addr; // Replace existing entry
- }
- }
- }
- }
- }
- head=head->next;
+  ji = jumps[page];
+  if (ji == NULL) {
+    // first link on this page: start with room for 16 entries
+    ji = malloc(sizeof(*ji) + sizeof(ji->e[0]) * 16);
+    if (ji == NULL) {
+      SysPrintf("FATAL: out of memory for jump_info\n");
+      abort();
+    }
+    ji->alloc = 16;
+    ji->count = 0;
+  }
+  else if (ji->count >= ji->alloc) {
+    // grow by 16 entries; keep the old pointer valid until realloc succeeds
+    struct jump_info *ji2 =
+      realloc(ji, sizeof(*ji) + sizeof(ji->e[0]) * (ji->alloc + 16));
+    if (ji2 == NULL) {
+      SysPrintf("FATAL: out of memory for jump_info\n");
+      abort();
+    }
+    ji = ji2;
+    ji->alloc += 16;
+  }
+ jumps[page] = ji;
+ ji->e[ji->count].target_vaddr = vaddr;
+ ji->e[ji->count].stub = src;
+ ji->count++;
}
/* Register allocation */
static void alloc_reg(struct regstat *cur,int i,signed char reg)
{
int r,hr;
- int preferred_reg = (reg&7);
- if(reg==CCREG) preferred_reg=HOST_CCREG;
- if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
+ int preferred_reg = PREFERRED_REG_FIRST
+ + reg % (PREFERRED_REG_LAST - PREFERRED_REG_FIRST + 1);
+ if (reg == CCREG) preferred_reg = HOST_CCREG;
+ if (reg == PTEMP || reg == FTEMP) preferred_reg = 12;
+ assert(PREFERRED_REG_FIRST != EXCLUDE_REG && EXCLUDE_REG != HOST_REGS);
+ assert(reg >= 0);
// Don't allocate unused registers
if((cur->u>>reg)&1) return;
// see if it's already allocated
- for(hr=0;hr<HOST_REGS;hr++)
- {
- if(cur->regmap[hr]==reg) return;
- }
+ if (get_reg(cur->regmap, reg) >= 0)
+ return;
// Keep the same mapping if the register was already allocated in a loop
preferred_reg = loop_reg(i,reg,preferred_reg);
if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
}
}
+
// Try to allocate any available register, but prefer
// registers that have not been used recently.
- if(i>0) {
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
- if(regs[i-1].regmap[hr]!=dops[i-1].rs1&®s[i-1].regmap[hr]!=dops[i-1].rs2&®s[i-1].regmap[hr]!=dops[i-1].rt1&®s[i-1].regmap[hr]!=dops[i-1].rt2) {
+ if (i > 0) {
+ for (hr = PREFERRED_REG_FIRST; ; ) {
+ if (cur->regmap[hr] < 0) {
+ int oldreg = regs[i-1].regmap[hr];
+ if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
+ && oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
+ {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
return;
}
}
+ hr++;
+ if (hr == EXCLUDE_REG)
+ hr++;
+ if (hr == HOST_REGS)
+ hr = 0;
+ if (hr == PREFERRED_REG_FIRST)
+ break;
}
}
+
// Try to allocate any available register
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ for (hr = PREFERRED_REG_FIRST; ; ) {
+ if (cur->regmap[hr] < 0) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
return;
}
+ hr++;
+ if (hr == EXCLUDE_REG)
+ hr++;
+ if (hr == HOST_REGS)
+ hr = 0;
+ if (hr == PREFERRED_REG_FIRST)
+ break;
}
// Ok, now we have to evict someone
// Don't evict the cycle count at entry points, otherwise the entry
// stub will have to write it.
if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
- if(i>1&&hsn[CCREG]>2&&(dops[i-2].itype==RJUMP||dops[i-2].itype==UJUMP||dops[i-2].itype==CJUMP||dops[i-2].itype==SJUMP)) hsn[CCREG]=2;
+ if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
for(j=10;j>=3;j--)
{
// Alloc preferred register if available
if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
for(hr=0;hr<HOST_REGS;hr++) {
// Evict both parts of a 64-bit register
- if((cur->regmap[hr]&63)==r) {
+ if(cur->regmap[hr]==r) {
cur->regmap[hr]=-1;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
// Don't evict the cycle count at entry points, otherwise the entry
// stub will have to write it.
if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
- if(i>1&&hsn[CCREG]>2&&(dops[i-2].itype==RJUMP||dops[i-2].itype==UJUMP||dops[i-2].itype==CJUMP||dops[i-2].itype==SJUMP)) hsn[CCREG]=2;
+ if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
for(j=10;j>=3;j--)
{
for(r=1;r<=MAXREG;r++)
static void mov_alloc(struct regstat *current,int i)
{
if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) {
- // logically this is needed but just won't work, no idea why
- //alloc_cc(current,i); // for stalls
- //dirty_reg(current,CCREG);
+ alloc_cc(current,i); // for stalls
+ dirty_reg(current,CCREG);
}
// Note: Don't need to actually alloc the source registers
{
if(dops[i].rt1) {
if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
- else dops[i].lt1=dops[i].rs1;
+ else dops[i].use_lt1=!!dops[i].rs1;
alloc_reg(current,i,dops[i].rt1);
dirty_reg(current,dops[i].rt1);
if(is_const(current,dops[i].rs1)) {
static void imm16_alloc(struct regstat *current,int i)
{
if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
- else dops[i].lt1=dops[i].rs1;
+ else dops[i].use_lt1=!!dops[i].rs1;
if(dops[i].rt1) alloc_reg(current,i,dops[i].rt1);
if(dops[i].opcode==0x18||dops[i].opcode==0x19) { // DADDI/DADDIU
assert(0);
clear_const(current,dops[i].rt1);
//if(dops[i].rs1!=dops[i].rt1&&needed_again(dops[i].rs1,i)) clear_const(current,dops[i].rs1); // Does this help or hurt?
if(!dops[i].rs1) current->u&=~1LL; // Allow allocating r0 if it's the source register
- if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
+ if (needed_again(dops[i].rs1, i))
+ alloc_reg(current, i, dops[i].rs1);
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
alloc_reg(current,i,dops[i].rt1);
assert(get_reg(current->regmap,dops[i].rt1)>=0);
}
}
-void store_alloc(struct regstat *current,int i)
+static void store_alloc(struct regstat *current,int i)
{
clear_const(current,dops[i].rs2);
if(!(dops[i].rs2)) current->u&=~1LL; // Allow allocating r0 if necessary
if(dops[i].opcode==0x2c||dops[i].opcode==0x2d||dops[i].opcode==0x3f) { // 64-bit SDL/SDR/SD
assert(0);
}
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
- else alloc_reg(current,i,INVCP);
+ alloc_reg(current, i, INVCP);
#endif
if(dops[i].opcode==0x2a||dops[i].opcode==0x2e||dops[i].opcode==0x2c||dops[i].opcode==0x2d) { // SWL/SWL/SDL/SDR
alloc_reg(current,i,FTEMP);
minimum_free_regs[i]=1;
}
-void c1ls_alloc(struct regstat *current,int i)
+static void c1ls_alloc(struct regstat *current,int i)
{
- //clear_const(current,dops[i].rs1); // FIXME
clear_const(current,dops[i].rt1);
- if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
alloc_reg(current,i,CSREG); // Status
- alloc_reg(current,i,FTEMP);
- if(dops[i].opcode==0x35||dops[i].opcode==0x3d) { // 64-bit LDC1/SDC1
- assert(0);
- }
- #if defined(HOST_IMM8)
- // On CPUs without 32-bit immediates we need a pointer to invalid_code
- else if((dops[i].opcode&0x3b)==0x39) // SWC1/SDC1
- alloc_reg(current,i,INVCP);
- #endif
- // We need a temporary register for address generation
- alloc_reg_temp(current,i,-1);
}
-void c2ls_alloc(struct regstat *current,int i)
+static void c2ls_alloc(struct regstat *current,int i)
{
clear_const(current,dops[i].rt1);
if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
alloc_reg(current,i,FTEMP);
+ if (ram_offset)
+ alloc_reg(current, i, ROREG);
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
- if((dops[i].opcode&0x3b)==0x3a) // SWC2/SDC2
+ if (dops[i].opcode == 0x3a) // SWC2
alloc_reg(current,i,INVCP);
#endif
// We need a temporary register for address generation
}
#ifndef multdiv_alloc
-void multdiv_alloc(struct regstat *current,int i)
+static void multdiv_alloc(struct regstat *current,int i)
{
// case 0x18: MULT
// case 0x19: MULTU
}
#endif
-void cop0_alloc(struct regstat *current,int i)
+static void cop0_alloc(struct regstat *current,int i)
{
if(dops[i].opcode2==0) // MFC0
{
}
else
{
- // TLBR/TLBWI/TLBWR/TLBP/ERET
+ // RFE
assert(dops[i].opcode2==0x10);
alloc_all(current,i);
}
minimum_free_regs[i]=1;
}
-void c2op_alloc(struct regstat *current,int i)
+static void c2op_alloc(struct regstat *current,int i)
{
alloc_cc(current,i); // for stalls
dirty_reg(current,CCREG);
alloc_reg_temp(current,i,-1);
}
-void syscall_alloc(struct regstat *current,int i)
+static void syscall_alloc(struct regstat *current,int i)
{
alloc_cc(current,i);
dirty_reg(current,CCREG);
current->isconst=0;
}
-void delayslot_alloc(struct regstat *current,int i)
+static void delayslot_alloc(struct regstat *current,int i)
{
switch(dops[i].itype) {
case UJUMP:
case RJUMP:
case SYSCALL:
case HLECALL:
- case SPAN:
- assem_debug("jump in the delay slot. this shouldn't happen.\n");//abort();
- SysPrintf("Disabled speculative precompilation\n");
- stop_after_jal=1;
- break;
case IMM16:
imm16_alloc(current,i);
break;
}
}
-// Special case where a branch and delay slot span two pages in virtual memory
-static void pagespan_alloc(struct regstat *current,int i)
-{
- current->isconst=0;
- current->wasconst=0;
- regs[i].wasconst=0;
- minimum_free_regs[i]=HOST_REGS;
- alloc_all(current,i);
- alloc_cc(current,i);
- dirty_reg(current,CCREG);
- if(dops[i].opcode==3) // JAL
- {
- alloc_reg(current,i,31);
- dirty_reg(current,31);
- }
- if(dops[i].opcode==0&&(dops[i].opcode2&0x3E)==8) // JR/JALR
- {
- alloc_reg(current,i,dops[i].rs1);
- if (dops[i].rt1!=0) {
- alloc_reg(current,i,dops[i].rt1);
- dirty_reg(current,dops[i].rt1);
- }
- }
- if((dops[i].opcode&0x2E)==4) // BEQ/BNE/BEQL/BNEL
- {
- if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
- if(dops[i].rs2) alloc_reg(current,i,dops[i].rs2);
- }
- else
- if((dops[i].opcode&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
- {
- if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
- }
- //else ...
-}
-
static void add_stub(enum stub_type type, void *addr, void *retaddr,
u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
{
}
// Write out a single register
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
+static void wb_register(signed char r, const signed char regmap[], uint64_t dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
- if((regmap[hr]&63)==r) {
+ if(regmap[hr]==r) {
if((dirty>>hr)&1) {
assert(regmap[hr]<64);
emit_storereg(r,hr);
static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
{
//if(dirty_pre==dirty) return;
- int hr,reg;
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG) {
- reg=pre[hr];
- if(((~u)>>(reg&63))&1) {
- if(reg>0) {
- if(((dirty_pre&~dirty)>>hr)&1) {
- if(reg>0&®<34) {
- emit_storereg(reg,hr);
- }
- else if(reg>=64) {
- assert(0);
- }
- }
- }
- }
- }
+ int hr, r;
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ r = pre[hr];
+ if (r < 1 || r > 33 || ((u >> r) & 1))
+ continue;
+ if (((dirty_pre & ~dirty) >> hr) & 1)
+ emit_storereg(r, hr);
}
}
}
}
-static void alu_assemble(int i,struct regstat *i_regs)
+static void alu_assemble(int i, const struct regstat *i_regs)
{
if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
if(dops[i].rt1) {
}
}
-void imm16_assemble(int i,struct regstat *i_regs)
+static void imm16_assemble(int i, const struct regstat *i_regs)
{
if (dops[i].opcode==0x0f) { // LUI
if(dops[i].rt1) {
}
}
-void shiftimm_assemble(int i,struct regstat *i_regs)
+static void shiftimm_assemble(int i, const struct regstat *i_regs)
{
if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
{
}
#ifndef shift_assemble
-static void shift_assemble(int i,struct regstat *i_regs)
+static void shift_assemble(int i, const struct regstat *i_regs)
{
signed char s,t,shift;
if (dops[i].rt1 == 0)
return MTYPE_8000;
}
-static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
+static int get_ro_reg(const struct regstat *i_regs, int host_tempreg_free)
+{
+  // Find the host register currently mapped to ROREG (the guest RAM
+  // offset). If it isn't allocated, reload it into HOST_TEMPREG when
+  // that register is free; otherwise this is a compile-time bug.
+  int hr = get_reg(i_regs->regmap, ROREG);
+  if (hr >= 0)
+    return hr;
+  if (host_tempreg_free) {
+    host_tempreg_acquire();
+    hr = HOST_TEMPREG;
+    emit_loadreg(ROREG, hr);
+    return hr;
+  }
+  abort();
+}
+
+static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
+ int addr, int *offset_reg, int *addr_reg_override)
{
void *jaddr = NULL;
- int type=0;
- int mr=dops[i].rs1;
+ int type = 0;
+ int mr = dops[i].rs1;
+ *offset_reg = -1;
if(((smrv_strong|smrv_weak)>>mr)&1) {
type=get_ptr_mem_type(smrv[mr]);
//printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
}
}
- if(type==0)
+ if (type == 0) // need ram check
{
emit_cmpimm(addr,RAM_SIZE);
- jaddr=out;
+ jaddr = out;
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
// Hint to branch predictor that the branch is unlikely to be taken
- if(dops[i].rs1>=28)
+ if (dops[i].rs1 >= 28)
emit_jno_unlikely(0);
else
#endif
emit_jno(0);
- if(ram_offset!=0) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- addr=*addr_reg_override=HOST_TEMPREG;
- }
+ if (ram_offset != 0)
+ *offset_reg = get_ro_reg(i_regs, 0);
}
return jaddr;
static void *get_direct_memhandler(void *table, u_int addr,
enum stub_type type, uintptr_t *addr_host)
{
+ uintptr_t msb = 1ull << (sizeof(uintptr_t)*8 - 1);
uintptr_t l1, l2 = 0;
l1 = ((uintptr_t *)table)[addr>>12];
- if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
+ if (!(l1 & msb)) {
uintptr_t v = l1 << 1;
*addr_host = v + addr;
return NULL;
if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
- l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
+ l2 = ((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
else
- l2=((uintptr_t *)l1)[(addr&0xfff)/4];
- if ((l2 & (1<<31)) == 0) {
+ l2 = ((uintptr_t *)l1)[(addr&0xfff)/4];
+ if (!(l2 & msb)) {
uintptr_t v = l2 << 1;
*addr_host = v + (addr&0xfff);
return NULL;
return __builtin_ctz(free_regs);
}
-static void load_assemble(int i, const struct regstat *i_regs)
+// Emit a 32-bit load into host reg rt from the address in reg a.
+// offset_reg >= 0 selects reg+reg addressing with the RAM offset
+// register (see get_ro_reg); otherwise a plain indexed load is used.
+static void do_load_word(int a, int rt, int offset_reg)
+{
+  if (offset_reg < 0)
+    emit_readword_indexed(0, a, rt);
+  else
+    emit_ldr_dualindexed(offset_reg, a, rt);
+}
+
+// Emit a 32-bit store of host reg rt to the address in reg a plus ofs.
+// offset_reg >= 0 means reg+reg addressing with the RAM offset register
+// (see get_ro_reg), which has no immediate-offset form, so ofs is folded
+// into a first; pass preserve_a nonzero to restore a afterwards.
+static void do_store_word(int a, int ofs, int rt, int offset_reg, int preserve_a)
+{
+  if (offset_reg < 0) {
+    emit_writeword_indexed(rt, ofs, a);
+    return;
+  }
+  if (ofs != 0)
+    emit_addimm(a, ofs, a);
+  emit_str_dualindexed(offset_reg, a, rt);
+  if (ofs != 0 && preserve_a)
+    emit_addimm(a, -ofs, a);
+}
+
+// Emit a 16-bit store of host reg rt to the address in reg a plus ofs.
+// Mirrors do_store_word: with offset_reg >= 0 (RAM offset register,
+// reg+reg addressing) ofs is folded into a; preserve_a restores a after.
+static void do_store_hword(int a, int ofs, int rt, int offset_reg, int preserve_a)
+{
+  if (offset_reg < 0) {
+    emit_writehword_indexed(rt, ofs, a);
+    return;
+  }
+  if (ofs != 0)
+    emit_addimm(a, ofs, a);
+  emit_strh_dualindexed(offset_reg, a, rt);
+  if (ofs != 0 && preserve_a)
+    emit_addimm(a, -ofs, a);
+}
+
+// Emit an 8-bit store of host reg rt to the address in reg a
+// (reg+reg form with the RAM offset register when offset_reg >= 0).
+static void do_store_byte(int a, int rt, int offset_reg)
+{
+  if (offset_reg < 0)
+    emit_writebyte_indexed(rt, 0, a);
+  else
+    emit_strb_dualindexed(offset_reg, a, rt);
+}
+
+static void load_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rt1);
s=get_reg(i_regs->regmap,dops[i].rs1);
// could be FIFO, must perform the read
// ||dummy read
assem_debug("(forced read)\n");
- tl=get_reg(i_regs->regmap,-1);
+ tl=get_reg_temp(i_regs->regmap);
assert(tl>=0);
}
if(offset||s<0||c) addr=tl;
else addr=s;
- //if(tl<0) tl=get_reg(i_regs->regmap,-1);
+ //if(tl<0) tl=get_reg_temp(i_regs->regmap);
if(tl>=0) {
//printf("load_assemble: c=%d\n",c);
//if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
if(dops[i].rs1!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
#endif
{
- jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
+ &offset_reg, &fastio_reg_override);
}
}
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
int dummy=(dops[i].rt1==0)||(tl!=get_reg(i_regs->regmap,dops[i].rt1)); // ignore loads to r0 and unneeded reg
- if (dops[i].opcode==0x20) { // LB
+ switch (dops[i].opcode) {
+ case 0x20: // LB
if(!c||memtarget) {
if(!dummy) {
- {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
- emit_movsbl_indexed(x,a,tl);
- }
+ if (offset_reg >= 0)
+ emit_ldrsb_dualindexed(offset_reg, a, tl);
+ else
+ emit_movsbl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x21) { // LH
+ inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x21: // LH
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_movswl_indexed(x,a,tl);
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ if (offset_reg >= 0)
+ emit_ldrsh_dualindexed(offset_reg, a, tl);
+ else
+ emit_movswl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x23) { // LW
+ inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x23: // LW
if(!c||memtarget) {
if(!dummy) {
- int a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,tl);
+ int a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, tl, offset_reg);
}
if(jaddr)
- add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x24) { // LBU
+ inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x24: // LBU
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
+ int a = tl;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
- emit_movzbl_indexed(x,a,tl);
+ if (offset_reg >= 0)
+ emit_ldrb_dualindexed(offset_reg, a, tl);
+ else
+ emit_movzbl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x25) { // LHU
+ inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x25: // LHU
if(!c||memtarget) {
if(!dummy) {
- int x=0,a=tl;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_movzwl_indexed(x,a,tl);
+ int a = tl;
+ if(!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ if (offset_reg >= 0)
+ emit_ldrh_dualindexed(offset_reg, a, tl);
+ else
+ emit_movzwl_indexed(0, a, tl);
}
if(jaddr)
- add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj[i],reglist);
- }
- if (dops[i].opcode==0x27) { // LWU
- assert(0);
- }
- if (dops[i].opcode==0x37) { // LD
+ inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
+ break;
+ case 0x27: // LWU
+ case 0x37: // LD
+ default:
assert(0);
}
}
- if (fastio_reg_override == HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
}
#ifndef loadlr_assemble
-static void loadlr_assemble(int i, const struct regstat *i_regs)
+static void loadlr_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl,temp,temp2,addr;
int offset;
void *jaddr=0;
int memtarget=0,c=0;
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rt1);
s=get_reg(i_regs->regmap,dops[i].rs1);
- temp=get_reg(i_regs->regmap,-1);
+ temp=get_reg_temp(i_regs->regmap);
temp2=get_reg(i_regs->regmap,FTEMP);
addr=get_reg(i_regs->regmap,AGEN1+(i&1));
assert(addr<0);
}else{
emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
}
- jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, temp2,
+ &offset_reg, &fastio_reg_override);
}
else {
- if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(temp2,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
}
if (dops[i].opcode==0x22||dops[i].opcode==0x26) { // LWL/LWR
if(!c||memtarget) {
- int a=temp2;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,temp2);
- if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
- if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
+ int a = temp2;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, temp2, offset_reg);
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
+ host_tempreg_release();
+ if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj_,reglist);
}
else
- inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
+ inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj_,reglist);
if(dops[i].rt1) {
assert(tl>=0);
emit_andimm(temp,24,temp);
}
#endif
-void store_assemble(int i, const struct regstat *i_regs)
+static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int addr,temp;
int offset;
void *jaddr=0;
- enum stub_type type;
+ enum stub_type type=0;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
temp=get_reg(i_regs->regmap,agr);
- if(temp<0) temp=get_reg(i_regs->regmap,-1);
+ if(temp<0) temp=get_reg_temp(i_regs->regmap);
offset=imm[i];
if(s>=0) {
c=(i_regs->wasconst>>s)&1;
if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
if(offset||s<0||c) addr=temp;
else addr=s;
- if(!c) {
- jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
+ if (!c) {
+ jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
+ &offset_reg, &fastio_reg_override);
}
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(addr,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
}
- if (dops[i].opcode==0x28) { // SB
+ switch (dops[i].opcode) {
+ case 0x28: // SB
if(!c||memtarget) {
- int x=0,a=temp;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writebyte_indexed(tl,x,a);
- }
- type=STOREB_STUB;
- }
- if (dops[i].opcode==0x29) { // SH
+ int a = temp;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_byte(a, tl, offset_reg);
+ }
+ type = STOREB_STUB;
+ break;
+ case 0x29: // SH
if(!c||memtarget) {
- int x=0,a=temp;
- if(!c) a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writehword_indexed(tl,x,a);
- }
- type=STOREH_STUB;
- }
- if (dops[i].opcode==0x2B) { // SW
+ int a = temp;
+ if (!c) a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_hword(a, 0, tl, offset_reg, 1);
+ }
+ type = STOREH_STUB;
+ break;
+ case 0x2B: // SW
if(!c||memtarget) {
- int a=addr;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writeword_indexed(tl,0,a);
- }
- type=STOREW_STUB;
- }
- if (dops[i].opcode==0x3F) { // SD
+ int a = addr;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_word(a, 0, tl, offset_reg, 1);
+ }
+ type = STOREW_STUB;
+ break;
+ case 0x3F: // SD
+ default:
assert(0);
- type=STORED_STUB;
}
- if(fastio_reg_override==HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
if(jaddr) {
// PCSX store handlers don't check invcode again
reglist|=1<<addr;
- add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
jaddr=0;
}
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
#else
emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[addr]);
#else
void *jaddr2 = out;
}
u_int addr_val=constmap[i][s]+offset;
if(jaddr) {
- add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
} else if(c&&!memtarget) {
- inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj[i],reglist);
+ inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj_,reglist);
}
// basic current block modification detection..
// not looking back as that should be in mips cache already
emit_movimm(start+i*4+4,0);
emit_writeword(0,&pcaddr);
emit_addimm(HOST_CCREG,2,HOST_CCREG);
- emit_far_call(get_addr_ht);
+ emit_far_call(ndrc_get_addr_ht);
emit_jmpreg(0);
}
}
}
-static void storelr_assemble(int i, const struct regstat *i_regs)
+static void storelr_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int temp;
int offset;
void *jaddr=0;
- void *case1, *case2, *case3;
+ void *case1, *case23, *case3;
void *done0, *done1, *done2;
int memtarget=0,c=0;
int agr=AGEN1+(i&1);
+ int offset_reg = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
tl=get_reg(i_regs->regmap,dops[i].rs2);
s=get_reg(i_regs->regmap,dops[i].rs1);
temp=get_reg(i_regs->regmap,agr);
- if(temp<0) temp=get_reg(i_regs->regmap,-1);
+ if(temp<0) temp=get_reg_temp(i_regs->regmap);
offset=imm[i];
if(s>=0) {
c=(i_regs->isconst>>s)&1;
emit_jmp(0);
}
}
- if(ram_offset)
- emit_addimm_no_flags(ram_offset,temp);
+ if (ram_offset)
+ offset_reg = get_ro_reg(i_regs, 0);
if (dops[i].opcode==0x2C||dops[i].opcode==0x2D) { // SDL/SDR
assert(0);
}
- emit_xorimm(temp,3,temp);
emit_testimm(temp,2);
- case2=out;
+ case23=out;
emit_jne(0);
emit_testimm(temp,1);
case1=out;
emit_jne(0);
// 0
- if (dops[i].opcode==0x2A) { // SWL
- emit_writeword_indexed(tl,0,temp);
+ if (dops[i].opcode == 0x2A) { // SWL
+ // Write msb into least significant byte
+ if (dops[i].rs2) emit_rorimm(tl, 24, tl);
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- emit_writebyte_indexed(tl,3,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write entire word
+ do_store_word(temp, 0, tl, offset_reg, 1);
}
- else
- assert(0);
- done0=out;
+ done0 = out;
emit_jmp(0);
// 1
set_jump_target(case1, out);
- if (dops[i].opcode==0x2A) { // SWL
- // Write 3 msb into three least significant bytes
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
- emit_writehword_indexed(tl,-1,temp);
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
- emit_writebyte_indexed(tl,1,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
+ if (dops[i].opcode == 0x2A) { // SWL
+ // Write two msb into two least significant bytes
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
+ do_store_hword(temp, -1, tl, offset_reg, 0);
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write two lsb into two most significant bytes
- emit_writehword_indexed(tl,1,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write 3 lsb into three most significant bytes
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
+ do_store_hword(temp, 1, tl, offset_reg, 0);
+ if (dops[i].rs2) emit_rorimm(tl, 24, tl);
}
done1=out;
emit_jmp(0);
- // 2
- set_jump_target(case2, out);
+ // 2,3
+ set_jump_target(case23, out);
emit_testimm(temp,1);
- case3=out;
+ case3 = out;
emit_jne(0);
+ // 2
if (dops[i].opcode==0x2A) { // SWL
- // Write two msb into two least significant bytes
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
- emit_writehword_indexed(tl,-2,temp);
- if(dops[i].rs2) emit_rorimm(tl,16,tl);
+ // Write 3 msb into three least significant bytes
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
+ do_store_hword(temp, -2, tl, offset_reg, 1);
+ if (dops[i].rs2) emit_rorimm(tl, 16, tl);
+ do_store_byte(temp, tl, offset_reg);
+ if (dops[i].rs2) emit_rorimm(tl, 8, tl);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write 3 lsb into three most significant bytes
- emit_writebyte_indexed(tl,-1,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
- emit_writehword_indexed(tl,0,temp);
- if(dops[i].rs2) emit_rorimm(tl,24,tl);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ // Write two lsb into two most significant bytes
+ do_store_hword(temp, 0, tl, offset_reg, 1);
}
- done2=out;
+ done2 = out;
emit_jmp(0);
// 3
set_jump_target(case3, out);
- if (dops[i].opcode==0x2A) { // SWL
- // Write msb into least significant byte
- if(dops[i].rs2) emit_rorimm(tl,24,tl);
- emit_writebyte_indexed(tl,-3,temp);
- if(dops[i].rs2) emit_rorimm(tl,8,tl);
+ if (dops[i].opcode == 0x2A) { // SWL
+ do_store_word(temp, -3, tl, offset_reg, 0);
}
- else if (dops[i].opcode==0x2E) { // SWR
- // Write entire word
- emit_writeword_indexed(tl,-3,temp);
+ else if (dops[i].opcode == 0x2E) { // SWR
+ do_store_byte(temp, tl, offset_reg);
}
set_jump_target(done0, out);
set_jump_target(done1, out);
set_jump_target(done2, out);
+ if (offset_reg == HOST_TEMPREG)
+ host_tempreg_release();
if(!c||!memtarget)
- add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
+ add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj_,reglist);
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
- emit_addimm_no_flags(-ram_offset,temp);
#if defined(HOST_IMM8)
int ir=get_reg(i_regs->regmap,INVCP);
assert(ir>=0);
#else
emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[temp]);
#else
void *jaddr2 = out;
}
}
-static void cop0_assemble(int i,struct regstat *i_regs)
+static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
if(dops[i].opcode2==0) // MFC0
{
emit_readword(&last_count,HOST_TEMPREG);
emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
emit_writeword(HOST_CCREG,&Count);
}
// What a mess. The status register (12) can enable interrupts,
if(copr==9||copr==11||copr==12||copr==13) {
emit_readword(&Count,HOST_CCREG);
emit_readword(&next_interupt,HOST_TEMPREG);
- emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_addimm(HOST_CCREG,-ccadj_,HOST_CCREG);
emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
emit_writeword(HOST_TEMPREG,&last_count);
emit_storereg(CCREG,HOST_CCREG);
emit_jeq(0);
emit_readword(&pcaddr, 0);
emit_addimm(HOST_CCREG,2,HOST_CCREG);
- emit_far_call(get_addr_ht);
+ emit_far_call(ndrc_get_addr_ht);
emit_jmpreg(0);
set_jump_target(jaddr, out);
}
}
}
-static void cop1_unusable(int i,struct regstat *i_regs)
+static void cop1_unusable(int i, const struct regstat *i_regs)
{
// XXX: should just just do the exception instead
//if(!cop1_usable)
}
}
-static void cop1_assemble(int i,struct regstat *i_regs)
+// COP1 instructions: delegate to the coprocessor-unusable handler.
+static void cop1_assemble(int i, const struct regstat *i_regs)
{
  cop1_unusable(i, i_regs);
}
-static void c1ls_assemble(int i,struct regstat *i_regs)
+// COP1 loads/stores: delegate to the coprocessor-unusable handler.
+static void c1ls_assemble(int i, const struct regstat *i_regs)
{
  cop1_unusable(i, i_regs);
}
wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
emit_movimm(start+(i-ds)*4,EAX); // Get PC
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
+ emit_addimm(HOST_CCREG,ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
emit_far_jump(ds?fp_exception_ds:fp_exception);
}
emit_movimm(stall, 0);
else
emit_mov(HOST_TEMPREG, 0);
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_addimm(HOST_CCREG, ccadj[i], 1);
emit_far_call(log_gte_stall);
restore_regs(reglist);
}
//if (dops[j].is_ds) break;
if (cop2_is_stalling_op(j, &other_gte_op_cycles) || dops[j].bt)
break;
+ if (j > 0 && ccadj[j - 1] > ccadj[j])
+ break;
}
j = max(j, 0);
}
- cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+ cycles_passed = ccadj[i] - ccadj[j];
if (other_gte_op_cycles >= 0)
stall = other_gte_op_cycles - cycles_passed;
else if (cycles_passed >= 44)
#if 0 // too slow
save_regs(reglist);
emit_movimm(gte_cycletab[op], 0);
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+ emit_addimm(HOST_CCREG, ccadj[i], 1);
emit_far_call(call_gteStall);
restore_regs(reglist);
#else
host_tempreg_acquire();
emit_readword(&psxRegs.gteBusyCycle, rtmp);
- emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+ emit_addimm(rtmp, -ccadj[i], rtmp);
emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
emit_cmpimm(HOST_TEMPREG, 44);
emit_cmovb_reg(rtmp, HOST_CCREG);
for (j = i + 1; j < slen; j++) {
if (cop2_is_stalling_op(j, &other_gte_op_cycles))
break;
- if (is_jump(j)) {
+ if (dops[j].is_jump) {
// check ds
if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
j++;
if (other_gte_op_cycles >= 0)
// will handle stall when assembling that op
return;
- cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen -1)] - ccadj[i]);
+ cycles_passed = ccadj[min(j, slen -1)] - ccadj[i];
if (cycles_passed >= 44)
return;
assem_debug("; save gteBusyCycle\n");
#if 0
emit_readword(&last_count, HOST_TEMPREG);
emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
- emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG);
+ emit_addimm(HOST_TEMPREG, ccadj[i], HOST_TEMPREG);
emit_addimm(HOST_TEMPREG, gte_cycletab[op]), HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
#else
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG);
+ emit_addimm(HOST_CCREG, ccadj[i] + gte_cycletab[op], HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
#endif
host_tempreg_release();
return 1;
}
-static void multdiv_prepare_stall(int i, const struct regstat *i_regs)
+static void multdiv_prepare_stall(int i, const struct regstat *i_regs, int ccadj_)
{
int j, found = 0, c = 0;
if (HACK_ENABLED(NDHACK_NO_STALLS))
break;
if ((found = is_mflohi(j)))
break;
- if (is_jump(j)) {
+ if (dops[j].is_jump) {
// check ds
if (j + 1 < slen && (found = is_mflohi(j + 1)))
j++;
assert(c > 0);
assem_debug("; muldiv prepare stall %d\n", c);
host_tempreg_acquire();
- emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + c, HOST_TEMPREG);
+ emit_addimm(HOST_CCREG, ccadj_ + c, HOST_TEMPREG);
emit_writeword(HOST_TEMPREG, &psxRegs.muldivBusyCycle);
host_tempreg_release();
}
{
int j, known_cycles = 0;
u_int reglist = get_host_reglist(i_regs->regmap);
- int rtmp = get_reg(i_regs->regmap, -1);
+ int rtmp = get_reg_temp(i_regs->regmap);
if (rtmp < 0)
rtmp = reglist_find_free(reglist);
if (HACK_ENABLED(NDHACK_NO_STALLS))
if (!dops[i].bt) {
for (j = i - 1; j >= 0; j--) {
if (dops[j].is_ds) break;
- if (check_multdiv(j, &known_cycles) || dops[j].bt)
+ if (check_multdiv(j, &known_cycles))
break;
if (is_mflohi(j))
// already handled by this op
return;
+ if (dops[j].bt || (j > 0 && ccadj[j - 1] > ccadj[j]))
+ break;
}
j = max(j, 0);
}
if (known_cycles > 0) {
- known_cycles -= CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+ known_cycles -= ccadj[i] - ccadj[j];
assem_debug("; muldiv stall resolved %d\n", known_cycles);
if (known_cycles > 0)
emit_addimm(HOST_CCREG, known_cycles, HOST_CCREG);
assem_debug("; muldiv stall unresolved\n");
host_tempreg_acquire();
emit_readword(&psxRegs.muldivBusyCycle, rtmp);
- emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+ emit_addimm(rtmp, -ccadj[i], rtmp);
emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
emit_cmpimm(HOST_TEMPREG, 37);
emit_cmovb_reg(rtmp, HOST_CCREG);
}
}
-static void c2ls_assemble(int i, const struct regstat *i_regs)
+static void c2ls_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
int s,tl;
int ar;
void *jaddr2=NULL;
enum stub_type type;
int agr=AGEN1+(i&1);
- int fastio_reg_override=-1;
+ int offset_reg = -1;
+ int fastio_reg_override = -1;
u_int reglist=get_host_reglist(i_regs->regmap);
u_int copr=(source[i]>>16)&0x1f;
s=get_reg(i_regs->regmap,dops[i].rs1);
// get the address
if (dops[i].opcode==0x3a) { // SWC2
ar=get_reg(i_regs->regmap,agr);
- if(ar<0) ar=get_reg(i_regs->regmap,-1);
+ if(ar<0) ar=get_reg_temp(i_regs->regmap);
reglist|=1<<ar;
} else { // LWC2
ar=tl;
}
else {
if(!c) {
- jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
- }
- else if(ram_offset&&memtarget) {
- host_tempreg_acquire();
- emit_addimm(ar,ram_offset,HOST_TEMPREG);
- fastio_reg_override=HOST_TEMPREG;
- }
- if (dops[i].opcode==0x32) { // LWC2
- int a=ar;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_readword_indexed(0,a,tl);
+ jaddr2 = emit_fastpath_cmp_jump(i, i_regs, ar,
+ &offset_reg, &fastio_reg_override);
+ }
+ else if (ram_offset && memtarget) {
+ offset_reg = get_ro_reg(i_regs, 0);
+ }
+ switch (dops[i].opcode) {
+ case 0x32: { // LWC2
+ int a = ar;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_load_word(a, tl, offset_reg);
+ break;
}
- if (dops[i].opcode==0x3a) { // SWC2
+ case 0x3a: { // SWC2
#ifdef DESTRUCTIVE_SHIFT
if(!offset&&!c&&s>=0) emit_mov(s,ar);
#endif
- int a=ar;
- if(fastio_reg_override>=0) a=fastio_reg_override;
- emit_writeword_indexed(tl,0,a);
+ int a = ar;
+ if (fastio_reg_override >= 0)
+ a = fastio_reg_override;
+ do_store_word(a, 0, tl, offset_reg, 1);
+ break;
+ }
+ default:
+ assert(0);
}
}
- if(fastio_reg_override==HOST_TEMPREG)
+ if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
host_tempreg_release();
if(jaddr2)
- add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
+ add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
if(dops[i].opcode==0x3a) // SWC2
if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
#if defined(HOST_IMM8)
#else
emit_cmpmem_indexedsr12_imm(invalid_code,ar,1);
#endif
- #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ #ifdef INVALIDATE_USE_COND_CALL
emit_callne(invalidate_addr_reg[ar]);
#else
void *jaddr3 = out;
static void cop2_assemble(int i, const struct regstat *i_regs)
{
u_int copr = (source[i]>>11) & 0x1f;
- signed char temp = get_reg(i_regs->regmap, -1);
+ signed char temp = get_reg_temp(i_regs->regmap);
if (!HACK_ENABLED(NDHACK_NO_STALLS)) {
u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), temp, -1);
int cc=get_reg(i_regmap,CCREG);
if(cc<0)
emit_loadreg(CCREG,2);
- emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+ emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
- emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+ emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
if(cc<0)
emit_storereg(CCREG,2);
restore_regs(reglist);
}
#endif
-static void mov_assemble(int i,struct regstat *i_regs)
+static void mov_assemble(int i, const struct regstat *i_regs)
{
//if(dops[i].opcode2==0x10||dops[i].opcode2==0x12) { // MFHI/MFLO
//if(dops[i].opcode2==0x11||dops[i].opcode2==0x13) { // MTHI/MTLO
}
// call interpreter, exception handler, things that change pc/regs/cycles ...
-static void call_c_cpu_handler(int i, const struct regstat *i_regs, u_int pc, void *func)
+static void call_c_cpu_handler(int i, const struct regstat *i_regs, int ccadj_, u_int pc, void *func)
{
signed char ccreg=get_reg(i_regs->regmap,CCREG);
assert(ccreg==HOST_CCREG);
emit_movimm(pc,3); // Get PC
emit_readword(&last_count,2);
emit_writeword(3,&psxRegs.pc);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
+ emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
emit_add(2,HOST_CCREG,2);
emit_writeword(2,&psxRegs.cycle);
emit_far_call(func);
emit_far_jump(jump_to_new_pc);
}
-static void syscall_assemble(int i,struct regstat *i_regs)
+// SYSCALL/BREAK: load pc into the arg reg, account the cycle count, then
+// tail-jump to the matching handler (separate delay-slot variants).
+static void syscall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
- emit_movimm(0x20,0); // cause code
- emit_movimm(0,1); // not in delay slot
- call_c_cpu_handler(i,i_regs,start+i*4,psxException);
+ // 'break' tends to be littered around to catch things like
+ // division by 0 and is almost never executed, so don't emit much code here
+ void *func = (dops[i].opcode2 == 0x0C)
+ ? (is_delayslot ? jump_syscall_ds : jump_syscall)
+ : (is_delayslot ? jump_break_ds : jump_break);
+ assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
+ emit_movimm(start + i*4, 2); // pc
+ emit_addimm(HOST_CCREG, ccadj_ + CLOCK_ADJUST(1), HOST_CCREG);
+ emit_far_jump(func);
}
-static void hlecall_assemble(int i,struct regstat *i_regs)
+// HLE BIOS call: look up the handler for the HLE code embedded in the
+// instruction word and invoke it through the C cpu-handler trampoline.
+static void hlecall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
 void *hlefunc = psxNULL;
 uint32_t hleCode = source[i] & 0x03ffffff;
 if (hleCode < ARRAY_SIZE(psxHLEt))
 hlefunc = psxHLEt[hleCode];
- call_c_cpu_handler(i,i_regs,start+i*4+4,hlefunc);
+ call_c_cpu_handler(i, i_regs, ccadj_, start + i*4+4, hlefunc);
}
-static void intcall_assemble(int i,struct regstat *i_regs)
+// Hand this instruction to the interpreter (execI) via the C cpu-handler.
+static void intcall_assemble(int i, const struct regstat *i_regs, int ccadj_)
{
- call_c_cpu_handler(i,i_regs,start+i*4,execI);
+ call_c_cpu_handler(i, i_regs, ccadj_, start + i*4, execI);
}
static void speculate_mov(int rs,int rt)
#endif
}
-static void ds_assemble(int i,struct regstat *i_regs)
+static void ujump_assemble(int i, const struct regstat *i_regs);
+static void rjump_assemble(int i, const struct regstat *i_regs);
+static void cjump_assemble(int i, const struct regstat *i_regs);
+static void sjump_assemble(int i, const struct regstat *i_regs);
+
+// Assemble instruction i; returns nonzero when it is a branch whose
+// delay slot still needs to be assembled by the caller.
+static int assemble(int i, const struct regstat *i_regs, int ccadj_)
{
- speculate_register_values(i);
- is_delayslot=1;
- switch(dops[i].itype) {
+ int ds = 0;
+ switch (dops[i].itype) {
 case ALU:
- alu_assemble(i,i_regs);break;
+ alu_assemble(i, i_regs);
+ break;
 case IMM16:
- imm16_assemble(i,i_regs);break;
+ imm16_assemble(i, i_regs);
+ break;
 case SHIFT:
- shift_assemble(i,i_regs);break;
+ shift_assemble(i, i_regs);
+ break;
 case SHIFTIMM:
- shiftimm_assemble(i,i_regs);break;
+ shiftimm_assemble(i, i_regs);
+ break;
 case LOAD:
- load_assemble(i,i_regs);break;
+ load_assemble(i, i_regs, ccadj_);
+ break;
 case LOADLR:
- loadlr_assemble(i,i_regs);break;
+ loadlr_assemble(i, i_regs, ccadj_);
+ break;
 case STORE:
- store_assemble(i,i_regs);break;
+ store_assemble(i, i_regs, ccadj_);
+ break;
 case STORELR:
- storelr_assemble(i,i_regs);break;
+ storelr_assemble(i, i_regs, ccadj_);
+ break;
 case COP0:
- cop0_assemble(i,i_regs);break;
+ cop0_assemble(i, i_regs, ccadj_);
+ break;
 case COP1:
- cop1_assemble(i,i_regs);break;
+ cop1_assemble(i, i_regs);
+ break;
 case C1LS:
- c1ls_assemble(i,i_regs);break;
+ c1ls_assemble(i, i_regs);
+ break;
 case COP2:
- cop2_assemble(i,i_regs);break;
+ cop2_assemble(i, i_regs);
+ break;
 case C2LS:
- c2ls_assemble(i,i_regs);break;
+ c2ls_assemble(i, i_regs, ccadj_);
+ break;
 case C2OP:
- c2op_assemble(i,i_regs);break;
+ c2op_assemble(i, i_regs);
+ break;
 case MULTDIV:
- multdiv_assemble(i,i_regs);
- multdiv_prepare_stall(i,i_regs);
+ multdiv_assemble(i, i_regs);
+ multdiv_prepare_stall(i, i_regs, ccadj_);
 break;
 case MOV:
- mov_assemble(i,i_regs);break;
+ mov_assemble(i, i_regs);
+ break;
+ case SYSCALL:
+ syscall_assemble(i, i_regs, ccadj_);
+ break;
+ case HLECALL:
+ hlecall_assemble(i, i_regs, ccadj_);
+ break;
+ case INTCALL:
+ intcall_assemble(i, i_regs, ccadj_);
+ break;
+ case UJUMP:
+ ujump_assemble(i, i_regs);
+ ds = 1;
+ break;
+ case RJUMP:
+ rjump_assemble(i, i_regs);
+ ds = 1;
+ break;
+ case CJUMP:
+ cjump_assemble(i, i_regs);
+ ds = 1;
+ break;
+ case SJUMP:
+ sjump_assemble(i, i_regs);
+ ds = 1;
+ break;
+ case NOP:
+ case OTHER:
+ case NI:
+ // not handled, just skip
+ break;
+ default:
+ assert(0);
+ }
+ return ds;
+}
+
+// Assemble the branch delay slot instruction at index i; jumps/calls in
+// the delay slot are not supported and only produce a warning.
+static void ds_assemble(int i, const struct regstat *i_regs)
+{
+ speculate_register_values(i);
+ is_delayslot = 1;
+ switch (dops[i].itype) {
 case SYSCALL:
 case HLECALL:
 case INTCALL:
- case SPAN:
 case UJUMP:
 case RJUMP:
 case CJUMP:
 case SJUMP:
 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
+ break;
+ default:
+ assemble(i, i_regs, ccadj[i]);
+ }
- is_delayslot=0;
+ is_delayslot = 0;
}
// Is the branch target a valid internal jump?
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
if(pre[hr]!=entry[hr]) {
- if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
+ if(pre[hr]>=0&&pre[hr]<TEMPREG) {
int nr;
if((nr=get_reg(entry,pre[hr]))>=0) {
emit_mov(hr,nr);
// Load the specified registers
// This only loads the registers given as arguments because
// we don't want to load things that will be overwritten
-static void load_regs(signed char entry[],signed char regmap[],int rs1,int rs2)
+// Load guest reg 'rs' into its mapped host reg, unless it was already
+// there on block entry.
+static inline void load_reg(signed char entry[], signed char regmap[], int rs)
{
- int hr;
- // Load 32-bit regs
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
- if(entry[hr]!=regmap[hr]) {
- if(regmap[hr]==rs1||regmap[hr]==rs2)
- {
- if(regmap[hr]==0) {
- emit_zeroreg(hr);
- }
- else
- {
- emit_loadreg(regmap[hr],hr);
- }
- }
- }
- }
- }
+ int hr = get_reg(regmap, rs);
+ if (hr >= 0 && entry[hr] != regmap[hr])
+ emit_loadreg(regmap[hr], hr);
+}
+
+// Load the (up to two) source registers an instruction needs.
+static void load_regs(signed char entry[], signed char regmap[], int rs1, int rs2)
+{
+ load_reg(entry, regmap, rs1);
+ if (rs1 != rs2)
+ load_reg(entry, regmap, rs2);
}
// Load registers prior to the start of a loop
static void loop_preload(signed char pre[],signed char entry[])
{
 int hr;
- for(hr=0;hr<HOST_REGS;hr++) {
- if(hr!=EXCLUDE_REG) {
- if(pre[hr]!=entry[hr]) {
- if(entry[hr]>=0) {
- if(get_reg(pre,entry[hr])<0) {
- assem_debug("loop preload:\n");
- //printf("loop preload: %d\n",hr);
- if(entry[hr]==0) {
- emit_zeroreg(hr);
- }
- else if(entry[hr]<TEMPREG)
- {
- emit_loadreg(entry[hr],hr);
- }
- else if(entry[hr]-64<TEMPREG)
- {
- emit_loadreg(entry[hr],hr);
- }
- }
- }
- }
+ for (hr = 0; hr < HOST_REGS; hr++) {
+ int r = entry[hr];
+ if (r >= 0 && pre[hr] != r && get_reg(pre, r) < 0) {
+ assem_debug("loop preload:\n");
+ // skip temporaries (TEMPREG and up) — only guest regs are reloaded
+ if (r < TEMPREG)
+ emit_loadreg(r, hr);
 }
 }
}
// Generate address for load/store instruction
// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
-void address_generation(int i,struct regstat *i_regs,signed char entry[])
+static void address_generation(int i, const struct regstat *i_regs, signed char entry[])
{
- if(dops[i].itype==LOAD||dops[i].itype==LOADLR||dops[i].itype==STORE||dops[i].itype==STORELR||dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if (dops[i].is_load || dops[i].is_store) {
int ra=-1;
int agr=AGEN1+(i&1);
if(dops[i].itype==LOAD) {
ra=get_reg(i_regs->regmap,dops[i].rt1);
- if(ra<0) ra=get_reg(i_regs->regmap,-1);
+ if(ra<0) ra=get_reg_temp(i_regs->regmap);
assert(ra>=0);
}
if(dops[i].itype==LOADLR) {
}
if(dops[i].itype==STORE||dops[i].itype==STORELR) {
ra=get_reg(i_regs->regmap,agr);
- if(ra<0) ra=get_reg(i_regs->regmap,-1);
+ if(ra<0) ra=get_reg_temp(i_regs->regmap);
}
- if(dops[i].itype==C1LS||dops[i].itype==C2LS) {
+ if(dops[i].itype==C2LS) {
if ((dops[i].opcode&0x3b)==0x31||(dops[i].opcode&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
ra=get_reg(i_regs->regmap,FTEMP);
else { // SWC1/SDC1/SWC2/SDC2
ra=get_reg(i_regs->regmap,agr);
- if(ra<0) ra=get_reg(i_regs->regmap,-1);
+ if(ra<0) ra=get_reg_temp(i_regs->regmap);
}
}
int rs=get_reg(i_regs->regmap,dops[i].rs1);
}
}
// Preload constants for next instruction
- if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR||dops[i+1].itype==C1LS||dops[i+1].itype==C2LS) {
+ if (dops[i+1].is_load || dops[i+1].is_store) {
int agr,ra;
// Actual address
agr=AGEN1+((i+1)&1);
i++;
}
if(i<slen-1) {
- if(dops[i].itype==UJUMP||dops[i].itype==RJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP) {
+ if (dops[i].is_jump) {
*value=constmap[i][hr];
return 1;
}
if(!dops[i+1].bt) {
- if(dops[i+1].itype==UJUMP||dops[i+1].itype==RJUMP||dops[i+1].itype==CJUMP||dops[i+1].itype==SJUMP) {
+ if (dops[i+1].is_jump) {
// Load in delay slot, out-of-order execution
if(dops[i+2].itype==LOAD&&dops[i+2].rs1==reg&&dops[i+2].rt1==reg&&((regs[i+1].wasconst>>hr)&1))
{
}
}
-void load_all_consts(signed char regmap[], u_int dirty, int i)
+static void load_all_consts(const signed char regmap[], u_int dirty, int i)
{
int hr;
// Load 32-bit regs
}
// Write out all dirty registers (except cycle count)
-static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty)
+static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty)
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
// Write out dirty registers that we need to reload (pair with load_needed_regs)
// This writes the registers not written by store_regs_bt
-void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr)
+static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr)
{
int hr;
int t=(addr-start)>>2;
}
// Load all registers (except cycle count)
-void load_all_regs(signed char i_regmap[])
+static void load_all_regs(const signed char i_regmap[])
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
emit_zeroreg(hr);
}
else
- if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
+ if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
{
emit_loadreg(i_regmap[hr],hr);
}
}
// Load all current registers also needed by next instruction
-void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
+static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
{
int hr;
for(hr=0;hr<HOST_REGS;hr++) {
emit_zeroreg(hr);
}
else
- if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
+ if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
{
emit_loadreg(i_regmap[hr],hr);
}
}
// Load all regs, storing cycle count if necessary
-void load_regs_entry(int t)
+static void load_regs_entry(int t)
{
int hr;
if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
- else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
+ else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t],HOST_CCREG);
if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
emit_storereg(CCREG,HOST_CCREG);
}
}
// Store dirty registers prior to branch
-void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
+static void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
{
if(internal_branch(addr))
{
}
}
// Delay slots are not valid branch targets
- //if(t>0&&(dops[t-1].itype==RJUMP||dops[t-1].itype==UJUMP||dops[t-1].itype==CJUMP||dops[t-1].itype==SJUMP)) return 0;
+ //if(t>0&&dops[t-1].is_jump) return 0;
// Delay slots require additional processing, so do not match
if(dops[t].is_ds) return 0;
}
}
#ifdef DRC_DBG
-static void drc_dbg_emit_do_cmp(int i)
+static void drc_dbg_emit_do_cmp(int i, int ccadj_)
{
extern void do_insn_cmp();
//extern int cycle;
// write out changed consts to match the interpreter
if (i > 0 && !dops[i].bt) {
for (hr = 0; hr < HOST_REGS; hr++) {
- int reg = regs[i-1].regmap[hr];
+ int reg = regs[i].regmap_entry[hr]; // regs[i-1].regmap[hr];
if (hr == EXCLUDE_REG || reg < 0)
continue;
if (!((regs[i-1].isconst >> hr) & 1))
}
emit_movimm(start+i*4,0);
emit_writeword(0,&pcaddr);
+ int cc = get_reg(regs[i].regmap_entry, CCREG);
+ if (cc < 0)
+ emit_loadreg(CCREG, cc = 0);
+ emit_addimm(cc, ccadj_, 0);
+ emit_writeword(0, &psxRegs.cycle);
emit_far_call(do_insn_cmp);
//emit_readword(&cycle,0);
//emit_addimm(0,2,0);
assem_debug("\\\\do_insn_cmp\n");
}
#else
-#define drc_dbg_emit_do_cmp(x)
+#define drc_dbg_emit_do_cmp(x,y)
#endif
// Used when a branch jumps into the delay slot of another branch
static void ds_assemble_entry(int i)
{
- int t=(ba[i]-start)>>2;
+ int t = (ba[i] - start) >> 2;
+ int ccadj_ = -CLOCK_ADJUST(1);
if (!instr_addr[t])
instr_addr[t] = out;
assem_debug("Assemble delay slot at %x\n",ba[i]);
assem_debug("<->\n");
- drc_dbg_emit_do_cmp(t);
+ drc_dbg_emit_do_cmp(t, ccadj_);
if(regs[t].regmap_entry[HOST_CCREG]==CCREG&®s[t].regmap[HOST_CCREG]!=CCREG)
wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
load_regs(regs[t].regmap_entry,regs[t].regmap,dops[t].rs1,dops[t].rs2);
address_generation(t,®s[t],regs[t].regmap_entry);
- if(dops[t].itype==STORE||dops[t].itype==STORELR||(dops[t].opcode&0x3b)==0x39||(dops[t].opcode&0x3b)==0x3a)
- load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP);
+ if (ram_offset && (dops[t].is_load || dops[t].is_store))
+ load_reg(regs[t].regmap_entry,regs[t].regmap,ROREG);
+ if (dops[t].is_store)
+ load_reg(regs[t].regmap_entry,regs[t].regmap,INVCP);
is_delayslot=0;
- switch(dops[t].itype) {
- case ALU:
- alu_assemble(t,®s[t]);break;
- case IMM16:
- imm16_assemble(t,®s[t]);break;
- case SHIFT:
- shift_assemble(t,®s[t]);break;
- case SHIFTIMM:
- shiftimm_assemble(t,®s[t]);break;
- case LOAD:
- load_assemble(t,®s[t]);break;
- case LOADLR:
- loadlr_assemble(t,®s[t]);break;
- case STORE:
- store_assemble(t,®s[t]);break;
- case STORELR:
- storelr_assemble(t,®s[t]);break;
- case COP0:
- cop0_assemble(t,®s[t]);break;
- case COP1:
- cop1_assemble(t,®s[t]);break;
- case C1LS:
- c1ls_assemble(t,®s[t]);break;
- case COP2:
- cop2_assemble(t,®s[t]);break;
- case C2LS:
- c2ls_assemble(t,®s[t]);break;
- case C2OP:
- c2op_assemble(t,®s[t]);break;
- case MULTDIV:
- multdiv_assemble(t,®s[t]);
- multdiv_prepare_stall(i,®s[t]);
- break;
- case MOV:
- mov_assemble(t,®s[t]);break;
+ switch (dops[t].itype) {
case SYSCALL:
case HLECALL:
case INTCALL:
- case SPAN:
case UJUMP:
case RJUMP:
case CJUMP:
case SJUMP:
SysPrintf("Jump in the delay slot. This is probably a bug.\n");
+ break;
+ default:
+ assemble(t, ®s[t], ccadj_);
}
store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
emit_jmp(0);
}
-static void emit_extjump(void *addr, u_int target)
-{
- emit_extjump2(addr, target, dyna_linker);
-}
-
-static void emit_extjump_ds(void *addr, u_int target)
-{
- emit_extjump2(addr, target, dyna_linker_ds);
-}
-
// Load 2 immediates optimizing for small code size
static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
{
+ // Thin wrapper: the arch backend's emit_movimm_from() picks the encoding,
+ // presumably deriving imm2 from imm1 already in rt1 -- see platform emitter.
 emit_movimm_from(imm1,rt1,imm2,rt2);
}
-void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
+static void do_cc(int i, const signed char i_regmap[], int *adj,
+ int addr, int taken, int invert)
{
- int count;
+ int count, count_plus2;
void *jaddr;
void *idle=NULL;
int t=0;
if(internal_branch(ba[i]))
{
t=(ba[i]-start)>>2;
- if(dops[t].is_ds) *adj=-1; // Branch into delay slot adds an extra cycle
+ if(dops[t].is_ds) *adj=-CLOCK_ADJUST(1); // Branch into delay slot adds an extra cycle
else *adj=ccadj[t];
}
else
{
*adj=0;
}
- count=ccadj[i];
+ count = ccadj[i];
+ count_plus2 = count + CLOCK_ADJUST(2);
if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
// Idle loop
if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
emit_jmp(0);
}
else if(*adj==0||invert) {
- int cycles=CLOCK_ADJUST(count+2);
+ int cycles = count_plus2;
// faster loop HACK
#if 0
if (t&&*adj) {
int rel=t-i;
if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
- cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
+ cycles=*adj+count+2-*adj;
}
#endif
- emit_addimm_and_set_flags(cycles,HOST_CCREG);
- jaddr=out;
+ emit_addimm_and_set_flags(cycles, HOST_CCREG);
+ jaddr = out;
emit_jns(0);
}
else
{
- emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
- jaddr=out;
+ emit_cmpimm(HOST_CCREG, -count_plus2);
+ jaddr = out;
emit_jns(0);
}
- add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
+ add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:count_plus2,i,addr,taken,0);
}
static void do_ccstub(int n)
while(hr<HOST_REGS)
{
if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs1 &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs2 )
+ branch_regs[i].regmap[hr]!=dops[i].rs1 &&
+ branch_regs[i].regmap[hr]!=dops[i].rs2 )
{
addr=hr++;break;
}
while(hr<HOST_REGS)
{
if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs1 &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs2 )
+ branch_regs[i].regmap[hr]!=dops[i].rs1 &&
+ branch_regs[i].regmap[hr]!=dops[i].rs2 )
{
alt=hr++;break;
}
while(hr<HOST_REGS)
{
if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs1 &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rs2 )
+ branch_regs[i].regmap[hr]!=dops[i].rs1 &&
+ branch_regs[i].regmap[hr]!=dops[i].rs2 )
{
ntaddr=hr;break;
}
}
// Update cycle count
assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
- if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
+ if(stubs[n].a) emit_addimm(HOST_CCREG,(int)stubs[n].a,HOST_CCREG);
emit_far_call(cc_interrupt);
- if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
+ if(stubs[n].a) emit_addimm(HOST_CCREG,-(int)stubs[n].a,HOST_CCREG);
if(stubs[n].d==TAKEN) {
if(internal_branch(ba[i]))
load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
do_jump_vaddr(stubs[n].e);
}
-static void add_to_linker(void *addr, u_int target, int ext)
+static void add_to_linker(void *addr, u_int target, int is_internal)
{
+ // Record a pending branch fixup: the code location to patch, the guest
+ // target address, and a flag (presumably "target lies within the current
+ // block") consumed by a later linking pass. Asserts the table has room.
 assert(linkcount < ARRAY_SIZE(link_addr));
 link_addr[linkcount].addr = addr;
 link_addr[linkcount].target = target;
- link_addr[linkcount].ext = ext;
+ link_addr[linkcount].internal = is_internal;
 linkcount++;
}
}
}
-static void ujump_assemble(int i,struct regstat *i_regs)
+static void ujump_assemble(int i, const struct regstat *i_regs)
{
int ra_done=0;
if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
uint64_t bc_unneeded=branch_regs[i].u;
bc_unneeded|=1|(1LL<<dops[i].rt1);
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
+ load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
if(!ra_done&&dops[i].rt1==31)
ujump_assemble_write_ra(i);
int cc,adj;
if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
#endif
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal_branch(ba[i]))
assem_debug("branch: internal\n");
#endif
}
-static void rjump_assemble(int i,struct regstat *i_regs)
+static void rjump_assemble(int i, const struct regstat *i_regs)
{
int temp;
int rs,cc;
//do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
//if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
//assert(adj==0);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
- if(dops[i+1].itype==COP0&&(source[i+1]&0x3f)==0x10)
+ if(dops[i+1].itype==COP0 && dops[i+1].opcode2==0x10)
// special case for RFE
emit_jmp(0);
else
#endif
}
-static void cjump_assemble(int i,struct regstat *i_regs)
+static void cjump_assemble(int i, const struct regstat *i_regs)
{
- signed char *i_regmap=i_regs->regmap;
+ const signed char *i_regmap = i_regs->regmap;
int cc;
int match;
match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
bc_unneeded|=1;
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs2);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
+ load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
cc=get_reg(branch_regs[i].regmap,CCREG);
assert(cc==HOST_CCREG);
if(unconditional)
if(unconditional) {
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
}
}
else if(nop) {
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else {
void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
- if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
//printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
assert(s1l>=0);
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if (match && (!internal || !dops[(ba[i]-start)>>2].is_ds)) {
if(adj) {
- emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ emit_addimm(cc,-adj,cc);
add_to_linker(out,ba[i],internal);
}else{
emit_addnop(13);
}else
#endif
{
- if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ if(adj) emit_addimm(cc,-adj,cc);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
if(nottaken1) set_jump_target(nottaken1, out);
if(adj) {
- if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
+ if(!invert) emit_addimm(cc,adj,cc);
}
} // (!unconditional)
} // if(ooo)
else
{
// In-order execution (branch first)
- //if(dops[i].likely) printf("IOL\n");
- //else
- //printf("IOE\n");
void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
if(!unconditional&&!nop) {
//printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
// load regs
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
assem_debug("cycle count (adj)\n");
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
if(nottaken1) set_jump_target(nottaken1, out);
set_jump_target(nottaken, out);
assem_debug("2:\n");
- if(!dops[i].likely) {
- wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
- load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
- address_generation(i+1,&branch_regs[i],0);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
- ds_assemble(i+1,&branch_regs[i]);
- }
+ wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+ // load regs
+ load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
+ address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
+ load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
+ ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
- if(cc==-1&&!dops[i].likely) {
+ if (cc == -1) {
// Cycle count isn't in a register, temporarily load it then write it out
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else{
cc=get_reg(i_regmap,CCREG);
assert(cc==HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
- add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,dops[i].likely?NULLDS:NOTTAKEN,0);
+ add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
}
}
}
}
-static void sjump_assemble(int i,struct regstat *i_regs)
+static void sjump_assemble(int i, const struct regstat *i_regs)
{
- signed char *i_regmap=i_regs->regmap;
+ const signed char *i_regmap = i_regs->regmap;
int cc;
int match;
match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
- assem_debug("smatch=%d\n",match);
+ assem_debug("smatch=%d ooo=%d\n", match, dops[i].ooo);
int s1l;
int unconditional=0,nevertaken=0;
int invert=0;
bc_unneeded|=1;
wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs1);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
+ load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
if(dops[i].rt1==31) {
int rt,return_address;
rt=get_reg(branch_regs[i].regmap,31);
if(unconditional) {
do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
}
}
else if(nevertaken) {
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else {
void *nottaken = NULL;
do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
- if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
{
assert(s1l>=0);
if((dops[i].opcode2&0xf)==0) // BLTZ/BLTZAL
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
if (match && (!internal || !dops[(ba[i] - start) >> 2].is_ds)) {
if(adj) {
- emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ emit_addimm(cc,-adj,cc);
add_to_linker(out,ba[i],internal);
}else{
emit_addnop(13);
}else
#endif
{
- if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
+ if(adj) emit_addimm(cc,-adj,cc);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
}
if(adj) {
- if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
+ if(!invert) emit_addimm(cc,adj,cc);
}
} // (!unconditional)
} // if(ooo)
// load regs
load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
assem_debug("cycle count (adj)\n");
- if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
+ if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
if(internal)
assem_debug("branch: internal\n");
if(!unconditional) {
set_jump_target(nottaken, out);
assem_debug("1:\n");
- if(!dops[i].likely) {
- wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
- load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
- address_generation(i+1,&branch_regs[i],0);
- load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
- ds_assemble(i+1,&branch_regs[i]);
- }
+ wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+ load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
+ address_generation(i+1,&branch_regs[i],0);
+ if (ram_offset)
+ load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
+ load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
+ ds_assemble(i+1,&branch_regs[i]);
cc=get_reg(branch_regs[i].regmap,CCREG);
- if(cc==-1&&!dops[i].likely) {
+ if (cc == -1) {
// Cycle count isn't in a register, temporarily load it then write it out
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
void *jaddr=out;
emit_jns(0);
add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
else{
cc=get_reg(i_regmap,CCREG);
assert(cc==HOST_CCREG);
- emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
+ emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
void *jaddr=out;
emit_jns(0);
- add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,dops[i].likely?NULLDS:NOTTAKEN,0);
+ add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
}
}
}
}
-static void pagespan_assemble(int i,struct regstat *i_regs)
+static void check_regmap(signed char *regmap)
{
- int s1l=get_reg(i_regs->regmap,dops[i].rs1);
- int s2l=get_reg(i_regs->regmap,dops[i].rs2);
- void *taken = NULL;
- void *nottaken = NULL;
- int unconditional=0;
- if(dops[i].rs1==0)
- {
- s1l=s2l;
- s2l=-1;
- }
- else if(dops[i].rs2==0)
- {
- s2l=-1;
- }
- int hr=0;
- int addr=-1,alt=-1,ntaddr=-1;
- if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
- else {
- while(hr<HOST_REGS)
- {
- if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
- (i_regs->regmap[hr]&63)!=dops[i].rs1 &&
- (i_regs->regmap[hr]&63)!=dops[i].rs2 )
- {
- addr=hr++;break;
- }
- hr++;
- }
- }
- while(hr<HOST_REGS)
- {
- if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
- (i_regs->regmap[hr]&63)!=dops[i].rs1 &&
- (i_regs->regmap[hr]&63)!=dops[i].rs2 )
- {
- alt=hr++;break;
- }
- hr++;
- }
- if((dops[i].opcode&0x2E)==6) // BLEZ/BGTZ needs another register
- {
- while(hr<HOST_REGS)
- {
- if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
- (i_regs->regmap[hr]&63)!=dops[i].rs1 &&
- (i_regs->regmap[hr]&63)!=dops[i].rs2 )
- {
- ntaddr=hr;break;
- }
- hr++;
- }
- }
- assert(hr<HOST_REGS);
- if((dops[i].opcode&0x2e)==4||dops[i].opcode==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
- load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
- }
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
- if(dops[i].opcode==2) // J
- {
- unconditional=1;
- }
- if(dops[i].opcode==3) // JAL
- {
- // TODO: mini_ht
- int rt=get_reg(i_regs->regmap,31);
- emit_movimm(start+i*4+8,rt);
- unconditional=1;
+#ifndef NDEBUG
+ int i,j;
+ for (i = 0; i < HOST_REGS; i++) {
+ if (regmap[i] < 0)
+ continue;
+ for (j = i + 1; j < HOST_REGS; j++)
+ assert(regmap[i] != regmap[j]);
}
- if(dops[i].opcode==0&&(dops[i].opcode2&0x3E)==8) // JR/JALR
- {
- emit_mov(s1l,addr);
- if(dops[i].opcode2==9) // JALR
- {
- int rt=get_reg(i_regs->regmap,dops[i].rt1);
- emit_movimm(start+i*4+8,rt);
- }
+#endif
+}
+
+#ifdef DISASM
+#include <inttypes.h>
+static char insn[MAXBLOCK][10];
+
+#define set_mnemonic(i_, n_) \
+ strcpy(insn[i_], n_)
+
+void print_regmap(const char *name, const signed char *regmap)
+{
+ // Debug aid: prints `name` followed by one " rN=$G " field per host
+ // register, where $G is the guest reg mapped into host reg N (blank when
+ // unmapped), then a newline.
+ char buf[5];
+ int i, l;
+ fputs(name, stdout);
+ for (i = 0; i < HOST_REGS; i++) {
+  l = 0;
+  if (regmap[i] >= 0)
+   l = snprintf(buf, sizeof(buf), "$%d", regmap[i]);
+  // pad with spaces to a fixed 3-char field so columns line up
+  for (; l < 3; l++)
+   buf[l] = ' ';
+  buf[l] = 0;
+  printf(" r%d=%s", i, buf);
 }
-  if((dops[i].opcode&0x3f)==4) // BEQ
-  {
-   if(dops[i].rs1==dops[i].rs2)
-   {
-    unconditional=1;
-   }
-   else
-   #ifdef HAVE_CMOV_IMM
-   if(1) {
-    if(s2l>=0) emit_cmp(s1l,s2l);
-    else emit_test(s1l,s1l);
-    emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
+ fputs("\n", stdout);
+}
+
+ /* disassembly */
+void disassemble_inst(int i)
+{
+ if (dops[i].bt) printf("*"); else printf(" ");
+ switch(dops[i].itype) {
+ case UJUMP:
+ printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
+ case CJUMP:
+ printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
+ case SJUMP:
+ printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
+ case RJUMP:
+ if (dops[i].opcode==0x9&&dops[i].rt1!=31)
+ printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1);
+ else
+ printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
+ break;
+ case IMM16:
+ if(dops[i].opcode==0xf) //LUI
+ printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],dops[i].rt1,imm[i]&0xffff);
+ else
+ printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
+ break;
+ case LOAD:
+ case LOADLR:
+ printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
+ break;
+ case STORE:
+ case STORELR:
+ printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rs2,dops[i].rs1,imm[i]);
+ break;
+ case ALU:
+ case SHIFT:
+ printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,dops[i].rs2);
+ break;
+ case MULTDIV:
+ printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2);
+ break;
+ case SHIFTIMM:
+ printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
+ break;
+ case MOV:
+ if((dops[i].opcode2&0x1d)==0x10)
+ printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rt1);
+ else if((dops[i].opcode2&0x1d)==0x11)
+ printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
+ else
+ printf (" %x: %s\n",start+i*4,insn[i]);
+ break;
+ case COP0:
+ if(dops[i].opcode2==0)
+ printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC0
+ else if(dops[i].opcode2==4)
+ printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC0
+ else printf (" %x: %s\n",start+i*4,insn[i]);
+ break;
+ case COP1:
+ if(dops[i].opcode2<3)
+ printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC1
+ else if(dops[i].opcode2>3)
+ printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC1
+ else printf (" %x: %s\n",start+i*4,insn[i]);
+ break;
+ case COP2:
+ if(dops[i].opcode2<3)
+ printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC2
+ else if(dops[i].opcode2>3)
+ printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC2
+ else printf (" %x: %s\n",start+i*4,insn[i]);
+ break;
+ case C1LS:
+ printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
+ break;
+ case C2LS:
+ printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
+ break;
+ case INTCALL:
+ printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
+ break;
+ default:
+ //printf (" %s %8x\n",insn[i],source[i]);
+ printf (" %x: %s\n",start+i*4,insn[i]);
}
- else
- #endif
- {
- assert(s1l>=0);
- emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
- if(s2l>=0) emit_cmp(s1l,s2l);
- else emit_test(s1l,s1l);
- emit_cmovne_reg(alt,addr);
+ return;
+ printf("D: %"PRIu64" WD: %"PRIu64" U: %"PRIu64"\n",
+ regs[i].dirty, regs[i].wasdirty, unneeded_reg[i]);
+ print_regmap("pre: ", regmap_pre[i]);
+ print_regmap("entry: ", regs[i].regmap_entry);
+ print_regmap("map: ", regs[i].regmap);
+ if (dops[i].is_jump) {
+ print_regmap("bentry:", branch_regs[i].regmap_entry);
+ print_regmap("bmap: ", branch_regs[i].regmap);
}
- }
- if((dops[i].opcode&0x3f)==5) // BNE
- {
- #ifdef HAVE_CMOV_IMM
- if(s2l>=0) emit_cmp(s1l,s2l);
- else emit_test(s1l,s1l);
- emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
- #else
- assert(s1l>=0);
- emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
- if(s2l>=0) emit_cmp(s1l,s2l);
- else emit_test(s1l,s1l);
- emit_cmovne_reg(alt,addr);
- #endif
- }
- if((dops[i].opcode&0x3f)==0x14) // BEQL
- {
- if(s2l>=0) emit_cmp(s1l,s2l);
- else emit_test(s1l,s1l);
- if(nottaken) set_jump_target(nottaken, out);
- nottaken=out;
- emit_jne(0);
- }
- if((dops[i].opcode&0x3f)==0x15) // BNEL
- {
- if(s2l>=0) emit_cmp(s1l,s2l);
- else emit_test(s1l,s1l);
- nottaken=out;
- emit_jeq(0);
- if(taken) set_jump_target(taken, out);
- }
- if((dops[i].opcode&0x3f)==6) // BLEZ
- {
- emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
- emit_cmpimm(s1l,1);
- emit_cmovl_reg(alt,addr);
- }
- if((dops[i].opcode&0x3f)==7) // BGTZ
- {
- emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
- emit_cmpimm(s1l,1);
- emit_cmovl_reg(ntaddr,addr);
- }
- if((dops[i].opcode&0x3f)==0x16) // BLEZL
- {
- assert((dops[i].opcode&0x3f)!=0x16);
- }
- if((dops[i].opcode&0x3f)==0x17) // BGTZL
+}
+#else
+#define set_mnemonic(i_, n_)
+static void disassemble_inst(int i) {}
+#endif // DISASM
+
+#define DRC_TEST_VAL 0x74657374
+
+static noinline void new_dynarec_test(void)
+{
+ int (*testfunc)(void);
+ void *beginning;
+ int ret[2];
+ size_t i;
+
+ // check structure linkage
+ if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
{
- assert((dops[i].opcode&0x3f)!=0x17);
+ SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
}
- assert(dops[i].opcode!=1); // BLTZ/BGEZ
- //FIXME: Check CSREG
- if(dops[i].opcode==0x11 && dops[i].opcode2==0x08 ) {
- if((source[i]&0x30000)==0) // BC1F
- {
- emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
- emit_testimm(s1l,0x800000);
- emit_cmovne_reg(alt,addr);
- }
- if((source[i]&0x30000)==0x10000) // BC1T
- {
- emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
- emit_testimm(s1l,0x800000);
- emit_cmovne_reg(alt,addr);
- }
- if((source[i]&0x30000)==0x20000) // BC1FL
- {
- emit_testimm(s1l,0x800000);
- nottaken=out;
- emit_jne(0);
- }
- if((source[i]&0x30000)==0x30000) // BC1TL
- {
- emit_testimm(s1l,0x800000);
- nottaken=out;
- emit_jeq(0);
- }
- }
+ SysPrintf("(%p) testing if we can run recompiled code @%p...\n",
+ new_dynarec_test, out);
+ ((volatile u_int *)NDRC_WRITE_OFFSET(out))[0]++; // make the cache dirty
- assert(i_regs->regmap[HOST_CCREG]==CCREG);
- wb_dirtys(regs[i].regmap,regs[i].dirty);
- if(dops[i].likely||unconditional)
- {
- emit_movimm(ba[i],HOST_BTREG);
+ for (i = 0; i < ARRAY_SIZE(ret); i++) {
+ out = ndrc->translation_cache;
+ beginning = start_block();
+ emit_movimm(DRC_TEST_VAL + i, 0); // test
+ emit_ret();
+ literal_pool(0);
+ end_block(beginning);
+ testfunc = beginning;
+ ret[i] = testfunc();
}
- else if(addr!=HOST_BTREG)
- {
- emit_mov(addr,HOST_BTREG);
+
+ if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
+ SysPrintf("test passed.\n");
+ else
+ SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
+ out = ndrc->translation_cache;
+}
+
+// clear the state completely, instead of just marking
+// things invalid like invalidate_all_pages() does
+void new_dynarec_clear_full(void)
+{
+ int n;
+ out = ndrc->translation_cache;
+ memset(invalid_code,1,sizeof(invalid_code));
+ memset(hash_table,0xff,sizeof(hash_table));
+ memset(mini_ht,-1,sizeof(mini_ht));
+ memset(shadow,0,sizeof(shadow));
+ copy=shadow;
+ expirep = EXPIRITY_OFFSET;
+ pending_exception=0;
+ literalcount=0;
+ stop_after_jal=0;
+ inv_code_start=inv_code_end=~0;
+ hack_addr=0;
+ f1_hack=0;
+ for (n = 0; n < ARRAY_SIZE(blocks); n++)
+ blocks_clear(&blocks[n]);
+ for (n = 0; n < ARRAY_SIZE(jumps); n++) {
+ free(jumps[n]);
+ jumps[n] = NULL;
}
- void *branch_addr=out;
- emit_jmp(0);
- int target_addr=start+i*4+5;
- void *stub=out;
- void *compiled_target_addr=check_addr(target_addr);
- emit_extjump_ds(branch_addr, target_addr);
- if(compiled_target_addr) {
- set_jump_target(branch_addr, compiled_target_addr);
- add_jump_out(target_addr,stub);
- }
- else set_jump_target(branch_addr, stub);
- if(dops[i].likely) {
- // Not-taken path
- set_jump_target(nottaken, out);
- wb_dirtys(regs[i].regmap,regs[i].dirty);
- void *branch_addr=out;
- emit_jmp(0);
- int target_addr=start+i*4+8;
- void *stub=out;
- void *compiled_target_addr=check_addr(target_addr);
- emit_extjump_ds(branch_addr, target_addr);
- if(compiled_target_addr) {
- set_jump_target(branch_addr, compiled_target_addr);
- add_jump_out(target_addr,stub);
- }
- else set_jump_target(branch_addr, stub);
- }
-}
-
-// Assemble the delay slot for the above
-static void pagespan_ds()
-{
- assem_debug("initial delay slot:\n");
- u_int vaddr=start+1;
- u_int page=get_page(vaddr);
- u_int vpage=get_vpage(vaddr);
- ll_add(jump_dirty+vpage,vaddr,(void *)out);
- do_dirty_stub_ds(slen*4);
- ll_add(jump_in+page,vaddr,(void *)out);
- assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
- if(regs[0].regmap[HOST_CCREG]!=CCREG)
- wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty);
- if(regs[0].regmap[HOST_BTREG]!=BTREG)
- emit_writeword(HOST_BTREG,&branch_target);
- load_regs(regs[0].regmap_entry,regs[0].regmap,dops[0].rs1,dops[0].rs2);
- address_generation(0,&regs[0],regs[0].regmap_entry);
- if(dops[0].itype==STORE||dops[0].itype==STORELR||(dops[0].opcode&0x3b)==0x39||(dops[0].opcode&0x3b)==0x3a)
- load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
- is_delayslot=0;
- switch(dops[0].itype) {
- case ALU:
- alu_assemble(0,&regs[0]);break;
- case IMM16:
- imm16_assemble(0,&regs[0]);break;
- case SHIFT:
- shift_assemble(0,&regs[0]);break;
- case SHIFTIMM:
- shiftimm_assemble(0,&regs[0]);break;
- case LOAD:
- load_assemble(0,&regs[0]);break;
- case LOADLR:
- loadlr_assemble(0,&regs[0]);break;
- case STORE:
- store_assemble(0,&regs[0]);break;
- case STORELR:
- storelr_assemble(0,&regs[0]);break;
- case COP0:
- cop0_assemble(0,&regs[0]);break;
- case COP1:
- cop1_assemble(0,&regs[0]);break;
- case C1LS:
- c1ls_assemble(0,&regs[0]);break;
- case COP2:
- cop2_assemble(0,&regs[0]);break;
- case C2LS:
- c2ls_assemble(0,&regs[0]);break;
- case C2OP:
- c2op_assemble(0,&regs[0]);break;
- case MULTDIV:
- multdiv_assemble(0,&regs[0]);
- multdiv_prepare_stall(0,&regs[0]);
- break;
- case MOV:
- mov_assemble(0,&regs[0]);break;
- case SYSCALL:
- case HLECALL:
- case INTCALL:
- case SPAN:
- case UJUMP:
- case RJUMP:
- case CJUMP:
- case SJUMP:
- SysPrintf("Jump in the delay slot. This is probably a bug.\n");
- }
- int btaddr=get_reg(regs[0].regmap,BTREG);
- if(btaddr<0) {
- btaddr=get_reg(regs[0].regmap,-1);
- emit_readword(&branch_target,btaddr);
- }
- assert(btaddr!=HOST_CCREG);
- if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
-#ifdef HOST_IMM8
- host_tempreg_acquire();
- emit_movimm(start+4,HOST_TEMPREG);
- emit_cmp(btaddr,HOST_TEMPREG);
- host_tempreg_release();
-#else
- emit_cmpimm(btaddr,start+4);
-#endif
- void *branch = out;
- emit_jeq(0);
- store_regs_bt(regs[0].regmap,regs[0].dirty,-1);
- do_jump_vaddr(btaddr);
- set_jump_target(branch, out);
- store_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
- load_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
-}
-
-// Basic liveness analysis for MIPS registers
-void unneeded_registers(int istart,int iend,int r)
-{
- int i;
- uint64_t u,gte_u,b,gte_b;
- uint64_t temp_u,temp_gte_u=0;
- uint64_t gte_u_unknown=0;
- if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
- gte_u_unknown=~0ll;
- if(iend==slen-1) {
- u=1;
- gte_u=gte_u_unknown;
- }else{
- //u=unneeded_reg[iend+1];
- u=1;
- gte_u=gte_unneeded[iend+1];
- }
-
- for (i=iend;i>=istart;i--)
- {
- //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
- {
- // If subroutine call, flag return address as a possible branch target
- if(dops[i].rt1==31 && i<slen-2) dops[i+2].bt=1;
-
- if(ba[i]<start || ba[i]>=(start+slen*4))
- {
- // Branch out of this block, flush all regs
- u=1;
- gte_u=gte_u_unknown;
- branch_unneeded_reg[i]=u;
- // Merge in delay slot
- u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
- u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
- u|=1;
- gte_u|=gte_rt[i+1];
- gte_u&=~gte_rs[i+1];
- // If branch is "likely" (and conditional)
- // then we skip the delay slot on the fall-thru path
- if(dops[i].likely) {
- if(i<slen-1) {
- u&=unneeded_reg[i+2];
- gte_u&=gte_unneeded[i+2];
- }
- else
- {
- u=1;
- gte_u=gte_u_unknown;
- }
- }
- }
- else
- {
- // Internal branch, flag target
- dops[(ba[i]-start)>>2].bt=1;
- if(ba[i]<=start+i*4) {
- // Backward branch
- if(is_ujump(i))
- {
- // Unconditional branch
- temp_u=1;
- temp_gte_u=0;
- } else {
- // Conditional branch (not taken case)
- temp_u=unneeded_reg[i+2];
- temp_gte_u&=gte_unneeded[i+2];
- }
- // Merge in delay slot
- temp_u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
- temp_u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
- temp_u|=1;
- temp_gte_u|=gte_rt[i+1];
- temp_gte_u&=~gte_rs[i+1];
- // If branch is "likely" (and conditional)
- // then we skip the delay slot on the fall-thru path
- if(dops[i].likely) {
- if(i<slen-1) {
- temp_u&=unneeded_reg[i+2];
- temp_gte_u&=gte_unneeded[i+2];
- }
- else
- {
- temp_u=1;
- temp_gte_u=gte_u_unknown;
- }
- }
- temp_u|=(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2);
- temp_u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
- temp_u|=1;
- temp_gte_u|=gte_rt[i];
- temp_gte_u&=~gte_rs[i];
- unneeded_reg[i]=temp_u;
- gte_unneeded[i]=temp_gte_u;
- // Only go three levels deep. This recursion can take an
- // excessive amount of time if there are a lot of nested loops.
- if(r<2) {
- unneeded_registers((ba[i]-start)>>2,i-1,r+1);
- }else{
- unneeded_reg[(ba[i]-start)>>2]=1;
- gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
- }
- } /*else*/ if(1) {
- if (is_ujump(i))
- {
- // Unconditional branch
- u=unneeded_reg[(ba[i]-start)>>2];
- gte_u=gte_unneeded[(ba[i]-start)>>2];
- branch_unneeded_reg[i]=u;
- // Merge in delay slot
- u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
- u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
- u|=1;
- gte_u|=gte_rt[i+1];
- gte_u&=~gte_rs[i+1];
- } else {
- // Conditional branch
- b=unneeded_reg[(ba[i]-start)>>2];
- gte_b=gte_unneeded[(ba[i]-start)>>2];
- branch_unneeded_reg[i]=b;
- // Branch delay slot
- b|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
- b&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
- b|=1;
- gte_b|=gte_rt[i+1];
- gte_b&=~gte_rs[i+1];
- // If branch is "likely" then we skip the
- // delay slot on the fall-thru path
- if(dops[i].likely) {
- u=b;
- gte_u=gte_b;
- if(i<slen-1) {
- u&=unneeded_reg[i+2];
- gte_u&=gte_unneeded[i+2];
- }
- } else {
- u&=b;
- gte_u&=gte_b;
- }
- if(i<slen-1) {
- branch_unneeded_reg[i]&=unneeded_reg[i+2];
- } else {
- branch_unneeded_reg[i]=1;
- }
- }
- }
- }
- }
- else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
- {
- // SYSCALL instruction (software interrupt)
- u=1;
- }
- else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
- {
- // ERET instruction (return from interrupt)
- u=1;
- }
- //u=1; // DEBUG
- // Written registers are unneeded
- u|=1LL<<dops[i].rt1;
- u|=1LL<<dops[i].rt2;
- gte_u|=gte_rt[i];
- // Accessed registers are needed
- u&=~(1LL<<dops[i].rs1);
- u&=~(1LL<<dops[i].rs2);
- gte_u&=~gte_rs[i];
- if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
- gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
- // Source-target dependencies
- // R0 is always unneeded
- u|=1;
- // Save it
- unneeded_reg[i]=u;
- gte_unneeded[i]=gte_u;
- /*
- printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
- printf("U:");
- int r;
- for(r=1;r<=CCREG;r++) {
- if((unneeded_reg[i]>>r)&1) {
- if(r==HIREG) printf(" HI");
- else if(r==LOREG) printf(" LO");
- else printf(" r%d",r);
- }
- }
- printf("\n");
- */
- }
-}
-
-// Write back dirty registers as soon as we will no longer modify them,
-// so that we don't end up with lots of writes at the branches.
-void clean_registers(int istart,int iend,int wr)
-{
- int i;
- int r;
- u_int will_dirty_i,will_dirty_next,temp_will_dirty;
- u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
- if(iend==slen-1) {
- will_dirty_i=will_dirty_next=0;
- wont_dirty_i=wont_dirty_next=0;
- }else{
- will_dirty_i=will_dirty_next=will_dirty[iend+1];
- wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
- }
- for (i=iend;i>=istart;i--)
- {
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
- {
- if(ba[i]<start || ba[i]>=(start+slen*4))
- {
- // Branch out of this block, flush all regs
- if (is_ujump(i))
- {
- // Unconditional branch
- will_dirty_i=0;
- wont_dirty_i=0;
- // Merge in delay slot (will dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- }
- }
- }
- else
- {
- // Conditional branch
- will_dirty_i=0;
- wont_dirty_i=wont_dirty_next;
- // Merge in delay slot (will dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(!dops[i].likely) {
- // Might not dirty if likely branch is not taken
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- }
- }
- }
- }
- // Merge in delay slot (wont dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((regs[i].regmap[r]&63)==dops[i].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) wont_dirty_i|=1<<r;
- if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) wont_dirty_i|=1<<r;
- if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
- }
- }
- if(wr) {
- #ifndef DESTRUCTIVE_WRITEBACK
- branch_regs[i].dirty&=wont_dirty_i;
- #endif
- branch_regs[i].dirty|=will_dirty_i;
- }
- }
- else
- {
- // Internal branch
- if(ba[i]<=start+i*4) {
- // Backward branch
- if (is_ujump(i))
- {
- // Unconditional branch
- temp_will_dirty=0;
- temp_wont_dirty=0;
- // Merge in delay slot (will dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
- if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt1) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
- if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
- }
- }
- } else {
- // Conditional branch (not taken case)
- temp_will_dirty=will_dirty_next;
- temp_wont_dirty=wont_dirty_next;
- // Merge in delay slot (will dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(!dops[i].likely) {
- // Will not dirty if likely branch is not taken
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) temp_will_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
- if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt1) temp_will_dirty|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt2) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) temp_will_dirty|=1<<r;
- if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
- if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
- }
- }
- }
- }
- // Merge in delay slot (wont dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((regs[i].regmap[r]&63)==dops[i].rt1) temp_wont_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) temp_wont_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) temp_wont_dirty|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) temp_wont_dirty|=1<<r;
- if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) temp_wont_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) temp_wont_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) temp_wont_dirty|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) temp_wont_dirty|=1<<r;
- if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
- }
- }
- // Deal with changed mappings
- if(i<iend) {
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(regs[i].regmap[r]!=regmap_pre[i][r]) {
- temp_will_dirty&=~(1<<r);
- temp_wont_dirty&=~(1<<r);
- if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
- temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
- temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
- } else {
- temp_will_dirty|=1<<r;
- temp_wont_dirty|=1<<r;
- }
- }
- }
- }
- }
- if(wr) {
- will_dirty[i]=temp_will_dirty;
- wont_dirty[i]=temp_wont_dirty;
- clean_registers((ba[i]-start)>>2,i-1,0);
- }else{
- // Limit recursion. It can take an excessive amount
- // of time if there are a lot of nested loops.
- will_dirty[(ba[i]-start)>>2]=0;
- wont_dirty[(ba[i]-start)>>2]=-1;
- }
- }
- /*else*/ if(1)
- {
- if (is_ujump(i))
- {
- // Unconditional branch
- will_dirty_i=0;
- wont_dirty_i=0;
- //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
- will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
- wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
- }
- if(branch_regs[i].regmap[r]>=0) {
- will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
- wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
- }
- }
- }
- //}
- // Merge in delay slot
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- }
- }
- } else {
- // Conditional branch
- will_dirty_i=will_dirty_next;
- wont_dirty_i=wont_dirty_next;
- //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- signed char target_reg=branch_regs[i].regmap[r];
- if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
- will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
- wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
- }
- else if(target_reg>=0) {
- will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
- wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
- }
- }
- }
- //}
- // Merge in delay slot
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(!dops[i].likely) {
- // Might not dirty if likely branch is not taken
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- //if((regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- }
- }
- }
- }
- // Merge in delay slot (won't dirty)
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((regs[i].regmap[r]&63)==dops[i].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i+1].rt2) wont_dirty_i|=1<<r;
- if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt1) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i].rt2) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt1) wont_dirty_i|=1<<r;
- if((branch_regs[i].regmap[r]&63)==dops[i+1].rt2) wont_dirty_i|=1<<r;
- if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
- }
- }
- if(wr) {
- #ifndef DESTRUCTIVE_WRITEBACK
- branch_regs[i].dirty&=wont_dirty_i;
- #endif
- branch_regs[i].dirty|=will_dirty_i;
- }
- }
- }
- }
- else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
- {
- // SYSCALL instruction (software interrupt)
- will_dirty_i=0;
- wont_dirty_i=0;
- }
- else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
- {
- // ERET instruction (return from interrupt)
- will_dirty_i=0;
- wont_dirty_i=0;
- }
- will_dirty_next=will_dirty_i;
- wont_dirty_next=wont_dirty_i;
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if((regs[i].regmap[r]&63)==dops[i].rt1) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
- if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i].rt2) wont_dirty_i|=1<<r;
- if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
- if(i>istart) {
- if(dops[i].itype!=RJUMP&&dops[i].itype!=UJUMP&&dops[i].itype!=CJUMP&&dops[i].itype!=SJUMP)
- {
- // Don't store a register immediately after writing it,
- // may prevent dual-issue.
- if((regs[i].regmap[r]&63)==dops[i-1].rt1) wont_dirty_i|=1<<r;
- if((regs[i].regmap[r]&63)==dops[i-1].rt2) wont_dirty_i|=1<<r;
- }
- }
- }
- }
- // Save it
- will_dirty[i]=will_dirty_i;
- wont_dirty[i]=wont_dirty_i;
- // Mark registers that won't be dirtied as not dirty
- if(wr) {
- regs[i].dirty|=will_dirty_i;
- #ifndef DESTRUCTIVE_WRITEBACK
- regs[i].dirty&=wont_dirty_i;
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
- {
- if (i < iend-1 && !is_ujump(i)) {
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
- regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
- }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
- }
- }
- }
- }
- else
- {
- if(i<iend) {
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
- regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
- }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
- }
- }
- }
- }
- #endif
- //}
- }
- // Deal with changed mappings
- temp_will_dirty=will_dirty_i;
- temp_wont_dirty=wont_dirty_i;
- for(r=0;r<HOST_REGS;r++) {
- if(r!=EXCLUDE_REG) {
- int nr;
- if(regs[i].regmap[r]==regmap_pre[i][r]) {
- if(wr) {
- #ifndef DESTRUCTIVE_WRITEBACK
- regs[i].wasdirty&=wont_dirty_i|~(1<<r);
- #endif
- regs[i].wasdirty|=will_dirty_i&(1<<r);
- }
- }
- else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
- // Register moved to a different register
- will_dirty_i&=~(1<<r);
- wont_dirty_i&=~(1<<r);
- will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
- wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
- if(wr) {
- #ifndef DESTRUCTIVE_WRITEBACK
- regs[i].wasdirty&=wont_dirty_i|~(1<<r);
- #endif
- regs[i].wasdirty|=will_dirty_i&(1<<r);
- }
- }
- else {
- will_dirty_i&=~(1<<r);
- wont_dirty_i&=~(1<<r);
- if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
- will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
- wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
- } else {
- wont_dirty_i|=1<<r;
- /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
- }
- }
- }
- }
- }
-}
-
-#ifdef DISASM
- /* disassembly */
-void disassemble_inst(int i)
-{
- if (dops[i].bt) printf("*"); else printf(" ");
- switch(dops[i].itype) {
- case UJUMP:
- printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
- case CJUMP:
- printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
- case SJUMP:
- printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
- case RJUMP:
- if (dops[i].opcode==0x9&&dops[i].rt1!=31)
- printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1);
- else
- printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
- break;
- case SPAN:
- printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,ba[i]);break;
- case IMM16:
- if(dops[i].opcode==0xf) //LUI
- printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],dops[i].rt1,imm[i]&0xffff);
- else
- printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
- break;
- case LOAD:
- case LOADLR:
- printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
- break;
- case STORE:
- case STORELR:
- printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rs2,dops[i].rs1,imm[i]);
- break;
- case ALU:
- case SHIFT:
- printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,dops[i].rs2);
- break;
- case MULTDIV:
- printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2);
- break;
- case SHIFTIMM:
- printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
- break;
- case MOV:
- if((dops[i].opcode2&0x1d)==0x10)
- printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rt1);
- else if((dops[i].opcode2&0x1d)==0x11)
- printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
- else
- printf (" %x: %s\n",start+i*4,insn[i]);
- break;
- case COP0:
- if(dops[i].opcode2==0)
- printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC0
- else if(dops[i].opcode2==4)
- printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC0
- else printf (" %x: %s\n",start+i*4,insn[i]);
- break;
- case COP1:
- if(dops[i].opcode2<3)
- printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC1
- else if(dops[i].opcode2>3)
- printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC1
- else printf (" %x: %s\n",start+i*4,insn[i]);
- break;
- case COP2:
- if(dops[i].opcode2<3)
- printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC2
- else if(dops[i].opcode2>3)
- printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC2
- else printf (" %x: %s\n",start+i*4,insn[i]);
- break;
- case C1LS:
- printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
- break;
- case C2LS:
- printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
- break;
- case INTCALL:
- printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
- break;
- default:
- //printf (" %s %8x\n",insn[i],source[i]);
- printf (" %x: %s\n",start+i*4,insn[i]);
- }
-}
-#else
-static void disassemble_inst(int i) {}
-#endif // DISASM
-
-#define DRC_TEST_VAL 0x74657374
-
-static void new_dynarec_test(void)
-{
- int (*testfunc)(void);
- void *beginning;
- int ret[2];
- size_t i;
-
- // check structure linkage
- if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
- {
- SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
- }
-
- SysPrintf("testing if we can run recompiled code...\n");
- ((volatile u_int *)out)[0]++; // make cache dirty
-
- for (i = 0; i < ARRAY_SIZE(ret); i++) {
- out = ndrc->translation_cache;
- beginning = start_block();
- emit_movimm(DRC_TEST_VAL + i, 0); // test
- emit_ret();
- literal_pool(0);
- end_block(beginning);
- testfunc = beginning;
- ret[i] = testfunc();
- }
-
- if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
- SysPrintf("test passed.\n");
- else
- SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
- out = ndrc->translation_cache;
-}
-
-// clear the state completely, instead of just marking
-// things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full(void)
-{
- int n;
- out = ndrc->translation_cache;
- memset(invalid_code,1,sizeof(invalid_code));
- memset(hash_table,0xff,sizeof(hash_table));
- memset(mini_ht,-1,sizeof(mini_ht));
- memset(restore_candidate,0,sizeof(restore_candidate));
- memset(shadow,0,sizeof(shadow));
- copy=shadow;
- expirep=16384; // Expiry pointer, +2 blocks
- pending_exception=0;
- literalcount=0;
- stop_after_jal=0;
- inv_code_start=inv_code_end=~0;
- // TLB
- for(n=0;n<4096;n++) ll_clear(jump_in+n);
- for(n=0;n<4096;n++) ll_clear(jump_out+n);
- for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
+ stat_clear(stat_blocks);
+ stat_clear(stat_links);
cycle_multiplier_old = cycle_multiplier;
new_dynarec_hacks_old = new_dynarec_hacks;
void new_dynarec_init(void)
{
- SysPrintf("Init new dynarec\n");
+ SysPrintf("Init new dynarec, ndrc size %x\n", (int)sizeof(*ndrc));
+#ifdef _3DS
+ check_rosalina();
+#endif
#ifdef BASE_ADDR_DYNAMIC
#ifdef VITA
- sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
- if (sceBlock < 0)
- SysPrintf("sceKernelAllocMemBlockForVM failed\n");
+ sceBlock = getVMBlock(); //sceKernelAllocMemBlockForVM("code", sizeof(*ndrc));
+ if (sceBlock <= 0)
+ SysPrintf("sceKernelAllocMemBlockForVM failed: %x\n", sceBlock);
int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
if (ret < 0)
- SysPrintf("sceKernelGetMemBlockBase failed\n");
+ SysPrintf("sceKernelGetMemBlockBase failed: %x\n", ret);
+ sceKernelOpenVMDomain();
+ sceClibPrintf("translation_cache = 0x%08lx\n ", (long)ndrc->translation_cache);
+ #elif defined(_MSC_VER)
+ ndrc = VirtualAlloc(NULL, sizeof(*ndrc), MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+ #elif defined(HAVE_LIBNX)
+ Result rc = jitCreate(&g_jit, sizeof(*ndrc));
+ if (R_FAILED(rc))
+ SysPrintf("jitCreate failed: %08x\n", rc);
+ SysPrintf("jitCreate: RX: %p RW: %p type: %d\n", g_jit.rx_addr, g_jit.rw_addr, g_jit.type);
+ jitTransitionToWritable(&g_jit);
+ ndrc = g_jit.rx_addr;
+ ndrc_write_ofs = (char *)g_jit.rw_addr - (char *)ndrc;
+ memset(NDRC_WRITE_OFFSET(&ndrc->tramp), 0, sizeof(ndrc->tramp));
#else
uintptr_t desired_addr = 0;
+ int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int fd = -1;
#ifdef __ELF__
extern char _end;
desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
#endif
- ndrc = mmap((void *)desired_addr, sizeof(*ndrc),
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ #ifdef TC_WRITE_OFFSET
+ // mostly for testing
+ fd = open("/dev/shm/pcsxr", O_CREAT | O_RDWR, 0600);
+ ftruncate(fd, sizeof(*ndrc));
+ void *mw = mmap(NULL, sizeof(*ndrc), PROT_READ | PROT_WRITE,
+ (flags = MAP_SHARED), fd, 0);
+ assert(mw != MAP_FAILED);
+ prot = PROT_READ | PROT_EXEC;
+ #endif
+ ndrc = mmap((void *)desired_addr, sizeof(*ndrc), prot, flags, fd, 0);
if (ndrc == MAP_FAILED) {
SysPrintf("mmap() failed: %s\n", strerror(errno));
abort();
}
+ #ifdef TC_WRITE_OFFSET
+ ndrc_write_ofs = (char *)mw - (char *)ndrc;
+ #endif
#endif
#else
#ifndef NO_WRITE_EXEC
// not all systems allow execute in data segment by default
- if (mprotect(ndrc, sizeof(ndrc->translation_cache) + sizeof(ndrc->tramp.ops),
+ // size must be 4K aligned for 3DS?
+ if (mprotect(ndrc, sizeof(*ndrc),
PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
SysPrintf("mprotect() failed: %s\n", strerror(errno));
#endif
#endif
arch_init();
new_dynarec_test();
-#ifndef RAM_FIXED
ram_offset=(uintptr_t)rdram-0x80000000;
-#endif
if (ram_offset!=0)
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
+ SysPrintf("Mapped (RAM/scrp/ROM/LUTs/TC):\n");
+ SysPrintf("%p/%p/%p/%p/%p\n", psxM, psxH, psxR, mem_rtab, out);
}
void new_dynarec_cleanup(void)
int n;
#ifdef BASE_ADDR_DYNAMIC
#ifdef VITA
- sceKernelFreeMemBlock(sceBlock);
- sceBlock = -1;
+ // sceBlock is managed by retroarch's bootstrap code
+ //sceKernelFreeMemBlock(sceBlock);
+ //sceBlock = -1;
+ #elif defined(HAVE_LIBNX)
+ jitClose(&g_jit);
+ ndrc = NULL;
#else
if (munmap(ndrc, sizeof(*ndrc)) < 0)
SysPrintf("munmap() failed\n");
+ ndrc = NULL;
#endif
#endif
- for(n=0;n<4096;n++) ll_clear(jump_in+n);
- for(n=0;n<4096;n++) ll_clear(jump_out+n);
- for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
- #ifdef ROM_COPY
- if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
- #endif
+ for (n = 0; n < ARRAY_SIZE(blocks); n++)
+ blocks_clear(&blocks[n]);
+ for (n = 0; n < ARRAY_SIZE(jumps); n++) {
+ free(jumps[n]);
+ jumps[n] = NULL;
+ }
+ stat_clear(stat_blocks);
+ stat_clear(stat_links);
+ new_dynarec_print_stats();
}
static u_int *get_source_start(u_int addr, u_int *limit)
{
- if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
- cycle_multiplier_override = 0;
-
if (addr < 0x00200000 ||
(0xa0000000 <= addr && addr < 0xa0200000))
{
// BIOS. The multiplier should be much higher as it's uncached 8bit mem,
// but timings in PCSX are too tied to the interpreter's BIAS
if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
- cycle_multiplier_override = 200;
+ cycle_multiplier_active = 200;
*limit = (addr & 0xfff00000) | 0x80000;
return (u_int *)((u_char *)psxR + (addr&0x7ffff));
int new_dynarec_save_blocks(void *save, int size)
{
-  struct savestate_block *blocks = save;
-  int maxcount = size / sizeof(blocks[0]);
+  // renamed to 'sblocks' to avoid shadowing the file-scope 'blocks' table
+  struct savestate_block *sblocks = save;
+  int maxcount = size / sizeof(sblocks[0]);
  struct savestate_block tmp_blocks[1024];
-  struct ll_entry *head;
+  struct block_info *block;
  int p, s, d, o, bcnt;
  u_int addr;
  o = 0;
-  for (p = 0; p < ARRAY_SIZE(jump_in); p++) {
+  for (p = 0; p < ARRAY_SIZE(blocks); p++) {
    bcnt = 0;
-    for (head = jump_in[p]; head != NULL; head = head->next) {
-      tmp_blocks[bcnt].addr = head->vaddr;
-      tmp_blocks[bcnt].regflags = head->reg_sv_flags;
+    // only clean (non-dirty) blocks are worth recording in the savestate
+    for (block = blocks[p]; block != NULL; block = block->next) {
+      if (block->is_dirty)
+        continue;
+      tmp_blocks[bcnt].addr = block->start;
+      tmp_blocks[bcnt].regflags = block->reg_sv_flags;
      bcnt++;
    }
    if (bcnt < 1)
    if (o + d > maxcount)
      d = maxcount - o;
-    memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
+    memcpy(&sblocks[o], tmp_blocks, d * sizeof(sblocks[0]));
    o += d;
  }
-  return o * sizeof(blocks[0]);
+  // returns number of bytes actually written into 'save'
+  return o * sizeof(sblocks[0]);
}
void new_dynarec_load_blocks(const void *save, int size)
{
-  const struct savestate_block *blocks = save;
-  int count = size / sizeof(blocks[0]);
+  // renamed to 'sblocks' to avoid shadowing the file-scope 'blocks' table
+  const struct savestate_block *sblocks = save;
+  int count = size / sizeof(sblocks[0]);
+  struct block_info *block;
  u_int regs_save[32];
+  u_int page;
  uint32_t f;
  int i, b;
-  get_addr(psxRegs.pc);
+  // restore clean blocks, if any
+  for (page = 0, b = i = 0; page < ARRAY_SIZE(blocks); page++) {
+    for (block = blocks[page]; block != NULL; block = block->next, b++) {
+      if (!block->is_dirty)
+        continue;
+      assert(block->source && block->copy);
+      // only un-dirty a block whose source RAM still matches the saved copy
+      if (memcmp(block->source, block->copy, block->len))
+        continue;
+
+      // see try_restore_block
+      block->is_dirty = 0;
+      mark_invalid_code(block->start, block->len, 0);
+      i++;
+    }
+  }
+  inv_debug("load_blocks: %d/%d clean blocks\n", i, b);
  // change GPRs for speculation to at least partially work..
  memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
        psxRegs.GPR.r[i] = 0x80000000;
+  // temporarily plant scratchpad-looking values per saved regflags,
+  // recompile, then restore the real GPRs afterwards
  for (b = 0; b < count; b++) {
-    for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
+    for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
      if (f & 1)
        psxRegs.GPR.r[i] = 0x1f800000;
    }
-    get_addr(blocks[b].addr);
+    ndrc_get_addr_ht(sblocks[b].addr);
-    for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
+    for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
      if (f & 1)
        psxRegs.GPR.r[i] = 0x80000000;
    }
  memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
}
-int new_recompile_block(u_int addr)
+// Dump and reset the dynarec counters; no-op unless STAT_PRINT is defined.
+void new_dynarec_print_stats(void)
{
-  u_int pagelimit = 0;
-  u_int state_rflags = 0;
-  int i;
-
-  assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
-  //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
-  //if(debug)
-  //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
-
-  // this is just for speculation
-  for (i = 1; i < 32; i++) {
-    if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
-      state_rflags |= 1 << i;
-  }
-
-  start = (u_int)addr&~3;
-  //assert(((u_int)addr&1)==0); // start-in-delay-slot flag
-  new_dynarec_did_compile=1;
-  if (Config.HLE && start == 0x80001000) // hlecall
-  {
-    // XXX: is this enough? Maybe check hleSoftCall?
-    void *beginning=start_block();
-    u_int page=get_page(start);
+#ifdef STAT_PRINT
+  printf("cc %3d,%3d,%3d lu%6d,%3d,%3d c%3d inv%3d,%3d tc_offs %zu b %u,%u\n",
+    stat_bc_pre, stat_bc_direct, stat_bc_restore,
+    stat_ht_lookups, stat_jump_in_lookups, stat_restore_tries,
+    stat_restore_compares, stat_inv_addr_calls, stat_inv_hits,
+    out - ndrc->translation_cache, stat_blocks, stat_links);
+  stat_bc_direct = stat_bc_pre = stat_bc_restore =
+    stat_ht_lookups = stat_jump_in_lookups = stat_restore_tries =
+    stat_restore_compares = stat_inv_addr_calls = stat_inv_hits = 0;
+#endif
+}
- invalid_code[start>>12]=0;
- emit_movimm(start,0);
- emit_writeword(0,&pcaddr);
- emit_far_jump(new_dyna_leave);
- literal_pool(0);
- end_block(beginning);
- ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
+// Game-specific compatibility hacks applied after pass 1 disassembly.
+// Returns 1 when the F1 hack was engaged, 0 otherwise.
+static int apply_hacks(void)
+{
+  int i;
+  if (HACK_ENABLED(NDHACK_NO_COMPAT_HACKS))
    return 0;
+  /* special hack(s) */
+  for (i = 0; i < slen - 4; i++)
+  {
+    // lui a4, 0xf200; jal <rcnt_read>; addu a0, 2; slti v0, 28224
+    if (source[i] == 0x3c04f200 && dops[i+1].itype == UJUMP
+        && source[i+2] == 0x34840002 && dops[i+3].opcode == 0x0a
+        && imm[i+3] == 0x6e40 && dops[i+3].rs1 == 2)
+    {
+      SysPrintf("PE2 hack @%08x\n", start + (i+3)*4);
+      dops[i + 3].itype = NOP;
+    }
  }
-
-  source = get_source_start(start, &pagelimit);
-  if (source == NULL) {
-    SysPrintf("Compile at bogus memory address: %08x\n", addr);
-    abort();
+  i = slen;
+  // match a specific epilogue: jr ra in a block that stores then calls via r6
+  if (i > 10 && source[i-1] == 0 && source[i-2] == 0x03e00008
+      && source[i-4] == 0x8fbf0018 && source[i-6] == 0x00c0f809
+      && dops[i-7].itype == STORE)
+  {
+    i = i-8;
+    if (dops[i].itype == IMM16)
+      i--;
+    // swl r2, 15(r6); swr r2, 12(r6); sw r6, *; jalr r6
+    if (dops[i].itype == STORELR && dops[i].rs1 == 6
+      && dops[i-1].itype == STORELR && dops[i-1].rs1 == 6)
+    {
+      SysPrintf("F1 hack from %08x, old dst %08x\n", start, hack_addr);
+      f1_hack = 1;
+      return 1;
+    }
  }
+  return 0;
+}
- /* Pass 1: disassemble */
- /* Pass 2: register dependencies, branch targets */
- /* Pass 3: register allocation */
- /* Pass 4: branch dependencies */
- /* Pass 5: pre-alloc */
- /* Pass 6: optimize clean/dirty state */
- /* Pass 7: flag 32-bit registers */
- /* Pass 8: assembly */
- /* Pass 9: linker */
- /* Pass 10: garbage collection / free memory */
-
- int j;
- int done=0;
+static noinline void pass1_disassemble(u_int pagelimit)
+{
+ int i, j, done = 0, ni_count = 0;
unsigned int type,op,op2;
- //printf("addr = %x source = %x %x\n", addr,source,source[0]);
-
- /* Pass 1 disassembly */
-
- for(i=0;!done;i++) {
- dops[i].bt=0;
- dops[i].likely=0;
- dops[i].ooo=0;
+ for (i = 0; !done; i++)
+ {
+ memset(&dops[i], 0, sizeof(dops[i]));
op2=0;
minimum_free_regs[i]=0;
dops[i].opcode=op=source[i]>>26;
switch(op)
{
- case 0x00: strcpy(insn[i],"special"); type=NI;
+ case 0x00: set_mnemonic(i, "special"); type=NI;
op2=source[i]&0x3f;
switch(op2)
{
- case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
- case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
- case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
- case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
- case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
- case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
- case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
- case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
- case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
- case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
- case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
- case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
- case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
- case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
- case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
- case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
- case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
- case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
- case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
- case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
- case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
- case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
- case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
- case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
- case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
- case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
- case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
- case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
- case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
- case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
- case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
- case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
- case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
- case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
- case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
+ case 0x00: set_mnemonic(i, "SLL"); type=SHIFTIMM; break;
+ case 0x02: set_mnemonic(i, "SRL"); type=SHIFTIMM; break;
+ case 0x03: set_mnemonic(i, "SRA"); type=SHIFTIMM; break;
+ case 0x04: set_mnemonic(i, "SLLV"); type=SHIFT; break;
+ case 0x06: set_mnemonic(i, "SRLV"); type=SHIFT; break;
+ case 0x07: set_mnemonic(i, "SRAV"); type=SHIFT; break;
+ case 0x08: set_mnemonic(i, "JR"); type=RJUMP; break;
+ case 0x09: set_mnemonic(i, "JALR"); type=RJUMP; break;
+ case 0x0C: set_mnemonic(i, "SYSCALL"); type=SYSCALL; break;
+ case 0x0D: set_mnemonic(i, "BREAK"); type=SYSCALL; break;
+ case 0x0F: set_mnemonic(i, "SYNC"); type=OTHER; break;
+ case 0x10: set_mnemonic(i, "MFHI"); type=MOV; break;
+ case 0x11: set_mnemonic(i, "MTHI"); type=MOV; break;
+ case 0x12: set_mnemonic(i, "MFLO"); type=MOV; break;
+ case 0x13: set_mnemonic(i, "MTLO"); type=MOV; break;
+ case 0x18: set_mnemonic(i, "MULT"); type=MULTDIV; break;
+ case 0x19: set_mnemonic(i, "MULTU"); type=MULTDIV; break;
+ case 0x1A: set_mnemonic(i, "DIV"); type=MULTDIV; break;
+ case 0x1B: set_mnemonic(i, "DIVU"); type=MULTDIV; break;
+ case 0x20: set_mnemonic(i, "ADD"); type=ALU; break;
+ case 0x21: set_mnemonic(i, "ADDU"); type=ALU; break;
+ case 0x22: set_mnemonic(i, "SUB"); type=ALU; break;
+ case 0x23: set_mnemonic(i, "SUBU"); type=ALU; break;
+ case 0x24: set_mnemonic(i, "AND"); type=ALU; break;
+ case 0x25: set_mnemonic(i, "OR"); type=ALU; break;
+ case 0x26: set_mnemonic(i, "XOR"); type=ALU; break;
+ case 0x27: set_mnemonic(i, "NOR"); type=ALU; break;
+ case 0x2A: set_mnemonic(i, "SLT"); type=ALU; break;
+ case 0x2B: set_mnemonic(i, "SLTU"); type=ALU; break;
+ case 0x30: set_mnemonic(i, "TGE"); type=NI; break;
+ case 0x31: set_mnemonic(i, "TGEU"); type=NI; break;
+ case 0x32: set_mnemonic(i, "TLT"); type=NI; break;
+ case 0x33: set_mnemonic(i, "TLTU"); type=NI; break;
+ case 0x34: set_mnemonic(i, "TEQ"); type=NI; break;
+ case 0x36: set_mnemonic(i, "TNE"); type=NI; break;
#if 0
- case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
- case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
- case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
- case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
- case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
- case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
- case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
- case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
- case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
- case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
- case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
- case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
- case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
- case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
- case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
- case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
- case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
+ case 0x14: set_mnemonic(i, "DSLLV"); type=SHIFT; break;
+ case 0x16: set_mnemonic(i, "DSRLV"); type=SHIFT; break;
+ case 0x17: set_mnemonic(i, "DSRAV"); type=SHIFT; break;
+ case 0x1C: set_mnemonic(i, "DMULT"); type=MULTDIV; break;
+ case 0x1D: set_mnemonic(i, "DMULTU"); type=MULTDIV; break;
+ case 0x1E: set_mnemonic(i, "DDIV"); type=MULTDIV; break;
+ case 0x1F: set_mnemonic(i, "DDIVU"); type=MULTDIV; break;
+ case 0x2C: set_mnemonic(i, "DADD"); type=ALU; break;
+ case 0x2D: set_mnemonic(i, "DADDU"); type=ALU; break;
+ case 0x2E: set_mnemonic(i, "DSUB"); type=ALU; break;
+ case 0x2F: set_mnemonic(i, "DSUBU"); type=ALU; break;
+ case 0x38: set_mnemonic(i, "DSLL"); type=SHIFTIMM; break;
+ case 0x3A: set_mnemonic(i, "DSRL"); type=SHIFTIMM; break;
+ case 0x3B: set_mnemonic(i, "DSRA"); type=SHIFTIMM; break;
+ case 0x3C: set_mnemonic(i, "DSLL32"); type=SHIFTIMM; break;
+ case 0x3E: set_mnemonic(i, "DSRL32"); type=SHIFTIMM; break;
+ case 0x3F: set_mnemonic(i, "DSRA32"); type=SHIFTIMM; break;
#endif
}
break;
- case 0x01: strcpy(insn[i],"regimm"); type=NI;
+ case 0x01: set_mnemonic(i, "regimm"); type=NI;
op2=(source[i]>>16)&0x1f;
switch(op2)
{
- case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
- case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
- //case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
- //case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
- //case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
- //case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
- //case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
- //case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
- //case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
- //case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
- case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
- case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
- //case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
- //case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
+ case 0x00: set_mnemonic(i, "BLTZ"); type=SJUMP; break;
+ case 0x01: set_mnemonic(i, "BGEZ"); type=SJUMP; break;
+ //case 0x02: set_mnemonic(i, "BLTZL"); type=SJUMP; break;
+ //case 0x03: set_mnemonic(i, "BGEZL"); type=SJUMP; break;
+ //case 0x08: set_mnemonic(i, "TGEI"); type=NI; break;
+ //case 0x09: set_mnemonic(i, "TGEIU"); type=NI; break;
+ //case 0x0A: set_mnemonic(i, "TLTI"); type=NI; break;
+ //case 0x0B: set_mnemonic(i, "TLTIU"); type=NI; break;
+ //case 0x0C: set_mnemonic(i, "TEQI"); type=NI; break;
+ //case 0x0E: set_mnemonic(i, "TNEI"); type=NI; break;
+ case 0x10: set_mnemonic(i, "BLTZAL"); type=SJUMP; break;
+ case 0x11: set_mnemonic(i, "BGEZAL"); type=SJUMP; break;
+ //case 0x12: set_mnemonic(i, "BLTZALL"); type=SJUMP; break;
+ //case 0x13: set_mnemonic(i, "BGEZALL"); type=SJUMP; break;
}
break;
- case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
- case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
- case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
- case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
- case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
- case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
- case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
- case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
- case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
- case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
- case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
- case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
- case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
- case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
- case 0x10: strcpy(insn[i],"cop0"); type=NI;
+ case 0x02: set_mnemonic(i, "J"); type=UJUMP; break;
+ case 0x03: set_mnemonic(i, "JAL"); type=UJUMP; break;
+ case 0x04: set_mnemonic(i, "BEQ"); type=CJUMP; break;
+ case 0x05: set_mnemonic(i, "BNE"); type=CJUMP; break;
+ case 0x06: set_mnemonic(i, "BLEZ"); type=CJUMP; break;
+ case 0x07: set_mnemonic(i, "BGTZ"); type=CJUMP; break;
+ case 0x08: set_mnemonic(i, "ADDI"); type=IMM16; break;
+ case 0x09: set_mnemonic(i, "ADDIU"); type=IMM16; break;
+ case 0x0A: set_mnemonic(i, "SLTI"); type=IMM16; break;
+ case 0x0B: set_mnemonic(i, "SLTIU"); type=IMM16; break;
+ case 0x0C: set_mnemonic(i, "ANDI"); type=IMM16; break;
+ case 0x0D: set_mnemonic(i, "ORI"); type=IMM16; break;
+ case 0x0E: set_mnemonic(i, "XORI"); type=IMM16; break;
+ case 0x0F: set_mnemonic(i, "LUI"); type=IMM16; break;
+ case 0x10: set_mnemonic(i, "cop0"); type=NI;
op2=(source[i]>>21)&0x1f;
switch(op2)
{
- case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
- case 0x02: strcpy(insn[i],"CFC0"); type=COP0; break;
- case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
- case 0x06: strcpy(insn[i],"CTC0"); type=COP0; break;
- case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
+ case 0x00: set_mnemonic(i, "MFC0"); type=COP0; break;
+ case 0x02: set_mnemonic(i, "CFC0"); type=COP0; break;
+ case 0x04: set_mnemonic(i, "MTC0"); type=COP0; break;
+ case 0x06: set_mnemonic(i, "CTC0"); type=COP0; break;
+ case 0x10: set_mnemonic(i, "RFE"); type=COP0; break;
}
break;
- case 0x11: strcpy(insn[i],"cop1"); type=COP1;
+ case 0x11: set_mnemonic(i, "cop1"); type=COP1;
op2=(source[i]>>21)&0x1f;
break;
#if 0
- case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
- case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
- case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
- case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
- case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
- case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
- case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
- case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
+ case 0x14: set_mnemonic(i, "BEQL"); type=CJUMP; break;
+ case 0x15: set_mnemonic(i, "BNEL"); type=CJUMP; break;
+ case 0x16: set_mnemonic(i, "BLEZL"); type=CJUMP; break;
+ case 0x17: set_mnemonic(i, "BGTZL"); type=CJUMP; break;
+ case 0x18: set_mnemonic(i, "DADDI"); type=IMM16; break;
+ case 0x19: set_mnemonic(i, "DADDIU"); type=IMM16; break;
+ case 0x1A: set_mnemonic(i, "LDL"); type=LOADLR; break;
+ case 0x1B: set_mnemonic(i, "LDR"); type=LOADLR; break;
#endif
- case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
- case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
- case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
- case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
- case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
- case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
- case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
+ case 0x20: set_mnemonic(i, "LB"); type=LOAD; break;
+ case 0x21: set_mnemonic(i, "LH"); type=LOAD; break;
+ case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; break;
+ case 0x23: set_mnemonic(i, "LW"); type=LOAD; break;
+ case 0x24: set_mnemonic(i, "LBU"); type=LOAD; break;
+ case 0x25: set_mnemonic(i, "LHU"); type=LOAD; break;
+ case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; break;
#if 0
- case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
+ case 0x27: set_mnemonic(i, "LWU"); type=LOAD; break;
#endif
- case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
- case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
- case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
- case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
+ case 0x28: set_mnemonic(i, "SB"); type=STORE; break;
+ case 0x29: set_mnemonic(i, "SH"); type=STORE; break;
+ case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; break;
+ case 0x2B: set_mnemonic(i, "SW"); type=STORE; break;
#if 0
- case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
- case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
+ case 0x2C: set_mnemonic(i, "SDL"); type=STORELR; break;
+ case 0x2D: set_mnemonic(i, "SDR"); type=STORELR; break;
#endif
- case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
- case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
- case 0x30: strcpy(insn[i],"LL"); type=NI; break;
- case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
+ case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; break;
+ case 0x2F: set_mnemonic(i, "CACHE"); type=NOP; break;
+ case 0x30: set_mnemonic(i, "LL"); type=NI; break;
+ case 0x31: set_mnemonic(i, "LWC1"); type=C1LS; break;
#if 0
- case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
- case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
- case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
+ case 0x34: set_mnemonic(i, "LLD"); type=NI; break;
+ case 0x35: set_mnemonic(i, "LDC1"); type=C1LS; break;
+ case 0x37: set_mnemonic(i, "LD"); type=LOAD; break;
#endif
- case 0x38: strcpy(insn[i],"SC"); type=NI; break;
- case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
+ case 0x38: set_mnemonic(i, "SC"); type=NI; break;
+ case 0x39: set_mnemonic(i, "SWC1"); type=C1LS; break;
#if 0
- case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
- case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
- case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
+ case 0x3C: set_mnemonic(i, "SCD"); type=NI; break;
+ case 0x3D: set_mnemonic(i, "SDC1"); type=C1LS; break;
+ case 0x3F: set_mnemonic(i, "SD"); type=STORE; break;
#endif
- case 0x12: strcpy(insn[i],"COP2"); type=NI;
+ case 0x12: set_mnemonic(i, "COP2"); type=NI;
op2=(source[i]>>21)&0x1f;
//if (op2 & 0x10)
if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
if (gte_handlers[source[i]&0x3f]!=NULL) {
+#ifdef DISASM
if (gte_regnames[source[i]&0x3f]!=NULL)
strcpy(insn[i],gte_regnames[source[i]&0x3f]);
else
snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
+#endif
type=C2OP;
}
}
else switch(op2)
{
- case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
- case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
- case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
- case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
+ case 0x00: set_mnemonic(i, "MFC2"); type=COP2; break;
+ case 0x02: set_mnemonic(i, "CFC2"); type=COP2; break;
+ case 0x04: set_mnemonic(i, "MTC2"); type=COP2; break;
+ case 0x06: set_mnemonic(i, "CTC2"); type=COP2; break;
}
break;
- case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
- case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
- case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
- default: strcpy(insn[i],"???"); type=NI;
- SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
+ case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; break;
+ case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; break;
+ case 0x3B: set_mnemonic(i, "HLECALL"); type=HLECALL; break;
+ default: set_mnemonic(i, "???"); type=NI;
+ SysPrintf("NI %08x @%08x (%08x)\n", source[i], start + i*4, start);
break;
}
dops[i].itype=type;
dops[i].opcode2=op2;
/* Get registers/immediates */
- dops[i].lt1=0;
+ dops[i].use_lt1=0;
gte_rs[i]=gte_rt[i]=0;
switch(type) {
case LOAD:
if(op&2) { // BGTZ/BLEZ
dops[i].rs2=0;
}
- dops[i].likely=(op>>4)?1:0;
break;
case SJUMP:
dops[i].rs1=(source[i]>>21)&0x1f;
dops[i].rt1=31;
// NOTE: If the branch is not taken, r31 is still overwritten
}
- dops[i].likely=(op2&2)?1:0;
break;
case ALU:
dops[i].rs1=(source[i]>>21)&0x1f; // source
else if (type == SJUMP && dops[i].rs1 == 0 && (op2 & 1))
dops[i].itype = type = UJUMP;
+ dops[i].is_jump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP || dops[i].itype == CJUMP || dops[i].itype == SJUMP);
+ dops[i].is_ujump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP); // || (source[i] >> 16) == 0x1000 // beq r0,r0
+ dops[i].is_load = (dops[i].itype == LOAD || dops[i].itype == LOADLR || op == 0x32); // LWC2
+ dops[i].is_store = (dops[i].itype == STORE || dops[i].itype == STORELR || op == 0x3a); // SWC2
+
/* messy cases to just pass over to the interpreter */
- if (i > 0 && is_jump(i-1)) {
+ if (i > 0 && dops[i-1].is_jump) {
int do_in_intrp=0;
// branch in delay slot?
- if (is_jump(i)) {
+ if (dops[i].is_jump) {
// don't handle first branch and call interpreter if it's hit
- SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
+ SysPrintf("branch in delay slot @%08x (%08x)\n", start + i*4, start);
do_in_intrp=1;
}
// basic load delay detection
int t=(ba[i-1]-start)/4;
if(0 <= t && t < i &&(dops[i].rt1==dops[t].rs1||dops[i].rt1==dops[t].rs2)&&dops[t].itype!=CJUMP&&dops[t].itype!=SJUMP) {
// jump target wants DS result - potential load delay effect
- SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
+ SysPrintf("load delay @%08x (%08x)\n", start + i*4, start);
do_in_intrp=1;
dops[t+1].bt=1; // expected return from interpreter
}
else if(i>=2&&dops[i-2].rt1==2&&dops[i].rt1==2&&dops[i].rs1!=2&&dops[i].rs2!=2&&dops[i-1].rs1!=2&&dops[i-1].rs2!=2&&
- !(i>=3&&is_jump(i-3))) {
+ !(i>=3&&dops[i-3].is_jump)) {
// v0 overwrite like this is a sign of trouble, bail out
- SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
+ SysPrintf("v0 overwrite @%08x (%08x)\n", start + i*4, start);
do_in_intrp=1;
}
}
- if(do_in_intrp) {
- dops[i-1].rs1=CCREG;
- dops[i-1].rs2=dops[i-1].rt1=dops[i-1].rt2=0;
- ba[i-1]=-1;
- dops[i-1].itype=INTCALL;
- done=2;
+ if (do_in_intrp) {
+ memset(&dops[i-1], 0, sizeof(dops[i-1]));
+ dops[i-1].itype = INTCALL;
+ dops[i-1].rs1 = CCREG;
+ ba[i-1] = -1;
+ done = 2;
i--; // don't compile the DS
}
}
/* Is this the end of the block? */
- if (i > 0 && is_ujump(i-1)) {
- if(dops[i-1].rt1==0) { // Continue past subroutine call (JAL)
- done=2;
+ if (i > 0 && dops[i-1].is_ujump) {
+ if (dops[i-1].rt1 == 0) { // not jal
+ int found_bbranch = 0, t = (ba[i-1] - start) / 4;
+ if ((u_int)(t - i) < 64 && start + (t+64)*4 < pagelimit) {
+ // scan for a branch back to i+1
+ for (j = t; j < t + 64; j++) {
+ int tmpop = source[j] >> 26;
+ if (tmpop == 1 || ((tmpop & ~3) == 4)) {
+ int t2 = j + 1 + (int)(signed short)source[j];
+ if (t2 == i + 1) {
+ //printf("blk expand %08x<-%08x\n", start + (i+1)*4, start + j*4);
+ found_bbranch = 1;
+ break;
+ }
+ }
+ }
+ }
+ if (!found_bbranch)
+ done = 2;
}
else {
if(stop_after_jal) done=1;
// Don't get too close to the limit
if(i>MAXBLOCK/2) done=1;
}
- if(dops[i].itype==SYSCALL&&stop_after_jal) done=1;
- if(dops[i].itype==HLECALL||dops[i].itype==INTCALL) done=2;
- if(done==2) {
+ if (dops[i].itype == SYSCALL || dops[i].itype == HLECALL || dops[i].itype == INTCALL)
+ done = stop_after_jal ? 1 : 2;
+ if (done == 2) {
// Does the block continue due to a branch?
for(j=i-1;j>=0;j--)
{
assert(start+i*4<pagelimit);
if (i==MAXBLOCK-1) done=1;
// Stop if we're compiling junk
- if(dops[i].itype==NI&&dops[i].opcode==0x11) {
+ if(dops[i].itype == NI && (++ni_count > 8 || dops[i].opcode == 0x11)) {
done=stop_after_jal=1;
SysPrintf("Disabled speculative precompilation\n");
}
- }
- slen=i;
- if(dops[i-1].itype==UJUMP||dops[i-1].itype==CJUMP||dops[i-1].itype==SJUMP||dops[i-1].itype==RJUMP) {
- if(start+i*4==pagelimit) {
- dops[i-1].itype=SPAN;
+ }
+ while (i > 0 && dops[i-1].is_jump)
+ i--;
+ assert(i > 0);
+ assert(!dops[i-1].is_jump);
+ slen = i;
+}
+
+// Basic liveness analysis for MIPS registers
+static noinline void pass2_unneeded_regs(int istart,int iend,int r)
+{
+  int i;
+  uint64_t u,gte_u,b,gte_b;
+  uint64_t temp_u,temp_gte_u=0;
+  uint64_t gte_u_unknown=0;
+  if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
+    gte_u_unknown=~0ll;
+  if(iend==slen-1) {
+    u=1;
+    gte_u=gte_u_unknown;
+  }else{
+    //u=unneeded_reg[iend+1];
+    u=1;
+    gte_u=gte_unneeded[iend+1];
+  }
+
+  for (i=iend;i>=istart;i--)
+  {
+    //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
+    if(dops[i].is_jump)
+    {
+      // If subroutine call, flag return address as a possible branch target
+      if(dops[i].rt1==31 && i<slen-2) dops[i+2].bt=1;
+
+      if(ba[i]<start || ba[i]>=(start+slen*4))
+      {
+        // Branch out of this block, flush all regs
+        u=1;
+        gte_u=gte_u_unknown;
+        branch_unneeded_reg[i]=u;
+        // Merge in delay slot
+        u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
+        u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
+        u|=1;
+        gte_u|=gte_rt[i+1];
+        gte_u&=~gte_rs[i+1];
+      }
+      else
+      {
+        // Internal branch, flag target
+        dops[(ba[i]-start)>>2].bt=1;
+        if(ba[i]<=start+i*4) {
+          // Backward branch
+          if(dops[i].is_ujump)
+          {
+            // Unconditional branch
+            temp_u=1;
+            temp_gte_u=0;
+          } else {
+            // Conditional branch (not taken case)
+            temp_u=unneeded_reg[i+2];
+            temp_gte_u&=gte_unneeded[i+2];
+          }
+          // Merge in delay slot
+          temp_u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
+          temp_u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
+          temp_u|=1;
+          temp_gte_u|=gte_rt[i+1];
+          temp_gte_u&=~gte_rs[i+1];
+          temp_u|=(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2);
+          temp_u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
+          temp_u|=1;
+          temp_gte_u|=gte_rt[i];
+          temp_gte_u&=~gte_rs[i];
+          unneeded_reg[i]=temp_u;
+          gte_unneeded[i]=temp_gte_u;
+          // Only go three levels deep.  This recursion can take an
+          // excessive amount of time if there are a lot of nested loops.
+          if(r<2) {
+            pass2_unneeded_regs((ba[i]-start)>>2,i-1,r+1);
+          }else{
+            unneeded_reg[(ba[i]-start)>>2]=1;
+            gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
+          }
+        } /*else*/ if(1) {
+          if (dops[i].is_ujump)
+          {
+            // Unconditional branch
+            u=unneeded_reg[(ba[i]-start)>>2];
+            gte_u=gte_unneeded[(ba[i]-start)>>2];
+            branch_unneeded_reg[i]=u;
+            // Merge in delay slot
+            u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
+            u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
+            u|=1;
+            gte_u|=gte_rt[i+1];
+            gte_u&=~gte_rs[i+1];
+          } else {
+            // Conditional branch
+            b=unneeded_reg[(ba[i]-start)>>2];
+            gte_b=gte_unneeded[(ba[i]-start)>>2];
+            branch_unneeded_reg[i]=b;
+            // Branch delay slot
+            b|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
+            b&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
+            b|=1;
+            gte_b|=gte_rt[i+1];
+            gte_b&=~gte_rs[i+1];
+            u&=b;
+            gte_u&=gte_b;
+            if(i<slen-1) {
+              branch_unneeded_reg[i]&=unneeded_reg[i+2];
+            } else {
+              branch_unneeded_reg[i]=1;
+            }
+          }
+        }
+      }
+    }
+    else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
+    {
+      // SYSCALL instruction (software interrupt)
+      u=1;
+    }
+    else if(dops[i].itype==COP0 && dops[i].opcode2==0x10)
+    {
+      // RFE
+      u=1;
+    }
+    //u=1; // DEBUG
+    // Written registers are unneeded
+    u|=1LL<<dops[i].rt1;
+    u|=1LL<<dops[i].rt2;
+    gte_u|=gte_rt[i];
+    // Accessed registers are needed
+    u&=~(1LL<<dops[i].rs1);
+    u&=~(1LL<<dops[i].rs2);
+    gte_u&=~gte_rs[i];
+    // FIX: was garbled "gte_rs[i]>e_unneeded[i+1]" (an '&gt;' entity-decoding
+    // artifact of "&gte_unneeded"); the intended mask is a bitwise AND.
+    if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
+      gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
+    // Source-target dependencies
+    // R0 is always unneeded
+    u|=1;
+    // Save it
+    unneeded_reg[i]=u;
+    gte_unneeded[i]=gte_u;
+    /*
+    printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
+    printf("U:");
+    int r;
+    for(r=1;r<=CCREG;r++) {
+      if((unneeded_reg[i]>>r)&1) {
+        if(r==HIREG) printf(" HI");
+        else if(r==LOREG) printf(" LO");
+        else printf(" r%d",r);
+      }
    }
+    printf("\n");
+    */
  }
-  assert(slen>0);
-
-  /* Pass 2 - Register dependencies and branch targets */
-
-  unneeded_registers(0,slen-1,0);
-
-  /* Pass 3 - Register allocation */
+}
+static noinline void pass3_register_alloc(u_int addr)
+{
struct regstat current; // Current register allocations/status
- current.dirty=0;
- current.u=unneeded_reg[0];
+ clear_all_regs(current.regmap_entry);
clear_all_regs(current.regmap);
- alloc_reg(¤t,0,CCREG);
- dirty_reg(¤t,CCREG);
- current.isconst=0;
- current.wasconst=0;
- current.waswritten=0;
+ current.wasdirty = current.dirty = 0;
+ current.u = unneeded_reg[0];
+ alloc_reg(¤t, 0, CCREG);
+ dirty_reg(¤t, CCREG);
+ current.wasconst = 0;
+ current.isconst = 0;
+ current.loadedconst = 0;
+ current.waswritten = 0;
int ds=0;
int cc=0;
- int hr=-1;
+ int hr;
+ int i, j;
- if((u_int)addr&1) {
+ if (addr & 1) {
// First instruction is delay slot
cc=-1;
dops[1].bt=1;
{
if(dops[i].bt)
{
- int hr;
for(hr=0;hr<HOST_REGS;hr++)
{
// Is this really necessary?
memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
regs[i].wasconst=current.isconst;
regs[i].wasdirty=current.dirty;
+ regs[i].dirty=0;
+ regs[i].u=0;
+ regs[i].isconst=0;
regs[i].loadedconst=0;
- if(dops[i].itype!=UJUMP&&dops[i].itype!=CJUMP&&dops[i].itype!=SJUMP&&dops[i].itype!=RJUMP) {
+ if (!dops[i].is_jump) {
if(i+1<slen) {
current.u=unneeded_reg[i+1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
current.u|=1;
current.u=branch_unneeded_reg[i]&~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
current.u|=1;
- } else { SysPrintf("oops, branch at end of block with no delay slot\n");abort(); }
+ } else {
+ SysPrintf("oops, branch at end of block with no delay slot @%08x\n", start + i*4);
+ abort();
+ }
}
dops[i].is_ds=ds;
if(ds) {
case INTCALL:
syscall_alloc(¤t,i);
break;
- case SPAN:
- pagespan_alloc(¤t,i);
- break;
}
// Create entry (branch target) regmap
if(r!=regmap_pre[i][hr]) {
// TODO: delay slot (?)
or=get_reg(regmap_pre[i],r); // Get old mapping for this register
- if(or<0||(r&63)>=TEMPREG){
+ if(or<0||r>=TEMPREG){
regs[i].regmap_entry[hr]=-1;
}
else
// Just move it to a different register
regs[i].regmap_entry[hr]=r;
// If it was dirty before, it's still dirty
- if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
+ if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r);
}
}
else
break;
}
- if (is_ujump(i-1))
+ if (dops[i-1].is_ujump)
{
if(dops[i-1].rt1==31) // JAL/JALR
{
}
// Count cycles in between branches
- ccadj[i]=cc;
- if(i>0&&(dops[i-1].itype==RJUMP||dops[i-1].itype==UJUMP||dops[i-1].itype==CJUMP||dops[i-1].itype==SJUMP||dops[i].itype==SYSCALL||dops[i].itype==HLECALL))
+ ccadj[i] = CLOCK_ADJUST(cc);
+ if (i > 0 && (dops[i-1].is_jump || dops[i].itype == SYSCALL || dops[i].itype == HLECALL))
{
cc=0;
}
if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
regs[i].waswritten=current.waswritten;
}
+}
- /* Pass 4 - Cull unused host registers */
-
- uint64_t nr=0;
+static noinline void pass4_cull_unused_regs(void)
+{
+ u_int last_needed_regs[4] = {0,0,0,0};
+ u_int nr=0;
+ int i;
for (i=slen-1;i>=0;i--)
{
int hr;
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
+ __builtin_prefetch(regs[i-2].regmap);
+ if(dops[i].is_jump)
{
if(ba[i]<start || ba[i]>=(start+slen*4))
{
}
}
// Conditional branch may need registers for following instructions
- if (!is_ujump(i))
+ if (!dops[i].is_ujump)
{
if(i<slen-2) {
- nr|=needed_reg[i+2];
+ nr |= last_needed_regs[(i+2) & 3];
for(hr=0;hr<HOST_REGS;hr++)
{
if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
//if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
//if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
// Merge in delay slot
- for(hr=0;hr<HOST_REGS;hr++)
- {
- if(!dops[i].likely) {
- // These are overwritten unless the branch is "likely"
- // and the delay slot is nullified if not taken
- if(dops[i+1].rt1&&dops[i+1].rt1==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
- if(dops[i+1].rt2&&dops[i+1].rt2==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
- }
- if(dops[i+1].rs1==regmap_pre[i][hr]) nr|=1<<hr;
- if(dops[i+1].rs2==regmap_pre[i][hr]) nr|=1<<hr;
- if(dops[i+1].rs1==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i+1].rs2==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i+1].itype==STORE || dops[i+1].itype==STORELR || (dops[i+1].opcode&0x3b)==0x39 || (dops[i+1].opcode&0x3b)==0x3a) {
- if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
- if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
- }
+ if (dops[i+1].rt1) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt1);
+ if (dops[i+1].rt2) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt2);
+ nr |= get_regm(regmap_pre[i], dops[i+1].rs1);
+ nr |= get_regm(regmap_pre[i], dops[i+1].rs2);
+ nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs1);
+ nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs2);
+ if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store)) {
+ nr |= get_regm(regmap_pre[i], ROREG);
+ nr |= get_regm(regs[i].regmap_entry, ROREG);
+ }
+ if (dops[i+1].is_store) {
+ nr |= get_regm(regmap_pre[i], INVCP);
+ nr |= get_regm(regs[i].regmap_entry, INVCP);
}
}
else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
}
}
}
+ // Overwritten registers are not needed
+ if (dops[i].rt1) nr &= ~get_regm(regs[i].regmap, dops[i].rt1);
+ if (dops[i].rt2) nr &= ~get_regm(regs[i].regmap, dops[i].rt2);
+ nr &= ~get_regm(regs[i].regmap, FTEMP);
+ // Source registers are needed
+ nr |= get_regm(regmap_pre[i], dops[i].rs1);
+ nr |= get_regm(regmap_pre[i], dops[i].rs2);
+ nr |= get_regm(regs[i].regmap_entry, dops[i].rs1);
+ nr |= get_regm(regs[i].regmap_entry, dops[i].rs2);
+ if (ram_offset && (dops[i].is_load || dops[i].is_store)) {
+ nr |= get_regm(regmap_pre[i], ROREG);
+ nr |= get_regm(regs[i].regmap_entry, ROREG);
+ }
+ if (dops[i].is_store) {
+ nr |= get_regm(regmap_pre[i], INVCP);
+ nr |= get_regm(regs[i].regmap_entry, INVCP);
+ }
+
+ if (i > 0 && !dops[i].bt && regs[i].wasdirty)
for(hr=0;hr<HOST_REGS;hr++)
{
- // Overwritten registers are not needed
- if(dops[i].rt1&&dops[i].rt1==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
- if(dops[i].rt2&&dops[i].rt2==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
- if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
- // Source registers are needed
- if(dops[i].rs1==regmap_pre[i][hr]) nr|=1<<hr;
- if(dops[i].rs2==regmap_pre[i][hr]) nr|=1<<hr;
- if(dops[i].rs1==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i].rs2==regs[i].regmap_entry[hr]) nr|=1<<hr;
- if(dops[i].itype==STORE || dops[i].itype==STORELR || (dops[i].opcode&0x3b)==0x39 || (dops[i].opcode&0x3b)==0x3a) {
- if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
- if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
- }
// Don't store a register immediately after writing it,
// may prevent dual-issue.
// But do so if this is a branch target, otherwise we
// might have to load the register before the branch.
- if(i>0&&!dops[i].bt&&((regs[i].wasdirty>>hr)&1)) {
+ if((regs[i].wasdirty>>hr)&1) {
if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
- if(dops[i-1].rt1==(regmap_pre[i][hr]&63)) nr|=1<<hr;
- if(dops[i-1].rt2==(regmap_pre[i][hr]&63)) nr|=1<<hr;
+ if(dops[i-1].rt1==regmap_pre[i][hr]) nr|=1<<hr;
+ if(dops[i-1].rt2==regmap_pre[i][hr]) nr|=1<<hr;
}
if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
- if(dops[i-1].rt1==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
- if(dops[i-1].rt2==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
+ if(dops[i-1].rt1==regs[i].regmap_entry[hr]) nr|=1<<hr;
+ if(dops[i-1].rt2==regs[i].regmap_entry[hr]) nr|=1<<hr;
}
}
}
// Cycle count is needed at branches. Assume it is needed at the target too.
- if(i==0||dops[i].bt||dops[i].itype==CJUMP||dops[i].itype==SPAN) {
+ if(i==0||dops[i].bt||dops[i].itype==CJUMP) {
if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
}
// Save it
- needed_reg[i]=nr;
+ last_needed_regs[i & 3] = nr;
// Deallocate unneeded registers
for(hr=0;hr<HOST_REGS;hr++)
{
if(!((nr>>hr)&1)) {
if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
- if((regs[i].regmap[hr]&63)!=dops[i].rs1 && (regs[i].regmap[hr]&63)!=dops[i].rs2 &&
- (regs[i].regmap[hr]&63)!=dops[i].rt1 && (regs[i].regmap[hr]&63)!=dops[i].rt2 &&
- (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
- {
- if (!is_ujump(i))
- {
- if(dops[i].likely) {
- regs[i].regmap[hr]=-1;
- regs[i].isconst&=~(1<<hr);
- if(i<slen-2) {
- regmap_pre[i+2][hr]=-1;
- regs[i+2].wasconst&=~(1<<hr);
- }
- }
- }
- }
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
+ if(dops[i].is_jump)
{
- int map=0,temp=0;
- if(dops[i+1].itype==STORE || dops[i+1].itype==STORELR ||
- (dops[i+1].opcode&0x3b)==0x39 || (dops[i+1].opcode&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
- map=INVCP;
- }
- if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR ||
- dops[i+1].itype==C1LS || dops[i+1].itype==C2LS)
- temp=FTEMP;
- if((regs[i].regmap[hr]&63)!=dops[i].rs1 && (regs[i].regmap[hr]&63)!=dops[i].rs2 &&
- (regs[i].regmap[hr]&63)!=dops[i].rt1 && (regs[i].regmap[hr]&63)!=dops[i].rt2 &&
- (regs[i].regmap[hr]&63)!=dops[i+1].rt1 && (regs[i].regmap[hr]&63)!=dops[i+1].rt2 &&
+ int map1 = 0, map2 = 0, temp = 0; // or -1 ??
+ if (dops[i+1].is_load || dops[i+1].is_store)
+ map1 = ROREG;
+ if (dops[i+1].is_store)
+ map2 = INVCP;
+ if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR || dops[i+1].itype==C2LS)
+ temp = FTEMP;
+ if(regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
+ regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
+ regs[i].regmap[hr]!=dops[i+1].rt1 && regs[i].regmap[hr]!=dops[i+1].rt2 &&
regs[i].regmap[hr]!=dops[i+1].rs1 && regs[i].regmap[hr]!=dops[i+1].rs2 &&
- (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
+ regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=PTEMP &&
regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
- regs[i].regmap[hr]!=map )
+ regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2)
{
regs[i].regmap[hr]=-1;
regs[i].isconst&=~(1<<hr);
- if((branch_regs[i].regmap[hr]&63)!=dops[i].rs1 && (branch_regs[i].regmap[hr]&63)!=dops[i].rs2 &&
- (branch_regs[i].regmap[hr]&63)!=dops[i].rt1 && (branch_regs[i].regmap[hr]&63)!=dops[i].rt2 &&
- (branch_regs[i].regmap[hr]&63)!=dops[i+1].rt1 && (branch_regs[i].regmap[hr]&63)!=dops[i+1].rt2 &&
+ regs[i].dirty&=~(1<<hr);
+ regs[i+1].wasdirty&=~(1<<hr);
+ if(branch_regs[i].regmap[hr]!=dops[i].rs1 && branch_regs[i].regmap[hr]!=dops[i].rs2 &&
+ branch_regs[i].regmap[hr]!=dops[i].rt1 && branch_regs[i].regmap[hr]!=dops[i].rt2 &&
+ branch_regs[i].regmap[hr]!=dops[i+1].rt1 && branch_regs[i].regmap[hr]!=dops[i+1].rt2 &&
branch_regs[i].regmap[hr]!=dops[i+1].rs1 && branch_regs[i].regmap[hr]!=dops[i+1].rs2 &&
- (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
+ branch_regs[i].regmap[hr]!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
- branch_regs[i].regmap[hr]!=map)
+ branch_regs[i].regmap[hr]!=map1 && branch_regs[i].regmap[hr]!=map2)
{
branch_regs[i].regmap[hr]=-1;
branch_regs[i].regmap_entry[hr]=-1;
- if (!is_ujump(i))
+ if (!dops[i].is_ujump)
{
- if(!dops[i].likely&&i<slen-2) {
+ if (i < slen-2) {
regmap_pre[i+2][hr]=-1;
regs[i+2].wasconst&=~(1<<hr);
}
// Non-branch
if(i>0)
{
- int map=-1,temp=-1;
- if(dops[i].itype==STORE || dops[i].itype==STORELR ||
- (dops[i].opcode&0x3b)==0x39 || (dops[i].opcode&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
- map=INVCP;
- }
- if(dops[i].itype==LOADLR || dops[i].itype==STORELR ||
- dops[i].itype==C1LS || dops[i].itype==C2LS)
- temp=FTEMP;
- if((regs[i].regmap[hr]&63)!=dops[i].rt1 && (regs[i].regmap[hr]&63)!=dops[i].rt2 &&
+ int map1 = -1, map2 = -1, temp=-1;
+ if (dops[i].is_load || dops[i].is_store)
+ map1 = ROREG;
+ if (dops[i].is_store)
+ map2 = INVCP;
+ if (dops[i].itype==LOADLR || dops[i].itype==STORELR || dops[i].itype==C2LS)
+ temp = FTEMP;
+ if(regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
- (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
- (dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG))
+ regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2 &&
+ //(dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG)
+ regs[i].regmap[hr] != CCREG)
{
if(i<slen-1&&!dops[i].is_ds) {
assert(regs[i].regmap[hr]<64);
}
regs[i].regmap[hr]=-1;
regs[i].isconst&=~(1<<hr);
+ regs[i].dirty&=~(1<<hr);
+ regs[i+1].wasdirty&=~(1<<hr);
}
}
}
} // if needed
} // for hr
}
+}
- /* Pass 5 - Pre-allocate registers */
-
- // If a register is allocated during a loop, try to allocate it for the
- // entire loop, if possible. This avoids loading/storing registers
- // inside of the loop.
-
+// If a register is allocated during a loop, try to allocate it for the
+// entire loop, if possible. This avoids loading/storing registers
+// inside of the loop.
+static noinline void pass5a_preallocate1(void)
+{
+ int i, j, hr;
signed char f_regmap[HOST_REGS];
clear_all_regs(f_regmap);
for(i=0;i<slen-1;i++)
||dops[i+1].itype==COP2||dops[i+1].itype==C2LS||dops[i+1].itype==C2OP)
{
int t=(ba[i]-start)>>2;
- if(t>0&&(dops[t-1].itype!=UJUMP&&dops[t-1].itype!=RJUMP&&dops[t-1].itype!=CJUMP&&dops[t-1].itype!=SJUMP)) // loop_preload can't handle jumps into delay slots
+ if(t > 0 && !dops[t-1].is_jump) // loop_preload can't handle jumps into delay slots
if(t<2||(dops[t-2].itype!=UJUMP&&dops[t-2].itype!=RJUMP)||dops[t-2].rt1!=31) // call/ret assumes no registers allocated
for(hr=0;hr<HOST_REGS;hr++)
{
//printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
if(r<34&&((unneeded_reg[j]>>r)&1)) break;
assert(r < 64);
- if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
+ if(regs[j].regmap[hr]==f_regmap[hr]&&f_regmap[hr]<TEMPREG) {
//printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
int k;
if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
+ if(get_reg(regs[i].regmap,f_regmap[hr])>=0) break;
if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
- if(r>63) {
- if(get_reg(regs[i].regmap,r&63)<0) break;
- if(get_reg(branch_regs[i].regmap,r&63)<0) break;
- }
k=i;
while(k>1&&regs[k-1].regmap[hr]==-1) {
if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
//printf("no-match due to different register\n");
break;
}
- if(dops[k-2].itype==UJUMP||dops[k-2].itype==RJUMP||dops[k-2].itype==CJUMP||dops[k-2].itype==SJUMP) {
+ if (dops[k-2].is_jump) {
//printf("no-match due to branch\n");
break;
}
if(k>2&&(dops[k-3].itype==UJUMP||dops[k-3].itype==RJUMP)&&dops[k-3].rt1==31) {
break;
}
- assert(r < 64);
k--;
}
if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
branch_regs[i].wasconst&=~(1<<hr);
branch_regs[i].isconst&=~(1<<hr);
- if (!is_ujump(i)) {
+ if (!dops[i].is_ujump) {
regmap_pre[i+2][hr]=f_regmap[hr];
regs[i+2].wasdirty&=~(1<<hr);
regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
regs[k].dirty&=~(1<<hr);
regs[k].wasconst&=~(1<<hr);
regs[k].isconst&=~(1<<hr);
- if(dops[k].itype==UJUMP||dops[k].itype==RJUMP||dops[k].itype==CJUMP||dops[k].itype==SJUMP) {
+ if (dops[k].is_jump) {
branch_regs[k].regmap_entry[hr]=f_regmap[hr];
branch_regs[k].regmap[hr]=f_regmap[hr];
branch_regs[k].dirty&=~(1<<hr);
branch_regs[k].wasconst&=~(1<<hr);
branch_regs[k].isconst&=~(1<<hr);
- if (!is_ujump(k)) {
+ if (!dops[k].is_ujump) {
regmap_pre[k+2][hr]=f_regmap[hr];
regs[k+2].wasdirty&=~(1<<hr);
}
//printf("no-match due to different register\n");
break;
}
- if (is_ujump(j))
+ if (dops[j].is_ujump)
{
// Stop on unconditional branch
break;
}
}
}
+}
- // This allocates registers (if possible) one instruction prior
- // to use, which can avoid a load-use penalty on certain CPUs.
+// This allocates registers (if possible) one instruction prior
+// to use, which can avoid a load-use penalty on certain CPUs.
+static noinline void pass5b_preallocate2(void)
+{
+ int i, hr;
for(i=0;i<slen-1;i++)
{
- if(!i||(dops[i-1].itype!=UJUMP&&dops[i-1].itype!=CJUMP&&dops[i-1].itype!=SJUMP&&dops[i-1].itype!=RJUMP))
+ if (!i || !dops[i-1].is_jump)
{
if(!dops[i+1].bt)
{
}
}
// Load source into target register
- if(dops[i+1].lt1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
+ if(dops[i+1].use_lt1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
if((hr=get_reg(regs[i+1].regmap,dops[i+1].rt1))>=0)
{
if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
- if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
- else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
+ if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
+ else {
+ regs[i+1].regmap[hr]=AGEN1+((i+1)&1);
+ regs[i+1].isconst&=~(1<<hr);
+ }
assert(hr>=0);
if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
{
}
}
}
- if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR/*||dops[i+1].itype==C1LS||||dops[i+1].itype==C2LS*/) {
- if(dops[i+1].itype==LOAD)
- hr=get_reg(regs[i+1].regmap,dops[i+1].rt1);
- if(dops[i+1].itype==LOADLR||(dops[i+1].opcode&0x3b)==0x31||(dops[i+1].opcode&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
- hr=get_reg(regs[i+1].regmap,FTEMP);
- if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
- hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
- if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
- }
- if(hr>=0&&regs[i].regmap[hr]<0) {
- int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
- if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
- regs[i].regmap[hr]=AGEN1+((i+1)&1);
- regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
- regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
- regs[i].isconst&=~(1<<hr);
- regs[i+1].wasdirty&=~(1<<hr);
- regs[i].dirty&=~(1<<hr);
+ if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR/*||dops[i+1].itype==C1LS||||dops[i+1].itype==C2LS*/) {
+ hr = -1;
+ if(dops[i+1].itype==LOAD)
+ hr=get_reg(regs[i+1].regmap,dops[i+1].rt1);
+ if(dops[i+1].itype==LOADLR||(dops[i+1].opcode&0x3b)==0x31||(dops[i+1].opcode&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
+ hr=get_reg(regs[i+1].regmap,FTEMP);
+ if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
+ hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
+ if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
+ }
+ if(hr>=0&&regs[i].regmap[hr]<0) {
+ int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
+ if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
+ regs[i].regmap[hr]=AGEN1+((i+1)&1);
+ regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
+ regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
+ regs[i].isconst&=~(1<<hr);
+ regs[i+1].wasdirty&=~(1<<hr);
+ regs[i].dirty&=~(1<<hr);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Write back dirty registers as soon as we will no longer modify them,
+// so that we don't end up with lots of writes at the branches.
+static noinline void pass6_clean_registers(int istart, int iend, int wr)
+{
+ static u_int wont_dirty[MAXBLOCK];
+ static u_int will_dirty[MAXBLOCK];
+ int i;
+ int r;
+ u_int will_dirty_i,will_dirty_next,temp_will_dirty;
+ u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
+ if(iend==slen-1) {
+ will_dirty_i=will_dirty_next=0;
+ wont_dirty_i=wont_dirty_next=0;
+ }else{
+ will_dirty_i=will_dirty_next=will_dirty[iend+1];
+ wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
+ }
+ for (i=iend;i>=istart;i--)
+ {
+ signed char rregmap_i[RRMAP_SIZE];
+ u_int hr_candirty = 0;
+ assert(HOST_REGS < 32);
+ make_rregs(regs[i].regmap, rregmap_i, &hr_candirty);
+ __builtin_prefetch(regs[i-1].regmap);
+ if(dops[i].is_jump)
+ {
+ signed char branch_rregmap_i[RRMAP_SIZE];
+ u_int branch_hr_candirty = 0;
+ make_rregs(branch_regs[i].regmap, branch_rregmap_i, &branch_hr_candirty);
+ if(ba[i]<start || ba[i]>=(start+slen*4))
+ {
+ // Branch out of this block, flush all regs
+ will_dirty_i = 0;
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ will_dirty_i &= branch_hr_candirty;
+ if (dops[i].is_ujump)
+ {
+ // Unconditional branch
+ wont_dirty_i = 0;
+ // Merge in delay slot (will dirty)
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ will_dirty_i &= hr_candirty;
+ }
+ else
+ {
+ // Conditional branch
+ wont_dirty_i = wont_dirty_next;
+ // Merge in delay slot (will dirty)
+ // (the original code had no explanation why these 2 are commented out)
+ //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ will_dirty_i &= hr_candirty;
+ }
+ // Merge in delay slot (wont dirty)
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ wont_dirty_i &= ~(1u << 31);
+ if(wr) {
+ #ifndef DESTRUCTIVE_WRITEBACK
+ branch_regs[i].dirty&=wont_dirty_i;
+ #endif
+ branch_regs[i].dirty|=will_dirty_i;
+ }
+ }
+ else
+ {
+ // Internal branch
+ if(ba[i]<=start+i*4) {
+ // Backward branch
+ if (dops[i].is_ujump)
+ {
+ // Unconditional branch
+ temp_will_dirty=0;
+ temp_wont_dirty=0;
+ // Merge in delay slot (will dirty)
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ temp_will_dirty &= branch_hr_candirty;
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ temp_will_dirty &= hr_candirty;
+ } else {
+ // Conditional branch (not taken case)
+ temp_will_dirty=will_dirty_next;
+ temp_wont_dirty=wont_dirty_next;
+ // Merge in delay slot (will dirty)
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ temp_will_dirty &= branch_hr_candirty;
+ //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ temp_will_dirty &= hr_candirty;
+ }
+ // Merge in delay slot (wont dirty)
+ temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ temp_wont_dirty &= ~(1u << 31);
+ // Deal with changed mappings
+ if(i<iend) {
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ if(regs[i].regmap[r]!=regmap_pre[i][r]) {
+ temp_will_dirty&=~(1<<r);
+ temp_wont_dirty&=~(1<<r);
+ if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
+ temp_will_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
+ temp_wont_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
+ } else {
+ temp_will_dirty|=1<<r;
+ temp_wont_dirty|=1<<r;
+ }
+ }
+ }
+ }
+ }
+ if(wr) {
+ will_dirty[i]=temp_will_dirty;
+ wont_dirty[i]=temp_wont_dirty;
+ pass6_clean_registers((ba[i]-start)>>2,i-1,0);
+ }else{
+ // Limit recursion. It can take an excessive amount
+ // of time if there are a lot of nested loops.
+ will_dirty[(ba[i]-start)>>2]=0;
+ wont_dirty[(ba[i]-start)>>2]=-1;
+ }
+ }
+ /*else*/ if(1)
+ {
+ if (dops[i].is_ujump)
+ {
+ // Unconditional branch
+ will_dirty_i=0;
+ wont_dirty_i=0;
+ //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
+ will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
+ wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
+ }
+ if(branch_regs[i].regmap[r]>=0) {
+ will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
+ wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
+ }
+ }
+ }
+ //}
+ // Merge in delay slot
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ will_dirty_i &= branch_hr_candirty;
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ will_dirty_i &= hr_candirty;
+ } else {
+ // Conditional branch
+ will_dirty_i=will_dirty_next;
+ wont_dirty_i=wont_dirty_next;
+ //if(ba[i]>start+i*4) // Disable recursion (for debugging)
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ signed char target_reg=branch_regs[i].regmap[r];
+ if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
+ will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
+ wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
+ }
+ else if(target_reg>=0) {
+ will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>target_reg)&1)<<r;
+ wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>target_reg)&1)<<r;
+ }
+ }
+ }
+ // Merge in delay slot
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ will_dirty_i &= branch_hr_candirty;
+ //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ will_dirty_i &= hr_candirty;
+ }
+ // Merge in delay slot (won't dirty)
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
+ wont_dirty_i &= ~(1u << 31);
+ if(wr) {
+ #ifndef DESTRUCTIVE_WRITEBACK
+ branch_regs[i].dirty&=wont_dirty_i;
+ #endif
+ branch_regs[i].dirty|=will_dirty_i;
+ }
+ }
+ }
+ }
+ else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
+ {
+ // SYSCALL instruction (software interrupt)
+ will_dirty_i=0;
+ wont_dirty_i=0;
+ }
+ else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
+ {
+ // ERET instruction (return from interrupt)
+ will_dirty_i=0;
+ wont_dirty_i=0;
+ }
+ will_dirty_next=will_dirty_i;
+ wont_dirty_next=wont_dirty_i;
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ will_dirty_i &= hr_candirty;
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
+ wont_dirty_i &= ~(1u << 31);
+ if (i > istart && !dops[i].is_jump) {
+ // Don't store a register immediately after writing it,
+ // may prevent dual-issue.
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt1) & 31);
+ wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt2) & 31);
+ }
+ // Save it
+ will_dirty[i]=will_dirty_i;
+ wont_dirty[i]=wont_dirty_i;
+ // Mark registers that won't be dirtied as not dirty
+ if(wr) {
+ regs[i].dirty|=will_dirty_i;
+ #ifndef DESTRUCTIVE_WRITEBACK
+ regs[i].dirty&=wont_dirty_i;
+ if(dops[i].is_jump)
+ {
+ if (i < iend-1 && !dops[i].is_ujump) {
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
+ regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
+ }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
+ }
+ }
+ }
+ }
+ else
+ {
+ if(i<iend) {
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
+ regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
+ }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
}
}
}
}
+ #endif
+ }
+ // Deal with changed mappings
+ temp_will_dirty=will_dirty_i;
+ temp_wont_dirty=wont_dirty_i;
+ for(r=0;r<HOST_REGS;r++) {
+ if(r!=EXCLUDE_REG) {
+ int nr;
+ if(regs[i].regmap[r]==regmap_pre[i][r]) {
+ if(wr) {
+ #ifndef DESTRUCTIVE_WRITEBACK
+ regs[i].wasdirty&=wont_dirty_i|~(1<<r);
+ #endif
+ regs[i].wasdirty|=will_dirty_i&(1<<r);
+ }
+ }
+ else if(regmap_pre[i][r]>=0&&(nr=get_rreg(rregmap_i,regmap_pre[i][r]))>=0) {
+ // Register moved to a different register
+ will_dirty_i&=~(1<<r);
+ wont_dirty_i&=~(1<<r);
+ will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
+ wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
+ if(wr) {
+ #ifndef DESTRUCTIVE_WRITEBACK
+ regs[i].wasdirty&=wont_dirty_i|~(1<<r);
+ #endif
+ regs[i].wasdirty|=will_dirty_i&(1<<r);
+ }
+ }
+ else {
+ will_dirty_i&=~(1<<r);
+ wont_dirty_i&=~(1<<r);
+ if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
+ will_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
+ wont_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
+ } else {
+ wont_dirty_i|=1<<r;
+ /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
+ }
+ }
+ }
+ }
+ }
+}
+
+static noinline void pass10_expire_blocks(void)
+{
+ u_int step = MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2;
+ // not sizeof(ndrc->translation_cache) due to vita hack
+ u_int step_mask = ((1u << TARGET_SIZE_2) - 1u) & ~(step - 1u);
+ u_int end = (out - ndrc->translation_cache + EXPIRITY_OFFSET) & step_mask;
+ u_int base_shift = __builtin_ctz(MAX_OUTPUT_BLOCK_SIZE);
+ int hit;
+
+ for (; expirep != end; expirep = ((expirep + step) & step_mask))
+ {
+ u_int base_offs = expirep & ~(MAX_OUTPUT_BLOCK_SIZE - 1);
+ u_int block_i = expirep / step & (PAGE_COUNT - 1);
+ u_int phase = (expirep >> (base_shift - 1)) & 1u;
+ if (!(expirep & (MAX_OUTPUT_BLOCK_SIZE / 2 - 1))) {
+ inv_debug("EXP: base_offs %x/%x phase %u\n", base_offs,
+ out - ndrc->translation_cache, phase);
+ }
+
+ if (!phase) {
+ hit = blocks_remove_matching_addrs(&blocks[block_i], base_offs, base_shift);
+ if (hit) {
+ do_clear_cache();
+ #ifdef USE_MINI_HT
+ memset(mini_ht, -1, sizeof(mini_ht));
+ #endif
}
}
+ else
+ unlink_jumps_tc_range(jumps[block_i], base_offs, base_shift);
+ }
+}
+
+// Allocate a block_info record for a freshly translated block and insert it
+// into the blocks[] list for its page, kept sorted by (mirror-unmasked)
+// start address. The 'jump_in_count' entry points live in a trailing
+// flexible array; the caller fills block->jump_in[] afterwards.
+// Returns the malloc'd record.
+// NOTE(review): allocation failure is only caught by assert(), which is a
+// no-op under NDEBUG — confirm this matches the project's OOM policy.
+static struct block_info *new_block_info(u_int start, u_int len,
+ const void *source, const void *copy, u_char *beginning, u_short jump_in_count)
+{
+ struct block_info **b_pptr;
+ struct block_info *block;
+ u_int page = get_page(start);
+
+ block = malloc(sizeof(*block) + jump_in_count * sizeof(block->jump_in[0]));
+ assert(block);
+ assert(jump_in_count > 0);
+ block->source = source;
+ block->copy = copy;
+ block->start = start;
+ block->len = len;
+ block->reg_sv_flags = 0;
+ block->tc_offs = beginning - ndrc->translation_cache; // code offset within the tc
+ //block->tc_len = out - beginning;
+ block->is_dirty = 0;
+ block->inv_near_misses = 0;
+ block->jump_in_cnt = jump_in_count;
+
+ // insert sorted by start mirror-unmasked vaddr
+ for (b_pptr = &blocks[page]; ; b_pptr = &((*b_pptr)->next)) {
+ if (*b_pptr == NULL || (*b_pptr)->start >= start) {
+ block->next = *b_pptr;
+ *b_pptr = block;
+ break;
+ }
+ }
+ stat_inc(stat_blocks);
+ return block;
+}
+
+// Recompile one block of guest MIPS code starting at 'addr' into native code.
+// Runs the numbered passes (disassemble .. expire, listed inline below) and
+// registers the result via new_block_info()/hash_table_add().
+// Returns 0 on success, -1 when 'addr' has no backing source memory.
+// (Fix: restored '&regs[i]'/'&&regs[i]' where HTML-entity mojibake had
+// turned the byte sequence '&reg' into the character '®'.)
+static int new_recompile_block(u_int addr)
+{
+ u_int pagelimit = 0;
+ u_int state_rflags = 0;
+ int i;
+
+ assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
+
+ // this is just for speculation
+ for (i = 1; i < 32; i++) {
+ if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
+ state_rflags |= 1 << i;
+ }
+
+ assert(!(addr & 3));
+ start = addr & ~3;
+ new_dynarec_did_compile=1;
+ if (Config.HLE && start == 0x80001000) // hlecall
+ {
+ // XXX: is this enough? Maybe check hleSoftCall?
+ void *beginning = start_block();
+
+ emit_movimm(start,0);
+ emit_writeword(0,&pcaddr);
+ emit_far_jump(new_dyna_leave);
+ literal_pool(0);
+ end_block(beginning);
+ struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
+ block->jump_in[0].vaddr = start;
+ block->jump_in[0].addr = beginning;
+ return 0;
+ }
+ else if (f1_hack && hack_addr == 0) {
+ void *beginning = start_block();
+ emit_movimm(start, 0);
+ emit_writeword(0, &hack_addr);
+ emit_readword(&psxRegs.GPR.n.sp, 0);
+ emit_readptr(&mem_rtab, 1);
+ emit_shrimm(0, 12, 2);
+ emit_readptr_dualindexedx_ptrlen(1, 2, 1);
+ emit_addimm(0, 0x18, 0);
+ emit_adds_ptr(1, 1, 1);
+ emit_ldr_dualindexed(1, 0, 0);
+ emit_writeword(0, &psxRegs.GPR.r[26]); // lw k0, 0x18(sp)
+ emit_far_call(ndrc_get_addr_ht);
+ emit_jmpreg(0); // jr k0
+ literal_pool(0);
+ end_block(beginning);
+
+ struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
+ block->jump_in[0].vaddr = start;
+ block->jump_in[0].addr = beginning;
+ SysPrintf("F1 hack to %08x\n", start);
+ return 0;
+ }
+
+ cycle_multiplier_active = cycle_multiplier_override && cycle_multiplier == CYCLE_MULT_DEFAULT
+ ? cycle_multiplier_override : cycle_multiplier;
+
+ source = get_source_start(start, &pagelimit);
+ if (source == NULL) {
+ if (addr != hack_addr) {
+ SysPrintf("Compile at bogus memory address: %08x\n", addr);
+ hack_addr = addr;
+ }
+ //abort();
+ return -1;
}
+ /* Pass 1: disassemble */
+ /* Pass 2: register dependencies, branch targets */
+ /* Pass 3: register allocation */
+ /* Pass 4: branch dependencies */
+ /* Pass 5: pre-alloc */
+ /* Pass 6: optimize clean/dirty state */
+ /* Pass 7: flag 32-bit registers */
+ /* Pass 8: assembly */
+ /* Pass 9: linker */
+ /* Pass 10: garbage collection / free memory */
+
+ /* Pass 1 disassembly */
+
+ pass1_disassemble(pagelimit);
+
+ int clear_hack_addr = apply_hacks();
+
+ /* Pass 2 - Register dependencies and branch targets */
+
+ pass2_unneeded_regs(0,slen-1,0);
+
+ /* Pass 3 - Register allocation */
+
+ pass3_register_alloc(addr);
+
+ /* Pass 4 - Cull unused host registers */
+
+ pass4_cull_unused_regs();
+
+ /* Pass 5 - Pre-allocate registers */
+
+ pass5a_preallocate1();
+ pass5b_preallocate2();
+
/* Pass 6 - Optimize clean/dirty state */
- clean_registers(0,slen-1,1);
+ pass6_clean_registers(0, slen-1, 1);
/* Pass 7 - Identify 32-bit registers */
for (i=slen-1;i>=0;i--)
}
}
- if(dops[slen-1].itype==SPAN) {
- dops[slen-1].bt=1; // Mark as a branch target so instruction can restart after exception
- }
-
-#ifdef DISASM
- /* Debug/disassembly */
- for(i=0;i<slen;i++)
- {
- printf("U:");
- int r;
- for(r=1;r<=CCREG;r++) {
- if((unneeded_reg[i]>>r)&1) {
- if(r==HIREG) printf(" HI");
- else if(r==LOREG) printf(" LO");
- else printf(" r%d",r);
- }
- }
- printf("\n");
- #if defined(__i386__) || defined(__x86_64__)
- printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
- #endif
- #ifdef __arm__
- printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
- #endif
- #if defined(__i386__) || defined(__x86_64__)
- printf("needs: ");
- if(needed_reg[i]&1) printf("eax ");
- if((needed_reg[i]>>1)&1) printf("ecx ");
- if((needed_reg[i]>>2)&1) printf("edx ");
- if((needed_reg[i]>>3)&1) printf("ebx ");
- if((needed_reg[i]>>5)&1) printf("ebp ");
- if((needed_reg[i]>>6)&1) printf("esi ");
- if((needed_reg[i]>>7)&1) printf("edi ");
- printf("\n");
- printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
- printf("dirty: ");
- if(regs[i].wasdirty&1) printf("eax ");
- if((regs[i].wasdirty>>1)&1) printf("ecx ");
- if((regs[i].wasdirty>>2)&1) printf("edx ");
- if((regs[i].wasdirty>>3)&1) printf("ebx ");
- if((regs[i].wasdirty>>5)&1) printf("ebp ");
- if((regs[i].wasdirty>>6)&1) printf("esi ");
- if((regs[i].wasdirty>>7)&1) printf("edi ");
- #endif
- #ifdef __arm__
- printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
- printf("dirty: ");
- if(regs[i].wasdirty&1) printf("r0 ");
- if((regs[i].wasdirty>>1)&1) printf("r1 ");
- if((regs[i].wasdirty>>2)&1) printf("r2 ");
- if((regs[i].wasdirty>>3)&1) printf("r3 ");
- if((regs[i].wasdirty>>4)&1) printf("r4 ");
- if((regs[i].wasdirty>>5)&1) printf("r5 ");
- if((regs[i].wasdirty>>6)&1) printf("r6 ");
- if((regs[i].wasdirty>>7)&1) printf("r7 ");
- if((regs[i].wasdirty>>8)&1) printf("r8 ");
- if((regs[i].wasdirty>>9)&1) printf("r9 ");
- if((regs[i].wasdirty>>10)&1) printf("r10 ");
- if((regs[i].wasdirty>>12)&1) printf("r12 ");
- #endif
- printf("\n");
- disassemble_inst(i);
- //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
- #if defined(__i386__) || defined(__x86_64__)
- printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
- if(regs[i].dirty&1) printf("eax ");
- if((regs[i].dirty>>1)&1) printf("ecx ");
- if((regs[i].dirty>>2)&1) printf("edx ");
- if((regs[i].dirty>>3)&1) printf("ebx ");
- if((regs[i].dirty>>5)&1) printf("ebp ");
- if((regs[i].dirty>>6)&1) printf("esi ");
- if((regs[i].dirty>>7)&1) printf("edi ");
- #endif
- #ifdef __arm__
- printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
- if(regs[i].dirty&1) printf("r0 ");
- if((regs[i].dirty>>1)&1) printf("r1 ");
- if((regs[i].dirty>>2)&1) printf("r2 ");
- if((regs[i].dirty>>3)&1) printf("r3 ");
- if((regs[i].dirty>>4)&1) printf("r4 ");
- if((regs[i].dirty>>5)&1) printf("r5 ");
- if((regs[i].dirty>>6)&1) printf("r6 ");
- if((regs[i].dirty>>7)&1) printf("r7 ");
- if((regs[i].dirty>>8)&1) printf("r8 ");
- if((regs[i].dirty>>9)&1) printf("r9 ");
- if((regs[i].dirty>>10)&1) printf("r10 ");
- if((regs[i].dirty>>12)&1) printf("r12 ");
- #endif
- printf("\n");
- if(regs[i].isconst) {
- printf("constants: ");
- #if defined(__i386__) || defined(__x86_64__)
- if(regs[i].isconst&1) printf("eax=%x ",(u_int)constmap[i][0]);
- if((regs[i].isconst>>1)&1) printf("ecx=%x ",(u_int)constmap[i][1]);
- if((regs[i].isconst>>2)&1) printf("edx=%x ",(u_int)constmap[i][2]);
- if((regs[i].isconst>>3)&1) printf("ebx=%x ",(u_int)constmap[i][3]);
- if((regs[i].isconst>>5)&1) printf("ebp=%x ",(u_int)constmap[i][5]);
- if((regs[i].isconst>>6)&1) printf("esi=%x ",(u_int)constmap[i][6]);
- if((regs[i].isconst>>7)&1) printf("edi=%x ",(u_int)constmap[i][7]);
- #endif
- #if defined(__arm__) || defined(__aarch64__)
- int r;
- for (r = 0; r < ARRAY_SIZE(constmap[i]); r++)
- if ((regs[i].isconst >> r) & 1)
- printf(" r%d=%x", r, (u_int)constmap[i][r]);
- #endif
- printf("\n");
- }
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP) {
- #if defined(__i386__) || defined(__x86_64__)
- printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
- if(branch_regs[i].dirty&1) printf("eax ");
- if((branch_regs[i].dirty>>1)&1) printf("ecx ");
- if((branch_regs[i].dirty>>2)&1) printf("edx ");
- if((branch_regs[i].dirty>>3)&1) printf("ebx ");
- if((branch_regs[i].dirty>>5)&1) printf("ebp ");
- if((branch_regs[i].dirty>>6)&1) printf("esi ");
- if((branch_regs[i].dirty>>7)&1) printf("edi ");
- #endif
- #ifdef __arm__
- printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
- if(branch_regs[i].dirty&1) printf("r0 ");
- if((branch_regs[i].dirty>>1)&1) printf("r1 ");
- if((branch_regs[i].dirty>>2)&1) printf("r2 ");
- if((branch_regs[i].dirty>>3)&1) printf("r3 ");
- if((branch_regs[i].dirty>>4)&1) printf("r4 ");
- if((branch_regs[i].dirty>>5)&1) printf("r5 ");
- if((branch_regs[i].dirty>>6)&1) printf("r6 ");
- if((branch_regs[i].dirty>>7)&1) printf("r7 ");
- if((branch_regs[i].dirty>>8)&1) printf("r8 ");
- if((branch_regs[i].dirty>>9)&1) printf("r9 ");
- if((branch_regs[i].dirty>>10)&1) printf("r10 ");
- if((branch_regs[i].dirty>>12)&1) printf("r12 ");
- #endif
- }
- }
-#endif // DISASM
-
/* Pass 8 - Assembly */
linkcount=0;stubcount=0;
- ds=0;is_delayslot=0;
+ is_delayslot=0;
u_int dirty_pre=0;
void *beginning=start_block();
- if((u_int)addr&1) {
- ds=1;
- pagespan_ds();
- }
void *instr_addr0_override = NULL;
+ int ds = 0;
if (start == 0x80030000) {
// nasty hack for the fastbios thing
}
for(i=0;i<slen;i++)
{
+ // NOTE(review): prefetches regs[slen].regmap on the last iteration —
+ // assumed regs[] has slack beyond slen; confirm array bounds.
+ __builtin_prefetch(regs[i+1].regmap);
+ check_regmap(regmap_pre[i]);
+ check_regmap(regs[i].regmap_entry);
+ check_regmap(regs[i].regmap);
//if(ds) printf("ds: ");
disassemble_inst(i);
if(ds) {
} else {
speculate_register_values(i);
#ifndef DESTRUCTIVE_WRITEBACK
- if (i < 2 || !is_ujump(i-2))
+ if (i < 2 || !dops[i-2].is_ujump)
{
wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
}
- if((dops[i].itype==CJUMP||dops[i].itype==SJUMP)&&!dops[i].likely) {
+ if((dops[i].itype==CJUMP||dops[i].itype==SJUMP)) {
dirty_pre=branch_regs[i].dirty;
}else{
dirty_pre=regs[i].dirty;
}
#endif
// write back
- if (i < 2 || !is_ujump(i-2))
+ if (i < 2 || !dops[i-2].is_ujump)
{
wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
loop_preload(regmap_pre[i],regs[i].regmap_entry);
// branch target entry point
instr_addr[i] = out;
assem_debug("<->\n");
- drc_dbg_emit_do_cmp(i);
+ drc_dbg_emit_do_cmp(i, ccadj[i]);
+ if (clear_hack_addr) {
+ emit_movimm(0, 0);
+ emit_writeword(0, &hack_addr);
+ clear_hack_addr = 0;
+ }
// load regs
if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i].rs1,dops[i].rs2);
address_generation(i,&regs[i],regs[i].regmap_entry);
load_consts(regmap_pre[i],regs[i].regmap,i);
- if(dops[i].itype==RJUMP||dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
+ if(dops[i].is_jump)
{
// Load the delay slot registers if necessary
if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2&&(dops[i+1].rs1!=dops[i].rt1||dops[i].rt1==0))
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2&&(dops[i+1].rs2!=dops[i].rt1||dops[i].rt1==0))
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
- if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a)
- load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
+ if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store))
+ load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
+ if (dops[i+1].is_store)
+ load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
}
else if(i+1<slen)
{
load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
}
// TODO: if(is_ooo(i)) address_generation(i+1);
- if(dops[i].itype==CJUMP)
- load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
- if(dops[i].itype==STORE||dops[i].itype==STORELR||(dops[i].opcode&0x3b)==0x39||(dops[i].opcode&0x3b)==0x3a)
- load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
- // assemble
- switch(dops[i].itype) {
- case ALU:
- alu_assemble(i,&regs[i]);break;
- case IMM16:
- imm16_assemble(i,&regs[i]);break;
- case SHIFT:
- shift_assemble(i,&regs[i]);break;
- case SHIFTIMM:
- shiftimm_assemble(i,&regs[i]);break;
- case LOAD:
- load_assemble(i,&regs[i]);break;
- case LOADLR:
- loadlr_assemble(i,&regs[i]);break;
- case STORE:
- store_assemble(i,&regs[i]);break;
- case STORELR:
- storelr_assemble(i,&regs[i]);break;
- case COP0:
- cop0_assemble(i,&regs[i]);break;
- case COP1:
- cop1_assemble(i,&regs[i]);break;
- case C1LS:
- c1ls_assemble(i,&regs[i]);break;
- case COP2:
- cop2_assemble(i,&regs[i]);break;
- case C2LS:
- c2ls_assemble(i,&regs[i]);break;
- case C2OP:
- c2op_assemble(i,&regs[i]);break;
- case MULTDIV:
- multdiv_assemble(i,&regs[i]);
- multdiv_prepare_stall(i,&regs[i]);
- break;
- case MOV:
- mov_assemble(i,&regs[i]);break;
- case SYSCALL:
- syscall_assemble(i,&regs[i]);break;
- case HLECALL:
- hlecall_assemble(i,&regs[i]);break;
- case INTCALL:
- intcall_assemble(i,&regs[i]);break;
- case UJUMP:
- ujump_assemble(i,&regs[i]);ds=1;break;
- case RJUMP:
- rjump_assemble(i,&regs[i]);ds=1;break;
- case CJUMP:
- cjump_assemble(i,&regs[i]);ds=1;break;
- case SJUMP:
- sjump_assemble(i,&regs[i]);ds=1;break;
- case SPAN:
- pagespan_assemble(i,&regs[i]);break;
- }
- if (is_ujump(i))
+ if (!dops[i].is_jump || dops[i].itype == CJUMP)
+ load_reg(regs[i].regmap_entry,regs[i].regmap,CCREG);
+ if (ram_offset && (dops[i].is_load || dops[i].is_store))
+ load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
+ if (dops[i].is_store)
+ load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
+
+ // emit native code for op i; return value is the new delay-slot state
+ ds = assemble(i, &regs[i], ccadj[i]);
+
+ if (dops[i].is_ujump)
literal_pool(1024);
else
literal_pool_jumpover(256);
// If the block did not end with an unconditional branch,
// add a jump to the next instruction.
else if (i > 1) {
- if(!is_ujump(i-2)&&dops[i-1].itype!=SPAN) {
- assert(dops[i-1].itype!=UJUMP&&dops[i-1].itype!=CJUMP&&dops[i-1].itype!=SJUMP&&dops[i-1].itype!=RJUMP);
+ if (!dops[i-2].is_ujump) {
+ assert(!dops[i-1].is_jump);
assert(i==slen);
if(dops[i-2].itype!=CJUMP&&dops[i-2].itype!=SJUMP) {
store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
}
- else if(!dops[i-2].likely)
+ else
{
store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
}
- else
- {
- store_regs_bt(regs[i-2].regmap,regs[i-2].dirty,start+i*4);
- assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
- }
add_to_linker(out,start+i*4,0);
emit_jmp(0);
}
else
{
assert(i>0);
- assert(dops[i-1].itype!=UJUMP&&dops[i-1].itype!=CJUMP&&dops[i-1].itype!=SJUMP&&dops[i-1].itype!=RJUMP);
+ assert(!dops[i-1].is_jump);
store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
emit_loadreg(CCREG,HOST_CCREG);
- emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
+ emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
add_to_linker(out,start+i*4,0);
emit_jmp(0);
}
if (instr_addr0_override)
instr_addr[0] = instr_addr0_override;
+#if 0
+ /* check for improper expiration */
+ for (i = 0; i < ARRAY_SIZE(jumps); i++) {
+ int j;
+ if (!jumps[i])
+ continue;
+ for (j = 0; j < jumps[i]->count; j++)
+ assert(jumps[i]->e[j].stub < beginning || (u_char *)jumps[i]->e[j].stub > out);
+ }
+#endif
+
/* Pass 9 - Linker */
for(i=0;i<linkcount;i++)
{
assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);
literal_pool(64);
- if (!link_addr[i].ext)
+ if (!link_addr[i].internal)
{
void *stub = out;
void *addr = check_addr(link_addr[i].target);
emit_extjump(link_addr[i].addr, link_addr[i].target);
if (addr) {
set_jump_target(link_addr[i].addr, addr);
- add_jump_out(link_addr[i].target,stub);
+ ndrc_add_jump_out(link_addr[i].target,stub);
}
else
set_jump_target(link_addr[i].addr, stub);
copy = shadow;
// External Branch Targets (jump_in)
- for(i=0;i<slen;i++)
+ int jump_in_count = 1;
+ assert(instr_addr[0]);
+ for (i = 1; i < slen; i++)
+ {
+ if (dops[i].bt && instr_addr[i])
+ jump_in_count++;
+ }
+
+ struct block_info *block =
+ new_block_info(start, slen * 4, source, copy, beginning, jump_in_count);
+ block->reg_sv_flags = state_rflags;
+
+ int jump_in_i = 0;
+ for (i = 0; i < slen; i++)
{
- if(dops[i].bt||i==0)
+ if ((i == 0 || dops[i].bt) && instr_addr[i])
{
- if(instr_addr[i]) // TODO - delay slots (=null)
- {
- u_int vaddr=start+i*4;
- u_int page=get_page(vaddr);
- u_int vpage=get_vpage(vaddr);
- literal_pool(256);
- {
- assem_debug("%p (%d) <- %8x\n",instr_addr[i],i,start+i*4);
- assem_debug("jump_in: %x\n",start+i*4);
- ll_add(jump_dirty+vpage,vaddr,out);
- void *entry_point = do_dirty_stub(i, source_len);
- ll_add_flags(jump_in+page,vaddr,state_rflags,entry_point);
- // If there was an existing entry in the hash table,
- // replace it with the new address.
- // Don't add new entries. We'll insert the
- // ones that actually get used in check_addr().
- struct ht_entry *ht_bin = hash_table_get(vaddr);
- if (ht_bin->vaddr[0] == vaddr)
- ht_bin->tcaddr[0] = entry_point;
- if (ht_bin->vaddr[1] == vaddr)
- ht_bin->tcaddr[1] = entry_point;
- }
- }
+ assem_debug("%p (%d) <- %8x\n", instr_addr[i], i, start + i*4);
+ u_int vaddr = start + i*4;
+
+ literal_pool(256);
+ // emit a register-loading trampoline; if load_regs_entry emitted
+ // nothing, the instruction itself is the entry point
+ void *entry = out;
+ load_regs_entry(i);
+ if (entry == out)
+ entry = instr_addr[i];
+ else
+ emit_jmp(instr_addr[i]);
+
+ block->jump_in[jump_in_i].vaddr = vaddr;
+ block->jump_in[jump_in_i].addr = entry;
+ jump_in_i++;
}
}
+ assert(jump_in_i == jump_in_count);
+ hash_table_add(block->jump_in[0].vaddr, block->jump_in[0].addr);
// Write out the literal pool if necessary
literal_pool(0);
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
out = ndrc->translation_cache;
// Trap writes to any of the pages we compiled
- for(i=start>>12;i<=(start+slen*4)>>12;i++) {
- invalid_code[i]=0;
- }
- inv_code_start=inv_code_end=~0;
-
- // for PCSX we need to mark all mirrors too
- if(get_page(start)<(RAM_SIZE>>12))
- for(i=start>>12;i<=(start+slen*4)>>12;i++)
- invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
- invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
- invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
+ mark_invalid_code(start, slen*4, 0);
/* Pass 10 - Free memory by expiring oldest blocks */
- int end=(((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
- while(expirep!=end)
- {
- int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
- uintptr_t base_offs = ((uintptr_t)(expirep >> 13) << shift); // Base offset of this block
- uintptr_t base_offs_s = base_offs >> shift;
- inv_debug("EXP: Phase %d\n",expirep);
- switch((expirep>>11)&3)
- {
- case 0:
- // Clear jump_in and jump_dirty
- ll_remove_matching_addrs(jump_in+(expirep&2047),base_offs_s,shift);
- ll_remove_matching_addrs(jump_dirty+(expirep&2047),base_offs_s,shift);
- ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base_offs_s,shift);
- ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base_offs_s,shift);
- break;
- case 1:
- // Clear pointers
- ll_kill_pointers(jump_out[expirep&2047],base_offs_s,shift);
- ll_kill_pointers(jump_out[(expirep&2047)+2048],base_offs_s,shift);
- break;
- case 2:
- // Clear hash table
- for(i=0;i<32;i++) {
- struct ht_entry *ht_bin = &hash_table[((expirep&2047)<<5)+i];
- uintptr_t o1 = (u_char *)ht_bin->tcaddr[1] - ndrc->translation_cache;
- uintptr_t o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s) {
- inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[1],ht_bin->tcaddr[1]);
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
- }
- o1 = (u_char *)ht_bin->tcaddr[0] - ndrc->translation_cache;
- o2 = o1 - MAX_OUTPUT_BLOCK_SIZE;
- if ((o1 >> shift) == base_offs_s || (o2 >> shift) == base_offs_s) {
- inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[0],ht_bin->tcaddr[0]);
- ht_bin->vaddr[0] = ht_bin->vaddr[1];
- ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
- ht_bin->vaddr[1] = -1;
- ht_bin->tcaddr[1] = NULL;
- }
- }
- break;
- case 3:
- // Clear jump_out
- if((expirep&2047)==0)
- do_clear_cache();
- ll_remove_matching_addrs(jump_out+(expirep&2047),base_offs_s,shift);
- ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base_offs_s,shift);
- break;
- }
- expirep=(expirep+1)&65535;
- }
+ pass10_expire_blocks();
+
+#ifdef ASSEM_PRINT
+ fflush(stdout);
+#endif
+ stat_inc(stat_bc_direct);
return 0;
}