drc: try to make gte stall handling less bloaty
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
index d19fcad..e0cff62 100644 (file)
@@ -35,12 +35,18 @@ static int sceBlock;
 #endif
 
 #include "new_dynarec_config.h"
-#include "../psxhle.h" //emulator interface
-#include "emu_if.h" //emulator interface
+#include "../psxhle.h"
+#include "../psxinterpreter.h"
+#include "../gte.h"
+#include "emu_if.h" // emulator interface
 
+#define noinline __attribute__((noinline,noclone))
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #endif
+#ifndef min
+#define min(a, b) ((b) < (a) ? (b) : (a))
+#endif
 
 //#define DISASM
 //#define assem_debug printf
@@ -57,10 +63,31 @@ static int sceBlock;
 #ifdef __arm__
 #include "assem_arm.h"
 #endif
+#ifdef __aarch64__
+#include "assem_arm64.h"
+#endif
 
+#define RAM_SIZE 0x200000
 #define MAXBLOCK 4096
 #define MAX_OUTPUT_BLOCK_SIZE 262144
 
+struct ndrc_mem
+{
+  u_char translation_cache[1 << TARGET_SIZE_2];
+  struct
+  {
+    struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
+    const void *f[2048 / sizeof(void *)];
+  } tramp;
+};
+
+#ifdef BASE_ADDR_DYNAMIC
+static struct ndrc_mem *ndrc;
+#else
+static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
+static struct ndrc_mem *ndrc = &ndrc_;
+#endif
+
 // stubs
 enum stub_type {
   CC_STUB = 1,
@@ -83,8 +110,6 @@ struct regstat
 {
   signed char regmap_entry[HOST_REGS];
   signed char regmap[HOST_REGS];
-  uint64_t was32;
-  uint64_t is32;
   uint64_t wasdirty;
   uint64_t dirty;
   uint64_t u;
@@ -146,8 +171,6 @@ struct link_entry
   static u_char rs2[MAXBLOCK];
   static u_char rt1[MAXBLOCK];
   static u_char rt2[MAXBLOCK];
-  static u_char us1[MAXBLOCK];
-  static u_char us2[MAXBLOCK];
   static u_char dep1[MAXBLOCK];
   static u_char dep2[MAXBLOCK];
   static u_char lt1[MAXBLOCK];
@@ -166,9 +189,11 @@ struct link_entry
   static char ooo[MAXBLOCK];
   static uint64_t unneeded_reg[MAXBLOCK];
   static uint64_t branch_unneeded_reg[MAXBLOCK];
-  static signed char regmap_pre[MAXBLOCK][HOST_REGS];
-  static uint64_t current_constmap[HOST_REGS];
-  static uint64_t constmap[MAXBLOCK][HOST_REGS];
+  static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
+  // holds the 'real' constants at insn [i], which may differ from what is
+  // actually loaded in the host reg, since the 'final' value is always
+  // loaded there instead; see get_final_value()
+  static uint32_t current_constmap[HOST_REGS];
+  static uint32_t constmap[MAXBLOCK][HOST_REGS];
   static struct regstat regs[MAXBLOCK];
   static struct regstat branch_regs[MAXBLOCK];
   static signed char minimum_free_regs[MAXBLOCK];
@@ -196,14 +221,23 @@ struct link_entry
 #endif
 
   int new_dynarec_hacks;
+  int new_dynarec_hacks_pergame;
   int new_dynarec_did_compile;
+
+  #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
+
+  extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
+  extern int last_count;  // last absolute target, often = next_interupt
+  extern int pcaddr;
+  extern int pending_exception;
+  extern int branch_target;
+  extern uintptr_t mini_ht[32][2];
   extern u_char restore_candidate[512];
-  extern int cycle_count;
 
   /* registers that may be allocated */
   /* 1-31 gpr */
-#define HIREG 32 // hi
-#define LOREG 33 // lo
+#define LOREG 32 // lo
+#define HIREG 33 // hi
 //#define FSREG 34 // FPU status (FCSR)
 #define CSREG 35 // Coprocessor status
 #define CCREG 36 // Cycle count
@@ -243,7 +277,7 @@ struct link_entry
 #define COP0 15   // Coprocessor 0
 #define COP1 16   // Coprocessor 1
 #define C1LS 17   // Coprocessor 1 load/store
-#define FJUMP 18  // Conditional branch (floating point)
+//#define FJUMP 18  // Conditional branch (floating point)
 //#define FLOAT 19  // Floating point unit
 //#define FCONV 20  // Convert integer to float
 //#define FCOMP 21  // Floating point compare (sets FSREG)
@@ -262,8 +296,11 @@ struct link_entry
 #define NOTTAKEN 2
 #define NULLDS 3
 
+#define DJT_1 (void *)1l // no function, just a label in assem_debug log
+#define DJT_2 (void *)2l
+
 // asm linkage
-int new_recompile_block(int addr);
+int new_recompile_block(u_int addr);
 void *get_addr_ht(u_int vaddr);
 void invalidate_block(u_int block);
 void invalidate_addr(u_int addr);
@@ -271,32 +308,38 @@ void remove_hash(int vaddr);
 void dyna_linker();
 void dyna_linker_ds();
 void verify_code();
-void verify_code_vm();
 void verify_code_ds();
 void cc_interrupt();
 void fp_exception();
 void fp_exception_ds();
-void jump_syscall_hle();
-void jump_hlecall();
-void jump_intcall();
+void jump_to_new_pc();
+void call_gteStall();
 void new_dyna_leave();
 
 // Needed by assembler
-static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
-static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
-static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
+static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
+static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
+static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
 static void load_all_regs(signed char i_regmap[]);
 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
 static void load_regs_entry(int t);
-static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
+static void load_all_consts(signed char regmap[],u_int dirty,int i);
+static u_int get_host_reglist(const signed char *regmap);
 
-static int verify_dirty(u_int *ptr);
+static int verify_dirty(const u_int *ptr);
 static int get_final_value(int hr, int i, int *value);
 static void add_stub(enum stub_type type, void *addr, void *retaddr,
   u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
-  int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist);
+  int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
 static void add_to_linker(void *addr, u_int target, int ext);
+static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
+static void *get_direct_memhandler(void *table, u_int addr,
+  enum stub_type type, uintptr_t *addr_host);
+static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
+static void pass_args(int a0, int a1);
+static void emit_far_jump(const void *f);
+static void emit_far_call(const void *f);
 
 static void mprotect_w_x(void *start, void *end, int is_x)
 {
@@ -325,7 +368,7 @@ static void start_tcache_write(void *start, void *end)
 
 static void end_tcache_write(void *start, void *end)
 {
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
   size_t len = (char *)end - (char *)start;
   #if   defined(__BLACKBERRY_QNX__)
   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
@@ -335,6 +378,10 @@ static void end_tcache_write(void *start, void *end)
   sceKernelSyncVMDomain(sceBlock, start, len);
   #elif defined(_3DS)
   ctr_flush_invalidate_cache();
+  #elif defined(__aarch64__)
+  // as of 2021, __clear_cache() is still broken on arm64
+  // so here is a custom one :(
+  clear_cache_arm64(start, end);
   #else
   __clear_cache(start, end);
   #endif
@@ -347,8 +394,8 @@ static void end_tcache_write(void *start, void *end)
 static void *start_block(void)
 {
   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
-  if (end > translation_cache + (1<<TARGET_SIZE_2))
-    end = translation_cache + (1<<TARGET_SIZE_2);
+  if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
+    end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
   start_tcache_write(out, end);
   return out;
 }
@@ -358,16 +405,74 @@ static void end_block(void *start)
   end_tcache_write(start, out);
 }
 
+// also takes care of w^x mappings when patching code
+static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
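+// one bit per 4 KB page of the translation cache, 32 pages (128 KB) per
+// array entry, e.g. an offset of 0x21000 maps to needs_clear_cache[1] bit 1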
+
+static void mark_clear_cache(void *target)
+{
+  uintptr_t offset = (u_char *)target - ndrc->translation_cache;
+  u_int mask = 1u << ((offset >> 12) & 31);
+  if (!(needs_clear_cache[offset >> 17] & mask)) {
+    char *start = (char *)((uintptr_t)target & ~4095l);
+    start_tcache_write(start, start + 4095);
+    needs_clear_cache[offset >> 17] |= mask;
+  }
+}
+
+// Clearing the cache is rather slow on ARM Linux, so mark the areas
+// that need to be cleared, and then only clear these areas once.
+static void do_clear_cache(void)
+{
+  int i, j;
+  for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
+  {
+    u_int bitmap = needs_clear_cache[i];
+    if (!bitmap)
+      continue;
+    for (j = 0; j < 32; j++)
+    {
+      u_char *start, *end;
+      if (!(bitmap & (1<<j)))
+        continue;
+
+      start = ndrc->translation_cache + i*131072 + j*4096;
+      end = start + 4095;
+      for (j++; j < 32; j++) {
+        if (!(bitmap & (1<<j)))
+          break;
+        end += 4096;
+      }
+      end_tcache_write(start, end);
+    }
+    needs_clear_cache[i] = 0;
+  }
+}
+
 //#define DEBUG_CYCLE_COUNT 1
 
 #define NO_CYCLE_PENALTY_THR 12
 
 int cycle_multiplier; // 100 for 1.0
+int cycle_multiplier_override;
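+// the override, when non-zero, takes precedence over cycle_multiplier;
+// CLOCK_ADJUST() below rounds to the nearest cycle, e.g. 2 cycles at a
+// multiplier of 175 become (2*175 + 50)/100 = 4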
 
 static int CLOCK_ADJUST(int x)
 {
+  int m = cycle_multiplier_override
+        ? cycle_multiplier_override : cycle_multiplier;
   int s=(x>>31)|1;
-  return (x * cycle_multiplier + s * 50) / 100;
+  return (x * m + s * 50) / 100;
+}
+
+// is the op an unconditional jump?
+static int is_ujump(int i)
+{
+  return itype[i] == UJUMP || itype[i] == RJUMP
+    || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset
+}
+
+static int is_jump(int i)
+{
+  return itype[i] == RJUMP || itype[i] == UJUMP || itype[i] == CJUMP || itype[i] == SJUMP;
 }
 
 static u_int get_page(u_int vaddr)
@@ -408,7 +513,7 @@ static int doesnt_expire_soon(void *tcaddr)
 
 // Get address from virtual address
 // This is called from the recompiled JR/JALR instructions
-void *get_addr(u_int vaddr)
+void noinline *get_addr(u_int vaddr)
 {
   u_int page=get_page(vaddr);
   u_int vpage=get_vpage(vaddr);
@@ -476,7 +581,7 @@ void clear_all_regs(signed char regmap[])
   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
 }
 
-signed char get_reg(signed char regmap[],int r)
+static signed char get_reg(const signed char regmap[],int r)
 {
   int hr;
   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
@@ -484,7 +589,7 @@ signed char get_reg(signed char regmap[],int r)
 }
 
 // Find a register that is available for two consecutive cycles
-signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
+static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
 {
   int hr;
   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
@@ -515,24 +620,7 @@ void dirty_reg(struct regstat *cur,signed char reg)
   }
 }
 
-// If we dirty the lower half of a 64 bit register which is now being
-// sign-extended, we need to dump the upper half.
-// Note: Do this only after completion of the instruction, because
-// some instructions may need to read the full 64-bit value even if
-// overwriting it (eg SLTI, DSRA32).
-static void flush_dirty_uppers(struct regstat *cur)
-{
-  int hr,reg;
-  for (hr=0;hr<HOST_REGS;hr++) {
-    if((cur->dirty>>hr)&1) {
-      reg=cur->regmap[hr];
-      if(reg>=64)
-        if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
-    }
-  }
-}
-
-void set_const(struct regstat *cur,signed char reg,uint64_t value)
+static void set_const(struct regstat *cur, signed char reg, uint32_t value)
 {
   int hr;
   if(!reg) return;
@@ -541,14 +629,10 @@ void set_const(struct regstat *cur,signed char reg,uint64_t value)
       cur->isconst|=1<<hr;
       current_constmap[hr]=value;
     }
-    else if((cur->regmap[hr]^64)==reg) {
-      cur->isconst|=1<<hr;
-      current_constmap[hr]=value>>32;
-    }
   }
 }
 
-void clear_const(struct regstat *cur,signed char reg)
+static void clear_const(struct regstat *cur, signed char reg)
 {
   int hr;
   if(!reg) return;
@@ -559,7 +643,7 @@ void clear_const(struct regstat *cur,signed char reg)
   }
 }
 
-int is_const(struct regstat *cur,signed char reg)
+static int is_const(struct regstat *cur, signed char reg)
 {
   int hr;
   if(reg<0) return 0;
@@ -571,7 +655,8 @@ int is_const(struct regstat *cur,signed char reg)
   }
   return 0;
 }
-uint64_t get_const(struct regstat *cur,signed char reg)
+
+static uint32_t get_const(struct regstat *cur, signed char reg)
 {
   int hr;
   if(!reg) return 0;
@@ -581,7 +666,7 @@ uint64_t get_const(struct regstat *cur,signed char reg)
     }
   }
   SysPrintf("Unknown constant in r%d\n",reg);
-  exit(1);
+  abort();
 }
 
 // Least soon needed registers
@@ -597,7 +682,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg)
       j=slen-i-1;
       break;
     }
-    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+    if (is_ujump(i+j))
     {
       // Don't go past an unconditional jump
       j++;
@@ -621,7 +706,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg)
       hsn[INVCP]=j;
     }
     #endif
-    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
+    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
     {
       hsn[CCREG]=j;
       b=j;
@@ -645,7 +730,7 @@ void lsn(u_char hsn[], int i, int *preferred_reg)
     // TODO: preferred register based on backward branch
   }
   // Delay slot should preferably not overwrite branch conditions or cycle count
-  if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
+  if (i > 0 && is_jump(i-1)) {
     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
     hsn[CCREG]=1;
@@ -680,7 +765,7 @@ int needed_again(int r, int i)
   int b=-1;
   int rn=10;
 
-  if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
+  if (i > 0 && is_ujump(i-1))
   {
     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
       return 0; // Don't need any registers if exiting the block
@@ -691,7 +776,7 @@ int needed_again(int r, int i)
       j=slen-i-1;
       break;
     }
-    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+    if (is_ujump(i+j))
     {
       // Don't go past an unconditional jump
       j++;
@@ -707,7 +792,7 @@ int needed_again(int r, int i)
     if(rs1[i+j]==r) rn=j;
     if(rs2[i+j]==r) rn=j;
     if((unneeded_reg[i+j]>>r)&1) rn=10;
-    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
+    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
     {
       b=j;
     }
@@ -747,7 +832,7 @@ int loop_reg(int i, int r, int hr)
       j=slen-i-1;
       break;
     }
-    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
+    if (is_ujump(i+j))
     {
       // Don't go past an unconditional jump
       j++;
@@ -756,14 +841,14 @@ int loop_reg(int i, int r, int hr)
   }
   k=0;
   if(i>0){
-    if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
+    if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)
       k--;
   }
   for(;k<j;k++)
   {
     assert(r < 64);
     if((unneeded_reg[i+k]>>r)&1) return hr;
-    if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
+    if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP))
     {
       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
       {
@@ -802,6 +887,66 @@ void alloc_all(struct regstat *cur,int i)
   }
 }
 
+#ifndef NDEBUG
+static int host_tempreg_in_use;
+
+static void host_tempreg_acquire(void)
+{
+  assert(!host_tempreg_in_use);
+  host_tempreg_in_use = 1;
+}
+
+static void host_tempreg_release(void)
+{
+  host_tempreg_in_use = 0;
+}
+#else
+static void host_tempreg_acquire(void) {}
+static void host_tempreg_release(void) {}
+#endif
+
+#ifdef DRC_DBG
+extern void gen_interupt();
+extern void do_insn_cmp();
+#define FUNCNAME(f) { f, " " #f }
+static const struct {
+  void *addr;
+  const char *name;
+} function_names[] = {
+  FUNCNAME(cc_interrupt),
+  FUNCNAME(gen_interupt),
+  FUNCNAME(get_addr_ht),
+  FUNCNAME(get_addr),
+  FUNCNAME(jump_handler_read8),
+  FUNCNAME(jump_handler_read16),
+  FUNCNAME(jump_handler_read32),
+  FUNCNAME(jump_handler_write8),
+  FUNCNAME(jump_handler_write16),
+  FUNCNAME(jump_handler_write32),
+  FUNCNAME(invalidate_addr),
+  FUNCNAME(jump_to_new_pc),
+  FUNCNAME(call_gteStall),
+  FUNCNAME(new_dyna_leave),
+  FUNCNAME(pcsx_mtc0),
+  FUNCNAME(pcsx_mtc0_ds),
+  FUNCNAME(do_insn_cmp),
+#ifdef __arm__
+  FUNCNAME(verify_code),
+#endif
+};
+
+static const char *func_name(const void *a)
+{
+  int i;
+  for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
+    if (function_names[i].addr == a)
+      return function_names[i].name;
+  return "";
+}
+#else
+#define func_name(x) ""
+#endif
+
 #ifdef __i386__
 #include "assem_x86.c"
 #endif
@@ -811,6 +956,51 @@ void alloc_all(struct regstat *cur,int i)
 #ifdef __arm__
 #include "assem_arm.c"
 #endif
+#ifdef __aarch64__
+#include "assem_arm64.c"
+#endif
+
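+// When the target is out of direct branch/call range (can_jump_or_call()
+// fails, e.g. on arm64), route the jump or call through a small trampoline
+// slot placed next to the translation cache.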
+static void *get_trampoline(const void *f)
+{
+  size_t i;
+
+  for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
+    if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
+      break;
+  }
+  if (i == ARRAY_SIZE(ndrc->tramp.f)) {
+    SysPrintf("trampoline table is full, last func %p\n", f);
+    abort();
+  }
+  if (ndrc->tramp.f[i] == NULL) {
+    start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
+    ndrc->tramp.f[i] = f;
+    end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
+  }
+  return &ndrc->tramp.ops[i];
+}
+
+static void emit_far_jump(const void *f)
+{
+  if (can_jump_or_call(f)) {
+    emit_jmp(f);
+    return;
+  }
+
+  f = get_trampoline(f);
+  emit_jmp(f);
+}
+
+static void emit_far_call(const void *f)
+{
+  if (can_jump_or_call(f)) {
+    emit_call(f);
+    return;
+  }
+
+  f = get_trampoline(f);
+  emit_call(f);
+}
 
 // Add virtual address mapping to linked list
 void ll_add(struct ll_entry **head,int vaddr,void *addr)
@@ -939,9 +1129,7 @@ static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
     {
       inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
       void *host_addr=find_extjump_insn(head->addr);
-      #ifdef __arm__
-        mark_clear_cache(host_addr);
-      #endif
+      mark_clear_cache(host_addr);
       set_jump_target(host_addr, head->addr);
     }
     head=head->next;
@@ -949,7 +1137,7 @@ static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
 }
 
 // This is called when we write to a compiled block (see do_invstub)
-void invalidate_page(u_int page)
+static void invalidate_page(u_int page)
 {
   struct ll_entry *head;
   struct ll_entry *next;
@@ -967,9 +1155,7 @@ void invalidate_page(u_int page)
   while(head!=NULL) {
     inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
     void *host_addr=find_extjump_insn(head->addr);
-    #ifdef __arm__
-      mark_clear_cache(host_addr);
-    #endif
+    mark_clear_cache(host_addr);
     set_jump_target(host_addr, head->addr);
     next=head->next;
     free(head);
@@ -992,9 +1178,7 @@ static void invalidate_block_range(u_int block, u_int first, u_int last)
   for(first=page+1;first<last;first++) {
     invalidate_page(first);
   }
-  #ifdef __arm__
-    do_clear_cache();
-  #endif
+  do_clear_cache();
 
   // Don't trap writes
   invalid_code[block]=1;
@@ -1091,7 +1275,7 @@ void invalidate_addr(u_int addr)
 
 // This is called when loading a save state.
 // Anything could have changed, so invalidate everything.
-void invalidate_all_pages()
+void invalidate_all_pages(void)
 {
   u_int page;
   for(page=0;page<4096;page++)
@@ -1104,16 +1288,28 @@ void invalidate_all_pages()
   #ifdef USE_MINI_HT
   memset(mini_ht,-1,sizeof(mini_ht));
   #endif
+  do_clear_cache();
+}
+
+static void do_invstub(int n)
+{
+  literal_pool(20);
+  u_int reglist=stubs[n].a;
+  set_jump_target(stubs[n].addr, out);
+  save_regs(reglist);
+  if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
+  emit_far_call(invalidate_addr);
+  restore_regs(reglist);
+  emit_jmp(stubs[n].retaddr); // return address
 }
 
 // Add an entry to jump_out after making a link
+// src should point to code emitted by emit_extjump2()
 void add_link(u_int vaddr,void *src)
 {
   u_int page=get_page(vaddr);
   inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
-  int *ptr=(int *)(src+4);
-  assert((*ptr&0x0fff0000)==0x059f0000);
-  (void)ptr;
+  check_extjump2(src);
   ll_add(jump_out+page,vaddr,src);
   //void *ptr=get_pointer(src);
   //inv_debug("add_link: Pointer is to %p\n",ptr);
@@ -1168,24 +1364,252 @@ void clean_blocks(u_int page)
   }
 }
 
+/* Register allocation */
 
-void mov_alloc(struct regstat *current,int i)
+// Note: registers are allocated clean (unmodified state)
+// if you intend to modify the register, you must call dirty_reg().
+static void alloc_reg(struct regstat *cur,int i,signed char reg)
 {
-  // Note: Don't need to actually alloc the source registers
-  if((~current->is32>>rs1[i])&1) {
-    //alloc_reg64(current,i,rs1[i]);
-    assert(0);
-  } else {
-    //alloc_reg(current,i,rs1[i]);
-    alloc_reg(current,i,rt1[i]);
-    current->is32|=(1LL<<rt1[i]);
+  int r,hr;
+  int preferred_reg = (reg&7);
+  if(reg==CCREG) preferred_reg=HOST_CCREG;
+  if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
+
+  // Don't allocate unused registers
+  if((cur->u>>reg)&1) return;
+
+  // see if it's already allocated
+  for(hr=0;hr<HOST_REGS;hr++)
+  {
+    if(cur->regmap[hr]==reg) return;
+  }
+
+  // Keep the same mapping if the register was already allocated in a loop
+  preferred_reg = loop_reg(i,reg,preferred_reg);
+
+  // Try to allocate the preferred register
+  if(cur->regmap[preferred_reg]==-1) {
+    cur->regmap[preferred_reg]=reg;
+    cur->dirty&=~(1<<preferred_reg);
+    cur->isconst&=~(1<<preferred_reg);
+    return;
+  }
+  r=cur->regmap[preferred_reg];
+  assert(r < 64);
+  if((cur->u>>r)&1) {
+    cur->regmap[preferred_reg]=reg;
+    cur->dirty&=~(1<<preferred_reg);
+    cur->isconst&=~(1<<preferred_reg);
+    return;
+  }
+
+  // Clear any unneeded registers
+  // We try to keep the mapping consistent, if possible, because it
+  // makes branches easier (especially loops).  So we try to allocate
+  // first (see above) before removing old mappings.  If this is not
+  // possible then go ahead and clear out the registers that are no
+  // longer needed.
+  for(hr=0;hr<HOST_REGS;hr++)
+  {
+    r=cur->regmap[hr];
+    if(r>=0) {
+      assert(r < 64);
+      if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
+    }
+  }
+  // Try to allocate any available register, but prefer
+  // registers that have not been used recently.
+  if(i>0) {
+    for(hr=0;hr<HOST_REGS;hr++) {
+      if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+        if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
+          cur->regmap[hr]=reg;
+          cur->dirty&=~(1<<hr);
+          cur->isconst&=~(1<<hr);
+          return;
+        }
+      }
+    }
+  }
+  // Try to allocate any available register
+  for(hr=0;hr<HOST_REGS;hr++) {
+    if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+      cur->regmap[hr]=reg;
+      cur->dirty&=~(1<<hr);
+      cur->isconst&=~(1<<hr);
+      return;
+    }
+  }
+
+  // Ok, now we have to evict someone
+  // Pick a register we hopefully won't need soon
+  u_char hsn[MAXREG+1];
+  memset(hsn,10,sizeof(hsn));
+  int j;
+  lsn(hsn,i,&preferred_reg);
+  //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
+  //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+  if(i>0) {
+    // Don't evict the cycle count at entry points, otherwise the entry
+    // stub will have to write it.
+    if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
+    if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
+    for(j=10;j>=3;j--)
+    {
+      // Alloc preferred register if available
+      if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          // Evict both parts of a 64-bit register
+          if((cur->regmap[hr]&63)==r) {
+            cur->regmap[hr]=-1;
+            cur->dirty&=~(1<<hr);
+            cur->isconst&=~(1<<hr);
+          }
+        }
+        cur->regmap[preferred_reg]=reg;
+        return;
+      }
+      for(r=1;r<=MAXREG;r++)
+      {
+        if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
+          for(hr=0;hr<HOST_REGS;hr++) {
+            if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+              if(cur->regmap[hr]==r) {
+                cur->regmap[hr]=reg;
+                cur->dirty&=~(1<<hr);
+                cur->isconst&=~(1<<hr);
+                return;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  for(j=10;j>=0;j--)
+  {
+    for(r=1;r<=MAXREG;r++)
+    {
+      if(hsn[r]==j) {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          if(cur->regmap[hr]==r) {
+            cur->regmap[hr]=reg;
+            cur->dirty&=~(1<<hr);
+            cur->isconst&=~(1<<hr);
+            return;
+          }
+        }
+      }
+    }
+  }
+  SysPrintf("This shouldn't happen (alloc_reg)");abort();
+}
+
+// Allocate a temporary register.  This is done without regard to
+// dirty status or whether the register we request is on the unneeded list
+// Note: This will only allocate one register, even if called multiple times
+static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
+{
+  int r,hr;
+  int preferred_reg = -1;
+
+  // see if it's already allocated
+  for(hr=0;hr<HOST_REGS;hr++)
+  {
+    if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
+  }
+
+  // Try to allocate any available register
+  for(hr=HOST_REGS-1;hr>=0;hr--) {
+    if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+      cur->regmap[hr]=reg;
+      cur->dirty&=~(1<<hr);
+      cur->isconst&=~(1<<hr);
+      return;
+    }
+  }
+
+  // Find an unneeded register
+  for(hr=HOST_REGS-1;hr>=0;hr--)
+  {
+    r=cur->regmap[hr];
+    if(r>=0) {
+      assert(r < 64);
+      if((cur->u>>r)&1) {
+        if(i==0||((unneeded_reg[i-1]>>r)&1)) {
+          cur->regmap[hr]=reg;
+          cur->dirty&=~(1<<hr);
+          cur->isconst&=~(1<<hr);
+          return;
+        }
+      }
+    }
+  }
+
+  // Ok, now we have to evict someone
+  // Pick a register we hopefully won't need soon
+  // TODO: we might want to follow unconditional jumps here
+  // TODO: get rid of dupe code and make this into a function
+  u_char hsn[MAXREG+1];
+  memset(hsn,10,sizeof(hsn));
+  int j;
+  lsn(hsn,i,&preferred_reg);
+  //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+  if(i>0) {
+    // Don't evict the cycle count at entry points, otherwise the entry
+    // stub will have to write it.
+    if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
+    if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
+    for(j=10;j>=3;j--)
+    {
+      for(r=1;r<=MAXREG;r++)
+      {
+        if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
+          for(hr=0;hr<HOST_REGS;hr++) {
+            if(hr!=HOST_CCREG||hsn[CCREG]>2) {
+              if(cur->regmap[hr]==r) {
+                cur->regmap[hr]=reg;
+                cur->dirty&=~(1<<hr);
+                cur->isconst&=~(1<<hr);
+                return;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  for(j=10;j>=0;j--)
+  {
+    for(r=1;r<=MAXREG;r++)
+    {
+      if(hsn[r]==j) {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          if(cur->regmap[hr]==r) {
+            cur->regmap[hr]=reg;
+            cur->dirty&=~(1<<hr);
+            cur->isconst&=~(1<<hr);
+            return;
+          }
+        }
+      }
+    }
   }
+  SysPrintf("This shouldn't happen");abort();
+}
+
+static void mov_alloc(struct regstat *current,int i)
+{
+  // Note: Don't need to actually alloc the source registers
+  //alloc_reg(current,i,rs1[i]);
+  alloc_reg(current,i,rt1[i]);
+
   clear_const(current,rs1[i]);
   clear_const(current,rt1[i]);
   dirty_reg(current,rt1[i]);
 }
 
-void shiftimm_alloc(struct regstat *current,int i)
+static void shiftimm_alloc(struct regstat *current,int i)
 {
   if(opcode2[i]<=0x3) // SLL/SRL/SRA
   {
@@ -1193,7 +1617,6 @@ void shiftimm_alloc(struct regstat *current,int i)
       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
       else lt1[i]=rs1[i];
       alloc_reg(current,i,rt1[i]);
-      current->is32|=1LL<<rt1[i];
       dirty_reg(current,rt1[i]);
       if(is_const(current,rs1[i])) {
         int v=get_const(current,rs1[i]);
@@ -1228,7 +1651,7 @@ void shiftimm_alloc(struct regstat *current,int i)
   }
 }
 
-void shift_alloc(struct regstat *current,int i)
+static void shift_alloc(struct regstat *current,int i)
 {
   if(rt1[i]) {
     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
@@ -1240,7 +1663,6 @@ void shift_alloc(struct regstat *current,int i)
         alloc_reg_temp(current,i,-1);
         minimum_free_regs[i]=1;
       }
-      current->is32|=1LL<<rt1[i];
     } else { // DSLLV/DSRLV/DSRAV
       assert(0);
     }
@@ -1251,7 +1673,7 @@ void shift_alloc(struct regstat *current,int i)
   }
 }
 
-void alu_alloc(struct regstat *current,int i)
+static void alu_alloc(struct regstat *current,int i)
 {
   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
     if(rt1[i]) {
@@ -1265,22 +1687,13 @@ void alu_alloc(struct regstat *current,int i)
       }
       alloc_reg(current,i,rt1[i]);
     }
-    current->is32|=1LL<<rt1[i];
   }
   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
     if(rt1[i]) {
-      if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
-      {
-        alloc_reg64(current,i,rs1[i]);
-        alloc_reg64(current,i,rs2[i]);
-        alloc_reg(current,i,rt1[i]);
-      } else {
-        alloc_reg(current,i,rs1[i]);
-        alloc_reg(current,i,rs2[i]);
-        alloc_reg(current,i,rt1[i]);
-      }
+      alloc_reg(current,i,rs1[i]);
+      alloc_reg(current,i,rs2[i]);
+      alloc_reg(current,i,rt1[i]);
     }
-    current->is32|=1LL<<rt1[i];
   }
   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
     if(rt1[i]) {
@@ -1294,15 +1707,6 @@ void alu_alloc(struct regstat *current,int i)
         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
       }
       alloc_reg(current,i,rt1[i]);
-      if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
-      {
-        if(get_reg(current->regmap,rt1[i]|64)>=0) {
-          assert(0);
-        }
-        current->is32&=~(1LL<<rt1[i]);
-      } else {
-        current->is32|=1LL<<rt1[i];
-      }
     }
   }
   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
@@ -1314,7 +1718,7 @@ void alu_alloc(struct regstat *current,int i)
   dirty_reg(current,rt1[i]);
 }
 
-void imm16_alloc(struct regstat *current,int i)
+static void imm16_alloc(struct regstat *current,int i)
 {
   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
   else lt1[i]=rs1[i];
@@ -1323,20 +1727,10 @@ void imm16_alloc(struct regstat *current,int i)
     assert(0);
   }
   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
-    if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
-    current->is32|=1LL<<rt1[i];
     clear_const(current,rs1[i]);
     clear_const(current,rt1[i]);
   }
   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
-    if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
-      if(rs1[i]!=rt1[i]) {
-        if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
-        alloc_reg64(current,i,rt1[i]);
-        current->is32&=~(1LL<<rt1[i]);
-      }
-    }
-    else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
     if(is_const(current,rs1[i])) {
       int v=get_const(current,rs1[i]);
       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
@@ -1351,16 +1745,14 @@ void imm16_alloc(struct regstat *current,int i)
       set_const(current,rt1[i],v+imm[i]);
     }
     else clear_const(current,rt1[i]);
-    current->is32|=1LL<<rt1[i];
   }
   else {
-    set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
-    current->is32|=1LL<<rt1[i];
+    set_const(current,rt1[i],imm[i]<<16); // LUI
   }
   dirty_reg(current,rt1[i]);
 }
 
-void load_alloc(struct regstat *current,int i)
+static void load_alloc(struct regstat *current,int i)
 {
   clear_const(current,rt1[i]);
   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
@@ -1371,18 +1763,12 @@ void load_alloc(struct regstat *current,int i)
     assert(get_reg(current->regmap,rt1[i])>=0);
     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
     {
-      current->is32&=~(1LL<<rt1[i]);
-      alloc_reg64(current,i,rt1[i]);
+      assert(0);
     }
     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
     {
-      current->is32&=~(1LL<<rt1[i]);
-      alloc_reg64(current,i,rt1[i]);
-      alloc_all(current,i);
-      alloc_reg64(current,i,FTEMP);
-      minimum_free_regs[i]=HOST_REGS;
+      assert(0);
     }
-    else current->is32|=1LL<<rt1[i];
     dirty_reg(current,rt1[i]);
     // LWL/LWR need a temporary register for the old value
     if(opcode[i]==0x22||opcode[i]==0x26)
@@ -1404,9 +1790,7 @@ void load_alloc(struct regstat *current,int i)
     minimum_free_regs[i]=1;
     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
     {
-      alloc_all(current,i);
-      alloc_reg64(current,i,FTEMP);
-      minimum_free_regs[i]=HOST_REGS;
+      assert(0);
     }
   }
 }
@@ -1418,8 +1802,7 @@ void store_alloc(struct regstat *current,int i)
   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
   alloc_reg(current,i,rs2[i]);
   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
-    alloc_reg64(current,i,rs2[i]);
-    if(rs2[i]) alloc_reg(current,i,FTEMP);
+    assert(0);
   }
   #if defined(HOST_IMM8)
   // On CPUs without 32-bit immediates we need a pointer to invalid_code
@@ -1441,7 +1824,7 @@ void c1ls_alloc(struct regstat *current,int i)
   alloc_reg(current,i,CSREG); // Status
   alloc_reg(current,i,FTEMP);
   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
-    alloc_reg64(current,i,FTEMP);
+    assert(0);
   }
   #if defined(HOST_IMM8)
   // On CPUs without 32-bit immediates we need a pointer to invalid_code
@@ -1490,8 +1873,6 @@ void multdiv_alloc(struct regstat *current,int i)
       alloc_reg(current,i,LOREG);
       alloc_reg(current,i,rs1[i]);
       alloc_reg(current,i,rs2[i]);
-      current->is32|=1LL<<HIREG;
-      current->is32|=1LL<<LOREG;
       dirty_reg(current,HIREG);
       dirty_reg(current,LOREG);
     }
@@ -1507,8 +1888,6 @@ void multdiv_alloc(struct regstat *current,int i)
     // The result is undefined, we return zero.
     alloc_reg(current,i,HIREG);
     alloc_reg(current,i,LOREG);
-    current->is32|=1LL<<HIREG;
-    current->is32|=1LL<<LOREG;
     dirty_reg(current,HIREG);
     dirty_reg(current,LOREG);
   }
@@ -1523,7 +1902,6 @@ void cop0_alloc(struct regstat *current,int i)
       clear_const(current,rt1[i]);
       alloc_all(current,i);
       alloc_reg(current,i,rt1[i]);
-      current->is32|=1LL<<rt1[i];
       dirty_reg(current,rt1[i]);
     }
   }
@@ -1549,20 +1927,19 @@ void cop0_alloc(struct regstat *current,int i)
   minimum_free_regs[i]=HOST_REGS;
 }
 
-static void cop12_alloc(struct regstat *current,int i)
+static void cop2_alloc(struct regstat *current,int i)
 {
-  alloc_reg(current,i,CSREG); // Load status
-  if(opcode2[i]<3) // MFC1/CFC1
+  if (opcode2[i] < 3) // MFC2/CFC2
   {
+    alloc_cc(current,i); // for stalls
+    dirty_reg(current,CCREG);
     if(rt1[i]){
       clear_const(current,rt1[i]);
       alloc_reg(current,i,rt1[i]);
-      current->is32|=1LL<<rt1[i];
       dirty_reg(current,rt1[i]);
     }
-    alloc_reg_temp(current,i,-1);
   }
-  else if(opcode2[i]>3) // MTC1/CTC1
+  else if (opcode2[i] > 3) // MTC2/CTC2
   {
     if(rs1[i]){
       clear_const(current,rs1[i]);
@@ -1572,13 +1949,15 @@ static void cop12_alloc(struct regstat *current,int i)
       current->u&=~1LL;
       alloc_reg(current,i,0);
     }
-    alloc_reg_temp(current,i,-1);
   }
+  alloc_reg_temp(current,i,-1);
   minimum_free_regs[i]=1;
 }
 
 void c2op_alloc(struct regstat *current,int i)
 {
+  alloc_cc(current,i); // for stalls
+  dirty_reg(current,CCREG);
   alloc_reg_temp(current,i,-1);
 }
 
@@ -1598,11 +1977,10 @@ void delayslot_alloc(struct regstat *current,int i)
     case CJUMP:
     case SJUMP:
     case RJUMP:
-    case FJUMP:
     case SYSCALL:
     case HLECALL:
     case SPAN:
-      assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
+      assem_debug("jump in the delay slot.  this shouldn't happen.\n");//abort();
       SysPrintf("Disabled speculative precompilation\n");
       stop_after_jal=1;
       break;
@@ -1636,8 +2014,9 @@ void delayslot_alloc(struct regstat *current,int i)
       cop0_alloc(current,i);
       break;
     case COP1:
+      break;
     case COP2:
-      cop12_alloc(current,i);
+      cop2_alloc(current,i);
       break;
     case C1LS:
       c1ls_alloc(current,i);
@@ -1678,19 +2057,11 @@ static void pagespan_alloc(struct regstat *current,int i)
   {
     if(rs1[i]) alloc_reg(current,i,rs1[i]);
     if(rs2[i]) alloc_reg(current,i,rs2[i]);
-    if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
-    {
-      assert(0);
-    }
   }
   else
   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
   {
     if(rs1[i]) alloc_reg(current,i,rs1[i]);
-    if(!((current->is32>>rs1[i])&1))
-    {
-      assert(0);
-    }
   }
   //else ...
 }
@@ -1698,7 +2069,7 @@ static void pagespan_alloc(struct regstat *current,int i)
 static void add_stub(enum stub_type type, void *addr, void *retaddr,
   u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
 {
-  assert(a < ARRAY_SIZE(stubs));
+  assert(stubcount < ARRAY_SIZE(stubs));
   stubs[stubcount].type = type;
   stubs[stubcount].addr = addr;
   stubs[stubcount].retaddr = retaddr;
@@ -1711,23 +2082,43 @@ static void add_stub(enum stub_type type, void *addr, void *retaddr,
 }
 
 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
-  int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist)
+  int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
 {
   add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
 }
 
 // Write out a single register
-void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
+static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
 {
   int hr;
   for(hr=0;hr<HOST_REGS;hr++) {
     if(hr!=EXCLUDE_REG) {
       if((regmap[hr]&63)==r) {
         if((dirty>>hr)&1) {
-          if(regmap[hr]<64) {
-            emit_storereg(r,hr);
-          }else{
-            emit_storereg(r|64,hr);
+          assert(regmap[hr]<64);
+          emit_storereg(r,hr);
+        }
+      }
+    }
+  }
+}
+
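+// write back host regs that were dirty before this point but are no longer
+// marked dirty here (and are still needed), so the guest registers in
+// memory stay current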
+static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
+{
+  //if(dirty_pre==dirty) return;
+  int hr,reg;
+  for(hr=0;hr<HOST_REGS;hr++) {
+    if(hr!=EXCLUDE_REG) {
+      reg=pre[hr];
+      if(((~u)>>(reg&63))&1) {
+        if(reg>0) {
+          if(((dirty_pre&~dirty)>>hr)&1) {
+            if(reg>0&&reg<34) {
+              emit_storereg(reg,hr);
+            }
+            else if(reg>=64) {
+              assert(0);
+            }
           }
         }
       }
@@ -1735,16 +2126,24 @@ void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32
   }
 }
 
-void rlist()
+// trashes r2
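+// move the values held in host regs a0/a1 into the first two argument
+// registers (0 and 1), using reg 2 as scratch when a swap is required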
+static void pass_args(int a0, int a1)
 {
-  int i;
-  printf("TRACE: ");
-  for(i=0;i<32;i++)
-    printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
-  printf("\n");
+  if(a0==1&&a1==0) {
+    // must swap
+    emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
+  }
+  else if(a0!=0&&a1==0) {
+    emit_mov(a1,1);
+    if (a0>=0) emit_mov(a0,0);
+  }
+  else {
+    if(a0>=0&&a0!=0) emit_mov(a0,0);
+    if(a1>=0&&a1!=1) emit_mov(a1,1);
+  }
 }
 
-void alu_assemble(int i,struct regstat *i_regs)
+static void alu_assemble(int i,struct regstat *i_regs)
 {
   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
     if(rt1[i]) {
@@ -1782,69 +2181,36 @@ void alu_assemble(int i,struct regstat *i_regs)
   }
   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
     if(rt1[i]) {
-      signed char s1l,s1h,s2l,s2h,t;
-      if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
+      signed char s1l,s2l,t;
       {
         t=get_reg(i_regs->regmap,rt1[i]);
         //assert(t>=0);
         if(t>=0) {
           s1l=get_reg(i_regs->regmap,rs1[i]);
-          s1h=get_reg(i_regs->regmap,rs1[i]|64);
           s2l=get_reg(i_regs->regmap,rs2[i]);
-          s2h=get_reg(i_regs->regmap,rs2[i]|64);
           if(rs2[i]==0) // rx<r0
           {
-            assert(s1h>=0);
-            if(opcode2[i]==0x2a) // SLT
-              emit_shrimm(s1h,31,t);
-            else // SLTU (unsigned can not be less than zero)
+            if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT
+              assert(s1l>=0);
+              emit_shrimm(s1l,31,t);
+            }
+            else // SLTU (unsigned can not be less than zero, 0<0)
               emit_zeroreg(t);
           }
           else if(rs1[i]==0) // r0<rx
           {
-            assert(s2h>=0);
+            assert(s2l>=0);
             if(opcode2[i]==0x2a) // SLT
-              emit_set_gz64_32(s2h,s2l,t);
+              emit_set_gz32(s2l,t);
             else // SLTU (set if not zero)
-              emit_set_nz64_32(s2h,s2l,t);
+              emit_set_nz32(s2l,t);
           }
-          else {
-            assert(s1l>=0);assert(s1h>=0);
-            assert(s2l>=0);assert(s2h>=0);
+          else{
+            assert(s1l>=0);assert(s2l>=0);
             if(opcode2[i]==0x2a) // SLT
-              emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
+              emit_set_if_less32(s1l,s2l,t);
             else // SLTU
-              emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
-          }
-        }
-      } else {
-        t=get_reg(i_regs->regmap,rt1[i]);
-        //assert(t>=0);
-        if(t>=0) {
-          s1l=get_reg(i_regs->regmap,rs1[i]);
-          s2l=get_reg(i_regs->regmap,rs2[i]);
-          if(rs2[i]==0) // rx<r0
-          {
-            assert(s1l>=0);
-            if(opcode2[i]==0x2a) // SLT
-              emit_shrimm(s1l,31,t);
-            else // SLTU (unsigned can not be less than zero)
-              emit_zeroreg(t);
-          }
-          else if(rs1[i]==0) // r0<rx
-          {
-            assert(s2l>=0);
-            if(opcode2[i]==0x2a) // SLT
-              emit_set_gz32(s2l,t);
-            else // SLTU (set if not zero)
-              emit_set_nz32(s2l,t);
-          }
-          else{
-            assert(s1l>=0);assert(s2l>=0);
-            if(opcode2[i]==0x2a) // SLT
-              emit_set_if_less32(s1l,s2l,t);
-            else // SLTU
-              emit_set_if_carry32(s1l,s2l,t);
+              emit_set_if_carry32(s1l,s2l,t);
           }
         }
       }
@@ -1852,101 +2218,9 @@ void alu_assemble(int i,struct regstat *i_regs)
   }
   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
     if(rt1[i]) {
-      signed char s1l,s1h,s2l,s2h,th,tl;
+      signed char s1l,s2l,tl;
       tl=get_reg(i_regs->regmap,rt1[i]);
-      th=get_reg(i_regs->regmap,rt1[i]|64);
-      if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
       {
-        assert(tl>=0);
-        if(tl>=0) {
-          s1l=get_reg(i_regs->regmap,rs1[i]);
-          s1h=get_reg(i_regs->regmap,rs1[i]|64);
-          s2l=get_reg(i_regs->regmap,rs2[i]);
-          s2h=get_reg(i_regs->regmap,rs2[i]|64);
-          if(rs1[i]&&rs2[i]) {
-            assert(s1l>=0);assert(s1h>=0);
-            assert(s2l>=0);assert(s2h>=0);
-            if(opcode2[i]==0x24) { // AND
-              emit_and(s1l,s2l,tl);
-              emit_and(s1h,s2h,th);
-            } else
-            if(opcode2[i]==0x25) { // OR
-              emit_or(s1l,s2l,tl);
-              emit_or(s1h,s2h,th);
-            } else
-            if(opcode2[i]==0x26) { // XOR
-              emit_xor(s1l,s2l,tl);
-              emit_xor(s1h,s2h,th);
-            } else
-            if(opcode2[i]==0x27) { // NOR
-              emit_or(s1l,s2l,tl);
-              emit_or(s1h,s2h,th);
-              emit_not(tl,tl);
-              emit_not(th,th);
-            }
-          }
-          else
-          {
-            if(opcode2[i]==0x24) { // AND
-              emit_zeroreg(tl);
-              emit_zeroreg(th);
-            } else
-            if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
-              if(rs1[i]){
-                if(s1l>=0) emit_mov(s1l,tl);
-                else emit_loadreg(rs1[i],tl);
-                if(s1h>=0) emit_mov(s1h,th);
-                else emit_loadreg(rs1[i]|64,th);
-              }
-              else
-              if(rs2[i]){
-                if(s2l>=0) emit_mov(s2l,tl);
-                else emit_loadreg(rs2[i],tl);
-                if(s2h>=0) emit_mov(s2h,th);
-                else emit_loadreg(rs2[i]|64,th);
-              }
-              else{
-                emit_zeroreg(tl);
-                emit_zeroreg(th);
-              }
-            } else
-            if(opcode2[i]==0x27) { // NOR
-              if(rs1[i]){
-                if(s1l>=0) emit_not(s1l,tl);
-                else{
-                  emit_loadreg(rs1[i],tl);
-                  emit_not(tl,tl);
-                }
-                if(s1h>=0) emit_not(s1h,th);
-                else{
-                  emit_loadreg(rs1[i]|64,th);
-                  emit_not(th,th);
-                }
-              }
-              else
-              if(rs2[i]){
-                if(s2l>=0) emit_not(s2l,tl);
-                else{
-                  emit_loadreg(rs2[i],tl);
-                  emit_not(tl,tl);
-                }
-                if(s2h>=0) emit_not(s2h,th);
-                else{
-                  emit_loadreg(rs2[i]|64,th);
-                  emit_not(th,th);
-                }
-              }
-              else {
-                emit_movimm(-1,tl);
-                emit_movimm(-1,th);
-              }
-            }
-          }
-        }
-      }
-      else
-      {
-        // 32 bit
         if(tl>=0) {
           s1l=get_reg(i_regs->regmap,rs1[i]);
           s2l=get_reg(i_regs->regmap,rs2[i]);
@@ -2053,24 +2327,15 @@ void imm16_assemble(int i,struct regstat *i_regs)
   }
   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
     if(rt1[i]) {
-      signed char sh,sl,th,tl;
-      th=get_reg(i_regs->regmap,rt1[i]|64);
+      signed char sl,tl;
       tl=get_reg(i_regs->regmap,rt1[i]);
-      sh=get_reg(i_regs->regmap,rs1[i]|64);
       sl=get_reg(i_regs->regmap,rs1[i]);
       if(tl>=0) {
         if(rs1[i]) {
-          assert(sh>=0);
           assert(sl>=0);
-          if(th>=0) {
-            emit_addimm64_32(sh,sl,imm[i],th,tl);
-          }
-          else {
-            emit_addimm(sl,imm[i],tl);
-          }
+          emit_addimm(sl,imm[i],tl);
         } else {
           emit_movimm(imm[i],tl);
-          if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
         }
       }
     }
@@ -2078,15 +2343,12 @@ void imm16_assemble(int i,struct regstat *i_regs)
   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
     if(rt1[i]) {
       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
-      signed char sh,sl,t;
+      signed char sl,t;
       t=get_reg(i_regs->regmap,rt1[i]);
-      sh=get_reg(i_regs->regmap,rs1[i]|64);
       sl=get_reg(i_regs->regmap,rs1[i]);
       //assert(t>=0);
       if(t>=0) {
         if(rs1[i]>0) {
-          if(sh<0) assert((i_regs->was32>>rs1[i])&1);
-          if(sh<0||((i_regs->was32>>rs1[i])&1)) {
             if(opcode[i]==0x0a) { // SLTI
               if(sl<0) {
                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
@@ -2103,13 +2365,6 @@ void imm16_assemble(int i,struct regstat *i_regs)
                 emit_sltiu32(sl,imm[i],t);
               }
             }
-          }else{ // 64-bit
-            assert(sl>=0);
-            if(opcode[i]==0x0a) // SLTI
-              emit_slti64_32(sh,sl,imm[i],t);
-            else // SLTIU
-              emit_sltiu64_32(sh,sl,imm[i],t);
-          }
         }else{
           // SLTI(U) with r0 is just stupid,
           // nonetheless examples can be found
@@ -2127,10 +2382,8 @@ void imm16_assemble(int i,struct regstat *i_regs)
   }
   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
     if(rt1[i]) {
-      signed char sh,sl,th,tl;
-      th=get_reg(i_regs->regmap,rt1[i]|64);
+      signed char sl,tl;
       tl=get_reg(i_regs->regmap,rt1[i]);
-      sh=get_reg(i_regs->regmap,rs1[i]|64);
       sl=get_reg(i_regs->regmap,rs1[i]);
       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
         if(opcode[i]==0x0c) //ANDI
@@ -2148,7 +2401,6 @@ void imm16_assemble(int i,struct regstat *i_regs)
           }
           else
             emit_zeroreg(tl);
-          if(th>=0) emit_zeroreg(th);
         }
         else
         {
@@ -2156,13 +2408,6 @@ void imm16_assemble(int i,struct regstat *i_regs)
             if(sl<0) {
               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
             }
-            if(th>=0) {
-              if(sh<0) {
-                emit_loadreg(rs1[i]|64,th);
-              }else{
-                emit_mov(sh,th);
-              }
-            }
             if(opcode[i]==0x0d) { // ORI
               if(sl<0) {
                 emit_orimm(tl,imm[i],tl);
@@ -2186,7 +2431,6 @@ void imm16_assemble(int i,struct regstat *i_regs)
           }
           else {
             emit_movimm(imm[i],tl);
-            if(th>=0) emit_zeroreg(th);
           }
         }
       }
@@ -2252,28 +2496,208 @@ void shiftimm_assemble(int i,struct regstat *i_regs)
 }
 
 #ifndef shift_assemble
-void shift_assemble(int i,struct regstat *i_regs)
+static void shift_assemble(int i,struct regstat *i_regs)
 {
-  printf("Need shift_assemble for this architecture.\n");
-  exit(1);
+  signed char s,t,shift;
+  if (rt1[i] == 0)
+    return;
+  assert(opcode2[i]<=0x07); // SLLV/SRLV/SRAV
+  t = get_reg(i_regs->regmap, rt1[i]);
+  s = get_reg(i_regs->regmap, rs1[i]);
+  shift = get_reg(i_regs->regmap, rs2[i]);
+  if (t < 0)
+    return;
+
+  if(rs1[i]==0)
+    emit_zeroreg(t);
+  else if(rs2[i]==0) {
+    assert(s>=0);
+    if(s!=t) emit_mov(s,t);
+  }
+  else {
+    host_tempreg_acquire();
+    emit_andimm(shift,31,HOST_TEMPREG);
+    switch(opcode2[i]) {
+    case 4: // SLLV
+      emit_shl(s,HOST_TEMPREG,t);
+      break;
+    case 6: // SRLV
+      emit_shr(s,HOST_TEMPREG,t);
+      break;
+    case 7: // SRAV
+      emit_sar(s,HOST_TEMPREG,t);
+      break;
+    default:
+      assert(0);
+    }
+    host_tempreg_release();
+  }
 }
+
 #endif
 
-void load_assemble(int i,struct regstat *i_regs)
+enum {
+  MTYPE_8000 = 0,
+  MTYPE_8020,
+  MTYPE_0000,
+  MTYPE_A000,
+  MTYPE_1F80,
+};
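+// rough classification of PSX address ranges the fast path cares about:
+// RAM via the 8000xxxx/0000xxxx/a000xxxx mirrors, the 80200000+ mirror,
+// and the 1f80xxxx scratchpad; see get_ptr_mem_type() below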
+
+static int get_ptr_mem_type(u_int a)
+{
+  if(a < 0x00200000) {
+    if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
+      // return the 'wrong' type on purpose so the memhandler gets used,
+      // which is needed for the BIOS self-test to pass
+      // 007 does similar stuff from a00 mirror, weird stuff
+      return MTYPE_8000;
+    return MTYPE_0000;
+  }
+  if(0x1f800000 <= a && a < 0x1f801000)
+    return MTYPE_1F80;
+  if(0x80200000 <= a && a < 0x80800000)
+    return MTYPE_8020;
+  if(0xa0000000 <= a && a < 0xa0200000)
+    return MTYPE_A000;
+  return MTYPE_8000;
+}
+
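+// fold known RAM mirrors into the address and emit the "is this RAM?"
+// check; returns the conditional branch to be patched to the slow-path stub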
+static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
+{
+  void *jaddr = NULL;
+  int type=0;
+  int mr=rs1[i];
+  if(((smrv_strong|smrv_weak)>>mr)&1) {
+    type=get_ptr_mem_type(smrv[mr]);
+    //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
+  }
+  else {
+    // use the mirror we are running on
+    type=get_ptr_mem_type(start);
+    //printf("set nospec   @%08x r%d %d\n", start+i*4, mr, type);
+  }
+
+  if(type==MTYPE_8020) { // RAM 80200000+ mirror
+    host_tempreg_acquire();
+    emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
+    addr=*addr_reg_override=HOST_TEMPREG;
+    type=0;
+  }
+  else if(type==MTYPE_0000) { // RAM 0 mirror
+    host_tempreg_acquire();
+    emit_orimm(addr,0x80000000,HOST_TEMPREG);
+    addr=*addr_reg_override=HOST_TEMPREG;
+    type=0;
+  }
+  else if(type==MTYPE_A000) { // RAM A mirror
+    host_tempreg_acquire();
+    emit_andimm(addr,~0x20000000,HOST_TEMPREG);
+    addr=*addr_reg_override=HOST_TEMPREG;
+    type=0;
+  }
+  else if(type==MTYPE_1F80) { // scratchpad
+    if (psxH == (void *)0x1f800000) {
+      host_tempreg_acquire();
+      emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
+      emit_cmpimm(HOST_TEMPREG,0x1000);
+      host_tempreg_release();
+      jaddr=out;
+      emit_jc(0);
+    }
+    else {
+      // do the usual RAM check, jump will go to the right handler
+      type=0;
+    }
+  }
+
+  if(type==0)
+  {
+    emit_cmpimm(addr,RAM_SIZE);
+    jaddr=out;
+    #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
+    // Hint to branch predictor that the branch is unlikely to be taken
+    if(rs1[i]>=28)
+      emit_jno_unlikely(0);
+    else
+    #endif
+      emit_jno(0);
+    if(ram_offset!=0) {
+      host_tempreg_acquire();
+      emit_addimm(addr,ram_offset,HOST_TEMPREG);
+      addr=*addr_reg_override=HOST_TEMPREG;
+    }
+  }
+
+  return jaddr;
+}
+
+// return memhandler, or get a directly accessible address and return NULL
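+// (a table entry with its top bit set means an indirect, per-access-size
+// handler table must be consulted; entries are stored shifted right by 1,
+// hence the << 1 before use)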
+static void *get_direct_memhandler(void *table, u_int addr,
+  enum stub_type type, uintptr_t *addr_host)
 {
-  int s,th,tl,addr;
+  uintptr_t l1, l2 = 0;
+  l1 = ((uintptr_t *)table)[addr>>12];
+  if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
+    uintptr_t v = l1 << 1;
+    *addr_host = v + addr;
+    return NULL;
+  }
+  else {
+    l1 <<= 1;
+    if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
+      l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
+    else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
+      l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
+    else
+      l2=((uintptr_t *)l1)[(addr&0xfff)/4];
+    if ((l2 & (1<<31)) == 0) {
+      uintptr_t v = l2 << 1;
+      *addr_host = v + (addr&0xfff);
+      return NULL;
+    }
+    return (void *)(l2 << 1);
+  }
+}
+
+static u_int get_host_reglist(const signed char *regmap)
+{
+  u_int reglist = 0, hr;
+  for (hr = 0; hr < HOST_REGS; hr++) {
+    if (hr != EXCLUDE_REG && regmap[hr] >= 0)
+      reglist |= 1 << hr;
+  }
+  return reglist;
+}
+
+static u_int reglist_exclude(u_int reglist, int r1, int r2)
+{
+  if (r1 >= 0)
+    reglist &= ~(1u << r1);
+  if (r2 >= 0)
+    reglist &= ~(1u << r2);
+  return reglist;
+}
+
+// find a temp caller-saved register not in reglist (so assumed to be free)
+static int reglist_find_free(u_int reglist)
+{
+  u_int free_regs = ~reglist & CALLER_SAVE_REGS;
+  if (free_regs == 0)
+    return -1;
+  return __builtin_ctz(free_regs);
+}
+
+static void load_assemble(int i, const struct regstat *i_regs)
+{
+  int s,tl,addr;
   int offset;
   void *jaddr=0;
   int memtarget=0,c=0;
-  int fastload_reg_override=0;
-  u_int hr,reglist=0;
-  th=get_reg(i_regs->regmap,rt1[i]|64);
+  int fastio_reg_override=-1;
+  u_int reglist=get_host_reglist(i_regs->regmap);
   tl=get_reg(i_regs->regmap,rt1[i]);
   s=get_reg(i_regs->regmap,rs1[i]);
   offset=imm[i];
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
-  }
   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
   if(s>=0) {
     c=(i_regs->wasconst>>s)&1;
@@ -2300,19 +2724,19 @@ void load_assemble(int i,struct regstat *i_regs)
   //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
   reglist&=~(1<<tl);
-  if(th>=0) reglist&=~(1<<th);
   if(!c) {
     #ifdef R29_HACK
     // Strmnnrmn's speed hack
     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
     #endif
     {
-      jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
+      jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
     }
   }
   else if(ram_offset&&memtarget) {
+    host_tempreg_acquire();
     emit_addimm(addr,ram_offset,HOST_TEMPREG);
-    fastload_reg_override=HOST_TEMPREG;
+    fastio_reg_override=HOST_TEMPREG;
   }
   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
   if (opcode[i]==0x20) { // LB
@@ -2321,7 +2745,7 @@ void load_assemble(int i,struct regstat *i_regs)
         {
           int x=0,a=tl;
           if(!c) a=addr;
-          if(fastload_reg_override) a=fastload_reg_override;
+          if(fastio_reg_override>=0) a=fastio_reg_override;
 
           emit_movsbl_indexed(x,a,tl);
         }
@@ -2337,7 +2761,7 @@ void load_assemble(int i,struct regstat *i_regs)
       if(!dummy) {
         int x=0,a=tl;
         if(!c) a=addr;
-        if(fastload_reg_override) a=fastload_reg_override;
+        if(fastio_reg_override>=0) a=fastio_reg_override;
         emit_movswl_indexed(x,a,tl);
       }
       if(jaddr)
@@ -2350,7 +2774,7 @@ void load_assemble(int i,struct regstat *i_regs)
     if(!c||memtarget) {
       if(!dummy) {
         int a=addr;
-        if(fastload_reg_override) a=fastload_reg_override;
+        if(fastio_reg_override>=0) a=fastio_reg_override;
         emit_readword_indexed(0,a,tl);
       }
       if(jaddr)
@@ -2364,7 +2788,7 @@ void load_assemble(int i,struct regstat *i_regs)
       if(!dummy) {
         int x=0,a=tl;
         if(!c) a=addr;
-        if(fastload_reg_override) a=fastload_reg_override;
+        if(fastio_reg_override>=0) a=fastio_reg_override;
 
         emit_movzbl_indexed(x,a,tl);
       }
@@ -2379,7 +2803,7 @@ void load_assemble(int i,struct regstat *i_regs)
       if(!dummy) {
         int x=0,a=tl;
         if(!c) a=addr;
-        if(fastload_reg_override) a=fastload_reg_override;
+        if(fastio_reg_override>=0) a=fastio_reg_override;
         emit_movzwl_indexed(x,a,tl);
       }
       if(jaddr)
@@ -2389,36 +2813,98 @@ void load_assemble(int i,struct regstat *i_regs)
       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
   }
   if (opcode[i]==0x27) { // LWU
-    assert(th>=0);
-    if(!c||memtarget) {
-      if(!dummy) {
-        int a=addr;
-        if(fastload_reg_override) a=fastload_reg_override;
-        emit_readword_indexed(0,a,tl);
-      }
-      if(jaddr)
-        add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
-    }
-    else {
-      inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
-    }
-    emit_zeroreg(th);
+    assert(0);
   }
   if (opcode[i]==0x37) { // LD
     assert(0);
   }
  }
+ if (fastio_reg_override == HOST_TEMPREG)
+   host_tempreg_release();
 }
 
 #ifndef loadlr_assemble
-void loadlr_assemble(int i,struct regstat *i_regs)
+static void loadlr_assemble(int i, const struct regstat *i_regs)
 {
-  printf("Need loadlr_assemble for this architecture.\n");
-  exit(1);
+  int s,tl,temp,temp2,addr;
+  int offset;
+  void *jaddr=0;
+  int memtarget=0,c=0;
+  int fastio_reg_override=-1;
+  u_int reglist=get_host_reglist(i_regs->regmap);
+  tl=get_reg(i_regs->regmap,rt1[i]);
+  s=get_reg(i_regs->regmap,rs1[i]);
+  temp=get_reg(i_regs->regmap,-1);
+  temp2=get_reg(i_regs->regmap,FTEMP);
+  addr=get_reg(i_regs->regmap,AGEN1+(i&1));
+  assert(addr<0);
+  offset=imm[i];
+  reglist|=1<<temp;
+  if(offset||s<0||c) addr=temp2;
+  else addr=s;
+  if(s>=0) {
+    c=(i_regs->wasconst>>s)&1;
+    if(c) {
+      memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+    }
+  }
+  if(!c) {
+    emit_shlimm(addr,3,temp);
+    if (opcode[i]==0x22||opcode[i]==0x26) {
+      emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
+    }else{
+      emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
+    }
+    jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
+  }
+  else {
+    if(ram_offset&&memtarget) {
+      host_tempreg_acquire();
+      emit_addimm(temp2,ram_offset,HOST_TEMPREG);
+      fastio_reg_override=HOST_TEMPREG;
+    }
+    if (opcode[i]==0x22||opcode[i]==0x26) {
+      emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
+    }else{
+      emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
+    }
+  }
+  if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR
+    if(!c||memtarget) {
+      int a=temp2;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
+      emit_readword_indexed(0,a,temp2);
+      if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
+      if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
+    }
+    else
+      inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
+    if(rt1[i]) {
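+      // merge the loaded word into rt: temp holds (addr&3)*8 (inverted for
+      // LWL), the bytes of rt being replaced are cleared via bic with an
+      // all-ones mask shifted by temp, then the shifted load is OR'd in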
+      assert(tl>=0);
+      emit_andimm(temp,24,temp);
+      if (opcode[i]==0x22) // LWL
+        emit_xorimm(temp,24,temp);
+      host_tempreg_acquire();
+      emit_movimm(-1,HOST_TEMPREG);
+      if (opcode[i]==0x26) {
+        emit_shr(temp2,temp,temp2);
+        emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
+      }else{
+        emit_shl(temp2,temp,temp2);
+        emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
+      }
+      host_tempreg_release();
+      emit_or(temp2,tl,tl);
+    }
+    //emit_storereg(rt1[i],tl); // DEBUG
+  }
+  if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR
+    assert(0);
+  }
 }
 #endif
 
-void store_assemble(int i,struct regstat *i_regs)
+void store_assemble(int i, const struct regstat *i_regs)
 {
   int s,tl;
   int addr,temp;
@@ -2427,8 +2913,8 @@ void store_assemble(int i,struct regstat *i_regs)
   enum stub_type type;
   int memtarget=0,c=0;
   int agr=AGEN1+(i&1);
-  int faststore_reg_override=0;
-  u_int hr,reglist=0;
+  int fastio_reg_override=-1;
+  u_int reglist=get_host_reglist(i_regs->regmap);
   tl=get_reg(i_regs->regmap,rs2[i]);
   s=get_reg(i_regs->regmap,rs1[i]);
   temp=get_reg(i_regs->regmap,agr);
@@ -2442,25 +2928,23 @@ void store_assemble(int i,struct regstat *i_regs)
   }
   assert(tl>=0);
   assert(temp>=0);
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
-  }
   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
   if(offset||s<0||c) addr=temp;
   else addr=s;
   if(!c) {
-    jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
+    jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
   }
   else if(ram_offset&&memtarget) {
+    host_tempreg_acquire();
     emit_addimm(addr,ram_offset,HOST_TEMPREG);
-    faststore_reg_override=HOST_TEMPREG;
+    fastio_reg_override=HOST_TEMPREG;
   }
 
   if (opcode[i]==0x28) { // SB
     if(!c||memtarget) {
       int x=0,a=temp;
       if(!c) a=addr;
-      if(faststore_reg_override) a=faststore_reg_override;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
       emit_writebyte_indexed(tl,x,a);
     }
     type=STOREB_STUB;
@@ -2469,7 +2953,7 @@ void store_assemble(int i,struct regstat *i_regs)
     if(!c||memtarget) {
       int x=0,a=temp;
       if(!c) a=addr;
-      if(faststore_reg_override) a=faststore_reg_override;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
       emit_writehword_indexed(tl,x,a);
     }
     type=STOREH_STUB;
@@ -2477,7 +2961,7 @@ void store_assemble(int i,struct regstat *i_regs)
   if (opcode[i]==0x2B) { // SW
     if(!c||memtarget) {
       int a=addr;
-      if(faststore_reg_override) a=faststore_reg_override;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
       emit_writeword_indexed(tl,0,a);
     }
     type=STOREW_STUB;
@@ -2486,13 +2970,15 @@ void store_assemble(int i,struct regstat *i_regs)
     assert(0);
     type=STORED_STUB;
   }
+  if(fastio_reg_override==HOST_TEMPREG)
+    host_tempreg_release();
   if(jaddr) {
     // PCSX store handlers don't check invcode again
     reglist|=1<<addr;
     add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
     jaddr=0;
   }
-  if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+  if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
     if(!c||memtarget) {
       #ifdef DESTRUCTIVE_SHIFT
       // The x86 shift operation is 'destructive'; it overwrites the
@@ -2523,20 +3009,23 @@ void store_assemble(int i,struct regstat *i_regs)
   }
   // basic current block modification detection..
   // not looking back as that should be in mips cache already
+  // (see Spyro2 title->attract mode)
   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
     assert(i_regs->regmap==regs[i].regmap); // not delay slot
     if(i_regs->regmap==regs[i].regmap) {
-      load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
-      wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
+      load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
+      wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
       emit_movimm(start+i*4+4,0);
       emit_writeword(0,&pcaddr);
-      emit_jmp(do_interrupt);
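+      // exit the block: pcaddr was stored above, so charge two extra cycles,
+      // look the target pc up via get_addr_ht and jump to the code it returns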
+      emit_addimm(HOST_CCREG,2,HOST_CCREG);
+      emit_far_call(get_addr_ht);
+      emit_jmpreg(0);
     }
   }
 }
 
-void storelr_assemble(int i,struct regstat *i_regs)
+static void storelr_assemble(int i, const struct regstat *i_regs)
 {
   int s,tl;
   int temp;
@@ -2546,7 +3035,7 @@ void storelr_assemble(int i,struct regstat *i_regs)
   void *done0, *done1, *done2;
   int memtarget=0,c=0;
   int agr=AGEN1+(i&1);
-  u_int hr,reglist=0;
+  u_int reglist=get_host_reglist(i_regs->regmap);
   tl=get_reg(i_regs->regmap,rs2[i]);
   s=get_reg(i_regs->regmap,rs1[i]);
   temp=get_reg(i_regs->regmap,agr);
@@ -2559,9 +3048,6 @@ void storelr_assemble(int i,struct regstat *i_regs)
     }
   }
   assert(tl>=0);
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
-  }
   assert(temp>=0);
   if(!c) {
     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
@@ -2576,7 +3062,8 @@ void storelr_assemble(int i,struct regstat *i_regs)
       emit_jmp(0);
     }
   }
-  emit_addimm_no_flags(ram_offset,temp);
+  if(ram_offset)
+    emit_addimm_no_flags(ram_offset,temp);
 
   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
     assert(0);
@@ -2593,15 +3080,11 @@ void storelr_assemble(int i,struct regstat *i_regs)
   if (opcode[i]==0x2A) { // SWL
     emit_writeword_indexed(tl,0,temp);
   }
-  if (opcode[i]==0x2E) { // SWR
+  else if (opcode[i]==0x2E) { // SWR
     emit_writebyte_indexed(tl,3,temp);
   }
-  if (opcode[i]==0x2C) { // SDL
-    assert(0);
-  }
-  if (opcode[i]==0x2D) { // SDR
+  else
     assert(0);
-  }
   done0=out;
   emit_jmp(0);
   // 1
@@ -2614,16 +3097,10 @@ void storelr_assemble(int i,struct regstat *i_regs)
     emit_writebyte_indexed(tl,1,temp);
     if(rs2[i]) emit_rorimm(tl,8,tl);
   }
-  if (opcode[i]==0x2E) { // SWR
+  else if (opcode[i]==0x2E) { // SWR
     // Write two lsb into two most significant bytes
     emit_writehword_indexed(tl,1,temp);
   }
-  if (opcode[i]==0x2C) { // SDL
-    assert(0);
-  }
-  if (opcode[i]==0x2D) { // SDR
-    assert(0);
-  }
   done1=out;
   emit_jmp(0);
   // 2
@@ -2637,19 +3114,13 @@ void storelr_assemble(int i,struct regstat *i_regs)
     emit_writehword_indexed(tl,-2,temp);
     if(rs2[i]) emit_rorimm(tl,16,tl);
   }
-  if (opcode[i]==0x2E) { // SWR
+  else if (opcode[i]==0x2E) { // SWR
     // Write 3 lsb into three most significant bytes
     emit_writebyte_indexed(tl,-1,temp);
     if(rs2[i]) emit_rorimm(tl,8,tl);
     emit_writehword_indexed(tl,0,temp);
     if(rs2[i]) emit_rorimm(tl,24,tl);
   }
-  if (opcode[i]==0x2C) { // SDL
-    assert(0);
-  }
-  if (opcode[i]==0x2D) { // SDR
-    assert(0);
-  }
   done2=out;
   emit_jmp(0);
   // 3
@@ -2660,28 +3131,16 @@ void storelr_assemble(int i,struct regstat *i_regs)
     emit_writebyte_indexed(tl,-3,temp);
     if(rs2[i]) emit_rorimm(tl,8,tl);
   }
-  if (opcode[i]==0x2E) { // SWR
+  else if (opcode[i]==0x2E) { // SWR
     // Write entire word
     emit_writeword_indexed(tl,-3,temp);
   }
-  if (opcode[i]==0x2C) { // SDL
-    assert(0);
-  }
-  if (opcode[i]==0x2D) { // SDR
-    assert(0);
-  }
   set_jump_target(done0, out);
   set_jump_target(done1, out);
   set_jump_target(done2, out);
-  if (opcode[i]==0x2C) { // SDL
-    assert(0);
-  }
-  if (opcode[i]==0x2D) { // SDR
-    assert(0);
-  }
   if(!c||!memtarget)
     add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
-  if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+  if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
     emit_addimm_no_flags(-ram_offset,temp);
     #if defined(HOST_IMM8)
     int ir=get_reg(i_regs->regmap,INVCP);
@@ -2700,12 +3159,346 @@ void storelr_assemble(int i,struct regstat *i_regs)
   }
 }
 
-void c1ls_assemble(int i,struct regstat *i_regs)
+static void cop0_assemble(int i,struct regstat *i_regs)
+{
+  if(opcode2[i]==0) // MFC0
+  {
+    signed char t=get_reg(i_regs->regmap,rt1[i]);
+    u_int copr=(source[i]>>11)&0x1f;
+    //assert(t>=0); // Why does this happen?  OOT is weird
+    if(t>=0&&rt1[i]!=0) {
+      emit_readword(&reg_cop0[copr],t);
+    }
+  }
+  else if(opcode2[i]==4) // MTC0
+  {
+    signed char s=get_reg(i_regs->regmap,rs1[i]);
+    char copr=(source[i]>>11)&0x1f;
+    assert(s>=0);
+    wb_register(rs1[i],i_regs->regmap,i_regs->dirty);
+    if(copr==9||copr==11||copr==12||copr==13) {
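+      // Count/Compare/Status/Cause affect timing or interrupts, so bring
+      // psxRegs.Count up to date (cc is relative to last_count) before the write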
+      emit_readword(&last_count,HOST_TEMPREG);
+      emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
+      emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
+      emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+      emit_writeword(HOST_CCREG,&Count);
+    }
+    // What a mess.  The status register (12) can enable interrupts,
+    // so needs a special case to handle a pending interrupt.
+    // The interrupt must be taken immediately, because a subsequent
+    // instruction might disable interrupts again.
+    if(copr==12||copr==13) {
+      if (is_delayslot) {
+        // burn cycles to cause cc_interrupt, which will
+        // reschedule next_interupt. Relies on CCREG from above.
+        assem_debug("MTC0 DS %d\n", copr);
+        emit_writeword(HOST_CCREG,&last_count);
+        emit_movimm(0,HOST_CCREG);
+        emit_storereg(CCREG,HOST_CCREG);
+        emit_loadreg(rs1[i],1);
+        emit_movimm(copr,0);
+        emit_far_call(pcsx_mtc0_ds);
+        emit_loadreg(rs1[i],s);
+        return;
+      }
+      emit_movimm(start+i*4+4,HOST_TEMPREG);
+      emit_writeword(HOST_TEMPREG,&pcaddr);
+      emit_movimm(0,HOST_TEMPREG);
+      emit_writeword(HOST_TEMPREG,&pending_exception);
+    }
+    if(s==HOST_CCREG)
+      emit_loadreg(rs1[i],1);
+    else if(s!=1)
+      emit_mov(s,1);
+    emit_movimm(copr,0);
+    emit_far_call(pcsx_mtc0);
+    if(copr==9||copr==11||copr==12||copr==13) {
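+      // the handler may have changed Count/next_interupt, so rebuild the
+      // relative cycle counter (backing out this insn's ccadj) and last_count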
+      emit_readword(&Count,HOST_CCREG);
+      emit_readword(&next_interupt,HOST_TEMPREG);
+      emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+      emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
+      emit_writeword(HOST_TEMPREG,&last_count);
+      emit_storereg(CCREG,HOST_CCREG);
+    }
+    if(copr==12||copr==13) {
+      assert(!is_delayslot);
+      emit_readword(&pending_exception,14);
+      emit_test(14,14);
+      void *jaddr = out;
+      emit_jeq(0);
+      emit_readword(&pcaddr, 0);
+      emit_addimm(HOST_CCREG,2,HOST_CCREG);
+      emit_far_call(get_addr_ht);
+      emit_jmpreg(0);
+      set_jump_target(jaddr, out);
+    }
+    emit_loadreg(rs1[i],s);
+  }
+  else
+  {
+    assert(opcode2[i]==0x10);
+    //if((source[i]&0x3f)==0x10) // RFE
+    {
+      emit_readword(&Status,0);
+      emit_andimm(0,0x3c,1);
+      emit_andimm(0,~0xf,0);
+      emit_orrshr_imm(1,2,0);
+      emit_writeword(0,&Status);
+    }
+  }
+}
+
+static void cop1_unusable(int i,struct regstat *i_regs)
+{
+  // XXX: should just do the exception instead
+  //if(!cop1_usable)
+  {
+    void *jaddr=out;
+    emit_jmp(0);
+    add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0);
+  }
+}
+
+static void cop1_assemble(int i,struct regstat *i_regs)
 {
   cop1_unusable(i, i_regs);
 }
 
-void c2ls_assemble(int i,struct regstat *i_regs)
+static void c1ls_assemble(int i,struct regstat *i_regs)
+{
+  cop1_unusable(i, i_regs);
+}
+
+// FP_STUB
+static void do_cop1stub(int n)
+{
+  literal_pool(256);
+  assem_debug("do_cop1stub %x\n",start+stubs[n].a*4);
+  set_jump_target(stubs[n].addr, out);
+  int i=stubs[n].a;
+//  int rs=stubs[n].b;
+  struct regstat *i_regs=(struct regstat *)stubs[n].c;
+  int ds=stubs[n].d;
+  if(!ds) {
+    load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
+    //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
+  }
+  //else {printf("fp exception in delay slot\n");}
+  wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
+  if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
+  emit_movimm(start+(i-ds)*4,EAX); // Get PC
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
+  emit_far_jump(ds?fp_exception_ds:fp_exception);
+}
+
+static int cop2_is_stalling_op(int i, int *cycles)
+{
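+  // does insn i synchronize with the GTE?  If so, set *cycles to how long it
+  // keeps the GTE busy afterwards (0 for ops that only wait and read/store)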
+  if (opcode[i] == 0x3a) { // SWC2
+    *cycles = 0;
+    return 1;
+  }
+  if (itype[i] == COP2 && (opcode2[i] == 0 || opcode2[i] == 2)) { // MFC2/CFC2
+    *cycles = 0;
+    return 1;
+  }
+  if (itype[i] == C2OP) {
+    *cycles = gte_cycletab[source[i] & 0x3f];
+    return 1;
+  }
+  // ... what about MTC2/CTC2/LWC2?
+  return 0;
+}
+
+#if 0
+static void log_gte_stall(int stall, u_int cycle)
+{
+  if ((u_int)stall <= 44)
+    printf("x    stall %2d %u\n", stall, cycle + last_count);
+  if (cycle + last_count > 1215348544) exit(1);
+}
+
+static void emit_log_gte_stall(int i, int stall, u_int reglist)
+{
+  save_regs(reglist);
+  if (stall > 0)
+    emit_movimm(stall, 0);
+  else
+    emit_mov(HOST_TEMPREG, 0);
+  emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+  emit_far_call(log_gte_stall);
+  restore_regs(reglist);
+}
+#endif
+
+static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
+{
+  int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
+  int rtmp = reglist_find_free(reglist);
+
+  if (HACK_ENABLED(NDHACK_GTE_NO_STALL))
+    return;
+  //assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
+  if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
+    // happens occasionally... cc evicted? Don't bother then
+    //printf("no cc %08x\n", start + i*4);
+    return;
+  }
+  if (!bt[i]) {
+    for (j = i - 1; j >= 0; j--) {
+      //if (is_ds[j]) break;
+      if (cop2_is_stalling_op(j, &other_gte_op_cycles) || bt[j])
+        break;
+    }
+  }
+  cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]);
+  if (other_gte_op_cycles >= 0)
+    stall = other_gte_op_cycles - cycles_passed;
+  else if (cycles_passed >= 44)
+    stall = 0; // can't stall
+  if (stall == -MAXBLOCK && rtmp >= 0) {
+    // unknown stall, do the expensive runtime check
+    assem_debug("; cop2_call_stall_check\n");
+#if 0 // too slow
+    save_regs(reglist);
+    emit_movimm(gte_cycletab[op], 0);
+    emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
+    emit_far_call(call_gteStall);
+    restore_regs(reglist);
+#else
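+    // gteBusyCycle is kept in the same units as the cycle counter (see the
+    // save code below); if the previous GTE op finishes less than 44 cycles
+    // (the worst-case op length) from now, stall by advancing cc to that
+    // point -- the unsigned compare makes an already-idle GTE fall through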
+    host_tempreg_acquire();
+    emit_readword(&psxRegs.gteBusyCycle, rtmp);
+    emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
+    emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
+    emit_cmpimm(HOST_TEMPREG, 44);
+    emit_cmovb_reg(rtmp, HOST_CCREG);
+    //emit_log_gte_stall(i, 0, reglist);
+    host_tempreg_release();
+#endif
+  }
+  else if (stall > 0) {
+    //emit_log_gte_stall(i, stall, reglist);
+    emit_addimm(HOST_CCREG, stall, HOST_CCREG);
+  }
+
+  // save gteBusyCycle, if needed
+  if (gte_cycletab[op] == 0)
+    return;
+  other_gte_op_cycles = -1;
+  for (j = i + 1; j < slen; j++) {
+    if (cop2_is_stalling_op(j, &other_gte_op_cycles))
+      break;
+    if (is_jump(j)) {
+      // check ds
+      if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
+        j++;
+      break;
+    }
+  }
+  if (other_gte_op_cycles >= 0)
+    // will handle stall when assembling that op
+    return;
+  cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen - 1)] - ccadj[i]);
+  if (cycles_passed >= 44)
+    return;
+  assem_debug("; save gteBusyCycle\n");
+  host_tempreg_acquire();
+#if 0
+  emit_readword(&last_count, HOST_TEMPREG);
+  emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
+  emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG);
+  emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
+  emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
+#else
+  emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG);
+  emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
+#endif
+  host_tempreg_release();
+}
+
+static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
+{
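+  // read a GTE data register, applying the read quirks: some regs are sign-
+  // or zero-extended to 16 bits, SXYP (15) mirrors SXY2, IRGB/ORGB are packed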
+  switch (copr) {
+    case 1:
+    case 3:
+    case 5:
+    case 8:
+    case 9:
+    case 10:
+    case 11:
+      emit_readword(&reg_cop2d[copr],tl);
+      emit_signextend16(tl,tl);
+      emit_writeword(tl,&reg_cop2d[copr]); // hmh
+      break;
+    case 7:
+    case 16:
+    case 17:
+    case 18:
+    case 19:
+      emit_readword(&reg_cop2d[copr],tl);
+      emit_andimm(tl,0xffff,tl);
+      emit_writeword(tl,&reg_cop2d[copr]);
+      break;
+    case 15:
+      emit_readword(&reg_cop2d[14],tl); // SXY2
+      emit_writeword(tl,&reg_cop2d[copr]);
+      break;
+    case 28:
+    case 29:
+      c2op_mfc2_29_assemble(tl,temp);
+      break;
+    default:
+      emit_readword(&reg_cop2d[copr],tl);
+      break;
+  }
+}
+
+static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
+{
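+  // write a GTE data register, applying the write side effects: SXYP (15)
+  // pushes the SXY fifo, IRGB (28) unpacks into IR1-IR3, LZCS (30) also
+  // computes LZCR, and LZCR (31) itself is read-only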
+  switch (copr) {
+    case 15:
+      emit_readword(&reg_cop2d[13],temp);  // SXY1
+      emit_writeword(sl,&reg_cop2d[copr]);
+      emit_writeword(temp,&reg_cop2d[12]); // SXY0
+      emit_readword(&reg_cop2d[14],temp);  // SXY2
+      emit_writeword(sl,&reg_cop2d[14]);
+      emit_writeword(temp,&reg_cop2d[13]); // SXY1
+      break;
+    case 28:
+      emit_andimm(sl,0x001f,temp);
+      emit_shlimm(temp,7,temp);
+      emit_writeword(temp,&reg_cop2d[9]);
+      emit_andimm(sl,0x03e0,temp);
+      emit_shlimm(temp,2,temp);
+      emit_writeword(temp,&reg_cop2d[10]);
+      emit_andimm(sl,0x7c00,temp);
+      emit_shrimm(temp,3,temp);
+      emit_writeword(temp,&reg_cop2d[11]);
+      emit_writeword(sl,&reg_cop2d[28]);
+      break;
+    case 30:
+      emit_xorsar_imm(sl,sl,31,temp);
+#if defined(HAVE_ARMV5) || defined(__aarch64__)
+      emit_clz(temp,temp);
+#else
+      emit_movs(temp,HOST_TEMPREG);
+      emit_movimm(0,temp);
+      emit_jeq((int)out+4*4);
+      emit_addpl_imm(temp,1,temp);
+      emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
+      emit_jns((int)out-2*4);
+#endif
+      emit_writeword(sl,&reg_cop2d[30]);
+      emit_writeword(temp,&reg_cop2d[31]);
+      break;
+    case 31:
+      break;
+    default:
+      emit_writeword(sl,&reg_cop2d[copr]);
+      break;
+  }
+}
+
+static void c2ls_assemble(int i, const struct regstat *i_regs)
 {
   int s,tl;
   int ar;
@@ -2714,8 +3507,8 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   void *jaddr2=NULL;
   enum stub_type type;
   int agr=AGEN1+(i&1);
-  int fastio_reg_override=0;
-  u_int hr,reglist=0;
+  int fastio_reg_override=-1;
+  u_int reglist=get_host_reglist(i_regs->regmap);
   u_int copr=(source[i]>>16)&0x1f;
   s=get_reg(i_regs->regmap,rs1[i]);
   tl=get_reg(i_regs->regmap,FTEMP);
@@ -2723,9 +3516,6 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   assert(rs1[i]>0);
   assert(tl>=0);
 
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
-  }
   if(i_regs->regmap[HOST_CCREG]==CCREG)
     reglist&=~(1<<HOST_CCREG);
 
@@ -2743,7 +3533,8 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   assert(ar>=0);
 
   if (opcode[i]==0x3a) { // SWC2
-    cop2_get_dreg(copr,tl,HOST_TEMPREG);
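+    // pass op 0: SWC2 only waits for the GTE, it does not start new work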
+    cop2_call_stall_check(0, i, i_regs, reglist_exclude(reglist, tl, -1));
+    cop2_get_dreg(copr,tl,-1);
     type=STOREW_STUB;
   }
   else
@@ -2758,12 +3549,13 @@ void c2ls_assemble(int i,struct regstat *i_regs)
       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
     }
     else if(ram_offset&&memtarget) {
+      host_tempreg_acquire();
       emit_addimm(ar,ram_offset,HOST_TEMPREG);
       fastio_reg_override=HOST_TEMPREG;
     }
     if (opcode[i]==0x32) { // LWC2
       int a=ar;
-      if(fastio_reg_override) a=fastio_reg_override;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
       emit_readword_indexed(0,a,tl);
     }
     if (opcode[i]==0x3a) { // SWC2
@@ -2771,14 +3563,16 @@ void c2ls_assemble(int i,struct regstat *i_regs)
       if(!offset&&!c&&s>=0) emit_mov(s,ar);
       #endif
       int a=ar;
-      if(fastio_reg_override) a=fastio_reg_override;
+      if(fastio_reg_override>=0) a=fastio_reg_override;
       emit_writeword_indexed(tl,0,a);
     }
   }
+  if(fastio_reg_override==HOST_TEMPREG)
+    host_tempreg_release();
   if(jaddr2)
     add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
   if(opcode[i]==0x3a) // SWC2
-  if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+  if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
 #if defined(HOST_IMM8)
     int ir=get_reg(i_regs->regmap,INVCP);
     assert(ir>=0);
@@ -2795,80 +3589,305 @@ void c2ls_assemble(int i,struct regstat *i_regs)
     #endif
   }
   if (opcode[i]==0x32) { // LWC2
+    host_tempreg_acquire();
     cop2_put_dreg(copr,tl,HOST_TEMPREG);
+    host_tempreg_release();
+  }
+}
+
+static void cop2_assemble(int i, const struct regstat *i_regs)
+{
+  u_int copr = (source[i]>>11) & 0x1f;
+  signed char temp = get_reg(i_regs->regmap, -1);
+
+  if (opcode2[i] == 0 || opcode2[i] == 2) { // MFC2/CFC2
+    if (!HACK_ENABLED(NDHACK_GTE_NO_STALL)) {
+      signed char tl = get_reg(i_regs->regmap, rt1[i]);
+      u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), tl, temp);
+      cop2_call_stall_check(0, i, i_regs, reglist);
+    }
+  }
+  if (opcode2[i]==0) { // MFC2
+    signed char tl=get_reg(i_regs->regmap,rt1[i]);
+    if(tl>=0&&rt1[i]!=0)
+      cop2_get_dreg(copr,tl,temp);
+  }
+  else if (opcode2[i]==4) { // MTC2
+    signed char sl=get_reg(i_regs->regmap,rs1[i]);
+    cop2_put_dreg(copr,sl,temp);
+  }
+  else if (opcode2[i]==2) // CFC2
+  {
+    signed char tl=get_reg(i_regs->regmap,rt1[i]);
+    if(tl>=0&&rt1[i]!=0)
+      emit_readword(&reg_cop2c[copr],tl);
+  }
+  else if (opcode2[i]==6) // CTC2
+  {
+    signed char sl=get_reg(i_regs->regmap,rs1[i]);
+    switch(copr) {
+      case 4:
+      case 12:
+      case 20:
+      case 26:
+      case 27:
+      case 29:
+      case 30:
+        emit_signextend16(sl,temp);
+        break;
+      case 31:
+        c2op_ctc2_31_assemble(sl,temp);
+        break;
+      default:
+        temp=sl;
+        break;
+    }
+    emit_writeword(temp,&reg_cop2c[copr]);
+    assert(sl>=0);
   }
 }
 
+static void do_unalignedwritestub(int n)
+{
+  assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
+  literal_pool(256);
+  set_jump_target(stubs[n].addr, out);
+
+  int i=stubs[n].a;
+  struct regstat *i_regs=(struct regstat *)stubs[n].c;
+  int addr=stubs[n].b;
+  u_int reglist=stubs[n].e;
+  signed char *i_regmap=i_regs->regmap;
+  int temp2=get_reg(i_regmap,FTEMP);
+  int rt;
+  rt=get_reg(i_regmap,rs2[i]);
+  assert(rt>=0);
+  assert(addr>=0);
+  assert(opcode[i]==0x2a||opcode[i]==0x2e); // SWL/SWR only implemented
+  reglist|=(1<<addr);
+  reglist&=~(1<<temp2);
+
+#if 1
+  // don't bother with it and call write handler
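+  // address/value go in r0/r1 and the current cycle count in r2; the value
+  // the handler returns in r0 is taken as the new count and converted back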
+  save_regs(reglist);
+  pass_args(addr,rt);
+  int cc=get_reg(i_regmap,CCREG);
+  if(cc<0)
+    emit_loadreg(CCREG,2);
+  emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
+  emit_far_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr));
+  emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
+  if(cc<0)
+    emit_storereg(CCREG,2);
+  restore_regs(reglist);
+  emit_jmp(stubs[n].retaddr); // return address
+#else
+  emit_andimm(addr,0xfffffffc,temp2);
+  emit_writeword(temp2,&address);
+
+  save_regs(reglist);
+  emit_shrimm(addr,16,1);
+  int cc=get_reg(i_regmap,CCREG);
+  if(cc<0) {
+    emit_loadreg(CCREG,2);
+  }
+  emit_movimm((u_int)readmem,0);
+  emit_addimm(cc<0?2:cc,2*stubs[n].d+2,2);
+  emit_call((int)&indirect_jump_indexed);
+  restore_regs(reglist);
+
+  emit_readword(&readmem_dword,temp2);
+  int temp=addr; //hmh
+  emit_shlimm(addr,3,temp);
+  emit_andimm(temp,24,temp);
+  if (opcode[i]==0x2a) // SWL
+    emit_xorimm(temp,24,temp);
+  emit_movimm(-1,HOST_TEMPREG);
+  if (opcode[i]==0x2a) { // SWL
+    emit_bic_lsr(temp2,HOST_TEMPREG,temp,temp2);
+    emit_orrshr(rt,temp,temp2);
+  }else{
+    emit_bic_lsl(temp2,HOST_TEMPREG,temp,temp2);
+    emit_orrshl(rt,temp,temp2);
+  }
+  emit_readword(&address,addr);
+  emit_writeword(temp2,&word);
+  //save_regs(reglist); // don't need to, no state changes
+  emit_shrimm(addr,16,1);
+  emit_movimm((u_int)writemem,0);
+  //emit_call((int)&indirect_jump_indexed);
+  emit_mov(15,14);
+  emit_readword_dualindexedx4(0,1,15);
+  emit_readword(&Count,HOST_TEMPREG);
+  emit_readword(&next_interupt,2);
+  emit_addimm(HOST_TEMPREG,-2*stubs[n].d-2,HOST_TEMPREG);
+  emit_writeword(2,&last_count);
+  emit_sub(HOST_TEMPREG,2,cc<0?HOST_TEMPREG:cc);
+  if(cc<0) {
+    emit_storereg(CCREG,HOST_TEMPREG);
+  }
+  restore_regs(reglist);
+  emit_jmp(stubs[n].retaddr); // return address
+#endif
+}
+
 #ifndef multdiv_assemble
 void multdiv_assemble(int i,struct regstat *i_regs)
 {
   printf("Need multdiv_assemble for this architecture.\n");
-  exit(1);
+  abort();
 }
 #endif
 
-void mov_assemble(int i,struct regstat *i_regs)
+static void mov_assemble(int i,struct regstat *i_regs)
 {
   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
   if(rt1[i]) {
-    signed char sh,sl,th,tl;
-    th=get_reg(i_regs->regmap,rt1[i]|64);
+    signed char sl,tl;
     tl=get_reg(i_regs->regmap,rt1[i]);
     //assert(tl>=0);
     if(tl>=0) {
-      sh=get_reg(i_regs->regmap,rs1[i]|64);
       sl=get_reg(i_regs->regmap,rs1[i]);
       if(sl>=0) emit_mov(sl,tl);
       else emit_loadreg(rs1[i],tl);
-      if(th>=0) {
-        if(sh>=0) emit_mov(sh,th);
-        else emit_loadreg(rs1[i]|64,th);
-      }
     }
   }
 }
 
-void syscall_assemble(int i,struct regstat *i_regs)
+// call interpreter, exception handler, things that change pc/regs/cycles ...
+static void call_c_cpu_handler(int i, const struct regstat *i_regs, u_int pc, void *func)
 {
   signed char ccreg=get_reg(i_regs->regmap,CCREG);
   assert(ccreg==HOST_CCREG);
   assert(!is_delayslot);
   (void)ccreg;
-  emit_movimm(start+i*4,EAX); // Get PC
-  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
-  emit_jmp(jump_syscall_hle); // XXX
+
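+  // flush pc and the absolute cycle count into psxRegs, call the C handler,
+  // then continue execution through jump_to_new_pc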
+  emit_movimm(pc,3); // Get PC
+  emit_readword(&last_count,2);
+  emit_writeword(3,&psxRegs.pc);
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
+  emit_add(2,HOST_CCREG,2);
+  emit_writeword(2,&psxRegs.cycle);
+  emit_far_call(func);
+  emit_far_jump(jump_to_new_pc);
 }
 
-void hlecall_assemble(int i,struct regstat *i_regs)
+static void syscall_assemble(int i,struct regstat *i_regs)
 {
-  extern void psxNULL();
-  signed char ccreg=get_reg(i_regs->regmap,CCREG);
-  assert(ccreg==HOST_CCREG);
-  assert(!is_delayslot);
-  (void)ccreg;
-  emit_movimm(start+i*4+4,0); // Get PC
+  emit_movimm(0x20,0); // cause code
+  emit_movimm(0,1);    // not in delay slot
+  call_c_cpu_handler(i,i_regs,start+i*4,psxException);
+}
+
+static void hlecall_assemble(int i,struct regstat *i_regs)
+{
+  void *hlefunc = psxNULL;
   uint32_t hleCode = source[i] & 0x03ffffff;
-  if (hleCode >= ARRAY_SIZE(psxHLEt))
-    emit_movimm((uintptr_t)psxNULL,1);
-  else
-    emit_movimm((uintptr_t)psxHLEt[hleCode],1);
-  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
-  emit_jmp(jump_hlecall);
+  if (hleCode < ARRAY_SIZE(psxHLEt))
+    hlefunc = psxHLEt[hleCode];
+
+  call_c_cpu_handler(i,i_regs,start+i*4+4,hlefunc);
 }
 
-void intcall_assemble(int i,struct regstat *i_regs)
+static void intcall_assemble(int i,struct regstat *i_regs)
 {
-  signed char ccreg=get_reg(i_regs->regmap,CCREG);
-  assert(ccreg==HOST_CCREG);
-  assert(!is_delayslot);
-  (void)ccreg;
-  emit_movimm(start+i*4,0); // Get PC
-  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
-  emit_jmp(jump_intcall);
+  call_c_cpu_handler(i,i_regs,start+i*4,execI);
+}
+
+static void speculate_mov(int rs,int rt)
+{
+  if(rt!=0) {
+    smrv_strong_next|=1<<rt;
+    smrv[rt]=smrv[rs];
+  }
+}
+
+static void speculate_mov_weak(int rs,int rt)
+{
+  if(rt!=0) {
+    smrv_weak_next|=1<<rt;
+    smrv[rt]=smrv[rs];
+  }
 }
 
-void ds_assemble(int i,struct regstat *i_regs)
+static void speculate_register_values(int i)
+{
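+  // keep per-register guesses of the guest values across the block
+  // (strong = backed by a known constant, weak = carried over); the smrv
+  // arrays are presumably consulted when speculating memory access targets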
+  if(i==0) {
+    memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
+    // gp,sp are likely to stay the same throughout the block
+    smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
+    smrv_weak_next=~smrv_strong_next;
+    //printf(" llr %08x\n", smrv[4]);
+  }
+  smrv_strong=smrv_strong_next;
+  smrv_weak=smrv_weak_next;
+  switch(itype[i]) {
+    case ALU:
+      if     ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
+      else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]);
+      else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
+      else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]);
+      else {
+        smrv_strong_next&=~(1<<rt1[i]);
+        smrv_weak_next&=~(1<<rt1[i]);
+      }
+      break;
+    case SHIFTIMM:
+      smrv_strong_next&=~(1<<rt1[i]);
+      smrv_weak_next&=~(1<<rt1[i]);
+      // fallthrough
+    case IMM16:
+      if(rt1[i]&&is_const(&regs[i],rt1[i])) {
+        int value,hr=get_reg(regs[i].regmap,rt1[i]);
+        if(hr>=0) {
+          if(get_final_value(hr,i,&value))
+               smrv[rt1[i]]=value;
+          else smrv[rt1[i]]=constmap[i][hr];
+          smrv_strong_next|=1<<rt1[i];
+        }
+      }
+      else {
+        if     ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
+        else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
+      }
+      break;
+    case LOAD:
+      if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) {
+        // special case for BIOS
+        smrv[rt1[i]]=0xa0000000;
+        smrv_strong_next|=1<<rt1[i];
+        break;
+      }
+      // fallthrough
+    case SHIFT:
+    case LOADLR:
+    case MOV:
+      smrv_strong_next&=~(1<<rt1[i]);
+      smrv_weak_next&=~(1<<rt1[i]);
+      break;
+    case COP0:
+    case COP2:
+      if(opcode2[i]==0||opcode2[i]==2) { // MFC/CFC
+        smrv_strong_next&=~(1<<rt1[i]);
+        smrv_weak_next&=~(1<<rt1[i]);
+      }
+      break;
+    case C2LS:
+      if (opcode[i]==0x32) { // LWC2
+        smrv_strong_next&=~(1<<rt1[i]);
+        smrv_weak_next&=~(1<<rt1[i]);
+      }
+      break;
+  }
+#if 0
+  int r=4;
+  printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
+    ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
+#endif
+}
+
+static void ds_assemble(int i,struct regstat *i_regs)
 {
   speculate_register_values(i);
   is_delayslot=1;
@@ -2913,14 +3932,13 @@ void ds_assemble(int i,struct regstat *i_regs)
     case RJUMP:
     case CJUMP:
     case SJUMP:
-    case FJUMP:
       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
   }
   is_delayslot=0;
 }
 
 // Is the branch target a valid internal jump?
-int internal_branch(uint64_t i_is32,int addr)
+static int internal_branch(int addr)
 {
   if(addr&1) return 0; // Indirect (register) jump
   if(addr>=start && addr<start+slen*4-4)
@@ -2930,7 +3948,7 @@ int internal_branch(uint64_t i_is32,int addr)
   return 0;
 }
 
-static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,uint64_t u)
+static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
 {
   int hr;
   for(hr=0;hr<HOST_REGS;hr++) {
@@ -2966,7 +3984,7 @@ static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,u
 // Load the specified registers
 // This only loads the registers given as arguments because
 // we don't want to load things that will be overwritten
-void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
+static void load_regs(signed char entry[],signed char regmap[],int rs1,int rs2)
 {
   int hr;
   // Load 32-bit regs
@@ -2986,28 +4004,6 @@ void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2
       }
     }
   }
-  //Load 64-bit regs
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
-      if(entry[hr]!=regmap[hr]) {
-        if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
-        {
-          assert(regmap[hr]!=64);
-          if((is32>>(regmap[hr]&63))&1) {
-            int lr=get_reg(regmap,regmap[hr]-64);
-            if(lr>=0)
-              emit_sarimm(lr,31,hr);
-            else
-              emit_loadreg(regmap[hr],hr);
-          }
-          else
-          {
-            emit_loadreg(regmap[hr],hr);
-          }
-        }
-      }
-    }
-  }
 }
 
 // Load registers prior to the start of a loop
@@ -3187,7 +4183,7 @@ static int get_final_value(int hr, int i, int *value)
 }
 
 // Load registers with known constants
-void load_consts(signed char pre[],signed char regmap[],int is32,int i)
+static void load_consts(signed char pre[],signed char regmap[],int i)
 {
   int hr,hr2;
   // propagate loaded constant flags
@@ -3207,7 +4203,8 @@ void load_consts(signed char pre[],signed char regmap[],int is32,int i)
     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
       //if(entry[hr]!=regmap[hr]) {
       if(!((regs[i].loadedconst>>hr)&1)) {
-        if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
+        assert(regmap[hr]<64);
+        if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
           int value,similar=0;
           if(get_final_value(hr,i,&value)) {
             // see if some other register has similar value
@@ -3238,41 +4235,16 @@ void load_consts(signed char pre[],signed char regmap[],int is32,int i)
       }
     }
   }
-  // Load 64-bit regs
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
-      //if(entry[hr]!=regmap[hr]) {
-      if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
-        if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
-          if((is32>>(regmap[hr]&63))&1) {
-            int lr=get_reg(regmap,regmap[hr]-64);
-            assert(lr>=0);
-            emit_sarimm(lr,31,hr);
-          }
-          else
-          {
-            int value;
-            if(get_final_value(hr,i,&value)) {
-              if(value==0) {
-                emit_zeroreg(hr);
-              }
-              else {
-                emit_movimm(value,hr);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
 }
-void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
+
+void load_all_consts(signed char regmap[], u_int dirty, int i)
 {
   int hr;
   // Load 32-bit regs
   for(hr=0;hr<HOST_REGS;hr++) {
     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
-      if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
+      assert(regmap[hr] < 64);
+      if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
         int value=constmap[i][hr];
         if(value==0) {
           emit_zeroreg(hr);
@@ -3283,32 +4255,10 @@ void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
       }
     }
   }
-  // Load 64-bit regs
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
-      if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
-        if((is32>>(regmap[hr]&63))&1) {
-          int lr=get_reg(regmap,regmap[hr]-64);
-          assert(lr>=0);
-          emit_sarimm(lr,31,hr);
-        }
-        else
-        {
-          int value=constmap[i][hr];
-          if(value==0) {
-            emit_zeroreg(hr);
-          }
-          else {
-            emit_movimm(value,hr);
-          }
-        }
-      }
-    }
-  }
 }
 
 // Write out all dirty registers (except cycle count)
-void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
+static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty)
 {
   int hr;
   for(hr=0;hr<HOST_REGS;hr++) {
@@ -3324,9 +4274,10 @@ void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
     }
   }
 }
+
 // Write out dirty registers that we need to reload (pair with load_needed_regs)
 // This writes the registers not written by store_regs_bt
-void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
+void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr)
 {
   int hr;
   int t=(addr-start)>>2;
@@ -3334,7 +4285,7 @@ void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,in
     if(hr!=EXCLUDE_REG) {
       if(i_regmap[hr]>0) {
         if(i_regmap[hr]!=CCREG) {
-          if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32)>>(i_regmap[hr]&63))&1)) {
+          if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
             if((i_dirty>>hr)&1) {
               assert(i_regmap[hr]<64);
               emit_storereg(i_regmap[hr],hr);
@@ -3405,39 +4356,19 @@ void load_regs_entry(int t)
       }
     }
   }
-  // Load 64-bit regs
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
-      assert(regs[t].regmap_entry[hr]!=64);
-      if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
-        int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
-        if(lr<0) {
-          emit_loadreg(regs[t].regmap_entry[hr],hr);
-        }
-        else
-        {
-          emit_sarimm(lr,31,hr);
-        }
-      }
-      else
-      {
-        emit_loadreg(regs[t].regmap_entry[hr],hr);
-      }
-    }
-  }
 }
 
 // Store dirty registers prior to branch
-void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
+void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
 {
-  if(internal_branch(i_is32,addr))
+  if(internal_branch(addr))
   {
     int t=(addr-start)>>2;
     int hr;
     for(hr=0;hr<HOST_REGS;hr++) {
       if(hr!=EXCLUDE_REG) {
         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
-          if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32)>>(i_regmap[hr]&63))&1)) {
+          if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
             if((i_dirty>>hr)&1) {
               assert(i_regmap[hr]<64);
               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
@@ -3451,15 +4382,15 @@ void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int a
   else
   {
     // Branch out of this block, write out all dirty regs
-    wb_dirtys(i_regmap,i_is32,i_dirty);
+    wb_dirtys(i_regmap,i_dirty);
   }
 }
 
 // Load all needed registers for branch target
-void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
+static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
 {
   //if(addr>=start && addr<(start+slen*4))
-  if(internal_branch(i_is32,addr))
+  if(internal_branch(addr))
   {
     int t=(addr-start)>>2;
     int hr;
@@ -3470,51 +4401,24 @@ void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int ad
     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
       emit_storereg(CCREG,HOST_CCREG);
     }
-    // Load 32-bit regs
-    for(hr=0;hr<HOST_REGS;hr++) {
-      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
-        if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
-          if(regs[t].regmap_entry[hr]==0) {
-            emit_zeroreg(hr);
-          }
-          else if(regs[t].regmap_entry[hr]!=CCREG)
-          {
-            emit_loadreg(regs[t].regmap_entry[hr],hr);
-          }
-        }
-      }
-    }
-    //Load 64-bit regs
+    // Load 32-bit regs
     for(hr=0;hr<HOST_REGS;hr++) {
-      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
+      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
-          assert(regs[t].regmap_entry[hr]!=64);
-          if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
-            int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
-            if(lr<0) {
-              emit_loadreg(regs[t].regmap_entry[hr],hr);
-            }
-            else
-            {
-              emit_sarimm(lr,31,hr);
-            }
+          if(regs[t].regmap_entry[hr]==0) {
+            emit_zeroreg(hr);
           }
-          else
+          else if(regs[t].regmap_entry[hr]!=CCREG)
           {
             emit_loadreg(regs[t].regmap_entry[hr],hr);
           }
         }
-        else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
-          int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
-          assert(lr>=0);
-          emit_sarimm(lr,31,hr);
-        }
       }
     }
   }
 }
 
-int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
+static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
 {
   if(addr>=start && addr<start+slen*4-4)
   {
@@ -3563,7 +4467,7 @@ int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
       }
     }
     // Delay slots are not valid branch targets
-    //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
+    //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP)) return 0;
     // Delay slots require additional processing, so do not match
     if(is_ds[t]) return 0;
   }
@@ -3594,26 +4498,41 @@ int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
 static void drc_dbg_emit_do_cmp(int i)
 {
   extern void do_insn_cmp();
-  extern int cycle;
-  u_int hr,reglist=0;
+  //extern int cycle;
+  u_int hr, reglist = get_host_reglist(regs[i].regmap);
 
-  for(hr=0;hr<HOST_REGS;hr++)
-    if(regs[i].regmap[hr]>=0) reglist|=1<<hr;
+  assem_debug("//do_insn_cmp %08x\n", start+i*4);
   save_regs(reglist);
+  // write out changed consts to match the interpreter
+  if (i > 0 && !bt[i]) {
+    for (hr = 0; hr < HOST_REGS; hr++) {
+      int reg = regs[i-1].regmap[hr];
+      if (hr == EXCLUDE_REG || reg < 0)
+        continue;
+      if (!((regs[i-1].isconst >> hr) & 1))
+        continue;
+      if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
+        continue;
+      emit_movimm(constmap[i-1][hr],0);
+      emit_storereg(reg, 0);
+    }
+  }
   emit_movimm(start+i*4,0);
   emit_writeword(0,&pcaddr);
-  emit_call(do_insn_cmp);
+  emit_far_call(do_insn_cmp);
   //emit_readword(&cycle,0);
   //emit_addimm(0,2,0);
   //emit_writeword(0,&cycle);
+  (void)get_reg2;
   restore_regs(reglist);
+  assem_debug("\\\\do_insn_cmp\n");
 }
 #else
 #define drc_dbg_emit_do_cmp(x)
 #endif
 
 // Used when a branch jumps into the delay slot of another branch
-void ds_assemble_entry(int i)
+static void ds_assemble_entry(int i)
 {
   int t=(ba[i]-start)>>2;
   if (!instr_addr[t])
@@ -3622,11 +4541,11 @@ void ds_assemble_entry(int i)
   assem_debug("<->\n");
   drc_dbg_emit_do_cmp(t);
   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
-    wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
-  load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
+    wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
+  load_regs(regs[t].regmap_entry,regs[t].regmap,rs1[t],rs2[t]);
   address_generation(t,&regs[t],regs[t].regmap_entry);
   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
-    load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
+    load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP);
   is_delayslot=0;
   switch(itype[t]) {
     case ALU:
@@ -3669,20 +4588,36 @@ void ds_assemble_entry(int i)
     case RJUMP:
     case CJUMP:
     case SJUMP:
-    case FJUMP:
       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
   }
-  store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
-  load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
-  if(internal_branch(regs[t].is32,ba[i]+4))
+  store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
+  load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
+  if(internal_branch(ba[i]+4))
     assem_debug("branch: internal\n");
   else
     assem_debug("branch: external\n");
-  assert(internal_branch(regs[t].is32,ba[i]+4));
-  add_to_linker(out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
+  assert(internal_branch(ba[i]+4));
+  add_to_linker(out,ba[i]+4,internal_branch(ba[i]+4));
   emit_jmp(0);
 }
 
+static void emit_extjump(void *addr, u_int target)
+{
+  emit_extjump2(addr, target, dyna_linker);
+}
+
+static void emit_extjump_ds(void *addr, u_int target)
+{
+  emit_extjump2(addr, target, dyna_linker_ds);
+}
+
+// Load 2 immediates optimizing for small code size
+static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
+{
+  emit_movimm(imm1,rt1);
+  emit_movimm_from(imm1,rt1,imm2,rt2);
+}
+
 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
 {
   int count;
@@ -3694,7 +4629,7 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
     *adj=0;
   }
   //if(ba[i]>=start && ba[i]<(start+slen*4))
-  if(internal_branch(branch_regs[i].is32,ba[i]))
+  if(internal_branch(ba[i]))
   {
     t=(ba[i]-start)>>2;
     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
@@ -3717,11 +4652,13 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
   else if(*adj==0||invert) {
     int cycles=CLOCK_ADJUST(count+2);
     // faster loop HACK
+#if 0
     if (t&&*adj) {
       int rel=t-i;
       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
     }
+#endif
     emit_addimm_and_set_flags(cycles,HOST_CCREG);
     jaddr=out;
     emit_jns(0);
@@ -3738,19 +4675,19 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
 static void do_ccstub(int n)
 {
   literal_pool(256);
-  assem_debug("do_ccstub %x\n",start+stubs[n].b*4);
+  assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
   set_jump_target(stubs[n].addr, out);
   int i=stubs[n].b;
   if(stubs[n].d==NULLDS) {
     // Delay slot instruction is nullified ("likely" branch)
-    wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
+    wb_dirtys(regs[i].regmap,regs[i].dirty);
   }
   else if(stubs[n].d!=TAKEN) {
-    wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
+    wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
   }
   else {
-    if(internal_branch(branch_regs[i].is32,ba[i]))
-      wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+    if(internal_branch(ba[i]))
+      wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   }
   if(stubs[n].c!=-1)
   {
@@ -3761,36 +4698,31 @@ static void do_ccstub(int n)
   else
   {
     // Return address depends on which way the branch goes
-    if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==CJUMP||itype[i]==SJUMP)
     {
       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
-      int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
-      int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
       if(rs1[i]==0)
       {
-        s1l=s2l;s1h=s2h;
-        s2l=s2h=-1;
+        s1l=s2l;
+        s2l=-1;
       }
       else if(rs2[i]==0)
       {
-        s2l=s2h=-1;
-      }
-      if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
-        s1h=s2h=-1;
+        s2l=-1;
       }
       assert(s1l>=0);
       #ifdef DESTRUCTIVE_WRITEBACK
       if(rs1[i]) {
-        if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
+        if((branch_regs[i].dirty>>s1l)&1)
           emit_loadreg(rs1[i],s1l);
       }
       else {
-        if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
+        if((branch_regs[i].dirty>>s1l)&1)
           emit_loadreg(rs2[i],s1l);
       }
       if(s2l>=0)
-        if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
+        if((branch_regs[i].dirty>>s2l)&1)
           emit_loadreg(rs2[i],s2l);
       #endif
       int hr=0;
@@ -3832,46 +4764,28 @@ static void do_ccstub(int n)
       if((opcode[i]&0x2f)==4) // BEQ
       {
         #ifdef HAVE_CMOV_IMM
-        if(s1h<0) {
-          if(s2l>=0) emit_cmp(s1l,s2l);
-          else emit_test(s1l,s1l);
-          emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
-        }
-        else
+        if(s2l>=0) emit_cmp(s1l,s2l);
+        else emit_test(s1l,s1l);
+        emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
+        #else
+        emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
+        if(s2l>=0) emit_cmp(s1l,s2l);
+        else emit_test(s1l,s1l);
+        emit_cmovne_reg(alt,addr);
         #endif
-        {
-          emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
-          if(s1h>=0) {
-            if(s2h>=0) emit_cmp(s1h,s2h);
-            else emit_test(s1h,s1h);
-            emit_cmovne_reg(alt,addr);
-          }
-          if(s2l>=0) emit_cmp(s1l,s2l);
-          else emit_test(s1l,s1l);
-          emit_cmovne_reg(alt,addr);
-        }
       }
       if((opcode[i]&0x2f)==5) // BNE
       {
         #ifdef HAVE_CMOV_IMM
-        if(s1h<0) {
-          if(s2l>=0) emit_cmp(s1l,s2l);
-          else emit_test(s1l,s1l);
-          emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
-        }
-        else
+        if(s2l>=0) emit_cmp(s1l,s2l);
+        else emit_test(s1l,s1l);
+        emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
+        #else
+        emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
+        if(s2l>=0) emit_cmp(s1l,s2l);
+        else emit_test(s1l,s1l);
+        emit_cmovne_reg(alt,addr);
         #endif
-        {
-          emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
-          if(s1h>=0) {
-            if(s2h>=0) emit_cmp(s1h,s2h);
-            else emit_test(s1h,s1h);
-            emit_cmovne_reg(alt,addr);
-          }
-          if(s2l>=0) emit_cmp(s1l,s2l);
-          else emit_test(s1l,s1l);
-          emit_cmovne_reg(alt,addr);
-        }
       }
       if((opcode[i]&0x2f)==6) // BLEZ
       {
@@ -3879,13 +4793,7 @@ static void do_ccstub(int n)
         //emit_movimm(start+i*4+8,addr);
         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
         emit_cmpimm(s1l,1);
-        if(s1h>=0) emit_mov(addr,ntaddr);
         emit_cmovl_reg(alt,addr);
-        if(s1h>=0) {
-          emit_test(s1h,s1h);
-          emit_cmovne_reg(ntaddr,addr);
-          emit_cmovs_reg(alt,addr);
-        }
       }
       if((opcode[i]&0x2f)==7) // BGTZ
       {
@@ -3893,21 +4801,14 @@ static void do_ccstub(int n)
         //emit_movimm(start+i*4+8,ntaddr);
         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
         emit_cmpimm(s1l,1);
-        if(s1h>=0) emit_mov(addr,alt);
         emit_cmovl_reg(ntaddr,addr);
-        if(s1h>=0) {
-          emit_test(s1h,s1h);
-          emit_cmovne_reg(alt,addr);
-          emit_cmovs_reg(ntaddr,addr);
-        }
       }
       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
       {
         //emit_movimm(ba[i],alt);
         //emit_movimm(start+i*4+8,addr);
         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
-        if(s1h>=0) emit_test(s1h,s1h);
-        else emit_test(s1l,s1l);
+        emit_test(s1l,s1l);
         emit_cmovs_reg(alt,addr);
       }
       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
@@ -3915,8 +4816,7 @@ static void do_ccstub(int n)
         //emit_movimm(ba[i],addr);
         //emit_movimm(start+i*4+8,alt);
         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
-        if(s1h>=0) emit_test(s1h,s1h);
-        else emit_test(s1l,s1l);
+        emit_test(s1l,s1l);
         emit_cmovs_reg(alt,addr);
       }
       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
@@ -3948,15 +4848,15 @@ static void do_ccstub(int n)
       }
       emit_writeword(r,&pcaddr);
     }
-    else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
+    else {SysPrintf("Unknown branch type in do_ccstub\n");abort();}
   }
   // Update cycle count
   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
   if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
-  emit_call(cc_interrupt);
+  emit_far_call(cc_interrupt);
   if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
   if(stubs[n].d==TAKEN) {
-    if(internal_branch(branch_regs[i].is32,ba[i]))
+    if(internal_branch(ba[i]))
       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
     else if(itype[i]==RJUMP) {
       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
@@ -3974,7 +4874,10 @@ static void do_ccstub(int n)
   }else{
     load_all_regs(branch_regs[i].regmap);
   }
-  emit_jmp(stubs[n].retaddr);
+  if (stubs[n].retaddr)
+    emit_jmp(stubs[n].retaddr);
+  else
+    do_jump_vaddr(stubs[n].e);
 }
 
 static void add_to_linker(void *addr, u_int target, int ext)
@@ -3996,7 +4899,7 @@ static void ujump_assemble_write_ra(int i)
   return_address=start+i*4+8;
   if(rt>=0) {
     #ifdef USE_MINI_HT
-    if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
+    if(internal_branch(return_address)&&rt1[i+1]!=31) {
       int temp=-1; // note: must be ds-safe
       #ifdef HOST_TEMPREG
       temp=HOST_TEMPREG;
@@ -4021,7 +4924,7 @@ static void ujump_assemble_write_ra(int i)
   }
 }
 
-void ujump_assemble(int i,struct regstat *i_regs)
+static void ujump_assemble(int i,struct regstat *i_regs)
 {
   int ra_done=0;
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
@@ -4043,29 +4946,29 @@ void ujump_assemble(int i,struct regstat *i_regs)
   ds_assemble(i+1,i_regs);
   uint64_t bc_unneeded=branch_regs[i].u;
   bc_unneeded|=1|(1LL<<rt1[i]);
-  wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,bc_unneeded);
-  load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+  wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
+  load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
   if(!ra_done&&rt1[i]==31)
     ujump_assemble_write_ra(i);
   int cc,adj;
   cc=get_reg(branch_regs[i].regmap,CCREG);
   assert(cc==HOST_CCREG);
-  store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+  store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   #ifdef REG_PREFETCH
   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
   #endif
   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-  load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
-  if(internal_branch(branch_regs[i].is32,ba[i]))
+  load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
+  if(internal_branch(ba[i]))
     assem_debug("branch: internal\n");
   else
     assem_debug("branch: external\n");
-  if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
+  if(internal_branch(ba[i])&&is_ds[(ba[i]-start)>>2]) {
     ds_assemble_entry(i);
   }
   else {
-    add_to_linker(out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
+    add_to_linker(out,ba[i],internal_branch(ba[i]));
     emit_jmp(0);
   }
 }
@@ -4091,7 +4994,7 @@ static void rjump_assemble_write_ra(int i)
   #endif
 }
 
-void rjump_assemble(int i,struct regstat *i_regs)
+static void rjump_assemble(int i,struct regstat *i_regs)
 {
   int temp;
   int rs,cc;
@@ -4131,8 +5034,8 @@ void rjump_assemble(int i,struct regstat *i_regs)
   uint64_t bc_unneeded=branch_regs[i].u;
   bc_unneeded|=1|(1LL<<rt1[i]);
   bc_unneeded&=~(1LL<<rs1[i]);
-  wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,bc_unneeded);
-  load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
+  wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
+  load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],CCREG);
   if(!ra_done&&rt1[i]!=0)
     rjump_assemble_write_ra(i);
   cc=get_reg(branch_regs[i].regmap,CCREG);
@@ -4147,9 +5050,9 @@ void rjump_assemble(int i,struct regstat *i_regs)
     do_rhash(rs,rh);
   }
   #endif
-  store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
+  store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
   #ifdef DESTRUCTIVE_WRITEBACK
-  if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
+  if((branch_regs[i].dirty>>rs)&1) {
     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
       emit_loadreg(rs1[i],rs);
     }
@@ -4167,13 +5070,13 @@ void rjump_assemble(int i,struct regstat *i_regs)
   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
   //assert(adj==0);
   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
-  add_stub(CC_STUB,out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
+  add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
     // special case for RFE
     emit_jmp(0);
   else
     emit_jns(0);
-  //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
+  //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
   #ifdef USE_MINI_HT
   if(rs1[i]==31) {
     do_miniht_jump(rs,rh,ht);
@@ -4181,42 +5084,40 @@ void rjump_assemble(int i,struct regstat *i_regs)
   else
   #endif
   {
-    emit_jmp(jump_vaddr_reg[rs]);
+    do_jump_vaddr(rs);
   }
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
   #endif
 }
 
-void cjump_assemble(int i,struct regstat *i_regs)
+static void cjump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
   int cc;
   int match;
-  match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+  match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   assem_debug("match=%d\n",match);
-  int s1h,s1l,s2h,s2l;
+  int s1l,s2l;
   int unconditional=0,nop=0;
-  int only32=0;
   int invert=0;
-  int internal=internal_branch(branch_regs[i].is32,ba[i]);
+  int internal=internal_branch(ba[i]);
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
   if(!match) invert=1;
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(i>(ba[i]-start)>>2) invert=1;
   #endif
+  #ifdef __aarch64__
+  invert=1; // aarch64 conditional branches have a short range, so use the inverted form (cond. skip over an unconditional jump)
+  #endif
 
   if(ooo[i]) {
     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
-    s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
-    s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
   }
   else {
     s1l=get_reg(i_regmap,rs1[i]);
-    s1h=get_reg(i_regmap,rs1[i]|64);
     s2l=get_reg(i_regmap,rs2[i]);
-    s2h=get_reg(i_regmap,rs2[i]|64);
   }
   if(rs1[i]==0&&rs2[i]==0)
   {
@@ -4229,17 +5130,12 @@ void cjump_assemble(int i,struct regstat *i_regs)
   }
   else if(rs1[i]==0)
   {
-    s1l=s2l;s1h=s2h;
-    s2l=s2h=-1;
-    only32=(regs[i].was32>>rs2[i])&1;
+    s1l=s2l;
+    s2l=-1;
   }
   else if(rs2[i]==0)
   {
-    s2l=s2h=-1;
-    only32=(regs[i].was32>>rs1[i])&1;
-  }
-  else {
-    only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
+    s2l=-1;
   }
 
   if(ooo[i]) {
@@ -4251,20 +5147,20 @@ void cjump_assemble(int i,struct regstat *i_regs)
     uint64_t bc_unneeded=branch_regs[i].u;
     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
     bc_unneeded|=1;
-    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,bc_unneeded);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs2[i]);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
     cc=get_reg(branch_regs[i].regmap,CCREG);
     assert(cc==HOST_CCREG);
     if(unconditional)
-      store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
     //assem_debug("cycle count (adj)\n");
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-        load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+        load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
         else
@@ -4291,43 +5187,6 @@ void cjump_assemble(int i,struct regstat *i_regs)
       void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-      if(!only32)
-      {
-        assert(s1h>=0);
-        if(opcode[i]==4) // BEQ
-        {
-          if(s2h>=0) emit_cmp(s1h,s2h);
-          else emit_test(s1h,s1h);
-          nottaken1=out;
-          emit_jne((void *)1l);
-        }
-        if(opcode[i]==5) // BNE
-        {
-          if(s2h>=0) emit_cmp(s1h,s2h);
-          else emit_test(s1h,s1h);
-          if(invert) taken=out;
-          else add_to_linker(out,ba[i],internal);
-          emit_jne(0);
-        }
-        if(opcode[i]==6) // BLEZ
-        {
-          emit_test(s1h,s1h);
-          if(invert) taken=out;
-          else add_to_linker(out,ba[i],internal);
-          emit_js(0);
-          nottaken1=out;
-          emit_jne((void *)1l);
-        }
-        if(opcode[i]==7) // BGTZ
-        {
-          emit_test(s1h,s1h);
-          nottaken1=out;
-          emit_js(1);
-          if(invert) taken=out;
-          else add_to_linker(out,ba[i],internal);
-          emit_jne(0);
-        }
-      } // if(!only32)
 
       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
       assert(s1l>=0);
@@ -4337,7 +5196,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         else emit_test(s1l,s1l);
         if(invert){
           nottaken=out;
-          emit_jne((void *)1l);
+          emit_jne(DJT_1);
         }else{
           add_to_linker(out,ba[i],internal);
           emit_jeq(0);
@@ -4349,7 +5208,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         else emit_test(s1l,s1l);
         if(invert){
           nottaken=out;
-          emit_jeq(1);
+          emit_jeq(DJT_1);
         }else{
           add_to_linker(out,ba[i],internal);
           emit_jne(0);
@@ -4360,7 +5219,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         emit_cmpimm(s1l,1);
         if(invert){
           nottaken=out;
-          emit_jge(1);
+          emit_jge(DJT_1);
         }else{
           add_to_linker(out,ba[i],internal);
           emit_jl(0);
@@ -4371,7 +5230,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         emit_cmpimm(s1l,1);
         if(invert){
           nottaken=out;
-          emit_jl(1);
+          emit_jl(DJT_1);
         }else{
           add_to_linker(out,ba[i],internal);
           emit_jge(0);
@@ -4393,8 +5252,8 @@ void cjump_assemble(int i,struct regstat *i_regs)
         #endif
         {
           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
-          store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
-          load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+          store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
+          load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
           if(internal)
             assem_debug("branch: internal\n");
           else
@@ -4424,41 +5283,6 @@ void cjump_assemble(int i,struct regstat *i_regs)
     //printf("IOE\n");
     void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
     if(!unconditional&&!nop) {
-      if(!only32)
-      {
-        assert(s1h>=0);
-        if((opcode[i]&0x2f)==4) // BEQ
-        {
-          if(s2h>=0) emit_cmp(s1h,s2h);
-          else emit_test(s1h,s1h);
-          nottaken1=out;
-          emit_jne((void *)2l);
-        }
-        if((opcode[i]&0x2f)==5) // BNE
-        {
-          if(s2h>=0) emit_cmp(s1h,s2h);
-          else emit_test(s1h,s1h);
-          taken=out;
-          emit_jne((void *)1l);
-        }
-        if((opcode[i]&0x2f)==6) // BLEZ
-        {
-          emit_test(s1h,s1h);
-          taken=out;
-          emit_js(1);
-          nottaken1=out;
-          emit_jne((void *)2l);
-        }
-        if((opcode[i]&0x2f)==7) // BGTZ
-        {
-          emit_test(s1h,s1h);
-          nottaken1=out;
-          emit_js(2);
-          taken=out;
-          emit_jne((void *)1l);
-        }
-      } // if(!only32)
-
       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
       assert(s1l>=0);
       if((opcode[i]&0x2f)==4) // BEQ
@@ -4466,26 +5290,26 @@ void cjump_assemble(int i,struct regstat *i_regs)
         if(s2l>=0) emit_cmp(s1l,s2l);
         else emit_test(s1l,s1l);
         nottaken=out;
-        emit_jne((void *)2l);
+        emit_jne(DJT_2);
       }
       if((opcode[i]&0x2f)==5) // BNE
       {
         if(s2l>=0) emit_cmp(s1l,s2l);
         else emit_test(s1l,s1l);
         nottaken=out;
-        emit_jeq(2);
+        emit_jeq(DJT_2);
       }
       if((opcode[i]&0x2f)==6) // BLEZ
       {
         emit_cmpimm(s1l,1);
         nottaken=out;
-        emit_jge(2);
+        emit_jge(DJT_2);
       }
       if((opcode[i]&0x2f)==7) // BGTZ
       {
         emit_cmpimm(s1l,1);
         nottaken=out;
-        emit_jl(2);
+        emit_jl(DJT_2);
       }
     } // if(!unconditional)
     int adj;
@@ -4496,11 +5320,11 @@ void cjump_assemble(int i,struct regstat *i_regs)
     if(!nop) {
       if(taken) set_jump_target(taken, out);
       assem_debug("1:\n");
-      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
+      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
       // load regs
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
       address_generation(i+1,&branch_regs[i],0);
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
       ds_assemble(i+1,&branch_regs[i]);
       cc=get_reg(branch_regs[i].regmap,CCREG);
       if(cc==-1) {
@@ -4508,11 +5332,11 @@ void cjump_assemble(int i,struct regstat *i_regs)
         // CHECK: Is the following instruction (fall thru) allocated ok?
       }
       assert(cc==HOST_CCREG);
-      store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-      load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
       else
@@ -4531,10 +5355,10 @@ void cjump_assemble(int i,struct regstat *i_regs)
       set_jump_target(nottaken, out);
       assem_debug("2:\n");
       if(!likely[i]) {
-        wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
-        load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+        wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+        load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
         address_generation(i+1,&branch_regs[i],0);
-        load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+        load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
         ds_assemble(i+1,&branch_regs[i]);
       }
       cc=get_reg(branch_regs[i].regmap,CCREG);
@@ -4559,34 +5383,34 @@ void cjump_assemble(int i,struct regstat *i_regs)
   }
 }
 
-void sjump_assemble(int i,struct regstat *i_regs)
+static void sjump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
   int cc;
   int match;
-  match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+  match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
   assem_debug("smatch=%d\n",match);
-  int s1h,s1l;
+  int s1l;
   int unconditional=0,nevertaken=0;
-  int only32=0;
   int invert=0;
-  int internal=internal_branch(branch_regs[i].is32,ba[i]);
+  int internal=internal_branch(ba[i]);
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
   if(!match) invert=1;
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(i>(ba[i]-start)>>2) invert=1;
   #endif
+  #ifdef __aarch64__
+  invert=1; // aarch64 conditional branches have a short range, so use the inverted form (cond. skip over an unconditional jump)
+  #endif
 
   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
 
   if(ooo[i]) {
     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
-    s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
   }
   else {
     s1l=get_reg(i_regmap,rs1[i]);
-    s1h=get_reg(i_regmap,rs1[i]|64);
   }
   if(rs1[i]==0)
   {
@@ -4598,9 +5422,6 @@ void sjump_assemble(int i,struct regstat *i_regs)
     //assert(opcode2[i]!=0x10);
     //assert(opcode2[i]!=0x12);
   }
-  else {
-    only32=(regs[i].was32>>rs1[i])&1;
-  }
 
   if(ooo[i]) {
     // Out of order execution (delay slot first)
@@ -4611,9 +5432,9 @@ void sjump_assemble(int i,struct regstat *i_regs)
     uint64_t bc_unneeded=branch_regs[i].u;
     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
     bc_unneeded|=1;
-    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,bc_unneeded);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
-    load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+    wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs1[i]);
+    load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
     if(rt1[i]==31) {
       int rt,return_address;
       rt=get_reg(branch_regs[i].regmap,31);
@@ -4630,14 +5451,14 @@ void sjump_assemble(int i,struct regstat *i_regs)
     cc=get_reg(branch_regs[i].regmap,CCREG);
     assert(cc==HOST_CCREG);
     if(unconditional)
-      store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
     assem_debug("cycle count (adj)\n");
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-        load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+        load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
         else
@@ -4664,33 +5485,6 @@ void sjump_assemble(int i,struct regstat *i_regs)
       void *nottaken = NULL;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-      if(!only32)
-      {
-        assert(s1h>=0);
-        if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
-        {
-          emit_test(s1h,s1h);
-          if(invert){
-            nottaken=out;
-            emit_jns(1);
-          }else{
-            add_to_linker(out,ba[i],internal);
-            emit_js(0);
-          }
-        }
-        if((opcode2[i]&0xf)==1) // BGEZ/BLTZAL
-        {
-          emit_test(s1h,s1h);
-          if(invert){
-            nottaken=out;
-            emit_js(1);
-          }else{
-            add_to_linker(out,ba[i],internal);
-            emit_jns(0);
-          }
-        }
-      } // if(!only32)
-      else
       {
         assert(s1l>=0);
         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
@@ -4698,7 +5492,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
           emit_test(s1l,s1l);
           if(invert){
             nottaken=out;
-            emit_jns(1);
+            emit_jns(DJT_1);
           }else{
             add_to_linker(out,ba[i],internal);
             emit_js(0);
@@ -4709,13 +5503,13 @@ void sjump_assemble(int i,struct regstat *i_regs)
           emit_test(s1l,s1l);
           if(invert){
             nottaken=out;
-            emit_js(1);
+            emit_js(DJT_1);
           }else{
             add_to_linker(out,ba[i],internal);
             emit_jns(0);
           }
         }
-      } // if(!only32)
+      }
 
       if(invert) {
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
@@ -4732,8 +5526,8 @@ void sjump_assemble(int i,struct regstat *i_regs)
         #endif
         {
           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
-          store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
-          load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+          store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
+          load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
           if(internal)
             assem_debug("branch: internal\n");
           else
@@ -4773,38 +5567,19 @@ void sjump_assemble(int i,struct regstat *i_regs)
     }
     if(!unconditional) {
       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-      if(!only32)
-      {
-        assert(s1h>=0);
-        if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
-        {
-          emit_test(s1h,s1h);
-          nottaken=out;
-          emit_jns(1);
-        }
-        if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
-        {
-          emit_test(s1h,s1h);
-          nottaken=out;
-          emit_js(1);
-        }
-      } // if(!only32)
-      else
-      {
         assert(s1l>=0);
         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
         {
           emit_test(s1l,s1l);
           nottaken=out;
-          emit_jns(1);
+          emit_jns(DJT_1);
         }
         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
         {
           emit_test(s1l,s1l);
           nottaken=out;
-          emit_js(1);
+          emit_js(DJT_1);
         }
-      }
     } // if(!unconditional)
     int adj;
     uint64_t ds_unneeded=branch_regs[i].u;
@@ -4813,11 +5588,11 @@ void sjump_assemble(int i,struct regstat *i_regs)
     // branch taken
     if(!nevertaken) {
       //assem_debug("1:\n");
-      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
+      wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
       // load regs
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
       address_generation(i+1,&branch_regs[i],0);
-      load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
+      load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
       ds_assemble(i+1,&branch_regs[i]);
       cc=get_reg(branch_regs[i].regmap,CCREG);
       if(cc==-1) {
@@ -4825,11 +5600,11 @@ void sjump_assemble(int i,struct regstat *i_regs)
         // CHECK: Is the following instruction (fall thru) allocated ok?
       }
       assert(cc==HOST_CCREG);
-      store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
-      load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
+      load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
       else
@@ -4847,10 +5622,10 @@ void sjump_assemble(int i,struct regstat *i_regs)
       set_jump_target(nottaken, out);
       assem_debug("1:\n");
       if(!likely[i]) {
-        wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,ds_unneeded);
-        load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
+        wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
+        load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
         address_generation(i+1,&branch_regs[i],0);
-        load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+        load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
         ds_assemble(i+1,&branch_regs[i]);
       }
       cc=get_reg(branch_regs[i].regmap,CCREG);
@@ -4878,23 +5653,18 @@ void sjump_assemble(int i,struct regstat *i_regs)
 static void pagespan_assemble(int i,struct regstat *i_regs)
 {
   int s1l=get_reg(i_regs->regmap,rs1[i]);
-  int s1h=get_reg(i_regs->regmap,rs1[i]|64);
   int s2l=get_reg(i_regs->regmap,rs2[i]);
-  int s2h=get_reg(i_regs->regmap,rs2[i]|64);
   void *taken = NULL;
   void *nottaken = NULL;
   int unconditional=0;
   if(rs1[i]==0)
   {
-    s1l=s2l;s1h=s2h;
-    s2l=s2h=-1;
+    s1l=s2l;
+    s2l=-1;
   }
   else if(rs2[i]==0)
   {
-    s2l=s2h=-1;
-  }
-  if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
-    s1h=s2h=-1;
+    s2l=-1;
   }
   int hr=0;
   int addr=-1,alt=-1,ntaddr=-1;
@@ -4936,7 +5706,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
   assert(hr<HOST_REGS);
   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
-    load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
+    load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
   }
   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
   if(opcode[i]==2) // J
@@ -4967,7 +5737,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
     }
     else
     #ifdef HAVE_CMOV_IMM
-    if(s1h<0) {
+    if(1) {
       if(s2l>=0) emit_cmp(s1l,s2l);
       else emit_test(s1l,s1l);
       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
@@ -4977,11 +5747,6 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
     {
       assert(s1l>=0);
       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
-      if(s1h>=0) {
-        if(s2h>=0) emit_cmp(s1h,s2h);
-        else emit_test(s1h,s1h);
-        emit_cmovne_reg(alt,addr);
-      }
       if(s2l>=0) emit_cmp(s1l,s2l);
       else emit_test(s1l,s1l);
       emit_cmovne_reg(alt,addr);
@@ -4990,34 +5755,19 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if((opcode[i]&0x3f)==5) // BNE
   {
     #ifdef HAVE_CMOV_IMM
-    if(s1h<0) {
-      if(s2l>=0) emit_cmp(s1l,s2l);
-      else emit_test(s1l,s1l);
-      emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
-    }
-    else
+    if(s2l>=0) emit_cmp(s1l,s2l);
+    else emit_test(s1l,s1l);
+    emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
+    #else
+    assert(s1l>=0);
+    emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
+    if(s2l>=0) emit_cmp(s1l,s2l);
+    else emit_test(s1l,s1l);
+    emit_cmovne_reg(alt,addr);
     #endif
-    {
-      assert(s1l>=0);
-      emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
-      if(s1h>=0) {
-        if(s2h>=0) emit_cmp(s1h,s2h);
-        else emit_test(s1h,s1h);
-        emit_cmovne_reg(alt,addr);
-      }
-      if(s2l>=0) emit_cmp(s1l,s2l);
-      else emit_test(s1l,s1l);
-      emit_cmovne_reg(alt,addr);
-    }
   }
   if((opcode[i]&0x3f)==0x14) // BEQL
   {
-    if(s1h>=0) {
-      if(s2h>=0) emit_cmp(s1h,s2h);
-      else emit_test(s1h,s1h);
-      nottaken=out;
-      emit_jne(0);
-    }
     if(s2l>=0) emit_cmp(s1l,s2l);
     else emit_test(s1l,s1l);
     if(nottaken) set_jump_target(nottaken, out);
@@ -5026,12 +5776,6 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
   if((opcode[i]&0x3f)==0x15) // BNEL
   {
-    if(s1h>=0) {
-      if(s2h>=0) emit_cmp(s1h,s2h);
-      else emit_test(s1h,s1h);
-      taken=out;
-      emit_jne(0);
-    }
     if(s2l>=0) emit_cmp(s1l,s2l);
     else emit_test(s1l,s1l);
     nottaken=out;
@@ -5042,25 +5786,13 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   {
     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
     emit_cmpimm(s1l,1);
-    if(s1h>=0) emit_mov(addr,ntaddr);
     emit_cmovl_reg(alt,addr);
-    if(s1h>=0) {
-      emit_test(s1h,s1h);
-      emit_cmovne_reg(ntaddr,addr);
-      emit_cmovs_reg(alt,addr);
-    }
   }
   if((opcode[i]&0x3f)==7) // BGTZ
   {
     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
     emit_cmpimm(s1l,1);
-    if(s1h>=0) emit_mov(addr,alt);
     emit_cmovl_reg(ntaddr,addr);
-    if(s1h>=0) {
-      emit_test(s1h,s1h);
-      emit_cmovne_reg(alt,addr);
-      emit_cmovs_reg(ntaddr,addr);
-    }
   }
   if((opcode[i]&0x3f)==0x16) // BLEZL
   {
@@ -5101,7 +5833,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   }
 
   assert(i_regs->regmap[HOST_CCREG]==CCREG);
-  wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
+  wb_dirtys(regs[i].regmap,regs[i].dirty);
   if(likely[i]||unconditional)
   {
     emit_movimm(ba[i],HOST_BTREG);
@@ -5124,7 +5856,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if(likely[i]) {
     // Not-taken path
     set_jump_target(nottaken, out);
-    wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
+    wb_dirtys(regs[i].regmap,regs[i].dirty);
     void *branch_addr=out;
     emit_jmp(0);
     int target_addr=start+i*4+8;
@@ -5151,13 +5883,13 @@ static void pagespan_ds()
   ll_add(jump_in+page,vaddr,(void *)out);
   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
   if(regs[0].regmap[HOST_CCREG]!=CCREG)
-    wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
+    wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty);
   if(regs[0].regmap[HOST_BTREG]!=BTREG)
     emit_writeword(HOST_BTREG,&branch_target);
-  load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
+  load_regs(regs[0].regmap_entry,regs[0].regmap,rs1[0],rs2[0]);
   address_generation(0,&regs[0],regs[0].regmap_entry);
   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
-    load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
+    load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
   is_delayslot=0;
   switch(itype[0]) {
     case ALU:
@@ -5200,7 +5932,6 @@ static void pagespan_ds()
     case RJUMP:
     case CJUMP:
     case SJUMP:
-    case FJUMP:
       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
   }
   int btaddr=get_reg(regs[0].regmap,BTREG);
@@ -5211,18 +5942,20 @@ static void pagespan_ds()
   assert(btaddr!=HOST_CCREG);
   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
 #ifdef HOST_IMM8
+  host_tempreg_acquire();
   emit_movimm(start+4,HOST_TEMPREG);
   emit_cmp(btaddr,HOST_TEMPREG);
+  host_tempreg_release();
 #else
   emit_cmpimm(btaddr,start+4);
 #endif
   void *branch = out;
   emit_jeq(0);
-  store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
-  emit_jmp(jump_vaddr_reg[btaddr]);
+  store_regs_bt(regs[0].regmap,regs[0].dirty,-1);
+  do_jump_vaddr(btaddr);
   set_jump_target(branch, out);
-  store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
-  load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
+  store_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
+  load_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
 }
 
 // Basic liveness analysis for MIPS registers
@@ -5232,7 +5965,7 @@ void unneeded_registers(int istart,int iend,int r)
   uint64_t u,gte_u,b,gte_b;
   uint64_t temp_u,temp_gte_u=0;
   uint64_t gte_u_unknown=0;
-  if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
+  if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
     gte_u_unknown=~0ll;
   if(iend==slen-1) {
     u=1;
@@ -5246,7 +5979,7 @@ void unneeded_registers(int istart,int iend,int r)
   for (i=iend;i>=istart;i--)
   {
     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
     {
       // If subroutine call, flag return address as a possible branch target
       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
@@ -5283,7 +6016,7 @@ void unneeded_registers(int istart,int iend,int r)
         bt[(ba[i]-start)>>2]=1;
         if(ba[i]<=start+i*4) {
           // Backward branch
-          if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+          if(is_ujump(i))
           {
             // Unconditional branch
             temp_u=1;
@@ -5328,7 +6061,7 @@ void unneeded_registers(int istart,int iend,int r)
             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
           }
         } /*else*/ if(1) {
-          if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+          if (is_ujump(i))
           {
             // Unconditional branch
             u=unneeded_reg[(ba[i]-start)>>2];
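unneeded_registers() is a backward liveness pass over the block: it keeps a bitmask of GPRs whose current values are never read again, setting the bit for a destination and clearing the bits for source operands as it walks instructions in reverse; at branches the mask is reseeded from the target's mask as shown above. A toy, self-contained sketch of that backward propagation over straight-line code (the real pass additionally tracks GTE registers, branch targets and the gte_u_unknown hack):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy backward liveness: bit r of the mask means register r's value
       is never read again before being overwritten (illustration only). */
    struct insn { int rs1, rs2, rt; };

    static void unneeded_pass(const struct insn *ins, int n, uint64_t *unneeded)
    {
      uint64_t u = ~0ull;               /* nothing is needed after the block end */
      for (int i = n - 1; i >= 0; i--) {
        u |= 1ull << ins[i].rt;         /* overwritten here: old value unneeded */
        u &= ~((1ull << ins[i].rs1) | (1ull << ins[i].rs2)); /* sources are needed */
        u |= 1;                         /* $zero is always unneeded */
        unneeded[i] = u;
      }
    }

    int main(void)
    {
      struct insn block[] = { {1, 2, 3}, {3, 0, 4}, {4, 0, 2} };
      uint64_t u[3];
      unneeded_pass(block, 3, u);
      for (int i = 0; i < 3; i++)
        printf("insn %d: unneeded mask %016llx\n", i, (unsigned long long)u[i]);
      return 0;
    }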
@@ -5433,12 +6166,12 @@ void clean_registers(int istart,int iend,int wr)
   }
   for (i=iend;i>=istart;i--)
   {
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
     {
       if(ba[i]<start || ba[i]>=(start+slen*4))
       {
         // Branch out of this block, flush all regs
-        if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+        if (is_ujump(i))
         {
           // Unconditional branch
           will_dirty_i=0;
@@ -5518,7 +6251,7 @@ void clean_registers(int istart,int iend,int wr)
         // Internal branch
         if(ba[i]<=start+i*4) {
           // Backward branch
-          if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+          if (is_ujump(i))
           {
             // Unconditional branch
             temp_will_dirty=0;
@@ -5615,7 +6348,7 @@ void clean_registers(int istart,int iend,int wr)
         }
         /*else*/ if(1)
         {
-          if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
+          if (is_ujump(i))
           {
             // Unconditional branch
             will_dirty_i=0;
@@ -5753,7 +6486,7 @@ void clean_registers(int istart,int iend,int wr)
         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
         if(i>istart) {
-          if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
+          if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP)
           {
             // Don't store a register immediately after writing it,
             // may prevent dual-issue.
@@ -5776,13 +6509,13 @@ void clean_registers(int istart,int iend,int wr)
       }
       printf("\n");*/
 
-      //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
+      //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP)) {
         regs[i].dirty|=will_dirty_i;
         #ifndef DESTRUCTIVE_WRITEBACK
         regs[i].dirty&=wont_dirty_i;
-        if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+        if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
         {
-          if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
+          if (i < iend-1 && !is_ujump(i)) {
             for(r=0;r<HOST_REGS;r++) {
               if(r!=EXCLUDE_REG) {
                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
@@ -5862,8 +6595,6 @@ void disassemble_inst(int i)
         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
       case SJUMP:
         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
-      case FJUMP:
-        printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
       case RJUMP:
         if (opcode[i]==0x9&&rt1[i]!=31)
           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
@@ -5945,33 +6676,46 @@ static void disassemble_inst(int i) {}
 
 #define DRC_TEST_VAL 0x74657374
 
-static int new_dynarec_test(void)
+static void new_dynarec_test(void)
 {
-  int (*testfunc)(void) = (void *)out;
+  int (*testfunc)(void);
   void *beginning;
-  int ret;
+  int ret[2];
+  size_t i;
 
-  beginning = start_block();
-  emit_movimm(DRC_TEST_VAL,0); // test
-  emit_jmpreg(14);
-  literal_pool(0);
-  end_block(beginning);
-  SysPrintf("testing if we can run recompiled code..\n");
-  ret = testfunc();
-  if (ret == DRC_TEST_VAL)
+  // check structure linkage
+  if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
+  {
+    SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
+  }
+
+  SysPrintf("testing if we can run recompiled code...\n");
+  ((volatile u_int *)out)[0]++; // make cache dirty
+
+  for (i = 0; i < ARRAY_SIZE(ret); i++) {
+    out = ndrc->translation_cache;
+    beginning = start_block();
+    emit_movimm(DRC_TEST_VAL + i, 0); // test
+    emit_ret();
+    literal_pool(0);
+    end_block(beginning);
+    testfunc = beginning;
+    ret[i] = testfunc();
+  }
+
+  if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
     SysPrintf("test passed.\n");
   else
-    SysPrintf("test failed: %08x\n", ret);
-  out = translation_cache;
-  return ret == DRC_TEST_VAL;
+    SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
+  out = ndrc->translation_cache;
 }
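Besides the run test, new_dynarec_test() now sanity-checks that rcnts immediately follows psxRegs, because the hand-written linkage code reaches rcnts through a fixed offset from the psxRegs base. A hedged sketch of pinning that kind of layout assumption at compile time; the container struct and field shapes below are hypothetical stand-ins, not the actual PCSX types:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layout the generated code might rely on: rcnts is
       expected to start right after regs inside one container struct. */
    struct psx_regs   { uint32_t gpr[34]; uint32_t cp0[32]; uint32_t cp2[64]; };
    struct root_count { uint32_t mode, target, cycle_start; };

    struct dynarec_state {
      struct psx_regs   regs;
      struct root_count rcnts[4];
    };

    /* Fail the build instead of miscompiling if the assumption breaks. */
    _Static_assert(offsetof(struct dynarec_state, rcnts) == sizeof(struct psx_regs),
                   "rcnts must directly follow regs");

    int main(void) { return 0; }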
 
 // clear the state completely, instead of just marking
 // things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full()
+void new_dynarec_clear_full(void)
 {
   int n;
-  out = translation_cache;
+  out = ndrc->translation_cache;
   memset(invalid_code,1,sizeof(invalid_code));
   memset(hash_table,0xff,sizeof(hash_table));
   memset(mini_ht,-1,sizeof(mini_ht));
@@ -5989,34 +6733,28 @@ void new_dynarec_clear_full()
   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
 }
 
-void new_dynarec_init()
+void new_dynarec_init(void)
 {
   SysPrintf("Init new dynarec\n");
 
-  // allocate/prepare a buffer for translation cache
-  // see assem_arm.h for some explanation
-#if   defined(BASE_ADDR_FIXED)
-  if (mmap(translation_cache, 1 << TARGET_SIZE_2,
-            PROT_READ | PROT_WRITE | PROT_EXEC,
-            MAP_PRIVATE | MAP_ANONYMOUS,
-            -1, 0) != translation_cache) {
-    SysPrintf("mmap() failed: %s\n", strerror(errno));
-    SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
-    abort();
-  }
-#elif defined(BASE_ADDR_DYNAMIC)
+#ifdef BASE_ADDR_DYNAMIC
   #ifdef VITA
   sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
   if (sceBlock < 0)
     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
-  int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
+  int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
   if (ret < 0)
     SysPrintf("sceKernelGetMemBlockBase failed\n");
   #else
-  translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
+  uintptr_t desired_addr = 0;
+  #ifdef __ELF__
+  extern char _end;
+  desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
+  #endif
+  ndrc = mmap((void *)desired_addr, sizeof(*ndrc),
             PROT_READ | PROT_WRITE | PROT_EXEC,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (translation_cache == MAP_FAILED) {
+  if (ndrc == MAP_FAILED) {
     SysPrintf("mmap() failed: %s\n", strerror(errno));
     abort();
   }
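On ELF targets the dynamic mapping asks for an address just past the program image, rounded up to a 16 MB boundary ((p + 0xffffff) & ~0xffffff), presumably so the translation cache lands close enough to the rest of the code for short host branches; the rounding itself is the only part the source guarantees. A tiny align-up sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Round p up to the next 16 MB boundary, as done for desired_addr. */
    static uintptr_t align_up_16m(uintptr_t p)
    {
      return (p + 0xffffffu) & ~(uintptr_t)0xffffffu;
    }

    int main(void)
    {
      printf("%lx\n", (unsigned long)align_up_16m(0x08123456u)); /* prints 9000000 */
      return 0;
    }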
@@ -6024,11 +6762,12 @@ void new_dynarec_init()
 #else
   #ifndef NO_WRITE_EXEC
   // not all systems allow execute in data segment by default
-  if (mprotect(translation_cache, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
+  if (mprotect(ndrc, sizeof(ndrc->translation_cache) + sizeof(ndrc->tramp.ops),
+               PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
     SysPrintf("mprotect() failed: %s\n", strerror(errno));
   #endif
 #endif
-  out = translation_cache;
+  out = ndrc->translation_cache;
   cycle_multiplier=200;
   new_dynarec_clear_full();
 #ifdef HOST_IMM8
@@ -6044,15 +6783,15 @@ void new_dynarec_init()
     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
 }
 
-void new_dynarec_cleanup()
+void new_dynarec_cleanup(void)
 {
   int n;
-#if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
+#ifdef BASE_ADDR_DYNAMIC
   #ifdef VITA
   sceKernelFreeMemBlock(sceBlock);
   sceBlock = -1;
   #else
-  if (munmap(translation_cache, 1<<TARGET_SIZE_2) < 0)
+  if (munmap(ndrc, sizeof(*ndrc)) < 0)
     SysPrintf("munmap() failed\n");
   #endif
 #endif
@@ -6066,16 +6805,25 @@ void new_dynarec_cleanup()
 
 static u_int *get_source_start(u_int addr, u_int *limit)
 {
+  if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
+    cycle_multiplier_override = 0;
+
   if (addr < 0x00200000 ||
-    (0xa0000000 <= addr && addr < 0xa0200000)) {
+    (0xa0000000 <= addr && addr < 0xa0200000))
+  {
     // used for BIOS calls mostly?
     *limit = (addr&0xa0000000)|0x00200000;
     return (u_int *)(rdram + (addr&0x1fffff));
   }
   else if (!Config.HLE && (
     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
-    (0xbfc00000 <= addr && addr < 0xbfc80000))) {
-    // BIOS
+    (0xbfc00000 <= addr && addr < 0xbfc80000)))
+  {
+    // BIOS: the multiplier should be much higher since this is uncached 8-bit memory,
+    // but PCSX's timings are too tightly tied to the interpreter's BIAS
+    if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
+      cycle_multiplier_override = 200;
+
     *limit = (addr & 0xfff00000) | 0x80000;
     return (u_int *)((u_char *)psxR + (addr&0x7ffff));
   }
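get_source_start() translates a guest PC into a host pointer plus a compile limit, accepting the 2 MB RAM window (and its 0xa0000000 uncached mirror) and, when HLE is off, the 512 KB BIOS ROM at 0xbfc00000. A simplified classifier covering only the ranges visible in this hunk (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified view of the address ranges get_source_start() accepts
       in this hunk (mirrors and sizes as in the source). */
    enum psx_region { REGION_RAM, REGION_BIOS, REGION_OTHER };

    static enum psx_region classify(uint32_t addr)
    {
      if (addr < 0x00200000u || (0xa0000000u <= addr && addr < 0xa0200000u))
        return REGION_RAM;   /* 2 MB RAM, low or uncached mirror */
      if (0xbfc00000u <= addr && addr < 0xbfc80000u)
        return REGION_BIOS;  /* 512 KB BIOS ROM */
      return REGION_OTHER;
    }

    int main(void)
    {
      printf("%d %d %d\n", classify(0x00080000u), classify(0xbfc00180u),
             classify(0x1f801070u));
      return 0;
    }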
@@ -6186,7 +6934,7 @@ void new_dynarec_load_blocks(const void *save, int size)
   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
 }
 
-int new_recompile_block(int addr)
+int new_recompile_block(u_int addr)
 {
   u_int pagelimit = 0;
   u_int state_rflags = 0;
@@ -6204,7 +6952,7 @@ int new_recompile_block(int addr)
   }
 
   start = (u_int)addr&~3;
-  //assert(((u_int)addr&1)==0);
+  //assert(((u_int)addr&1)==0); // start-in-delay-slot flag
   new_dynarec_did_compile=1;
   if (Config.HLE && start == 0x80001000) // hlecall
   {
@@ -6215,7 +6963,7 @@ int new_recompile_block(int addr)
     invalid_code[start>>12]=0;
     emit_movimm(start,0);
     emit_writeword(0,&pcaddr);
-    emit_jmp(new_dyna_leave);
+    emit_far_jump(new_dyna_leave);
     literal_pool(0);
     end_block(beginning);
     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
@@ -6225,7 +6973,7 @@ int new_recompile_block(int addr)
   source = get_source_start(start, &pagelimit);
   if (source == NULL) {
     SysPrintf("Compile at bogus memory address: %08x\n", addr);
-    exit(1);
+    abort();
   }
 
   /* Pass 1: disassemble */
@@ -6407,7 +7155,7 @@ int new_recompile_block(int addr)
 #endif
       case 0x12: strcpy(insn[i],"COP2"); type=NI;
         op2=(source[i]>>21)&0x1f;
-        //if (op2 & 0x10) {
+        //if (op2 & 0x10)
         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
           if (gte_handlers[source[i]&0x3f]!=NULL) {
             if (gte_regnames[source[i]&0x3f]!=NULL)
@@ -6436,8 +7184,6 @@ int new_recompile_block(int addr)
     opcode2[i]=op2;
     /* Get registers/immediates */
     lt1[i]=0;
-    us1[i]=0;
-    us2[i]=0;
     dep1[i]=0;
     dep2[i]=0;
     gte_rs[i]=gte_rt[i]=0;
@@ -6456,7 +7202,6 @@ int new_recompile_block(int addr)
         rt1[i]=0;
         rt2[i]=0;
         imm[i]=(short)source[i];
-        if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
         break;
       case LOADLR:
         // LWL/LWR only load part of the register,
@@ -6466,7 +7211,6 @@ int new_recompile_block(int addr)
         rt1[i]=(source[i]>>16)&0x1f;
         rt2[i]=0;
         imm[i]=(short)source[i];
-        if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
         if(op==0x26) dep1[i]=rt1[i]; // LWR
         break;
       case IMM16:
@@ -6480,8 +7224,6 @@ int new_recompile_block(int addr)
         }else{
           imm[i]=(short)source[i];
         }
-        if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
-        if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
         break;
       case UJUMP:
@@ -6514,8 +7256,6 @@ int new_recompile_block(int addr)
         if(op&2) { // BGTZ/BLEZ
           rs2[i]=0;
         }
-        us1[i]=rs1[i];
-        us2[i]=rs2[i];
         likely[i]=op>>4;
         break;
       case SJUMP:
@@ -6523,29 +7263,18 @@ int new_recompile_block(int addr)
         rs2[i]=CCREG;
         rt1[i]=0;
         rt2[i]=0;
-        us1[i]=rs1[i];
         if(op2&0x10) { // BxxAL
           rt1[i]=31;
           // NOTE: If the branch is not taken, r31 is still overwritten
         }
         likely[i]=(op2&2)>>1;
         break;
-      case FJUMP:
-        rs1[i]=FSREG;
-        rs2[i]=CSREG;
-        rt1[i]=0;
-        rt2[i]=0;
-        likely[i]=((source[i])>>17)&1;
-        break;
       case ALU:
         rs1[i]=(source[i]>>21)&0x1f; // source
         rs2[i]=(source[i]>>16)&0x1f; // subtract amount
         rt1[i]=(source[i]>>11)&0x1f; // destination
         rt2[i]=0;
-        if(op2==0x2a||op2==0x2b) { // SLT/SLTU
-          us1[i]=rs1[i];us2[i]=rs2[i];
-        }
-        else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
+        if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
           dep1[i]=rs1[i];dep2[i]=rs2[i];
         }
         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
@@ -6557,9 +7286,6 @@ int new_recompile_block(int addr)
         rs2[i]=(source[i]>>16)&0x1f; // divisor
         rt1[i]=HIREG;
         rt2[i]=LOREG;
-        if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
-          us1[i]=rs1[i];us2[i]=rs2[i];
-        }
         break;
       case MOV:
         rs1[i]=0;
@@ -6579,8 +7305,6 @@ int new_recompile_block(int addr)
         rs2[i]=(source[i]>>21)&0x1f; // shift amount
         rt1[i]=(source[i]>>11)&0x1f; // destination
         rt2[i]=0;
-        // DSLLV/DSRLV/DSRAV are 64-bit
-        if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
         break;
       case SHIFTIMM:
         rs1[i]=(source[i]>>16)&0x1f;
@@ -6590,8 +7314,6 @@ int new_recompile_block(int addr)
         imm[i]=(source[i]>>6)&0x1f;
         // DSxx32 instructions
         if(op2>=0x3c) imm[i]|=0x20;
-        // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
-        if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
         break;
       case COP0:
         rs1[i]=0;
@@ -6610,7 +7332,6 @@ int new_recompile_block(int addr)
         rt2[i]=0;
         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
-        if(op2==5) us1[i]=rs1[i]; // DMTC1
         rs2[i]=CSREG;
         break;
       case COP2:
@@ -6682,13 +7403,13 @@ int new_recompile_block(int addr)
       ba[i]=start+i*4+8; // Ignore never taken branch
     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
       ba[i]=start+i*4+8; // Ignore never taken branch
-    else if(type==CJUMP||type==SJUMP||type==FJUMP)
+    else if(type==CJUMP||type==SJUMP)
       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
     else ba[i]=-1;
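Pass 1 derives ba[i] for CJUMP/SJUMP from the instruction word: the expression ((signed int)((unsigned int)source[i]<<16)>>14) is simply sign-extend-the-16-bit-immediate then multiply by 4, added to the address of the delay slot. A small self-contained check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* MIPS I-type branch target: branch_pc + 4 + sign_extend(imm16) * 4.
       Equivalent to start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14). */
    static uint32_t branch_target(uint32_t branch_pc, uint32_t insn)
    {
      int32_t off = (int16_t)(insn & 0xffff); /* sign-extended immediate */
      return branch_pc + 4 + ((uint32_t)off << 2);
    }

    int main(void)
    {
      /* offset -1: the branch targets its own address */
      printf("%08x\n", (unsigned)branch_target(0x80030000u, 0x1000ffffu)); /* 80030000 */
      return 0;
    }

With offset -1 the target equals the branch's own address, which is exactly the "idle loop" case the assemblers above detect via i==(ba[i]-start)>>2.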
-    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
+    if (i > 0 && is_jump(i-1)) {
       int do_in_intrp=0;
       // branch in delay slot?
-      if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
+      if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) {
         // don't handle first branch and call interpreter if it's hit
         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
         do_in_intrp=1;
@@ -6703,7 +7424,7 @@ int new_recompile_block(int addr)
           bt[t+1]=1; // expected return from interpreter
         }
         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
-              !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
+              !(i>=3&&is_jump(i-3))) {
           // v0 overwrite like this is a sign of trouble, bail out
           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
           do_in_intrp=1;
@@ -6719,7 +7440,7 @@ int new_recompile_block(int addr)
       }
     }
     /* Is this the end of the block? */
-    if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
+    if (i > 0 && is_ujump(i-1)) {
       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
         done=2;
       }
@@ -6755,7 +7476,7 @@ int new_recompile_block(int addr)
     }
   }
   slen=i;
-  if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
+  if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP) {
     if(start+i*4==pagelimit) {
       itype[i-1]=SPAN;
     }
@@ -6769,7 +7490,6 @@ int new_recompile_block(int addr)
   /* Pass 3 - Register allocation */
 
   struct regstat current; // Current register allocations/status
-  current.is32=1;
   current.dirty=0;
   current.u=unneeded_reg[0];
   clear_all_regs(current.regmap);
@@ -6804,33 +7524,12 @@ int new_recompile_block(int addr)
       current.isconst=0;
       current.waswritten=0;
     }
-    if(i>1)
-    {
-      if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
-      {
-        if(rs1[i-2]==0||rs2[i-2]==0)
-        {
-          if(rs1[i-2]) {
-            current.is32|=1LL<<rs1[i-2];
-            int hr=get_reg(current.regmap,rs1[i-2]|64);
-            if(hr>=0) current.regmap[hr]=-1;
-          }
-          if(rs2[i-2]) {
-            current.is32|=1LL<<rs2[i-2];
-            int hr=get_reg(current.regmap,rs2[i-2]|64);
-            if(hr>=0) current.regmap[hr]=-1;
-          }
-        }
-      }
-    }
-    current.is32=-1LL;
 
     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
     regs[i].wasconst=current.isconst;
-    regs[i].was32=current.is32;
     regs[i].wasdirty=current.dirty;
     regs[i].loadedconst=0;
-    if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
+    if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP) {
       if(i+1<slen) {
         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
         current.u|=1;
@@ -6842,7 +7541,7 @@ int new_recompile_block(int addr)
         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
         current.u|=1;
-      } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
+      } else { SysPrintf("oops, branch at end of block with no delay slot\n");abort(); }
     }
     is_ds[i]=ds;
     if(ds) {
@@ -6858,14 +7557,11 @@ int new_recompile_block(int addr)
       struct regstat temp;
       memcpy(&temp,&current,sizeof(current));
       temp.wasdirty=temp.dirty;
-      temp.was32=temp.is32;
       // TODO: Take into account unconditional branches, as below
       delayslot_alloc(&temp,i);
       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
       regs[i].wasdirty=temp.wasdirty;
-      regs[i].was32=temp.was32;
       regs[i].dirty=temp.dirty;
-      regs[i].is32=temp.is32;
       regs[i].isconst=0;
       regs[i].wasconst=0;
       current.isconst=0;
@@ -6879,7 +7575,7 @@ int new_recompile_block(int addr)
           }
           else
           {
-            if(r<64){
+              assert(r < 64);
               if((current.u>>r)&1) {
                 regs[i].regmap_entry[hr]=-1;
                 regs[i].regmap[hr]=-1;
@@ -6887,10 +7583,6 @@ int new_recompile_block(int addr)
                 //current.regmap[hr]=-1;
               }else
                 regs[i].regmap_entry[hr]=r;
-            }
-            else {
-              assert(0);
-            }
           }
         } else {
           // First instruction expects CCREG to be allocated
@@ -6918,7 +7610,6 @@ int new_recompile_block(int addr)
             #ifdef REG_PREFETCH
             alloc_reg(&current,i,PTEMP);
             #endif
-            //current.is32|=1LL<<rt1[i];
           }
           ooo[i]=1;
           delayslot_alloc(&current,i+1);
@@ -6978,10 +7669,6 @@ int new_recompile_block(int addr)
             dirty_reg(&current,CCREG);
             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
-            if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
-            {
-              assert(0);
-            }
             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
               // The delay slot overwrites one of our conditions.
@@ -6991,10 +7678,6 @@ int new_recompile_block(int addr)
               regs[i].wasconst=0;
               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
-              if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
-              {
-                assert(0);
-              }
             }
             else
             {
@@ -7008,10 +7691,6 @@ int new_recompile_block(int addr)
             alloc_cc(&current,i);
             dirty_reg(&current,CCREG);
             alloc_reg(&current,i,rs1[i]);
-            if(!(current.is32>>rs1[i]&1))
-            {
-              assert(0);
-            }
             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
               // The delay slot overwrites one of our conditions.
               // Allocate the branch condition registers instead.
@@ -7019,10 +7698,6 @@ int new_recompile_block(int addr)
               current.wasconst=0;
               regs[i].wasconst=0;
               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
-              if(!((current.is32>>rs1[i])&1))
-              {
-                assert(0);
-              }
             }
             else
             {
@@ -7041,10 +7716,6 @@ int new_recompile_block(int addr)
             dirty_reg(&current,CCREG);
             alloc_reg(&current,i,rs1[i]);
             alloc_reg(&current,i,rs2[i]);
-            if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
-            {
-              assert(0);
-            }
           }
           else
           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
@@ -7055,10 +7726,6 @@ int new_recompile_block(int addr)
             alloc_cc(&current,i);
             dirty_reg(&current,CCREG);
             alloc_reg(&current,i,rs1[i]);
-            if(!(current.is32>>rs1[i]&1))
-            {
-              assert(0);
-            }
           }
           ds=1;
           //current.isconst=0;
@@ -7075,17 +7742,12 @@ int new_recompile_block(int addr)
             alloc_cc(&current,i);
             dirty_reg(&current,CCREG);
             alloc_reg(&current,i,rs1[i]);
-            if(!(current.is32>>rs1[i]&1))
-            {
-              assert(0);
-            }
             if (rt1[i]==31) { // BLTZAL/BGEZAL
               alloc_reg(&current,i,31);
               dirty_reg(&current,31);
               //#ifdef REG_PREFETCH
               //alloc_reg(&current,i,PTEMP);
               //#endif
-              //current.is32|=1LL<<rt1[i];
             }
             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
@@ -7094,10 +7756,6 @@ int new_recompile_block(int addr)
               current.wasconst=0;
               regs[i].wasconst=0;
               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
-              if(!((current.is32>>rs1[i])&1))
-              {
-                assert(0);
-              }
             }
             else
             {
@@ -7115,17 +7773,10 @@ int new_recompile_block(int addr)
             alloc_cc(&current,i);
             dirty_reg(&current,CCREG);
             alloc_reg(&current,i,rs1[i]);
-            if(!(current.is32>>rs1[i]&1))
-            {
-              assert(0);
-            }
           }
           ds=1;
           //current.isconst=0;
           break;
-        case FJUMP:
-          assert(0);
-          break;
         case IMM16:
           imm16_alloc(&current,i);
           break;
@@ -7156,8 +7807,9 @@ int new_recompile_block(int addr)
           cop0_alloc(&current,i);
           break;
         case COP1:
+          break;
         case COP2:
-          cop12_alloc(&current,i);
+          cop2_alloc(&current,i);
           break;
         case C1LS:
           c1ls_alloc(&current,i);
@@ -7205,7 +7857,8 @@ int new_recompile_block(int addr)
               regs[i].regmap_entry[hr]=0;
             }
             else
-            if(r<64){
+            {
+              assert(r<64);
               if((current.u>>r)&1) {
                 regs[i].regmap_entry[hr]=-1;
                 //regs[i].regmap[hr]=-1;
@@ -7213,9 +7866,6 @@ int new_recompile_block(int addr)
               }else
                 regs[i].regmap_entry[hr]=r;
             }
-            else {
-              assert(0);
-            }
           }
         } else {
           // Branches expect CCREG to be allocated at the target
@@ -7238,7 +7888,6 @@ int new_recompile_block(int addr)
     /* Branch post-alloc */
     if(i>0)
     {
-      current.was32=current.is32;
       current.wasdirty=current.dirty;
       switch(itype[i-1]) {
         case UJUMP:
@@ -7251,10 +7900,9 @@ int new_recompile_block(int addr)
           if(rt1[i-1]==31) { // JAL
             alloc_reg(&branch_regs[i-1],i-1,31);
             dirty_reg(&branch_regs[i-1],31);
-            branch_regs[i-1].is32|=1LL<<31;
           }
           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
-          memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+          memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
           break;
         case RJUMP:
           memcpy(&branch_regs[i-1],&current,sizeof(current));
@@ -7267,7 +7915,6 @@ int new_recompile_block(int addr)
           if(rt1[i-1]!=0) { // JALR
             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
             dirty_reg(&branch_regs[i-1],rt1[i-1]);
-            branch_regs[i-1].is32|=1LL<<rt1[i-1];
           }
           #ifdef USE_MINI_HT
           if(rs1[i-1]==31) { // JALR
@@ -7276,7 +7923,7 @@ int new_recompile_block(int addr)
           }
           #endif
           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
-          memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+          memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
           break;
         case CJUMP:
           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
@@ -7298,16 +7945,12 @@ int new_recompile_block(int addr)
               // Alloc the branch condition registers
               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
-              if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
-              {
-                assert(0);
-              }
             }
             memcpy(&branch_regs[i-1],&current,sizeof(current));
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
           }
           else
           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
@@ -7327,16 +7970,12 @@ int new_recompile_block(int addr)
               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
               // Alloc the branch condition register
               alloc_reg(&current,i-1,rs1[i-1]);
-              if(!(current.is32>>rs1[i-1]&1))
-              {
-                assert(0);
-              }
             }
             memcpy(&branch_regs[i-1],&current,sizeof(current));
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
           }
           else
           // Alloc the delay slot in case the branch is taken
@@ -7385,16 +8024,12 @@ int new_recompile_block(int addr)
               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
               // Alloc the branch condition register
               alloc_reg(&current,i-1,rs1[i-1]);
-              if(!(current.is32>>rs1[i-1]&1))
-              {
-                assert(0);
-              }
             }
             memcpy(&branch_regs[i-1],&current,sizeof(current));
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
           }
           else
           // Alloc the delay slot in case the branch is taken
@@ -7414,20 +8049,15 @@ int new_recompile_block(int addr)
           if(opcode2[i-1]&0x10) { // BxxZAL
             alloc_reg(&branch_regs[i-1],i-1,31);
             dirty_reg(&branch_regs[i-1],31);
-            branch_regs[i-1].is32|=1LL<<31;
           }
           break;
-        case FJUMP:
-          assert(0);
-          break;
       }
 
-      if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
+      if (is_ujump(i-1))
       {
         if(rt1[i-1]==31) // JAL/JALR
         {
           // Subroutine call will return here, don't alloc any registers
-          current.is32=1;
           current.dirty=0;
           clear_all_regs(current.regmap);
           alloc_reg(&current,i,CCREG);
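Note: from this hunk on, is_ujump(i) replaces the repeated "itype[i]==UJUMP || itype[i]==RJUMP || (source[i]>>16)==0x1000" test. Its definition is outside the hunks shown here; presumably it is just that condition wrapped in a helper, roughly:

static int is_ujump(int i)
{
  // unconditional control flow: j/jal (UJUMP), jr/jalr (RJUMP), or the
  // "b" idiom beq $0,$0,offset, whose upper halfword is 0x1000
  return itype[i] == UJUMP || itype[i] == RJUMP || (source[i] >> 16) == 0x1000;
}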
@@ -7436,7 +8066,6 @@ int new_recompile_block(int addr)
         else if(i+1<slen)
         {
           // Internal branch will jump here, match registers to caller
-          current.is32=0x3FFFFFFFFLL;
           current.dirty=0;
           clear_all_regs(current.regmap);
           alloc_reg(&current,i,CCREG);
@@ -7445,7 +8074,6 @@ int new_recompile_block(int addr)
           {
             if(ba[j]==start+i*4+4) {
               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
-              current.is32=branch_regs[j].is32;
               current.dirty=branch_regs[j].dirty;
               break;
             }
@@ -7456,7 +8084,6 @@ int new_recompile_block(int addr)
                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
                   current.regmap[hr]=-1;
                 }
-                current.is32&=branch_regs[j].is32;
                 current.dirty&=branch_regs[j].dirty;
               }
             }
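The register-state merge kept above (now without the is32 bookkeeping) seeds the state from the most recent branch that targets this point and then intersects it with every other matching branch. A minimal sketch of that merge, using the file's struct regstat and a hypothetical helper name (relies on the file's existing declarations):

static void merge_branch_state(struct regstat *cur, const struct regstat *br, int seed)
{
  int hr;
  if (seed) {
    // first matching predecessor: take its register map and dirty bits as-is
    memcpy(cur->regmap, br->regmap, sizeof(cur->regmap));
    cur->dirty = br->dirty;
    return;
  }
  for (hr = 0; hr < HOST_REGS; hr++)
    if (cur->regmap[hr] != br->regmap[hr])
      cur->regmap[hr] = -1;   // predecessors disagree: assume nothing is loaded
  cur->dirty &= br->dirty;    // dirty only if dirty on every incoming path
}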
@@ -7468,19 +8095,17 @@ int new_recompile_block(int addr)
 
     // Count cycles in between branches
     ccadj[i]=cc;
-    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
+    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
     {
       cc=0;
     }
 #if !defined(DRC_DBG)
     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
     {
-      // GTE runs in parallel until accessed, divide by 2 for a rough guess
-      cc+=gte_cycletab[source[i]&0x3f]/2;
-    }
-    else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues
-    {
-      cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
+      // this should really be removed since the real stalls have been implemented,
+      // but doing so causes a sizeable perf regression against the older version
+      u_int gtec = gte_cycletab[source[i] & 0x3f];
+      cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? gtec/2 : 2;
     }
     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
     {
@@ -7488,7 +8113,8 @@ int new_recompile_block(int addr)
     }
     else if(itype[i]==C2LS)
     {
-      cc+=4;
+      // same as with C2OP
+      cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? 4 : 2;
     }
 #endif
     else
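The two hunks above carry the point of the commit: with GTE stalls now modeled by the dedicated stall handling, C2OP and C2LS only charge a small fixed cost here, unless the NDHACK_GTE_NO_STALL hack keeps the old "half the op latency" guess. A minimal sketch of that choice as a hypothetical helper (gte_cycletab and HACK_ENABLED are the file's own):

static u_int gte_cc_charge(u_int insn)
{
  u_int gtec = gte_cycletab[insn & 0x3f];  // full latency of this GTE operation
  if (HACK_ENABLED(NDHACK_GTE_NO_STALL))
    return gtec / 2;                       // old rough "GTE runs in parallel" estimate
  return 2;                                // real stalls are charged by the stall code
}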
@@ -7496,12 +8122,10 @@ int new_recompile_block(int addr)
       cc++;
     }
 
-    flush_dirty_uppers(&current);
     if(!is_ds[i]) {
-      regs[i].is32=current.is32;
       regs[i].dirty=current.dirty;
       regs[i].isconst=current.isconst;
-      memcpy(constmap[i],current_constmap,sizeof(current_constmap));
+      memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
     }
     for(hr=0;hr<HOST_REGS;hr++) {
       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
@@ -7521,7 +8145,7 @@ int new_recompile_block(int addr)
   for (i=slen-1;i>=0;i--)
   {
     int hr;
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
     {
       if(ba[i]<start || ba[i]>=(start+slen*4))
       {
@@ -7542,7 +8166,7 @@ int new_recompile_block(int addr)
         }
       }
       // Conditional branch may need registers for following instructions
-      if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+      if (!is_ujump(i))
       {
         if(i<slen-2) {
           nr|=needed_reg[i+2];
@@ -7565,12 +8189,8 @@ int new_recompile_block(int addr)
           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
         }
-        if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
-        if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
-        if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
-        if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
@@ -7607,12 +8227,8 @@ int new_recompile_block(int addr)
       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
       // Source registers are needed
-      if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
-      if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
-      if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
-      if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
@@ -7624,18 +8240,18 @@ int new_recompile_block(int addr)
       // But do so if this is a branch target, otherwise we
       // might have to load the register before the branch.
       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
-        if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
+        if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
         }
-        if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
+        if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
         }
       }
     }
     // Cycle count is needed at branches.  Assume it is needed at the target too.
-    if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
+    if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==SPAN) {
       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
     }
@@ -7651,7 +8267,7 @@ int new_recompile_block(int addr)
            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
         {
-          if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+          if (!is_ujump(i))
           {
             if(likely[i]) {
               regs[i].regmap[hr]=-1;
@@ -7663,14 +8279,9 @@ int new_recompile_block(int addr)
             }
           }
         }
-        if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+        if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
         {
-          int d1=0,d2=0,map=0,temp=0;
-          if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
-          {
-            d1=dep1[i+1];
-            d2=dep2[i+1];
-          }
+          int map=0,temp=0;
           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
             map=INVCP;
@@ -7681,8 +8292,6 @@ int new_recompile_block(int addr)
           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
-             (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
-             (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
@@ -7694,8 +8303,6 @@ int new_recompile_block(int addr)
             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
-               (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
-               (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
@@ -7704,7 +8311,7 @@ int new_recompile_block(int addr)
             {
               branch_regs[i].regmap[hr]=-1;
               branch_regs[i].regmap_entry[hr]=-1;
-              if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
+              if (!is_ujump(i))
               {
                 if(!likely[i]&&i<slen-2) {
                   regmap_pre[i+2][hr]=-1;
@@ -7719,12 +8326,7 @@ int new_recompile_block(int addr)
           // Non-branch
           if(i>0)
           {
-            int d1=0,d2=0,map=-1,temp=-1;
-            if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
-            {
-              d1=dep1[i];
-              d2=dep2[i];
-            }
+            int map=-1,temp=-1;
             if(itype[i]==STORE || itype[i]==STORELR ||
                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
               map=INVCP;
@@ -7733,16 +8335,14 @@ int new_recompile_block(int addr)
                itype[i]==C1LS || itype[i]==C2LS)
               temp=FTEMP;
             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
-               (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
-               (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
             {
               if(i<slen-1&&!is_ds[i]) {
-                if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
+                assert(regs[i].regmap[hr]<64);
+                if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]>0)
                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
-                if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
                 {
                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
@@ -7756,8 +8356,8 @@ int new_recompile_block(int addr)
             }
           }
         }
-      }
-    }
+      } // if needed
+    } // for hr
   }
 
   /* Pass 5 - Pre-allocate registers */
@@ -7770,7 +8370,7 @@ int new_recompile_block(int addr)
   clear_all_regs(f_regmap);
   for(i=0;i<slen-1;i++)
   {
-    if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
     {
       if(ba[i]>=start && ba[i]<(start+i*4))
       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
@@ -7780,16 +8380,11 @@ int new_recompile_block(int addr)
       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
       {
         int t=(ba[i]-start)>>2;
-        if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
+        if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP)) // loop_preload can't handle jumps into delay slots
         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
         for(hr=0;hr<HOST_REGS;hr++)
         {
-          if(regs[i].regmap[hr]>64) {
-            if(!((regs[i].dirty>>hr)&1))
-              f_regmap[hr]=regs[i].regmap[hr];
-            else f_regmap[hr]=-1;
-          }
-          else if(regs[i].regmap[hr]>=0) {
+          if(regs[i].regmap[hr]>=0) {
             if(f_regmap[hr]!=regs[i].regmap[hr]) {
               // dealloc old register
               int n;
@@ -7801,12 +8396,7 @@ int new_recompile_block(int addr)
               f_regmap[hr]=regs[i].regmap[hr];
             }
           }
-          if(branch_regs[i].regmap[hr]>64) {
-            if(!((branch_regs[i].dirty>>hr)&1))
-              f_regmap[hr]=branch_regs[i].regmap[hr];
-            else f_regmap[hr]=-1;
-          }
-          else if(branch_regs[i].regmap[hr]>=0) {
+          if(branch_regs[i].regmap[hr]>=0) {
             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
               // dealloc old register
               int n;
@@ -7863,7 +8453,7 @@ int new_recompile_block(int addr)
                         //printf("no-match due to different register\n");
                         break;
                       }
-                      if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
+                      if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP) {
                         //printf("no-match due to branch\n");
                         break;
                       }
@@ -7871,22 +8461,9 @@ int new_recompile_block(int addr)
                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
                         break;
                       }
-                      if(r>63) {
-                        // NB This can exclude the case where the upper-half
-                        // register is lower numbered than the lower-half
-                        // register.  Not sure if it's worth fixing...
-                        if(get_reg(regs[k-1].regmap,r&63)<0) break;
-                        if(regs[k-1].is32&(1LL<<(r&63))) break;
-                      }
+                      assert(r < 64);
                       k--;
                     }
-                    if(i<slen-1) {
-                      if((regs[k].is32&(1LL<<f_regmap[hr]))!=
-                        (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
-                        //printf("bad match after branch\n");
-                        break;
-                      }
-                    }
                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
                       while(k<i) {
@@ -7925,12 +8502,10 @@ int new_recompile_block(int addr)
                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
                       branch_regs[i].wasconst&=~(1<<hr);
                       branch_regs[i].isconst&=~(1<<hr);
-                      if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
+                      if (!is_ujump(i)) {
                         regmap_pre[i+2][hr]=f_regmap[hr];
                         regs[i+2].wasdirty&=~(1<<hr);
                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
-                        assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
-                          (regs[i+2].was32&(1LL<<f_regmap[hr])));
                       }
                     }
                   }
@@ -7942,17 +8517,15 @@ int new_recompile_block(int addr)
                     regs[k].dirty&=~(1<<hr);
                     regs[k].wasconst&=~(1<<hr);
                     regs[k].isconst&=~(1<<hr);
-                    if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
+                    if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP) {
                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
                       branch_regs[k].regmap[hr]=f_regmap[hr];
                       branch_regs[k].dirty&=~(1<<hr);
                       branch_regs[k].wasconst&=~(1<<hr);
                       branch_regs[k].isconst&=~(1<<hr);
-                      if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
+                      if (!is_ujump(k)) {
                         regmap_pre[k+2][hr]=f_regmap[hr];
                         regs[k+2].wasdirty&=~(1<<hr);
-                        assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
-                          (regs[k+2].was32&(1LL<<f_regmap[hr])));
                       }
                     }
                     else
@@ -7972,16 +8545,12 @@ int new_recompile_block(int addr)
                   //printf("no-match due to different register\n");
                   break;
                 }
-                if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
-                  //printf("32/64 mismatch %x %d\n",start+j*4,hr);
-                  break;
-                }
-                if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
+                if (is_ujump(j))
                 {
                   // Stop on unconditional branch
                   break;
                 }
-                if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
+                if(itype[j]==CJUMP||itype[j]==SJUMP)
                 {
                   if(ooo[j]) {
                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
@@ -7999,17 +8568,7 @@ int new_recompile_block(int addr)
                   //printf("No free regs for store %x\n",start+j*4);
                   break;
                 }
-                if(f_regmap[hr]>=64) {
-                  if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
-                    break;
-                  }
-                  else
-                  {
-                    if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
-                      break;
-                    }
-                  }
-                }
+                assert(f_regmap[hr]<64);
               }
             }
           }
@@ -8020,11 +8579,7 @@ int new_recompile_block(int addr)
       for(hr=0;hr<HOST_REGS;hr++)
       {
         if(hr!=EXCLUDE_REG) {
-          if(regs[i].regmap[hr]>64) {
-            if(!((regs[i].dirty>>hr)&1))
-              f_regmap[hr]=regs[i].regmap[hr];
-          }
-          else if(regs[i].regmap[hr]>=0) {
+          if(regs[i].regmap[hr]>=0) {
             if(f_regmap[hr]!=regs[i].regmap[hr]) {
               // dealloc old register
               int n;
@@ -8106,7 +8661,7 @@ int new_recompile_block(int addr)
   // to use, which can avoid a load-use penalty on certain CPUs.
   for(i=0;i<slen-1;i++)
   {
-    if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
+    if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP))
     {
       if(!bt[i+1])
       {
@@ -8267,7 +8822,7 @@ int new_recompile_block(int addr)
   /* Pass 7 - Identify 32-bit registers */
   for (i=slen-1;i>=0;i--)
   {
-    if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    if(itype[i]==CJUMP||itype[i]==SJUMP)
     {
       // Conditional branch
       if((source[i]>>16)!=0x1000&&i<slen-2) {
@@ -8302,6 +8857,7 @@ int new_recompile_block(int addr)
     #ifdef __arm__
     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
     #endif
+    #if defined(__i386__) || defined(__x86_64__)
     printf("needs: ");
     if(needed_reg[i]&1) printf("eax ");
     if((needed_reg[i]>>1)&1) printf("ecx ");
@@ -8311,7 +8867,6 @@ int new_recompile_block(int addr)
     if((needed_reg[i]>>6)&1) printf("esi ");
     if((needed_reg[i]>>7)&1) printf("edi ");
     printf("\n");
-    #if defined(__i386__) || defined(__x86_64__)
     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
     printf("dirty: ");
     if(regs[i].wasdirty&1) printf("eax ");
@@ -8378,7 +8933,7 @@ int new_recompile_block(int addr)
       if((regs[i].isconst>>6)&1) printf("esi=%x ",(u_int)constmap[i][6]);
       if((regs[i].isconst>>7)&1) printf("edi=%x ",(u_int)constmap[i][7]);
       #endif
-      #ifdef __arm__
+      #if defined(__arm__) || defined(__aarch64__)
       int r;
       for (r = 0; r < ARRAY_SIZE(constmap[i]); r++)
         if ((regs[i].isconst >> r) & 1)
@@ -8386,7 +8941,7 @@ int new_recompile_block(int addr)
       #endif
       printf("\n");
     }
-    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
+    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
       #if defined(__i386__) || defined(__x86_64__)
       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
       if(branch_regs[i].dirty&1) printf("eax ");
@@ -8419,7 +8974,6 @@ int new_recompile_block(int addr)
   /* Pass 8 - Assembly */
   linkcount=0;stubcount=0;
   ds=0;is_delayslot=0;
-  uint64_t is32_pre=0;
   u_int dirty_pre=0;
   void *beginning=start_block();
   if((u_int)addr&1) {
@@ -8429,7 +8983,7 @@ int new_recompile_block(int addr)
   void *instr_addr0_override = NULL;
 
   if (start == 0x80030000) {
-    // nasty hack for fastbios thing
+    // nasty hack for the fastbios thing
     // override block entry to this code
     instr_addr0_override = out;
     emit_movimm(start,0);
@@ -8439,7 +8993,12 @@ int new_recompile_block(int addr)
     emit_writeword(0,&pcaddr);
     emit_writeword(0,&address);
     emit_cmp(0,1);
+    #ifdef __aarch64__
+    emit_jeq(out + 4*2);
+    emit_far_jump(new_dyna_leave);
+    #else
     emit_jne(new_dyna_leave);
+    #endif
   }
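The aarch64 path added above works around the limited reach of a conditional branch: the condition is inverted so a short emit_jeq skips over an unconditional far jump. As a standalone sketch (hypothetical helper name; assumes the flags were just set by emit_cmp and that the far jump expands to two 4-byte instructions, as in the hunk):

static void emit_far_jump_ne(void *target)
{
#ifdef __aarch64__
  emit_jeq(out + 4*2);      // inverted condition: skip the 2-insn far jump when equal
  emit_far_jump(target);    // unconditional far jump, taken only when not equal
#else
  emit_jne(target);         // other hosts branch conditionally straight to the target
#endif
}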
   for(i=0;i<slen;i++)
   {
@@ -8452,23 +9011,20 @@ int new_recompile_block(int addr)
     } else {
       speculate_register_values(i);
       #ifndef DESTRUCTIVE_WRITEBACK
-      if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
+      if (i < 2 || !is_ujump(i-2))
       {
-        wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
-              unneeded_reg[i]);
+        wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
       }
-      if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
-        is32_pre=branch_regs[i].is32;
+      if((itype[i]==CJUMP||itype[i]==SJUMP)&&!likely[i]) {
         dirty_pre=branch_regs[i].dirty;
       }else{
-        is32_pre=regs[i].is32;
         dirty_pre=regs[i].dirty;
       }
       #endif
       // write back
-      if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
+      if (i < 2 || !is_ujump(i-2))
       {
-        wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,unneeded_reg[i]);
+        wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
         loop_preload(regmap_pre[i],regs[i].regmap_entry);
       }
       // branch target entry point
@@ -8478,35 +9034,35 @@ int new_recompile_block(int addr)
 
       // load regs
       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
-        wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
-      load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
+        wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
+      load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i],rs2[i]);
       address_generation(i,&regs[i],regs[i].regmap_entry);
-      load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
-      if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+      load_consts(regmap_pre[i],regs[i].regmap,i);
+      if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
       {
         // Load the delay slot registers if necessary
         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
-          load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
+          load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
-          load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
+          load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
-          load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
+          load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
       }
       else if(i+1<slen)
       {
         // Preload registers for following instruction
         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
-            load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
+            load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
-            load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
+            load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
       }
       // TODO: if(is_ooo(i)) address_generation(i+1);
-      if(itype[i]==CJUMP||itype[i]==FJUMP)
-        load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
+      if(itype[i]==CJUMP)
+        load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
-        load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
+        load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
       // assemble
       switch(itype[i]) {
         case ALU:
@@ -8555,38 +9111,36 @@ int new_recompile_block(int addr)
           cjump_assemble(i,&regs[i]);ds=1;break;
         case SJUMP:
           sjump_assemble(i,&regs[i]);ds=1;break;
-        case FJUMP:
-          assert(0);ds=1;break;
         case SPAN:
           pagespan_assemble(i,&regs[i]);break;
       }
-      if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
+      if (is_ujump(i))
         literal_pool(1024);
       else
         literal_pool_jumpover(256);
     }
   }
-  //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
+  //assert(is_ujump(i-2));
   // If the block did not end with an unconditional branch,
   // add a jump to the next instruction.
   if(i>1) {
-    if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
-      assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
+    if(!is_ujump(i-2)&&itype[i-1]!=SPAN) {
+      assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
       assert(i==slen);
-      if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
-        store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
+      if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) {
+        store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
           emit_loadreg(CCREG,HOST_CCREG);
         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
       }
       else if(!likely[i-2])
       {
-        store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
+        store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
       }
       else
       {
-        store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
+        store_regs_bt(regs[i-2].regmap,regs[i-2].dirty,start+i*4);
         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
       }
       add_to_linker(out,start+i*4,0);
@@ -8596,8 +9150,8 @@ int new_recompile_block(int addr)
   else
   {
     assert(i>0);
-    assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
-    store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
+    assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
+    store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
       emit_loadreg(CCREG,HOST_CCREG);
     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
@@ -8713,8 +9267,8 @@ int new_recompile_block(int addr)
 
   // If we're within 256K of the end of the buffer,
   // start over from the beginning. (Is 256K enough?)
-  if (out > translation_cache+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE)
-    out = translation_cache;
+  if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
+    out = ndrc->translation_cache;
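The wrap check rewritten above now derives the cache end from the ndrc block instead of recomputing 1<<TARGET_SIZE_2 by hand. The same test as a predicate (hypothetical name, not part of the patch):

static int near_cache_end(const u_char *out_ptr)
{
  const u_char *cache_end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
  // keep at least one maximally sized output block of headroom before wrapping
  return out_ptr > cache_end - MAX_OUTPUT_BLOCK_SIZE;
}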
 
   // Trap writes to any of the pages we compiled
   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
@@ -8731,11 +9285,11 @@ int new_recompile_block(int addr)
 
   /* Pass 10 - Free memory by expiring oldest blocks */
 
-  int end=(((out-translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
+  int end=(((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
   while(expirep!=end)
   {
     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
-    uintptr_t base=(uintptr_t)translation_cache+((expirep>>13)<<shift); // Base address of this block
+    uintptr_t base=(uintptr_t)ndrc->translation_cache+((expirep>>13)<<shift); // Base address of this block
     inv_debug("EXP: Phase %d\n",expirep);
     switch((expirep>>11)&3)
     {
@@ -8773,10 +9327,8 @@ int new_recompile_block(int addr)
         break;
       case 3:
         // Clear jump_out
-        #ifdef __arm__
         if((expirep&2047)==0)
           do_clear_cache();
-        #endif
         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
         break;
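For reference, the expire cursor that drives Pass 10 packs three fields, decoded by the expressions above; written out as a hypothetical helper it is roughly:

static void decode_expirep(u_int ep, uintptr_t *base, int *phase, int *bucket)
{
  int shift = TARGET_SIZE_2 - 3;   // the translation cache is swept in 8 regions
  *base   = (uintptr_t)ndrc->translation_cache + ((ep >> 13) << shift); // region base
  *phase  = (ep >> 11) & 3;        // which list is cleared this step (cases 0..3 above)
  *bucket = ep & 2047;             // hash bucket within that list
}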