drc: inv: fix ram offset and mirror handling
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
index 8d3688b..59d4208 100644 (file)
@@ -1,6 +1,6 @@
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  *   Mupen64plus - new_dynarec.c                                           *
- *   Copyright (C) 2009-2010 Ari64                                         *
+ *   Copyright (C) 2009-2011 Ari64                                         *
  *                                                                         *
  *   This program is free software; you can redistribute it and/or modify  *
  *   it under the terms of the GNU General Public License as published by  *
 #include <stdlib.h>
 #include <stdint.h> //include for uint64_t
 #include <assert.h>
+#include <sys/mman.h>
 
 #include "emu_if.h" //emulator interface
 
-#include <sys/mman.h>
+//#define DISASM
+//#define assem_debug printf
+//#define inv_debug printf
+#define assem_debug(...)
+#define inv_debug(...)
 
 #ifdef __i386__
 #include "assem_x86.h"
@@ -38,7 +43,6 @@
 
 #define MAXBLOCK 4096
 #define MAX_OUTPUT_BLOCK_SIZE 262144
-#define CLOCK_DIVIDER 2
 
 struct regstat
 {
@@ -52,7 +56,8 @@ struct regstat
   uint64_t uu;
   u_int wasconst;
   u_int isconst;
-  uint64_t constmap[HOST_REGS];
+  u_int loadedconst;             // host regs that have constants loaded
+  u_int waswritten;              // MIPS regs that were used as store base before
 };
 
 struct ll_entry
@@ -80,10 +85,19 @@ struct ll_entry
   u_char dep1[MAXBLOCK];
   u_char dep2[MAXBLOCK];
   u_char lt1[MAXBLOCK];
+  static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
+  static uint64_t gte_rt[MAXBLOCK];
+  static uint64_t gte_unneeded[MAXBLOCK];
+  static u_int smrv[32]; // speculated MIPS register values
+  static u_int smrv_strong; // mask of regs that are likely to have correct values
+  static u_int smrv_weak; // same, but somewhat less likely
+  static u_int smrv_strong_next; // same, but after current insn executes
+  static u_int smrv_weak_next;
   int imm[MAXBLOCK];
   u_int ba[MAXBLOCK];
   char likely[MAXBLOCK];
   char is_ds[MAXBLOCK];
+  char ooo[MAXBLOCK];
   uint64_t unneeded_reg[MAXBLOCK];
   uint64_t unneeded_reg_upper[MAXBLOCK];
   uint64_t branch_unneeded_reg[MAXBLOCK];
@@ -91,13 +105,11 @@ struct ll_entry
   uint64_t p32[MAXBLOCK];
   uint64_t pr32[MAXBLOCK];
   signed char regmap_pre[MAXBLOCK][HOST_REGS];
-  signed char regmap[MAXBLOCK][HOST_REGS];
-  signed char regmap_entry[MAXBLOCK][HOST_REGS];
-  uint64_t constmap[MAXBLOCK][HOST_REGS];
-  uint64_t known_value[HOST_REGS];
-  u_int known_reg;
-  struct regstat regs[MAXBLOCK];
-  struct regstat branch_regs[MAXBLOCK];
+  static uint64_t current_constmap[HOST_REGS];
+  static uint64_t constmap[MAXBLOCK][HOST_REGS];
+  static struct regstat regs[MAXBLOCK];
+  static struct regstat branch_regs[MAXBLOCK];
+  signed char minimum_free_regs[MAXBLOCK];
   u_int needed_reg[MAXBLOCK];
   uint64_t requires_32bit[MAXBLOCK];
   u_int wont_dirty[MAXBLOCK];
@@ -121,8 +133,19 @@ struct ll_entry
   char shadow[1048576]  __attribute__((aligned(16)));
   void *copy;
   int expirep;
+#ifndef PCSX
   u_int using_tlb;
+#else
+  static const u_int using_tlb=0;
+#endif
+  int new_dynarec_did_compile;
+  int new_dynarec_hacks;
   u_int stop_after_jal;
+#ifndef RAM_FIXED
+  static u_int ram_offset;
+#else
+  static const u_int ram_offset=0;
+#endif
   extern u_char restore_candidate[512];
   extern int cycle_count;
 
@@ -134,19 +157,21 @@ struct ll_entry
 #define CSREG 35 // Coprocessor status
 #define CCREG 36 // Cycle count
 #define INVCP 37 // Pointer to invalid_code
-#define TEMPREG 38
-#define FTEMP 38 // FPU/LDL/LDR temporary register
-#define PTEMP 39 // Prefetch temporary register
-#define TLREG 40 // TLB mapping offset
-#define RHASH 41 // Return address hash
-#define RHTBL 42 // Return address hash table address
-#define RTEMP 43 // JR/JALR address register
-#define MAXREG 43
-#define AGEN1 44 // Address generation temporary register
-#define AGEN2 45 // Address generation temporary register
-#define MGEN1 46 // Maptable address generation temporary register
-#define MGEN2 47 // Maptable address generation temporary register
-#define BTREG 48 // Branch target temporary register
+#define MMREG 38 // Pointer to memory_map
+#define ROREG 39 // ram offset (if rdram!=0x80000000)
+#define TEMPREG 40
+#define FTEMP 40 // FPU temporary register
+#define PTEMP 41 // Prefetch temporary register
+#define TLREG 42 // TLB mapping offset
+#define RHASH 43 // Return address hash
+#define RHTBL 44 // Return address hash table address
+#define RTEMP 45 // JR/JALR address register
+#define MAXREG 45
+#define AGEN1 46 // Address generation temporary register
+#define AGEN2 47 // Address generation temporary register
+#define MGEN1 48 // Maptable address generation temporary register
+#define MGEN2 49 // Maptable address generation temporary register
+#define BTREG 50 // Branch target temporary register
 
   /* instruction types */
 #define NOP 0     // No operation
@@ -179,6 +204,7 @@ struct ll_entry
 #define COP2 27   // Coprocessor 2 move
 #define C2LS 28   // Coprocessor 2 load/store
 #define C2OP 29   // Coprocessor 2 operation
+#define INTCALL 30// Call interpreter to handle rare corner cases
 
   /* stubs */
 #define CC_STUB 1
@@ -220,6 +246,7 @@ void jump_syscall();
 void jump_syscall_hle();
 void jump_eret();
 void jump_hlecall();
+void jump_intcall();
 void new_dyna_leave();
 
 // TLB
@@ -252,11 +279,13 @@ int tracedebug=0;
 
 //#define DEBUG_CYCLE_COUNT 1
 
-void nullf() {}
-//#define assem_debug printf
-//#define inv_debug printf
-#define assem_debug nullf
-#define inv_debug nullf
+int cycle_multiplier; // 100 for 1.0
+
+static int CLOCK_ADJUST(int x)
+{
+  int s=(x>>31)|1;
+  return (x * cycle_multiplier + s * 50) / 100;
+}
 
 static void tlb_hacks()
 {
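The fixed CLOCK_DIVIDER is gone; cycle counts are now scaled by a runtime cycle_multiplier (100 meaning 1.0x) through CLOCK_ADJUST, which rounds to nearest: the (x>>31)|1 expression yields +1 for non-negative x and -1 for negative x, so the +/-50 bias rounds away from zero in both directions. A small self-contained sanity check of that behaviour, using 150 (1.5x) as an illustrative multiplier:

#include <assert.h>

static int cycle_multiplier_demo = 150;   /* 1.5x, made-up example value */

static int clock_adjust_demo(int x)
{
    int s = (x >> 31) | 1;                /* +1 if x >= 0, -1 if x < 0 */
    return (x * cycle_multiplier_demo + s * 50) / 100;
}

int main(void)
{
    assert(clock_adjust_demo(2)  ==  3);  /* 3.0  */
    assert(clock_adjust_demo(3)  ==  5);  /* 4.5 rounds up   */
    assert(clock_adjust_demo(-3) == -5);  /* -4.5 rounds down */
    return 0;
}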
@@ -307,7 +336,14 @@ static void tlb_hacks()
 
 static u_int get_page(u_int vaddr)
 {
+#ifndef PCSX
   u_int page=(vaddr^0x80000000)>>12;
+#else
+  u_int page=vaddr&~0xe0000000;
+  if (page < 0x1000000)
+    page &= ~0x0e00000; // RAM mirrors
+  page>>=12;
+#endif
 #ifndef DISABLE_TLB
   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
 #endif
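The PCSX variant of get_page() has to cope with the PSX's 2MB RAM being mirrored several times in the low 16MB of each segment: it first strips the KSEG0/KSEG1 bits (&~0xe0000000) and then clears bits 21-23 (&~0x0e00000) so every mirror folds onto the same 4K page index, which is what lets a write through one mirror invalidate code compiled from another. A hedged, standalone restatement of that folding:

#include <assert.h>

/* Same page computation as the PCSX get_page() above, minus the TLB part. */
static unsigned int page_for(unsigned int vaddr)
{
    unsigned int page = vaddr & ~0xe0000000;  /* drop segment bits */
    if (page < 0x1000000)
        page &= ~0x0e00000;                   /* fold RAM mirrors */
    return page >> 12;
}

int main(void)
{
    /* the same RAM word reached through three different mirrors/segments */
    assert(page_for(0x00201234) == page_for(0x80201234));
    assert(page_for(0x00201234) == page_for(0xa0a01234));
    return 0;
}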
@@ -315,6 +351,7 @@ static u_int get_page(u_int vaddr)
   return page;
 }
 
+#ifndef PCSX
 static u_int get_vpage(u_int vaddr)
 {
   u_int vpage=(vaddr^0x80000000)>>12;
@@ -324,6 +361,13 @@ static u_int get_vpage(u_int vaddr)
   if(vpage>2048) vpage=2048+(vpage&2047);
   return vpage;
 }
+#else
+// no virtual mem in PCSX
+static u_int get_vpage(u_int vaddr)
+{
+  return get_page(vaddr);
+}
+#endif
 
 // Get address from virtual address
 // This is called from the recompiled JR/JALR instructions
@@ -355,7 +399,10 @@ void *get_addr(u_int vaddr)
       if(verify_dirty(head->addr)) {
         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
         invalid_code[vaddr>>12]=0;
+        inv_code_start=inv_code_end=~0;
+#ifndef DISABLE_TLB
         memory_map[vaddr>>12]|=0x40000000;
+#endif
         if(vpage<2048) {
 #ifndef DISABLE_TLB
           if(tlb_LUT_r[vaddr>>12]) {
@@ -408,7 +455,7 @@ void *get_addr_32(u_int vaddr,u_int flags)
 {
 #ifdef FORCE32
   return get_addr(vaddr);
-#endif
+#else
   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
@@ -447,6 +494,7 @@ void *get_addr_32(u_int vaddr,u_int flags)
       if(verify_dirty(head->addr)) {
         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
         invalid_code[vaddr>>12]=0;
+        inv_code_start=inv_code_end=~0;
         memory_map[vaddr>>12]|=0x40000000;
         if(vpage<2048) {
 #ifndef DISABLE_TLB
@@ -488,6 +536,7 @@ void *get_addr_32(u_int vaddr,u_int flags)
   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
   EntryHi=BadVAddr&0xFFFFE000;
   return get_addr_ht(0x80000000);
+#endif
 }
 
 void clear_all_regs(signed char regmap[])
@@ -559,11 +608,11 @@ void set_const(struct regstat *cur,signed char reg,uint64_t value)
   for (hr=0;hr<HOST_REGS;hr++) {
     if(cur->regmap[hr]==reg) {
       cur->isconst|=1<<hr;
-      cur->constmap[hr]=value;
+      current_constmap[hr]=value;
     }
     else if((cur->regmap[hr]^64)==reg) {
       cur->isconst|=1<<hr;
-      cur->constmap[hr]=value>>32;
+      current_constmap[hr]=value>>32;
     }
   }
 }
@@ -582,6 +631,7 @@ void clear_const(struct regstat *cur,signed char reg)
 int is_const(struct regstat *cur,signed char reg)
 {
   int hr;
+  if(reg<0) return 0;
   if(!reg) return 1;
   for (hr=0;hr<HOST_REGS;hr++) {
     if((cur->regmap[hr]&63)==reg) {
@@ -596,7 +646,7 @@ uint64_t get_const(struct regstat *cur,signed char reg)
   if(!reg) return 0;
   for (hr=0;hr<HOST_REGS;hr++) {
     if(cur->regmap[hr]==reg) {
-      return cur->constmap[hr];
+      return current_constmap[hr];
     }
   }
   printf("Unknown constant in r%d\n",reg);
@@ -702,12 +752,6 @@ int needed_again(int r, int i)
   int j;
   int b=-1;
   int rn=10;
-  int hr;
-  u_char hsn[MAXREG+1];
-  int preferred_reg;
-  
-  memset(hsn,10,sizeof(hsn));
-  lsn(hsn,i,&preferred_reg);
   
   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
   {
@@ -726,7 +770,7 @@ int needed_again(int r, int i)
       j++;
       break;
     }
-    if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||((source[i+j]&0xfc00003f)==0x0d))
+    if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
     {
       break;
     }
@@ -760,11 +804,7 @@ int needed_again(int r, int i)
       }
     }
   }*/
-  for(hr=0;hr<HOST_REGS;hr++) {
-    if(hr!=EXCLUDE_REG) {
-      if(rn<hsn[hr]) return 1;
-    }
-  }
+  if(rn<10) return 1;
   return 0;
 }
 
@@ -834,7 +874,7 @@ void alloc_all(struct regstat *cur,int i)
   }
 }
 
-
+#ifndef FORCE32
 void div64(int64_t dividend,int64_t divisor)
 {
   lo=dividend/divisor;
@@ -945,6 +985,7 @@ uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
   else original=loaded;
   return original;
 }
+#endif
 
 #ifdef __i386__
 #include "assem_x86.c"
@@ -1076,29 +1117,20 @@ void ll_clear(struct ll_entry **head)
 // Dereference the pointers and remove if it matches
 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
 {
-  u_int old_host_addr=0;
   while(head) {
     int ptr=get_pointer(head->addr);
     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
     if(((ptr>>shift)==(addr>>shift)) ||
        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
     {
-      printf("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
+      inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
       u_int host_addr=(u_int)kill_pointer(head->addr);
-
-      if((host_addr>>12)!=(old_host_addr>>12)) {
-        #ifdef __arm__
-        __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
-        #endif
-        old_host_addr=host_addr;
-      }
+      #ifdef __arm__
+        needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
+      #endif
     }
     head=head->next;
   }
-  #ifdef __arm__
-  if (old_host_addr)
-    __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
-  #endif
 }
 
 // This is called when we write to a compiled block (see do_invstub)
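Rather than issuing __clear_cache() while walking the pointer list (the removed old_host_addr batching), killed host addresses are now only recorded in the needs_clear_cache bitmap and flushed in one go later by do_clear_cache(); with the indexing used above, each 32-bit word of the bitmap covers a 128KB slice of the translation cache and each bit a 4KB page. A hedged sketch of that marking step on its own:

#include <stdint.h>

/* Illustrative only: mark the 4K page of the translation cache that
 * contains host_addr.  base stands in for BASE_ADDR. */
static void mark_dirty_page(uint32_t needs_clear_cache[],
                            uintptr_t host_addr, uintptr_t base)
{
    uintptr_t ofs = host_addr - base;
    needs_clear_cache[ofs >> 17] |= 1u << ((ofs >> 12) & 31);
}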
@@ -1106,7 +1138,6 @@ void invalidate_page(u_int page)
 {
   struct ll_entry *head;
   struct ll_entry *next;
-  u_int old_host_addr=0;
   head=jump_in[page];
   jump_in[page]=0;
   while(head!=NULL) {
@@ -1121,22 +1152,54 @@ void invalidate_page(u_int page)
   while(head!=NULL) {
     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
     u_int host_addr=(u_int)kill_pointer(head->addr);
-
-    if((host_addr>>12)!=(old_host_addr>>12)) {
-      #ifdef __arm__
-      __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
-      #endif
-      old_host_addr=host_addr;
-    }
+    #ifdef __arm__
+      needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
+    #endif
     next=head->next;
     free(head);
     head=next;
   }
+}
+
+static void invalidate_block_range(u_int block, u_int first, u_int last)
+{
+  u_int page=get_page(block<<12);
+  //printf("first=%d last=%d\n",first,last);
+  invalidate_page(page);
+  assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
+  assert(last<page+5);
+  // Invalidate the adjacent pages if a block crosses a 4K boundary
+  while(first<page) {
+    invalidate_page(first);
+    first++;
+  }
+  for(first=page+1;first<last;first++) {
+    invalidate_page(first);
+  }
   #ifdef __arm__
-  if (old_host_addr)
-    __clear_cache((void *)(old_host_addr&~0xfff),(void *)(old_host_addr|0xfff));
+    do_clear_cache();
+  #endif
+  
+  // Don't trap writes
+  invalid_code[block]=1;
+#ifndef DISABLE_TLB
+  // If there is a valid TLB entry for this page, remove write protect
+  if(tlb_LUT_w[block]) {
+    assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
+    // CHECK: Is this right?
+    memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
+    u_int real_block=tlb_LUT_w[block]>>12;
+    invalid_code[real_block]=1;
+    if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
+  }
+  else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
+#endif
+
+  #ifdef USE_MINI_HT
+  memset(mini_ht,-1,sizeof(mini_ht));
   #endif
 }
+
 void invalidate_block(u_int block)
 {
   u_int page=get_page(block<<12);
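The page-walking tail of invalidate_block() is hoisted into invalidate_block_range() so that the PCSX invalidate_addr() below can reuse it with bounds it computed itself; the helper invalidates the primary page plus any adjacent pages a block spills into, does the batched icache flush via do_clear_cache(), and clears the write trap through invalid_code. A hypothetical standalone illustration of the first/last pair that invalidate_block() feeds into it:

#include <stdio.h>

/* Made-up helper for illustration: which page indices does a block
 * covering guest range [start, end) touch?  This mirrors the first/last
 * computation in invalidate_block() above. */
static void block_pages(unsigned int start, unsigned int end,
                        unsigned int *first, unsigned int *last)
{
    *first = (start >> 12) & 2047;
    *last  = ((end - 1) >> 12) & 2047;
}

int main(void)
{
    unsigned int first, last;
    block_pages(0x80000ff0, 0x80001020, &first, &last);
    printf("pages %u..%u\n", first, last);  /* 0..1: block crosses a 4K boundary */
    return 0;
}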
@@ -1153,7 +1216,7 @@ void invalidate_block(u_int block)
     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
       get_bounds((int)head->addr,&start,&end);
       //printf("start: %x end: %x\n",start,end);
-      if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
+      if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
@@ -1170,42 +1233,71 @@ void invalidate_block(u_int block)
     }
     head=head->next;
   }
-  //printf("first=%d last=%d\n",first,last);
-  invalidate_page(page);
-  assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
-  assert(last<page+5);
-  // Invalidate the adjacent pages if a block crosses a 4K boundary
-  while(first<page) {
-    invalidate_page(first);
-    first++;
-  }
-  for(first=page+1;first<last;first++) {
-    invalidate_page(first);
-  }
-  
-  // Don't trap writes
-  invalid_code[block]=1;
-#ifndef DISABLE_TLB
-  // If there is a valid TLB entry for this page, remove write protect
-  if(tlb_LUT_w[block]) {
-    assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
-    // CHECK: Is this right?
-    memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
-    u_int real_block=tlb_LUT_w[block]>>12;
-    invalid_code[real_block]=1;
-    if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
-  }
-  else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
-#endif
-
-  #ifdef USE_MINI_HT
-  memset(mini_ht,-1,sizeof(mini_ht));
-  #endif
+  invalidate_block_range(block,first,last);
 }
+
 void invalidate_addr(u_int addr)
 {
+#ifdef PCSX
+  //static int rhits;
+  // this check is done by the caller
+  //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
+  u_int page=get_vpage(addr);
+  if(page<2048) { // RAM
+    struct ll_entry *head;
+    u_int addr_min=~0, addr_max=0;
+    u_int mask=RAM_SIZE-1;
+    u_int addr_main=0x80000000|(addr&mask);
+    int pg1;
+    inv_code_start=addr_main&~0xfff;
+    inv_code_end=addr_main|0xfff;
+    pg1=page;
+    if (pg1>0) {
+      // must check previous page too because of spans..
+      pg1--;
+      inv_code_start-=0x1000;
+    }
+    for(;pg1<=page;pg1++) {
+      for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
+        u_int start,end;
+        get_bounds((int)head->addr,&start,&end);
+        if(ram_offset) {
+          start-=ram_offset;
+          end-=ram_offset;
+        }
+        if(start<=addr_main&&addr_main<end) {
+          if(start<addr_min) addr_min=start;
+          if(end>addr_max) addr_max=end;
+        }
+        else if(addr_main<start) {
+          if(start<inv_code_end)
+            inv_code_end=start-1;
+        }
+        else {
+          if(end>inv_code_start)
+            inv_code_start=end;
+        }
+      }
+    }
+    if (addr_min!=~0) {
+      inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
+      inv_code_start=inv_code_end=~0;
+      invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
+      return;
+    }
+    else {
+      inv_code_start=(addr&~mask)|(inv_code_start&mask);
+      inv_code_end=(addr&~mask)|(inv_code_end&mask);
+      inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
+      return;
+    }
+  }
+#endif
   invalidate_block(addr>>12);
 }
+
+// This is called when loading a save state.
+// Anything could have changed, so invalidate everything.
 void invalidate_all_pages()
 {
   u_int page,n;
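The new PCSX invalidate_addr() keeps a "no compiled code here" window in inv_code_start..inv_code_end: when a write misses every dirty block on its page (and the previous page, since blocks may span pages), the window is widened so the write handlers can skip calling in again for nearby addresses; only a hit falls through to invalidate_block_range(). Note the ram_offset subtraction, which re-bases get_bounds() results (host pointers into rdram) back to guest addresses before comparing. A hedged sketch of the caller-side check this window enables, which the comment above says is done by the caller:

#include <stdint.h>

extern uint32_t inv_code_start, inv_code_end;  /* maintained by invalidate_addr() */
void invalidate_addr(unsigned int addr);

/* Hypothetical write-handler fast path, for illustration only. */
static void notify_write(uint32_t addr)
{
    if (inv_code_start <= addr && addr <= inv_code_end)
        return;                /* known: no compiled code near this address */
    invalidate_addr(addr);     /* slow path: scan jump_dirty, maybe invalidate */
}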
@@ -1242,6 +1334,8 @@ void add_link(u_int vaddr,void *src)
 {
   u_int page=get_page(vaddr);
   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
+  int *ptr=(int *)(src+4);
+  assert((*ptr&0x0fff0000)==0x059f0000);
   ll_add(jump_out+page,vaddr,src);
   //int ptr=get_pointer(src);
   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
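The assert added to add_link() checks that the word 4 bytes past the recorded branch really is a PC-relative literal load: on ARM, masking off the condition, destination register and immediate of "ldr rX, [pc, #imm]" leaves 0x059f0000, so a link stub that does not have that layout would be caught before the patcher ever rewrites it. An illustrative check of that encoding test (assuming standard ARM encodings, not taken from this source):

#include <assert.h>
#include <stdint.h>

static int is_pc_relative_ldr(uint32_t insn)
{
    return (insn & 0x0fff0000) == 0x059f0000;   /* ignore cond, Rd, imm12 */
}

int main(void)
{
    assert(is_pc_relative_ldr(0xe59f0000));     /* ldr r0, [pc, #0] */
    assert(!is_pc_relative_ldr(0xe5900000));    /* ldr r0, [r0, #0]: wrong base */
    return 0;
}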
@@ -1271,11 +1365,13 @@ void clean_blocks(u_int page)
               inv|=invalid_code[i];
             }
           }
+#ifndef DISABLE_TLB
           if((signed int)head->vaddr>=(signed int)0xC0000000) {
             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
             //printf("addr=%x start=%x end=%x\n",addr,start,end);
             if(addr<start||addr>=end) inv=1;
           }
+#endif
           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
             inv=1;
           }
@@ -1328,8 +1424,6 @@ void mov_alloc(struct regstat *current,int i)
 
 void shiftimm_alloc(struct regstat *current,int i)
 {
-  clear_const(current,rs1[i]);
-  clear_const(current,rt1[i]);
   if(opcode2[i]<=0x3) // SLL/SRL/SRA
   {
     if(rt1[i]) {
@@ -1338,8 +1432,21 @@ void shiftimm_alloc(struct regstat *current,int i)
       alloc_reg(current,i,rt1[i]);
       current->is32|=1LL<<rt1[i];
       dirty_reg(current,rt1[i]);
+      if(is_const(current,rs1[i])) {
+        int v=get_const(current,rs1[i]);
+        if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
+        if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
+        if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
+      }
+      else clear_const(current,rt1[i]);
     }
   }
+  else
+  {
+    clear_const(current,rs1[i]);
+    clear_const(current,rt1[i]);
+  }
+
   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
   {
     if(rt1[i]) {
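shiftimm_alloc() now propagates constants through SLL/SRL/SRA instead of unconditionally clearing them: when the source register holds a known value, the destination's constant is folded at compile time (the u_int cast separating the logical from the arithmetic right shift), so a later load or store using the shifted register as a base can still take the constant-address path (see also the constant check added to shiftimm_assemble further down). A small sanity sketch of the three folds, with names invented for the example:

#include <assert.h>

typedef unsigned int u_int;

/* Mirrors the folding above: op2 0x00 = SLL, 0x02 = SRL, 0x03 = SRA. */
static int fold_shiftimm(int op2, int v, int sa)
{
    if (op2 == 0x00) return v << sa;
    if (op2 == 0x02) return (u_int)v >> sa;     /* logical */
    return v >> sa;                             /* arithmetic */
}

int main(void)
{
    assert(fold_shiftimm(0x00, 1, 4) == 0x10);
    assert(fold_shiftimm(0x02, (int)0x80000000, 4) == 0x08000000);
    assert((u_int)fold_shiftimm(0x03, (int)0x80000000, 4) == 0xf8000000u);
    return 0;
}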
@@ -1391,7 +1498,10 @@ void shift_alloc(struct regstat *current,int i)
       if(rs1[i]) alloc_reg(current,i,rs1[i]);
       if(rs2[i]) alloc_reg(current,i,rs2[i]);
       alloc_reg(current,i,rt1[i]);
-      if(rt1[i]==rs2[i]) alloc_reg_temp(current,i,-1);
+      if(rt1[i]==rs2[i]) {
+        alloc_reg_temp(current,i,-1);
+        minimum_free_regs[i]=1;
+      }
       current->is32|=1LL<<rt1[i];
     } else { // DSLLV/DSRLV/DSRAV
       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
@@ -1399,7 +1509,10 @@ void shift_alloc(struct regstat *current,int i)
       alloc_reg64(current,i,rt1[i]);
       current->is32&=~(1LL<<rt1[i]);
       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
+      {
         alloc_reg_temp(current,i,-1);
+        minimum_free_regs[i]=1;
+      }
     }
     clear_const(current,rs1[i]);
     clear_const(current,rs2[i]);
@@ -1589,8 +1702,9 @@ void load_alloc(struct regstat *current,int i)
   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
-  if(rt1[i]) {
+  if(rt1[i]&&!((current->u>>rt1[i])&1)) {
     alloc_reg(current,i,rt1[i]);
+    assert(get_reg(current->regmap,rt1[i])>=0);
     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
     {
       current->is32&=~(1LL<<rt1[i]);
@@ -1602,6 +1716,7 @@ void load_alloc(struct regstat *current,int i)
       alloc_reg64(current,i,rt1[i]);
       alloc_all(current,i);
       alloc_reg64(current,i,FTEMP);
+      minimum_free_regs[i]=HOST_REGS;
     }
     else current->is32|=1LL<<rt1[i];
     dirty_reg(current,rt1[i]);
@@ -1612,13 +1727,27 @@ void load_alloc(struct regstat *current,int i)
     {
       alloc_reg(current,i,FTEMP);
       alloc_reg_temp(current,i,-1);
+      minimum_free_regs[i]=1;
     }
   }
   else
   {
-    // Load to r0 (dummy load)
+    // Load to r0 or unneeded register (dummy load)
     // but we still need a register to calculate the address
+    if(opcode[i]==0x22||opcode[i]==0x26)
+    {
+      alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
+    }
+    // If using TLB, need a register for pointer to the mapping table
+    if(using_tlb) alloc_reg(current,i,TLREG);
     alloc_reg_temp(current,i,-1);
+    minimum_free_regs[i]=1;
+    if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
+    {
+      alloc_all(current,i);
+      alloc_reg64(current,i,FTEMP);
+      minimum_free_regs[i]=HOST_REGS;
+    }
   }
 }
 
@@ -1643,6 +1772,7 @@ void store_alloc(struct regstat *current,int i)
   }
   // We need a temporary register for address generation
   alloc_reg_temp(current,i,-1);
+  minimum_free_regs[i]=1;
 }
 
 void c1ls_alloc(struct regstat *current,int i)
@@ -1680,6 +1810,7 @@ void c2ls_alloc(struct regstat *current,int i)
   #endif
   // We need a temporary register for address generation
   alloc_reg_temp(current,i,-1);
+  minimum_free_regs[i]=1;
 }
 
 #ifndef multdiv_alloc
@@ -1725,6 +1856,7 @@ void multdiv_alloc(struct regstat *current,int i)
       current->is32&=~(1LL<<LOREG);
       dirty_reg(current,HIREG);
       dirty_reg(current,LOREG);
+      minimum_free_regs[i]=HOST_REGS;
     }
   }
   else
@@ -1773,6 +1905,7 @@ void cop0_alloc(struct regstat *current,int i)
     assert(opcode2[i]==0x10);
     alloc_all(current,i);
   }
+  minimum_free_regs[i]=HOST_REGS;
 }
 
 void cop1_alloc(struct regstat *current,int i)
@@ -1780,16 +1913,17 @@ void cop1_alloc(struct regstat *current,int i)
   alloc_reg(current,i,CSREG); // Load status
   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
   {
-    assert(rt1[i]);
-    clear_const(current,rt1[i]);
-    if(opcode2[i]==1) {
-      alloc_reg64(current,i,rt1[i]); // DMFC1
-      current->is32&=~(1LL<<rt1[i]);
-    }else{
-      alloc_reg(current,i,rt1[i]); // MFC1/CFC1
-      current->is32|=1LL<<rt1[i];
+    if(rt1[i]){
+      clear_const(current,rt1[i]);
+      if(opcode2[i]==1) {
+        alloc_reg64(current,i,rt1[i]); // DMFC1
+        current->is32&=~(1LL<<rt1[i]);
+      }else{
+        alloc_reg(current,i,rt1[i]); // MFC1/CFC1
+        current->is32|=1LL<<rt1[i];
+      }
+      dirty_reg(current,rt1[i]);
     }
-    dirty_reg(current,rt1[i]);
     alloc_reg_temp(current,i,-1);
   }
   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
@@ -1808,16 +1942,19 @@ void cop1_alloc(struct regstat *current,int i)
       alloc_reg_temp(current,i,-1);
     }
   }
+  minimum_free_regs[i]=1;
 }
 void fconv_alloc(struct regstat *current,int i)
 {
   alloc_reg(current,i,CSREG); // Load status
   alloc_reg_temp(current,i,-1);
+  minimum_free_regs[i]=1;
 }
 void float_alloc(struct regstat *current,int i)
 {
   alloc_reg(current,i,CSREG); // Load status
   alloc_reg_temp(current,i,-1);
+  minimum_free_regs[i]=1;
 }
 void c2op_alloc(struct regstat *current,int i)
 {
@@ -1829,6 +1966,7 @@ void fcomp_alloc(struct regstat *current,int i)
   alloc_reg(current,i,FSREG); // Load flags
   dirty_reg(current,FSREG); // Flag will be modified
   alloc_reg_temp(current,i,-1);
+  minimum_free_regs[i]=1;
 }
 
 void syscall_alloc(struct regstat *current,int i)
@@ -1836,6 +1974,7 @@ void syscall_alloc(struct regstat *current,int i)
   alloc_cc(current,i);
   dirty_reg(current,CCREG);
   alloc_all(current,i);
+  minimum_free_regs[i]=HOST_REGS;
   current->isconst=0;
 }
 
@@ -1914,6 +2053,7 @@ static void pagespan_alloc(struct regstat *current,int i)
   current->isconst=0;
   current->wasconst=0;
   regs[i].wasconst=0;
+  minimum_free_regs[i]=HOST_REGS;
   alloc_all(current,i);
   alloc_cc(current,i);
   dirty_reg(current,CCREG);
@@ -2592,7 +2732,7 @@ void shiftimm_assemble(int i,struct regstat *i_regs)
       t=get_reg(i_regs->regmap,rt1[i]);
       s=get_reg(i_regs->regmap,rs1[i]);
       //assert(t>=0);
-      if(t>=0){
+      if(t>=0&&!((i_regs->isconst>>t)&1)){
         if(rs1[i]==0)
         {
           emit_zeroreg(t);
@@ -2735,6 +2875,7 @@ void load_assemble(int i,struct regstat *i_regs)
   int offset;
   int jaddr=0;
   int memtarget=0,c=0;
+  int fastload_reg_override=0;
   u_int hr,reglist=0;
   th=get_reg(i_regs->regmap,rt1[i]|64);
   tl=get_reg(i_regs->regmap,rt1[i]);
@@ -2746,59 +2887,66 @@ void load_assemble(int i,struct regstat *i_regs)
   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
   if(s>=0) {
     c=(i_regs->wasconst>>s)&1;
-    memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
-    if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    if (c) {
+      memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+      if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    }
   }
   //printf("load_assemble: c=%d\n",c);
   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
   // FIXME: Even if the load is a NOP, we should check for pagefaults...
 #ifdef PCSX
-  if(tl<0) {
-    if(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80) {
+  if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
+    ||rt1[i]==0) {
       // could be FIFO, must perform the read
+      // ||dummy read
       assem_debug("(forced read)\n");
       tl=get_reg(i_regs->regmap,-1);
       assert(tl>=0);
-    }
   }
+#endif
   if(offset||s<0||c) addr=tl;
   else addr=s;
-#endif
-  if(tl>=0) {
-    //assert(tl>=0);
-    //assert(rt1[i]);
-    reglist&=~(1<<tl);
-    if(th>=0) reglist&=~(1<<th);
-    if(!using_tlb) {
-      if(!c) {
+  //if(tl<0) tl=get_reg(i_regs->regmap,-1);
+ if(tl>=0) {
+  //printf("load_assemble: c=%d\n",c);
+  //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
+  assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
+  reglist&=~(1<<tl);
+  if(th>=0) reglist&=~(1<<th);
+  if(!using_tlb) {
+    if(!c) {
+      #ifdef RAM_OFFSET
+      map=get_reg(i_regs->regmap,ROREG);
+      if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
+      #endif
 //#define R29_HACK 1
-        #ifdef R29_HACK
-        // Strmnnrmn's speed hack
-        if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
-        #endif
-        {
-          emit_cmpimm(addr,RAM_SIZE);
-          jaddr=(int)out;
-          #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
-          // Hint to branch predictor that the branch is unlikely to be taken
-          if(rs1[i]>=28)
-            emit_jno_unlikely(0);
-          else
-          #endif
-          emit_jno(0);
-        }
+      #ifdef R29_HACK
+      // Strmnnrmn's speed hack
+      if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
+      #endif
+      {
+        jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
       }
-    }else{ // using tlb
-      int x=0;
-      if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
-      if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
-      map=get_reg(i_regs->regmap,TLREG);
-      assert(map>=0);
-      map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
-      do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
     }
-    if (opcode[i]==0x20) { // LB
-      if(!c||memtarget) {
+    else if(ram_offset&&memtarget) {
+      emit_addimm(addr,ram_offset,HOST_TEMPREG);
+      fastload_reg_override=HOST_TEMPREG;
+    }
+  }else{ // using tlb
+    int x=0;
+    if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
+    if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
+    map=get_reg(i_regs->regmap,TLREG);
+    assert(map>=0);
+    reglist&=~(1<<map);
+    map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
+    do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
+  }
+  int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
+  if (opcode[i]==0x20) { // LB
+    if(!c||memtarget) {
+      if(!dummy) {
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
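Instead of the old per-opcode address shuffling ("if(c) x=...; else if(tl!=addr) emit_mov(...)"), the load path now carries a fastload_reg_override: emit_fastpath_cmp_jump() performs the RAM range check and can hand back a register that is already rebased, and for constant in-range targets ram_offset is simply added into HOST_TEMPREG; every load below then picks that register over the raw address when it is set. A minimal sketch of the selection rule only (the emitter calls themselves are not reproduced here):

/* Illustrative only: how each load below chooses its address register. */
static int pick_addr_reg(int addr_reg, int fastload_reg_override)
{
    return fastload_reg_override ? fastload_reg_override : addr_reg;
}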
@@ -2808,70 +2956,84 @@ void load_assemble(int i,struct regstat *i_regs)
           //emit_xorimm(addr,3,tl);
           //gen_tlb_addr_r(tl,map);
           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
-          int x=0;
+          int x=0,a=tl;
 #ifdef BIG_ENDIAN_MIPS
           if(!c) emit_xorimm(addr,3,tl);
           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
 #else
-          if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-          else if (tl!=addr) emit_mov(addr,tl);
+          if(!c) a=addr;
 #endif
-          emit_movsbl_indexed_tlb(x,tl,map,tl);
+          if(fastload_reg_override) a=fastload_reg_override;
+
+          emit_movsbl_indexed_tlb(x,a,map,tl);
         }
-        if(jaddr)
-          add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
       }
-      else
-        inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x21) { // LH
-      if(!c||memtarget) {
+    else
+      inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+  }
+  if (opcode[i]==0x21) { // LH
+    if(!c||memtarget) {
+      if(!dummy) {
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
         else
         #endif
         {
-          int x=0;
+          int x=0,a=tl;
 #ifdef BIG_ENDIAN_MIPS
           if(!c) emit_xorimm(addr,2,tl);
           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
 #else
-          if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-          else if (tl!=addr) emit_mov(addr,tl);
+          if(!c) a=addr;
 #endif
+          if(fastload_reg_override) a=fastload_reg_override;
           //#ifdef
           //emit_movswl_indexed_tlb(x,tl,map,tl);
           //else
           if(map>=0) {
-            gen_tlb_addr_r(tl,map);
-            emit_movswl_indexed(x,tl,tl);
-          }else
-            emit_movswl_indexed((int)rdram-0x80000000+x,tl,tl);
+            gen_tlb_addr_r(a,map);
+            emit_movswl_indexed(x,a,tl);
+          }else{
+            #if 1 //def RAM_OFFSET
+            emit_movswl_indexed(x,a,tl);
+            #else
+            emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
+            #endif
+          }
         }
-        if(jaddr)
-          add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
       }
-      else
-        inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x23) { // LW
-      if(!c||memtarget) {
+    else
+      inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+  }
+  if (opcode[i]==0x23) { // LW
+    if(!c||memtarget) {
+      if(!dummy) {
+        int a=addr;
+        if(fastload_reg_override) a=fastload_reg_override;
         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_readword_tlb(constmap[i][s]+offset,map,tl);
         else
         #endif
-        emit_readword_indexed_tlb(0,addr,map,tl);
-        if(jaddr)
-          add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
+        emit_readword_indexed_tlb(0,a,map,tl);
       }
-      else
-        inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x24) { // LBU
-      if(!c||memtarget) {
+    else
+      inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+  }
+  if (opcode[i]==0x24) { // LBU
+    if(!c||memtarget) {
+      if(!dummy) {
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
@@ -2881,73 +3043,89 @@ void load_assemble(int i,struct regstat *i_regs)
           //emit_xorimm(addr,3,tl);
           //gen_tlb_addr_r(tl,map);
           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
-          int x=0;
+          int x=0,a=tl;
 #ifdef BIG_ENDIAN_MIPS
           if(!c) emit_xorimm(addr,3,tl);
           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
 #else
-          if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-          else if (tl!=addr) emit_mov(addr,tl);
+          if(!c) a=addr;
 #endif
-          emit_movzbl_indexed_tlb(x,tl,map,tl);
+          if(fastload_reg_override) a=fastload_reg_override;
+
+          emit_movzbl_indexed_tlb(x,a,map,tl);
         }
-        if(jaddr)
-          add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
       }
-      else
-        inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x25) { // LHU
-      if(!c||memtarget) {
+    else
+      inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+  }
+  if (opcode[i]==0x25) { // LHU
+    if(!c||memtarget) {
+      if(!dummy) {
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
         else
         #endif
         {
-          int x=0;
+          int x=0,a=tl;
 #ifdef BIG_ENDIAN_MIPS
           if(!c) emit_xorimm(addr,2,tl);
           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
 #else
-          if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-          else if (tl!=addr) emit_mov(addr,tl);
+          if(!c) a=addr;
 #endif
+          if(fastload_reg_override) a=fastload_reg_override;
           //#ifdef
           //emit_movzwl_indexed_tlb(x,tl,map,tl);
           //#else
           if(map>=0) {
-            gen_tlb_addr_r(tl,map);
-            emit_movzwl_indexed(x,tl,tl);
-          }else
-            emit_movzwl_indexed((int)rdram-0x80000000+x,tl,tl);
-          if(jaddr)
-            add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
+            gen_tlb_addr_r(a,map);
+            emit_movzwl_indexed(x,a,tl);
+          }else{
+            #if 1 //def RAM_OFFSET
+            emit_movzwl_indexed(x,a,tl);
+            #else
+            emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
+            #endif
+          }
         }
       }
-      else
-        inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x27) { // LWU
-      assert(th>=0);
-      if(!c||memtarget) {
+    else
+      inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+  }
+  if (opcode[i]==0x27) { // LWU
+    assert(th>=0);
+    if(!c||memtarget) {
+      if(!dummy) {
+        int a=addr;
+        if(fastload_reg_override) a=fastload_reg_override;
         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
         #ifdef HOST_IMM_ADDR32
         if(c)
           emit_readword_tlb(constmap[i][s]+offset,map,tl);
         else
         #endif
-        emit_readword_indexed_tlb(0,addr,map,tl);
-        if(jaddr)
-          add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
-      }
-      else {
-        inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+        emit_readword_indexed_tlb(0,a,map,tl);
       }
-      emit_zeroreg(th);
+      if(jaddr)
+        add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    if (opcode[i]==0x37) { // LD
-      if(!c||memtarget) {
+    else {
+      inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+    }
+    emit_zeroreg(th);
+  }
+  if (opcode[i]==0x37) { // LD
+    if(!c||memtarget) {
+      if(!dummy) {
+        int a=addr;
+        if(fastload_reg_override) a=fastload_reg_override;
         //gen_tlb_addr_r(tl,map);
         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
@@ -2956,15 +3134,16 @@ void load_assemble(int i,struct regstat *i_regs)
           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
         else
         #endif
-        emit_readdword_indexed_tlb(0,addr,map,th,tl);
-        if(jaddr)
-          add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
+        emit_readdword_indexed_tlb(0,a,map,th,tl);
       }
-      else
-        inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
+      if(jaddr)
+        add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
     }
-    //emit_storereg(rt1[i],tl); // DEBUG
+    else
+      inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
   }
+ }
+  //emit_storereg(rt1[i],tl); // DEBUG
   //if(opcode[i]==0x23)
   //if(opcode[i]==0x24)
   //if(opcode[i]==0x23||opcode[i]==0x24)
@@ -3011,6 +3190,7 @@ void store_assemble(int i,struct regstat *i_regs)
   int jaddr=0,jaddr2,type;
   int memtarget=0,c=0;
   int agr=AGEN1+(i&1);
+  int faststore_reg_override=0;
   u_int hr,reglist=0;
   th=get_reg(i_regs->regmap,rs2[i]|64);
   tl=get_reg(i_regs->regmap,rs2[i]);
@@ -3020,8 +3200,10 @@ void store_assemble(int i,struct regstat *i_regs)
   offset=imm[i];
   if(s>=0) {
     c=(i_regs->wasconst>>s)&1;
-    memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
-    if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    if(c) {
+      memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+      if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    }
   }
   assert(tl>=0);
   assert(temp>=0);
@@ -3033,9 +3215,9 @@ void store_assemble(int i,struct regstat *i_regs)
   else addr=s;
   if(!using_tlb) {
     if(!c) {
+      #ifndef PCSX
       #ifdef R29_HACK
       // Strmnnrmn's speed hack
-      memtarget=1;
       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
       #endif
       emit_cmpimm(addr,RAM_SIZE);
@@ -3043,6 +3225,7 @@ void store_assemble(int i,struct regstat *i_regs)
       if(s==addr) emit_mov(s,temp);
       #endif
       #ifdef R29_HACK
+      memtarget=1;
       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
       #endif
       {
@@ -3055,6 +3238,13 @@ void store_assemble(int i,struct regstat *i_regs)
         #endif
         emit_jno(0);
       }
+      #else
+        jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
+      #endif
+    }
+    else if(ram_offset&&memtarget) {
+      emit_addimm(addr,ram_offset,HOST_TEMPREG);
+      faststore_reg_override=HOST_TEMPREG;
     }
   }else{ // using tlb
     int x=0;
@@ -3062,78 +3252,85 @@ void store_assemble(int i,struct regstat *i_regs)
     if (opcode[i]==0x29) x=2; // SH
     map=get_reg(i_regs->regmap,TLREG);
     assert(map>=0);
+    reglist&=~(1<<map);
     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
   }
 
   if (opcode[i]==0x28) { // SB
     if(!c||memtarget) {
-      int x=0;
+      int x=0,a=temp;
 #ifdef BIG_ENDIAN_MIPS
       if(!c) emit_xorimm(addr,3,temp);
       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
 #else
-      if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-      else if (addr!=temp) emit_mov(addr,temp);
+      if(!c) a=addr;
 #endif
+      if(faststore_reg_override) a=faststore_reg_override;
       //gen_tlb_addr_w(temp,map);
       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
-      emit_writebyte_indexed_tlb(tl,x,temp,map,temp);
+      emit_writebyte_indexed_tlb(tl,x,a,map,a);
     }
     type=STOREB_STUB;
   }
   if (opcode[i]==0x29) { // SH
     if(!c||memtarget) {
-      int x=0;
+      int x=0,a=temp;
 #ifdef BIG_ENDIAN_MIPS
       if(!c) emit_xorimm(addr,2,temp);
       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
 #else
-      if(c) x=(constmap[i][s]+offset)-(constmap[i][s]+offset);
-      else if (addr!=temp) emit_mov(addr,temp);
+      if(!c) a=addr;
 #endif
+      if(faststore_reg_override) a=faststore_reg_override;
       //#ifdef
       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
       //#else
       if(map>=0) {
-        gen_tlb_addr_w(temp,map);
-        emit_writehword_indexed(tl,x,temp);
+        gen_tlb_addr_w(a,map);
+        emit_writehword_indexed(tl,x,a);
       }else
-        emit_writehword_indexed(tl,(int)rdram-0x80000000+x,temp);
+        //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
+        emit_writehword_indexed(tl,x,a);
     }
     type=STOREH_STUB;
   }
   if (opcode[i]==0x2B) { // SW
-    if(!c||memtarget)
+    if(!c||memtarget) {
+      int a=addr;
+      if(faststore_reg_override) a=faststore_reg_override;
       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
-      emit_writeword_indexed_tlb(tl,0,addr,map,temp);
+      emit_writeword_indexed_tlb(tl,0,a,map,temp);
+    }
     type=STOREW_STUB;
   }
   if (opcode[i]==0x3F) { // SD
     if(!c||memtarget) {
+      int a=addr;
+      if(faststore_reg_override) a=faststore_reg_override;
       if(rs2[i]) {
         assert(th>=0);
         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
-        emit_writedword_indexed_tlb(th,tl,0,addr,map,temp);
+        emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
       }else{
         // Store zero
         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
-        emit_writedword_indexed_tlb(tl,tl,0,addr,map,temp);
+        emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
       }
     }
     type=STORED_STUB;
   }
-  if(!using_tlb&&(!c||memtarget))
-    // addr could be a temp, make sure it survives STORE*_STUB
-    reglist|=1<<addr;
+#ifdef PCSX
   if(jaddr) {
+    // PCSX store handlers don't check invcode again
+    reglist|=1<<addr;
     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
-  } else if(!memtarget) {
-    inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
+    jaddr=0;
   }
-  if(!using_tlb) {
+#endif
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
     if(!c||memtarget) {
       #ifdef DESTRUCTIVE_SHIFT
       // The x86 shift operation is 'destructive'; it overwrites the
@@ -3147,9 +3344,32 @@ void store_assemble(int i,struct regstat *i_regs)
       #else
       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
       #endif
+      #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+      emit_callne(invalidate_addr_reg[addr]);
+      #else
       jaddr2=(int)out;
       emit_jne(0);
       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
+      #endif
+    }
+  }
+  u_int addr_val=constmap[i][s]+offset;
+  if(jaddr) {
+    add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
+  } else if(c&&!memtarget) {
+    inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
+  }
+  // basic current block modification detection..
+  // not looking back as that should be in mips cache already
+  if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
+    printf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
+    assert(i_regs->regmap==regs[i].regmap); // not delay slot
+    if(i_regs->regmap==regs[i].regmap) {
+      load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
+      wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
+      emit_movimm(start+i*4+4,0);
+      emit_writeword(0,(int)&pcaddr);
+      emit_jmp((int)do_interrupt);
     }
   }
   //if(opcode[i]==0x2B || opcode[i]==0x3F)
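Two safeguards are added to the store path above. First, the invalid_code check (self-modifying-code trap) is skipped when the base register was already used as a store base earlier in the block (i_regs->waswritten) or when the NDHACK_NO_SMC_CHECK hack is enabled. Second, a store with a constant target that lands inside the block currently being compiled is treated as modification of the block itself: dirty registers are written back, pcaddr is set to the next instruction and execution jumps to do_interrupt, so the block is left (and later recompiled) right after the write. A hypothetical standalone restatement of that "hits the current block" test:

#include <stdint.h>

/* start = first guest PC of the block, slen = its length in instructions,
 * i = index of the store, addr_val = the store's constant target.
 * Only addresses ahead of the store are checked; earlier words have
 * already been fetched, as the comment above notes. */
static int store_hits_current_block(uint32_t start, int slen, int i,
                                    uint32_t addr_val)
{
    return start + i * 4 < addr_val && addr_val < start + slen * 4;
}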
@@ -3158,8 +3378,12 @@ void store_assemble(int i,struct regstat *i_regs)
   //if(opcode[i]==0x2B)
   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
   {
-    //emit_pusha();
+    #ifdef __i386__
+    emit_pusha();
+    #endif
+    #ifdef __arm__
     save_regs(0x100f);
+    #endif
         emit_readword((int)&last_count,ECX);
         #ifdef __i386__
         if(get_reg(i_regs->regmap,CCREG)<0)
@@ -3178,8 +3402,12 @@ void store_assemble(int i,struct regstat *i_regs)
         emit_writeword(0,(int)&Count);
         #endif
     emit_call((int)memdebug);
-    //emit_popa();
+    #ifdef __i386__
+    emit_popa();
+    #endif
+    #ifdef __arm__
     restore_regs(0x100f);
+    #endif
   }/**/
 }
 
@@ -3192,7 +3420,7 @@ void storelr_assemble(int i,struct regstat *i_regs)
   int jaddr=0,jaddr2;
   int case1,case2,case3;
   int done0,done1,done2;
-  int memtarget,c=0;
+  int memtarget=0,c=0;
   int agr=AGEN1+(i&1);
   u_int hr,reglist=0;
   th=get_reg(i_regs->regmap,rs2[i]|64);
@@ -3203,188 +3431,201 @@ void storelr_assemble(int i,struct regstat *i_regs)
   offset=imm[i];
   if(s>=0) {
     c=(i_regs->isconst>>s)&1;
-    memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
-    if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    if(c) {
+      memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+      if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
+    }
   }
   assert(tl>=0);
   for(hr=0;hr<HOST_REGS;hr++) {
     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
   }
-  if(tl>=0) {
-    assert(temp>=0);
-    if(!using_tlb) {
-      if(!c) {
-        emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
-        if(!offset&&s!=temp) emit_mov(s,temp);
-        jaddr=(int)out;
-        emit_jno(0);
-      }
-      else
-      {
-        if(!memtarget||!rs1[i]) {
-          jaddr=(int)out;
-          emit_jmp(0);
-        }
-      }
-      if((u_int)rdram!=0x80000000) 
-        emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
-    }else{ // using tlb
-      int map=get_reg(i_regs->regmap,TLREG);
-      assert(map>=0);
-      map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
-      if(!c&&!offset&&s>=0) emit_mov(s,temp);
-      do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
-      if(!jaddr&&!memtarget) {
+  assert(temp>=0);
+  if(!using_tlb) {
+    if(!c) {
+      emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
+      if(!offset&&s!=temp) emit_mov(s,temp);
+      jaddr=(int)out;
+      emit_jno(0);
+    }
+    else
+    {
+      if(!memtarget||!rs1[i]) {
         jaddr=(int)out;
         emit_jmp(0);
       }
-      gen_tlb_addr_w(temp,map);
     }
-
-    if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
-      temp2=get_reg(i_regs->regmap,FTEMP);
-      if(!rs2[i]) temp2=th=tl;
+    #ifdef RAM_OFFSET
+    int map=get_reg(i_regs->regmap,ROREG);
+    if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
+    gen_tlb_addr_w(temp,map);
+    #else
+    if((u_int)rdram!=0x80000000) 
+      emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
+    #endif
+  }else{ // using tlb
+    int map=get_reg(i_regs->regmap,TLREG);
+    assert(map>=0);
+    reglist&=~(1<<map);
+    map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
+    if(!c&&!offset&&s>=0) emit_mov(s,temp);
+    do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
+    if(!jaddr&&!memtarget) {
+      jaddr=(int)out;
+      emit_jmp(0);
     }
+    gen_tlb_addr_w(temp,map);
+  }
+
+  if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
+    temp2=get_reg(i_regs->regmap,FTEMP);
+    if(!rs2[i]) temp2=th=tl;
+  }
 
 #ifndef BIG_ENDIAN_MIPS
     emit_xorimm(temp,3,temp);
 #endif
-    emit_testimm(temp,2);
-    case2=(int)out;
-    emit_jne(0);
-    emit_testimm(temp,1);
-    case1=(int)out;
-    emit_jne(0);
-    // 0
-    if (opcode[i]==0x2A) { // SWL
-      emit_writeword_indexed(tl,0,temp);
-    }
-    if (opcode[i]==0x2E) { // SWR
-      emit_writebyte_indexed(tl,3,temp);
-    }
-    if (opcode[i]==0x2C) { // SDL
-      emit_writeword_indexed(th,0,temp);
-      if(rs2[i]) emit_mov(tl,temp2);
-    }
-    if (opcode[i]==0x2D) { // SDR
-      emit_writebyte_indexed(tl,3,temp);
-      if(rs2[i]) emit_shldimm(th,tl,24,temp2);
-    }
+  emit_testimm(temp,2);
+  case2=(int)out;
+  emit_jne(0);
+  emit_testimm(temp,1);
+  case1=(int)out;
+  emit_jne(0);
+  // 0
+  if (opcode[i]==0x2A) { // SWL
+    emit_writeword_indexed(tl,0,temp);
+  }
+  if (opcode[i]==0x2E) { // SWR
+    emit_writebyte_indexed(tl,3,temp);
+  }
+  if (opcode[i]==0x2C) { // SDL
+    emit_writeword_indexed(th,0,temp);
+    if(rs2[i]) emit_mov(tl,temp2);
+  }
+  if (opcode[i]==0x2D) { // SDR
+    emit_writebyte_indexed(tl,3,temp);
+    if(rs2[i]) emit_shldimm(th,tl,24,temp2);
+  }
+  done0=(int)out;
+  emit_jmp(0);
+  // 1
+  set_jump_target(case1,(int)out);
+  if (opcode[i]==0x2A) { // SWL
+    // Write 3 msb into three least significant bytes
+    if(rs2[i]) emit_rorimm(tl,8,tl);
+    emit_writehword_indexed(tl,-1,temp);
+    if(rs2[i]) emit_rorimm(tl,16,tl);
+    emit_writebyte_indexed(tl,1,temp);
+    if(rs2[i]) emit_rorimm(tl,8,tl);
+  }
+  if (opcode[i]==0x2E) { // SWR
+    // Write two lsb into two most significant bytes
+    emit_writehword_indexed(tl,1,temp);
+  }
+  if (opcode[i]==0x2C) { // SDL
+    if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
+    // Write 3 msb into three least significant bytes
+    if(rs2[i]) emit_rorimm(th,8,th);
+    emit_writehword_indexed(th,-1,temp);
+    if(rs2[i]) emit_rorimm(th,16,th);
+    emit_writebyte_indexed(th,1,temp);
+    if(rs2[i]) emit_rorimm(th,8,th);
+  }
+  if (opcode[i]==0x2D) { // SDR
+    if(rs2[i]) emit_shldimm(th,tl,16,temp2);
+    // Write two lsb into two most significant bytes
+    emit_writehword_indexed(tl,1,temp);
+  }
+  done1=(int)out;
+  emit_jmp(0);
+  // 2
+  set_jump_target(case2,(int)out);
+  emit_testimm(temp,1);
+  case3=(int)out;
+  emit_jne(0);
+  if (opcode[i]==0x2A) { // SWL
+    // Write two msb into two least significant bytes
+    if(rs2[i]) emit_rorimm(tl,16,tl);
+    emit_writehword_indexed(tl,-2,temp);
+    if(rs2[i]) emit_rorimm(tl,16,tl);
+  }
+  if (opcode[i]==0x2E) { // SWR
+    // Write 3 lsb into three most significant bytes
+    emit_writebyte_indexed(tl,-1,temp);
+    if(rs2[i]) emit_rorimm(tl,8,tl);
+    emit_writehword_indexed(tl,0,temp);
+    if(rs2[i]) emit_rorimm(tl,24,tl);
+  }
+  if (opcode[i]==0x2C) { // SDL
+    if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
+    // Write two msb into two least significant bytes
+    if(rs2[i]) emit_rorimm(th,16,th);
+    emit_writehword_indexed(th,-2,temp);
+    if(rs2[i]) emit_rorimm(th,16,th);
+  }
+  if (opcode[i]==0x2D) { // SDR
+    if(rs2[i]) emit_shldimm(th,tl,8,temp2);
+    // Write 3 lsb into three most significant bytes
+    emit_writebyte_indexed(tl,-1,temp);
+    if(rs2[i]) emit_rorimm(tl,8,tl);
+    emit_writehword_indexed(tl,0,temp);
+    if(rs2[i]) emit_rorimm(tl,24,tl);
+  }
+  done2=(int)out;
+  emit_jmp(0);
+  // 3
+  set_jump_target(case3,(int)out);
+  if (opcode[i]==0x2A) { // SWL
+    // Write msb into least significant byte
+    if(rs2[i]) emit_rorimm(tl,24,tl);
+    emit_writebyte_indexed(tl,-3,temp);
+    if(rs2[i]) emit_rorimm(tl,8,tl);
+  }
+  if (opcode[i]==0x2E) { // SWR
+    // Write entire word
+    emit_writeword_indexed(tl,-3,temp);
+  }
+  if (opcode[i]==0x2C) { // SDL
+    if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
+    // Write msb into least significant byte
+    if(rs2[i]) emit_rorimm(th,24,th);
+    emit_writebyte_indexed(th,-3,temp);
+    if(rs2[i]) emit_rorimm(th,8,th);
+  }
+  if (opcode[i]==0x2D) { // SDR
+    if(rs2[i]) emit_mov(th,temp2);
+    // Write entire word
+    emit_writeword_indexed(tl,-3,temp);
+  }
+  set_jump_target(done0,(int)out);
+  set_jump_target(done1,(int)out);
+  set_jump_target(done2,(int)out);
+  if (opcode[i]==0x2C) { // SDL
+    emit_testimm(temp,4);
     done0=(int)out;
-    emit_jmp(0);
-    // 1
-    set_jump_target(case1,(int)out);
-    if (opcode[i]==0x2A) { // SWL
-      // Write 3 msb into three least significant bytes
-      if(rs2[i]) emit_rorimm(tl,8,tl);
-      emit_writehword_indexed(tl,-1,temp);
-      if(rs2[i]) emit_rorimm(tl,16,tl);
-      emit_writebyte_indexed(tl,1,temp);
-      if(rs2[i]) emit_rorimm(tl,8,tl);
-    }
-    if (opcode[i]==0x2E) { // SWR
-      // Write two lsb into two most significant bytes
-      emit_writehword_indexed(tl,1,temp);
-    }
-    if (opcode[i]==0x2C) { // SDL
-      if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
-      // Write 3 msb into three least significant bytes
-      if(rs2[i]) emit_rorimm(th,8,th);
-      emit_writehword_indexed(th,-1,temp);
-      if(rs2[i]) emit_rorimm(th,16,th);
-      emit_writebyte_indexed(th,1,temp);
-      if(rs2[i]) emit_rorimm(th,8,th);
-    }
-    if (opcode[i]==0x2D) { // SDR
-      if(rs2[i]) emit_shldimm(th,tl,16,temp2);
-      // Write two lsb into two most significant bytes
-      emit_writehword_indexed(tl,1,temp);
-    }
-    done1=(int)out;
-    emit_jmp(0);
-    // 2
-    set_jump_target(case2,(int)out);
-    emit_testimm(temp,1);
-    case3=(int)out;
     emit_jne(0);
-    if (opcode[i]==0x2A) { // SWL
-      // Write two msb into two least significant bytes
-      if(rs2[i]) emit_rorimm(tl,16,tl);
-      emit_writehword_indexed(tl,-2,temp);
-      if(rs2[i]) emit_rorimm(tl,16,tl);
-    }
-    if (opcode[i]==0x2E) { // SWR
-      // Write 3 lsb into three most significant bytes
-      emit_writebyte_indexed(tl,-1,temp);
-      if(rs2[i]) emit_rorimm(tl,8,tl);
-      emit_writehword_indexed(tl,0,temp);
-      if(rs2[i]) emit_rorimm(tl,24,tl);
-    }
-    if (opcode[i]==0x2C) { // SDL
-      if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
-      // Write two msb into two least significant bytes
-      if(rs2[i]) emit_rorimm(th,16,th);
-      emit_writehword_indexed(th,-2,temp);
-      if(rs2[i]) emit_rorimm(th,16,th);
-    }
-    if (opcode[i]==0x2D) { // SDR
-      if(rs2[i]) emit_shldimm(th,tl,8,temp2);
-      // Write 3 lsb into three most significant bytes
-      emit_writebyte_indexed(tl,-1,temp);
-      if(rs2[i]) emit_rorimm(tl,8,tl);
-      emit_writehword_indexed(tl,0,temp);
-      if(rs2[i]) emit_rorimm(tl,24,tl);
-    }
-    done2=(int)out;
-    emit_jmp(0);
-    // 3
-    set_jump_target(case3,(int)out);
-    if (opcode[i]==0x2A) { // SWL
-      // Write msb into least significant byte
-      if(rs2[i]) emit_rorimm(tl,24,tl);
-      emit_writebyte_indexed(tl,-3,temp);
-      if(rs2[i]) emit_rorimm(tl,8,tl);
-    }
-    if (opcode[i]==0x2E) { // SWR
-      // Write entire word
-      emit_writeword_indexed(tl,-3,temp);
-    }
-    if (opcode[i]==0x2C) { // SDL
-      if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
-      // Write msb into least significant byte
-      if(rs2[i]) emit_rorimm(th,24,th);
-      emit_writebyte_indexed(th,-3,temp);
-      if(rs2[i]) emit_rorimm(th,8,th);
-    }
-    if (opcode[i]==0x2D) { // SDR
-      if(rs2[i]) emit_mov(th,temp2);
-      // Write entire word
-      emit_writeword_indexed(tl,-3,temp);
-    }
+    emit_andimm(temp,~3,temp);
+    emit_writeword_indexed(temp2,4,temp);
     set_jump_target(done0,(int)out);
-    set_jump_target(done1,(int)out);
-    set_jump_target(done2,(int)out);
-    if (opcode[i]==0x2C) { // SDL
-      emit_testimm(temp,4);
-      done0=(int)out;
-      emit_jne(0);
-      emit_andimm(temp,~3,temp);
-      emit_writeword_indexed(temp2,4,temp);
-      set_jump_target(done0,(int)out);
-    }
-    if (opcode[i]==0x2D) { // SDR
-      emit_testimm(temp,4);
-      done0=(int)out;
-      emit_jeq(0);
-      emit_andimm(temp,~3,temp);
-      emit_writeword_indexed(temp2,-4,temp);
-      set_jump_target(done0,(int)out);
-    }
-    if(!c||!memtarget)
-      add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
   }
-  if(!using_tlb) {
+  if (opcode[i]==0x2D) { // SDR
+    emit_testimm(temp,4);
+    done0=(int)out;
+    emit_jeq(0);
+    emit_andimm(temp,~3,temp);
+    emit_writeword_indexed(temp2,-4,temp);
+    set_jump_target(done0,(int)out);
+  }
+  if(!c||!memtarget)
+    add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
+    #ifdef RAM_OFFSET
+    int map=get_reg(i_regs->regmap,ROREG);
+    if(map<0) map=HOST_TEMPREG;
+    gen_orig_addr_w(temp,map);
+    #else
     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
+    #endif
     #if defined(HOST_IMM8)
     int ir=get_reg(i_regs->regmap,INVCP);
     assert(ir>=0);
@@ -3392,9 +3633,13 @@ void storelr_assemble(int i,struct regstat *i_regs)
     #else
     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
     #endif
+    #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+    emit_callne(invalidate_addr_reg[temp]);
+    #else
     jaddr2=(int)out;
     emit_jne(0);
     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
+    #endif
   }
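
The block above is the self-modifying-code check that runs after every fast-path store: the written address is converted back from a host pointer to a PSX address (or mapped through ROREG when RAM_OFFSET is in use), the matching invalid_code[] entry is tested, and any compiled code on that page is thrown away, either through a conditional call to invalidate_addr_reg[] or through an INVCODE_STUB. The new conditions skip the check when the same base register was already used for an earlier store in this block (waswritten) or when the NDHACK_NO_SMC_CHECK hack is enabled. A rough C model of what the emitted code does (names such as store_host_addr are illustrative):

  u_int vaddr = store_host_addr + 0x80000000 - (u_int)rdram;  /* host pointer -> PSX address */
  if (invalid_code[vaddr >> 12] != 1)    /* page still holds compiled blocks? */
    invalidate_addr(vaddr);              /* invalidate them before they can be executed */
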
   /*
     emit_pusha();
@@ -3472,6 +3717,7 @@ void c1ls_assemble(int i,struct regstat *i_regs)
   {
     map=get_reg(i_regs->regmap,TLREG);
     assert(map>=0);
+    reglist&=~(1<<map);
     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
     }
@@ -3550,7 +3796,7 @@ void c1ls_assemble(int i,struct regstat *i_regs)
     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
     type=STORED_STUB;
   }
-  if(!using_tlb) {
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
       #ifndef DESTRUCTIVE_SHIFT
       temp=offset||c||s<0?ar:s;
@@ -3562,9 +3808,13 @@ void c1ls_assemble(int i,struct regstat *i_regs)
       #else
       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
       #endif
+      #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+      emit_callne(invalidate_addr_reg[temp]);
+      #else
       jaddr3=(int)out;
       emit_jne(0);
       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
+      #endif
     }
   }
   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
@@ -3598,9 +3848,10 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   int s,tl;
   int ar;
   int offset;
-  int c=0;
-  int jaddr,jaddr2=0,jaddr3,type;
+  int memtarget=0,c=0;
+  int jaddr2=0,jaddr3,type;
   int agr=AGEN1+(i&1);
+  int fastio_reg_override=0;
   u_int hr,reglist=0;
   u_int copr=(source[i]>>16)&0x1f;
   s=get_reg(i_regs->regmap,rs1[i]);
@@ -3624,40 +3875,52 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   } else { // LWC2
     ar=tl;
   }
+  if(s>=0) c=(i_regs->wasconst>>s)&1;
+  memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
   if (!offset&&!c&&s>=0) ar=s;
   assert(ar>=0);
 
   if (opcode[i]==0x3a) { // SWC2
     cop2_get_dreg(copr,tl,HOST_TEMPREG);
+    type=STOREW_STUB;
   }
-  if(s>=0) c=(i_regs->wasconst>>s)&1;
-  if(!c) {
-    emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
-    jaddr2=(int)out;
-    emit_jno(0);
-  }
-  else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
-    jaddr2=(int)out;
-    emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
-  }
-  if (opcode[i]==0x32) { // LWC2
-    #ifdef HOST_IMM_ADDR32
-    if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
-    else
-    #endif
-    emit_readword_indexed(0,ar,tl);
+  else
     type=LOADW_STUB;
+
+  if(c&&!memtarget) {
+    jaddr2=(int)out;
+    emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case: constant address outside RAM
   }
-  if (opcode[i]==0x3a) { // SWC2
-#ifdef DESTRUCTIVE_SHIFT
-    if(!offset&&!c&&s>=0) emit_mov(s,ar);
-#endif
-    emit_writeword_indexed(tl,0,ar);
-    type=STOREW_STUB;
+  else {
+    if(!c) {
+      jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
+    }
+    else if(ram_offset&&memtarget) {
+      emit_addimm(ar,ram_offset,HOST_TEMPREG);
+      fastio_reg_override=HOST_TEMPREG;
+    }
+    if (opcode[i]==0x32) { // LWC2
+      #ifdef HOST_IMM_ADDR32
+      if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
+      else
+      #endif
+      int a=ar;
+      if(fastio_reg_override) a=fastio_reg_override;
+      emit_readword_indexed(0,a,tl);
+    }
+    if (opcode[i]==0x3a) { // SWC2
+      #ifdef DESTRUCTIVE_SHIFT
+      if(!offset&&!c&&s>=0) emit_mov(s,ar);
+      #endif
+      int a=ar;
+      if(fastio_reg_override) a=fastio_reg_override;
+      emit_writeword_indexed(tl,0,a);
+    }
   }
   if(jaddr2)
     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
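
As with the other memory ops, c2ls_assemble() now decides at compile time whether a constant address is an ordinary RAM access: memtarget is the signed comparison against the end of RAM computed above, and only when it fails is the access routed straight to the stub. A worked example of that check (values are illustrative; RAM_SIZE is 0x200000 on the PSX):

  u_int target = 0x8001ff00 + 0x120;   /* constant base + offset = 0x80020020 */
  int memtarget = (signed int)target < (signed int)0x80000000 + RAM_SIZE;
  /* memtarget == 1 here, so no runtime range check is emitted and the access is
     compiled as a direct RAM read/write (with ram_offset applied if RAM is relocated) */
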
-  if (opcode[i]==0x3a) { // SWC2
+  if(opcode[i]==0x3a) // SWC2
+  if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
 #if defined(HOST_IMM8)
     int ir=get_reg(i_regs->regmap,INVCP);
     assert(ir>=0);
@@ -3665,9 +3928,13 @@ void c2ls_assemble(int i,struct regstat *i_regs)
 #else
     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
 #endif
+    #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+    emit_callne(invalidate_addr_reg[ar]);
+    #else
     jaddr3=(int)out;
     emit_jne(0);
     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
+    #endif
   }
   if (opcode[i]==0x32) { // LWC2
     cop2_put_dreg(copr,tl,HOST_TEMPREG);
@@ -3686,7 +3953,6 @@ void mov_assemble(int i,struct regstat *i_regs)
 {
   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
-  assert(rt1[i]>0);
   if(rt1[i]) {
     signed char sh,sl,th,tl;
     th=get_reg(i_regs->regmap,rt1[i]|64);
@@ -3727,7 +3993,7 @@ void syscall_assemble(int i,struct regstat *i_regs)
   assert(ccreg==HOST_CCREG);
   assert(!is_delayslot);
   emit_movimm(start+i*4,EAX); // Get PC
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
   emit_jmp((int)jump_syscall_hle); // XXX
 }
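
All of the CLOCK_DIVIDER*n cycle adjustments in this file become CLOCK_ADJUST(n). The macro itself is defined outside this hunk; given the cycle_multiplier=200 default set in new_dynarec_init() further down, it presumably scales guest cycles by a configurable percentage, along the lines of:

  /* sketch only - not the verbatim definition */
  #define CLOCK_ADJUST(x) (((x) * cycle_multiplier + 50) / 100)  /* 200 == old CLOCK_DIVIDER 2 */
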
 
@@ -3738,12 +4004,23 @@ void hlecall_assemble(int i,struct regstat *i_regs)
   assert(!is_delayslot);
   emit_movimm(start+i*4+4,0); // Get PC
   emit_movimm((int)psxHLEt[source[i]&7],1);
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
   emit_jmp((int)jump_hlecall);
 }
 
+void intcall_assemble(int i,struct regstat *i_regs)
+{
+  signed char ccreg=get_reg(i_regs->regmap,CCREG);
+  assert(ccreg==HOST_CCREG);
+  assert(!is_delayslot);
+  emit_movimm(start+i*4,0); // Get PC
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+  emit_jmp((int)jump_intcall);
+}
+
 void ds_assemble(int i,struct regstat *i_regs)
 {
+  speculate_register_values(i);
   is_delayslot=1;
   switch(itype[i]) {
     case ALU:
@@ -3786,6 +4063,7 @@ void ds_assemble(int i,struct regstat *i_regs)
       mov_assemble(i,i_regs);break;
     case SYSCALL:
     case HLECALL:
+    case INTCALL:
     case SPAN:
     case UJUMP:
     case RJUMP:
@@ -3813,8 +4091,11 @@ int internal_branch(uint64_t i_is32,int addr)
       else printf("optimizable: yes\n");
     }*/
     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
+#ifndef FORCE32
     if(requires_32bit[t]&~i_is32) return 0;
-    else return 1;
+    else
+#endif
+      return 1;
   }
   return 0;
 }
@@ -3947,12 +4228,13 @@ static void loop_preload(signed char pre[],signed char entry[])
 void address_generation(int i,struct regstat *i_regs,signed char entry[])
 {
   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
-    int ra;
+    int ra=-1;
     int agr=AGEN1+(i&1);
     int mgr=MGEN1+(i&1);
     if(itype[i]==LOAD) {
       ra=get_reg(i_regs->regmap,rt1[i]);
-      //if(rt1[i]) assert(ra>=0);
+      if(ra<0) ra=get_reg(i_regs->regmap,-1); 
+      assert(ra>=0);
     }
     if(itype[i]==LOADLR) {
       ra=get_reg(i_regs->regmap,FTEMP);
@@ -3964,7 +4246,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
     if(itype[i]==C1LS||itype[i]==C2LS) {
       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
         ra=get_reg(i_regs->regmap,FTEMP);
-      else { // SWC1/SDC1
+      else { // SWC1/SDC1/SWC2/SDC2
         ra=get_reg(i_regs->regmap,agr);
         if(ra<0) ra=get_reg(i_regs->regmap,-1);
       }
@@ -3998,6 +4280,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
         //  printf("poor load scheduling!\n");
       }
       else if(c) {
+#ifndef DISABLE_TLB
         if(rm>=0) {
           if(!entry||entry[rm]!=mgr) {
             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
@@ -4012,6 +4295,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
             }
           }
         }
+#endif
         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
           if(!entry||entry[ra]!=agr) {
             if (opcode[i]==0x22||opcode[i]==0x26) {
@@ -4024,6 +4308,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
               #endif
               emit_movimm(constmap[i][rs]+offset,ra);
+              regs[i].loadedconst|=1<<ra;
             }
           } // else did it in the previous cycle
         } // else load_consts already did it
@@ -4040,7 +4325,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
   // Preload constants for next instruction
   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
     int agr,ra;
-    #ifndef HOST_IMM_ADDR32
+    #if !defined(HOST_IMM_ADDR32) && !defined(DISABLE_TLB)
     // Mapper entry
     agr=MGEN1+((i+1)&1);
     ra=get_reg(i_regs->regmap,agr);
@@ -4084,6 +4369,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
           #endif
           emit_movimm(constmap[i+1][rs]+offset,ra);
+          regs[i+1].loadedconst|=1<<ra;
         }
       }
       else if(rs1[i+1]==0) {
@@ -4152,22 +4438,51 @@ int get_final_value(int hr, int i, int *value)
 // Load registers with known constants
 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
 {
-  int hr;
+  int hr,hr2;
+  // propagate loaded constant flags
+  if(i==0||bt[i])
+    regs[i].loadedconst=0;
+  else {
+    for(hr=0;hr<HOST_REGS;hr++) {
+      if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
+         &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
+      {
+        regs[i].loadedconst|=1<<hr;
+      }
+    }
+  }
   // Load 32-bit regs
   for(hr=0;hr<HOST_REGS;hr++) {
     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
       //if(entry[hr]!=regmap[hr]) {
-      if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
+      if(!((regs[i].loadedconst>>hr)&1)) {
         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
-          int value;
+          int value,similar=0;
           if(get_final_value(hr,i,&value)) {
-            if(value==0) {
+            // see if some other register already holds a similar value
+            for(hr2=0;hr2<HOST_REGS;hr2++) {
+              if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
+                if(is_similar_value(value,constmap[i][hr2])) {
+                  similar=1;
+                  break;
+                }
+              }
+            }
+            if(similar) {
+              int value2;
+              if(get_final_value(hr2,i,&value2)) // is this needed?
+                emit_movimm_from(value2,hr2,value,hr);
+              else
+                emit_movimm(value,hr);
+            }
+            else if(value==0) {
               emit_zeroreg(hr);
             }
             else {
               emit_movimm(value,hr);
             }
           }
+          regs[i].loadedconst|=1<<hr;
         }
       }
     }
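
The new loadedconst bitmap marks host registers that already hold their final constant, so load_consts() no longer re-emits the same immediate on every instruction. When another register already holds a nearby value, emit_movimm_from() is used instead of emit_movimm(); presumably it derives the new constant from the existing one, which is a single ALU op on ARM rather than a full 32-bit immediate load. Hypothetical example:

  /* host reg r1 already holds 0x1f800000 and the next instruction needs
     0x1f801070 in r0; then
         emit_movimm_from(0x1f800000, 1, 0x1f801070, 0);
     can emit a single "add r0, r1, #0x1070" instead of a movw/movt pair
     or a literal-pool load */
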
@@ -4322,7 +4637,7 @@ void load_all_regs(signed char i_regmap[])
         emit_zeroreg(hr);
       }
       else
-      if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
+      if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
       {
         emit_loadreg(i_regmap[hr],hr);
       }
@@ -4341,7 +4656,7 @@ void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
           emit_zeroreg(hr);
         }
         else
-        if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG)
+        if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
         {
           emit_loadreg(i_regmap[hr],hr);
         }
@@ -4354,14 +4669,14 @@ void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
 void load_regs_entry(int t)
 {
   int hr;
-  if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
-  else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
+  if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
+  else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
     emit_storereg(CCREG,HOST_CCREG);
   }
   // Load 32-bit regs
   for(hr=0;hr<HOST_REGS;hr++) {
-    if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
+    if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
       if(regs[t].regmap_entry[hr]==0) {
         emit_zeroreg(hr);
       }
@@ -4373,7 +4688,7 @@ void load_regs_entry(int t)
   }
   // Load 64-bit regs
   for(hr=0;hr<HOST_REGS;hr++) {
-    if(regs[t].regmap_entry[hr]>=64) {
+    if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
       assert(regs[t].regmap_entry[hr]!=64);
       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
@@ -4453,7 +4768,7 @@ void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int ad
     }
     // Load 32-bit regs
     for(hr=0;hr<HOST_REGS;hr++) {
-      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<64) {
+      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
         #ifdef DESTRUCTIVE_WRITEBACK
         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
         #else
@@ -4471,7 +4786,7 @@ void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int ad
     }
     //Load 64-bit regs
     for(hr=0;hr<HOST_REGS;hr++) {
-      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64) {
+      if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
           assert(regs[t].regmap_entry[hr]!=64);
           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
@@ -4512,19 +4827,19 @@ int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
       {
         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
         {
-          if(regs[t].regmap_entry[hr]!=-1)
+          if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
           {
             return 0;
           }
           else 
           if((i_dirty>>hr)&1)
           {
-            if(i_regmap[hr]<64)
+            if(i_regmap[hr]<TEMPREG)
             {
               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
                 return 0;
             }
-            else
+            else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
             {
               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
                 return 0;
@@ -4554,7 +4869,9 @@ int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
       }
     }
     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
+#ifndef FORCE32
     if(requires_32bit[t]&~i_is32) return 0;
+#endif
     // Delay slots are not valid branch targets
     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
     // Delay slots require additional processing, so do not match
@@ -4639,6 +4956,7 @@ void ds_assemble_entry(int i)
       mov_assemble(t,&regs[t]);break;
     case SYSCALL:
     case HLECALL:
+    case INTCALL:
     case SPAN:
     case UJUMP:
     case RJUMP:
@@ -4689,13 +5007,13 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
     emit_jmp(0);
   }
   else if(*adj==0||invert) {
-    emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
+    emit_addimm_and_set_flags(CLOCK_ADJUST(count+2),HOST_CCREG);
     jaddr=(int)out;
     emit_jns(0);
   }
   else
   {
-    emit_cmpimm(HOST_CCREG,-2*(count+2));
+    emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
     jaddr=(int)out;
     emit_jns(0);
   }
@@ -4761,7 +5079,7 @@ void do_ccstub(int n)
           emit_loadreg(rs2[i],s2l);
       #endif
       int hr=0;
-      int addr,alt,ntaddr;
+      int addr=-1,alt=-1,ntaddr=-1;
       while(hr<HOST_REGS)
       {
         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
@@ -4919,9 +5237,9 @@ void do_ccstub(int n)
   }
   // Update cycle count
   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
-  if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
+  if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
   emit_call((int)cc_interrupt);
-  if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
+  if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
   if(stubs[n][6]==TAKEN) {
     if(internal_branch(branch_regs[i].is32,ba[i]))
       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
@@ -4980,9 +5298,45 @@ add_to_linker(int addr,int target,int ext)
   linkcount++;
 }
 
+static void ujump_assemble_write_ra(int i)
+{
+  int rt;
+  unsigned int return_address;
+  rt=get_reg(branch_regs[i].regmap,31);
+  assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+  //assert(rt>=0);
+  return_address=start+i*4+8;
+  if(rt>=0) {
+    #ifdef USE_MINI_HT
+    if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
+      int temp=-1; // note: must be ds-safe
+      #ifdef HOST_TEMPREG
+      temp=HOST_TEMPREG;
+      #endif
+      if(temp>=0) do_miniht_insert(return_address,rt,temp);
+      else emit_movimm(return_address,rt);
+    }
+    else
+    #endif
+    {
+      #ifdef REG_PREFETCH
+      if(temp>=0) 
+      {
+        if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
+      }
+      #endif
+      emit_movimm(return_address,rt); // PC into link register
+      #ifdef IMM_PREFETCH
+      emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
+      #endif
+    }
+  }
+}
+
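
Hoisting the $31 write into ujump_assemble_write_ra() allows JAL to update the link register before the delay slot is assembled whenever the delay-slot instruction reads $ra; the removed code simply asserted that this case never happens. A guest sequence that needs the early write (shown as a comment, return address per the return_address=start+i*4+8 computation above):

  /* jal  handler        -> $ra = address of the jal + 8
     sw   $ra, 0($sp)    -> delay slot stores $ra, so it must already hold the new value */
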
 void ujump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
+  int ra_done=0;
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
   address_generation(i+1,i_regs,regs[i].regmap_entry);
   #ifdef REG_PREFETCH
@@ -4994,6 +5348,10 @@ void ujump_assemble(int i,struct regstat *i_regs)
     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
   }
   #endif
+  if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
+    ujump_assemble_write_ra(i); // writeback ra for DS
+    ra_done=1;
+  }
   ds_assemble(i+1,i_regs);
   uint64_t bc_unneeded=branch_regs[i].u;
   uint64_t bc_unneeded_upper=branch_regs[i].uu;
@@ -5002,46 +5360,8 @@ void ujump_assemble(int i,struct regstat *i_regs)
   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
                 bc_unneeded,bc_unneeded_upper);
   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
-  if(rt1[i]==31) {
-    int rt;
-    unsigned int return_address;
-    assert(rt1[i+1]!=31);
-    assert(rt2[i+1]!=31);
-    rt=get_reg(branch_regs[i].regmap,31);
-    assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-    //assert(rt>=0);
-    return_address=start+i*4+8;
-    if(rt>=0) {
-      #ifdef USE_MINI_HT
-      if(internal_branch(branch_regs[i].is32,return_address)) {
-        int temp=rt+1;
-        if(temp==EXCLUDE_REG||temp>=HOST_REGS||
-           branch_regs[i].regmap[temp]>=0)
-        {
-          temp=get_reg(branch_regs[i].regmap,-1);
-        }
-        #ifdef HOST_TEMPREG
-        if(temp<0) temp=HOST_TEMPREG;
-        #endif
-        if(temp>=0) do_miniht_insert(return_address,rt,temp);
-        else emit_movimm(return_address,rt);
-      }
-      else
-      #endif
-      {
-        #ifdef REG_PREFETCH
-        if(temp>=0) 
-        {
-          if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
-        }
-        #endif
-        emit_movimm(return_address,rt); // PC into link register
-        #ifdef IMM_PREFETCH
-        emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
-        #endif
-      }
-    }
-  }
+  if(!ra_done&&rt1[i]==31)
+    ujump_assemble_write_ra(i);
   int cc,adj;
   cc=get_reg(branch_regs[i].regmap,CCREG);
   assert(cc==HOST_CCREG);
@@ -5050,7 +5370,7 @@ void ujump_assemble(int i,struct regstat *i_regs)
   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
   #endif
   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
-  if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+  if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
   if(internal_branch(branch_regs[i].is32,ba[i]))
     assem_debug("branch: internal\n");
@@ -5065,11 +5385,33 @@ void ujump_assemble(int i,struct regstat *i_regs)
   }
 }
 
+static void rjump_assemble_write_ra(int i)
+{
+  int rt,return_address;
+  assert(rt1[i+1]!=rt1[i]);
+  assert(rt2[i+1]!=rt1[i]);
+  rt=get_reg(branch_regs[i].regmap,rt1[i]);
+  assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+  assert(rt>=0);
+  return_address=start+i*4+8;
+  #ifdef REG_PREFETCH
+  if(temp>=0) 
+  {
+    if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
+  }
+  #endif
+  emit_movimm(return_address,rt); // PC into link register
+  #ifdef IMM_PREFETCH
+  emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
+  #endif
+}
+
 void rjump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
   int temp;
   int rs,cc,adj;
+  int ra_done=0;
   rs=get_reg(branch_regs[i].regmap,rs1[i]);
   assert(rs>=0);
   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
@@ -5096,6 +5438,10 @@ void rjump_assemble(int i,struct regstat *i_regs)
     if(rh>=0) do_preload_rhash(rh);
   }
   #endif
+  if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
+    rjump_assemble_write_ra(i);
+    ra_done=1;
+  }
   ds_assemble(i+1,i_regs);
   uint64_t bc_unneeded=branch_regs[i].u;
   uint64_t bc_unneeded_upper=branch_regs[i].uu;
@@ -5105,25 +5451,8 @@ void rjump_assemble(int i,struct regstat *i_regs)
   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
                 bc_unneeded,bc_unneeded_upper);
   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
-  if(rt1[i]!=0) {
-    int rt,return_address;
-    assert(rt1[i+1]!=rt1[i]);
-    assert(rt2[i+1]!=rt1[i]);
-    rt=get_reg(branch_regs[i].regmap,rt1[i]);
-    assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-    assert(rt>=0);
-    return_address=start+i*4+8;
-    #ifdef REG_PREFETCH
-    if(temp>=0) 
-    {
-      if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
-    }
-    #endif
-    emit_movimm(return_address,rt); // PC into link register
-    #ifdef IMM_PREFETCH
-    emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
-    #endif
-  }
+  if(!ra_done&&rt1[i]!=0)
+    rjump_assemble_write_ra(i);
   cc=get_reg(branch_regs[i].regmap,CCREG);
   assert(cc==HOST_CCREG);
   #ifdef USE_MINI_HT
@@ -5154,8 +5483,14 @@ void rjump_assemble(int i,struct regstat *i_regs)
   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
   //assert(adj==0);
-  emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+  emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
+#ifdef PCSX
+  if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
+    // special case for RFE
+    emit_jmp(0);
+  else
+#endif
   emit_jns(0);
   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
   #ifdef USE_MINI_HT
@@ -5216,26 +5551,15 @@ void cjump_assemble(int i,struct regstat *i_regs)
   int prev_cop1_usable=cop1_usable;
   int unconditional=0,nop=0;
   int only32=0;
-  int ooo=1;
   int invert=0;
   int internal=internal_branch(branch_regs[i].is32,ba[i]);
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
-  if(likely[i]) ooo=0;
   if(!match) invert=1;
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(i>(ba[i]-start)>>2) invert=1;
   #endif
-    
-  if(ooo)
-    if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
-       (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1])))
-  {
-    // Write-after-read dependency prevents out of order execution
-    // First test branch condition, then execute delay slot, then branch
-    ooo=0;
-  }
-
-  if(ooo) {
+  
+  if(ooo[i]) {
     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
@@ -5271,7 +5595,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
   }
 
-  if(ooo) {
+  if(ooo[i]) {
     // Out of order execution (delay slot first)
     //printf("OOOE\n");
     address_generation(i+1,i_regs,regs[i].regmap_entry);
@@ -5296,7 +5620,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
-        if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+        if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
@@ -5315,7 +5639,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       }
     }
     else if(nop) {
-      emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+      emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       int jaddr=(int)out;
       emit_jns(0);
       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5323,7 +5647,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
     else {
       int taken=0,nottaken=0,nottaken1=0;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(!only32)
       {
         assert(s1h>=0);
@@ -5415,7 +5739,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
           if(adj) {
-            emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+            emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
             add_to_linker((int)out,ba[i],internal);
           }else{
             emit_addnop(13);
@@ -5425,7 +5749,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         }else
         #endif
         {
-          if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+          if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           if(internal)
@@ -5445,7 +5769,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
 
       if(nottaken1) set_jump_target(nottaken1,(int)out);
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -5549,7 +5873,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
-      if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
@@ -5581,7 +5905,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5590,7 +5914,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -5610,11 +5934,9 @@ void sjump_assemble(int i,struct regstat *i_regs)
   int prev_cop1_usable=cop1_usable;
   int unconditional=0,nevertaken=0;
   int only32=0;
-  int ooo=1;
   int invert=0;
   int internal=internal_branch(branch_regs[i].is32,ba[i]);
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
-  if(likely[i]) ooo=0;
   if(!match) invert=1;
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(i>(ba[i]-start)>>2) invert=1;
@@ -5623,16 +5945,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
 
-  if(ooo)
-    if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))
-  {
-    // Write-after-read dependency prevents out of order execution
-    // First test branch condition, then execute delay slot, then branch
-    ooo=0;
-  }
-  assert(opcode2[i]<0x10||ooo); // FIXME (BxxZALL)
-
-  if(ooo) {
+  if(ooo[i]) {
     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
   }
@@ -5654,7 +5967,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
     only32=(regs[i].was32>>rs1[i])&1;
   }
 
-  if(ooo) {
+  if(ooo[i]) {
     // Out of order execution (delay slot first)
     //printf("OOOE\n");
     address_generation(i+1,i_regs,regs[i].regmap_entry);
@@ -5672,8 +5985,6 @@ void sjump_assemble(int i,struct regstat *i_regs)
     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
     if(rt1[i]==31) {
       int rt,return_address;
-      assert(rt1[i+1]!=31);
-      assert(rt2[i+1]!=31);
       rt=get_reg(branch_regs[i].regmap,31);
       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
       if(rt>=0) {
@@ -5694,7 +6005,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
-        if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+        if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
@@ -5713,7 +6024,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       }
     }
     else if(nevertaken) {
-      emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+      emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       int jaddr=(int)out;
       emit_jns(0);
       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5721,7 +6032,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
     else {
       int nottaken=0;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(!only32)
       {
         assert(s1h>=0);
@@ -5779,7 +6090,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
           if(adj) {
-            emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+            emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
             add_to_linker((int)out,ba[i],internal);
           }else{
             emit_addnop(13);
@@ -5789,7 +6100,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
         }else
         #endif
         {
-          if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+          if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           if(internal)
@@ -5808,7 +6119,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       }
 
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -5817,18 +6128,30 @@ void sjump_assemble(int i,struct regstat *i_regs)
     // In-order execution (branch first)
     //printf("IOE\n");
     int nottaken=0;
+    if(rt1[i]==31) {
+      int rt,return_address;
+      rt=get_reg(branch_regs[i].regmap,31);
+      if(rt>=0) {
+        // Save the PC even if the branch is not taken
+        return_address=start+i*4+8;
+        emit_movimm(return_address,rt); // PC into link register
+        #ifdef IMM_PREFETCH
+        emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
+        #endif
+      }
+    }
     if(!unconditional) {
       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
       if(!only32)
       {
         assert(s1h>=0);
-        if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
+        if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
         {
           emit_test(s1h,s1h);
           nottaken=(int)out;
           emit_jns(1);
         }
-        if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
+        if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
         {
           emit_test(s1h,s1h);
           nottaken=(int)out;
@@ -5838,13 +6161,13 @@ void sjump_assemble(int i,struct regstat *i_regs)
       else
       {
         assert(s1l>=0);
-        if((opcode2[i]&0x1d)==0) // BLTZ/BLTZL
+        if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
         {
           emit_test(s1l,s1l);
           nottaken=(int)out;
           emit_jns(1);
         }
-        if((opcode2[i]&0x1d)==1) // BGEZ/BGEZL
+        if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
         {
           emit_test(s1l,s1l);
           nottaken=(int)out;
@@ -5879,7 +6202,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
-      if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
@@ -5910,7 +6233,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5919,7 +6242,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -5937,25 +6260,15 @@ void fjump_assemble(int i,struct regstat *i_regs)
   assem_debug("fmatch=%d\n",match);
   int fs,cs;
   int eaddr;
-  int ooo=1;
   int invert=0;
   int internal=internal_branch(branch_regs[i].is32,ba[i]);
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
-  if(likely[i]) ooo=0;
   if(!match) invert=1;
   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
   if(i>(ba[i]-start)>>2) invert=1;
   #endif
 
-  if(ooo)
-    if(itype[i+1]==FCOMP)
-  {
-    // Write-after-read dependency prevents out of order execution
-    // First test branch condition, then execute delay slot, then branch
-    ooo=0;
-  }
-
-  if(ooo) {
+  if(ooo[i]) {
     fs=get_reg(branch_regs[i].regmap,FSREG);
     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
   }
@@ -5974,7 +6287,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
     cop1_usable=1;
   }
 
-  if(ooo) {
+  if(ooo[i]) {
     // Out of order execution (delay slot first)
     //printf("OOOE\n");
     ds_assemble(i+1,i_regs);
@@ -5995,7 +6308,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
     assem_debug("cycle count (adj)\n");
     if(1) {
       int nottaken=0;
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(1) {
         assert(fs>=0);
         emit_testimm(fs,0x800000);
@@ -6022,7 +6335,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       } // if(!only32)
           
       if(invert) {
-        if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+        if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         else if(match) emit_addnop(13);
         #endif
@@ -6043,7 +6356,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       }
 
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -6095,7 +6408,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
     assem_debug("cycle count (adj)\n");
-    if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+    if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
     if(internal)
       assem_debug("branch: internal\n");
@@ -6125,7 +6438,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -6134,7 +6447,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -6207,7 +6520,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
   }
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
   if(opcode[i]==2) // J
   {
     unconditional=1;
@@ -6470,6 +6783,7 @@ static void pagespan_ds()
       mov_assemble(0,&regs[0]);break;
     case SYSCALL:
     case HLECALL:
+    case INTCALL:
     case SPAN:
     case UJUMP:
     case RJUMP:
@@ -6504,16 +6818,22 @@ static void pagespan_ds()
 void unneeded_registers(int istart,int iend,int r)
 {
   int i;
-  uint64_t u,uu,b,bu;
-  uint64_t temp_u,temp_uu;
+  uint64_t u,uu,gte_u,b,bu,gte_bu;
+  uint64_t temp_u,temp_uu,temp_gte_u=0;
   uint64_t tdep;
+  uint64_t gte_u_unknown=0;
+  if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
+    gte_u_unknown=~0ll;
   if(iend==slen-1) {
     u=1;uu=1;
+    gte_u=gte_u_unknown;
   }else{
     u=unneeded_reg[iend+1];
     uu=unneeded_reg_upper[iend+1];
     u=1;uu=1;
+    gte_u=gte_unneeded[iend+1];
   }
+
   for (i=iend;i>=istart;i--)
   {
     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
@@ -6527,6 +6847,7 @@ void unneeded_registers(int istart,int iend,int r)
         // Branch out of this block, flush all regs
         u=1;
         uu=1;
+        gte_u=gte_u_unknown;
         /* Hexagon hack 
         if(itype[i]==UJUMP&&rt1[i]==31)
         {
@@ -6558,17 +6879,21 @@ void unneeded_registers(int istart,int iend,int r)
         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
         u|=1;uu|=1;
+        gte_u|=gte_rt[i+1];
+        gte_u&=~gte_rs[i+1];
         // If branch is "likely" (and conditional)
         // then we skip the delay slot on the fall-thru path
         if(likely[i]) {
           if(i<slen-1) {
             u&=unneeded_reg[i+2];
             uu&=unneeded_reg_upper[i+2];
+            gte_u&=gte_unneeded[i+2];
           }
           else
           {
             u=1;
             uu=1;
+            gte_u=gte_u_unknown;
           }
         }
       }
@@ -6582,10 +6907,12 @@ void unneeded_registers(int istart,int iend,int r)
           {
             // Unconditional branch
             temp_u=1;temp_uu=1;
+            temp_gte_u=0;
           } else {
             // Conditional branch (not taken case)
             temp_u=unneeded_reg[i+2];
             temp_uu=unneeded_reg_upper[i+2];
+            temp_gte_u&=gte_unneeded[i+2];
           }
           // Merge in delay slot
           tdep=(~temp_uu>>rt1[i+1])&1;
@@ -6595,17 +6922,21 @@ void unneeded_registers(int istart,int iend,int r)
           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
           temp_u|=1;temp_uu|=1;
+          temp_gte_u|=gte_rt[i+1];
+          temp_gte_u&=~gte_rs[i+1];
           // If branch is "likely" (and conditional)
           // then we skip the delay slot on the fall-thru path
           if(likely[i]) {
             if(i<slen-1) {
               temp_u&=unneeded_reg[i+2];
               temp_uu&=unneeded_reg_upper[i+2];
+              temp_gte_u&=gte_unneeded[i+2];
             }
             else
             {
               temp_u=1;
               temp_uu=1;
+              temp_gte_u=gte_u_unknown;
             }
           }
           tdep=(~temp_uu>>rt1[i])&1;
@@ -6615,8 +6946,11 @@ void unneeded_registers(int istart,int iend,int r)
           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
           temp_u|=1;temp_uu|=1;
+          temp_gte_u|=gte_rt[i];
+          temp_gte_u&=~gte_rs[i];
           unneeded_reg[i]=temp_u;
           unneeded_reg_upper[i]=temp_uu;
+          gte_unneeded[i]=temp_gte_u;
           // Only go three levels deep.  This recursion can take an
           // excessive amount of time if there are a lot of nested loops.
           if(r<2) {
@@ -6624,6 +6958,7 @@ void unneeded_registers(int istart,int iend,int r)
           }else{
             unneeded_reg[(ba[i]-start)>>2]=1;
             unneeded_reg_upper[(ba[i]-start)>>2]=1;
+            gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
           }
         } /*else*/ if(1) {
           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
@@ -6631,6 +6966,7 @@ void unneeded_registers(int istart,int iend,int r)
             // Unconditional branch
             u=unneeded_reg[(ba[i]-start)>>2];
             uu=unneeded_reg_upper[(ba[i]-start)>>2];
+            gte_u=gte_unneeded[(ba[i]-start)>>2];
             branch_unneeded_reg[i]=u;
             branch_unneeded_reg_upper[i]=uu;
         //u=1;
@@ -6645,10 +6981,13 @@ void unneeded_registers(int istart,int iend,int r)
             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
             u|=1;uu|=1;
+            gte_u|=gte_rt[i+1];
+            gte_u&=~gte_rs[i+1];
           } else {
             // Conditional branch
             b=unneeded_reg[(ba[i]-start)>>2];
             bu=unneeded_reg_upper[(ba[i]-start)>>2];
+            gte_bu=gte_unneeded[(ba[i]-start)>>2];
             branch_unneeded_reg[i]=b;
             branch_unneeded_reg_upper[i]=bu;
         //b=1;
@@ -6663,20 +7002,25 @@ void unneeded_registers(int istart,int iend,int r)
             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
             b|=1;bu|=1;
+            gte_bu|=gte_rt[i+1];
+            gte_bu&=~gte_rs[i+1];
             // If branch is "likely" then we skip the
             // delay slot on the fall-thru path
             if(likely[i]) {
               u=b;
               uu=bu;
+              gte_u=gte_bu;
               if(i<slen-1) {
                 u&=unneeded_reg[i+2];
                 uu&=unneeded_reg_upper[i+2];
+                gte_u&=gte_unneeded[i+2];
         //u=1;
         //uu=1;
               }
             } else {
               u&=b;
               uu&=bu;
+              gte_u&=gte_bu;
         //u=1;
         //uu=1;
             }
@@ -6693,7 +7037,7 @@ void unneeded_registers(int istart,int iend,int r)
         }
       }
     }
-    else if(itype[i]==SYSCALL||itype[i]==HLECALL)
+    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
     {
       // SYSCALL instruction (software interrupt)
       u=1;
@@ -6712,11 +7056,15 @@ void unneeded_registers(int istart,int iend,int r)
     u|=1LL<<rt2[i];
     uu|=1LL<<rt1[i];
     uu|=1LL<<rt2[i];
+    gte_u|=gte_rt[i];
     // Accessed registers are needed
     u&=~(1LL<<rs1[i]);
     u&=~(1LL<<rs2[i]);
     uu&=~(1LL<<us1[i]);
     uu&=~(1LL<<us2[i]);
+    gte_u&=~gte_rs[i];
+    if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
+      gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
     // Source-target dependencies
     uu&=~(tdep<<dep1[i]);
     uu&=~(tdep<<dep2[i]);
@@ -6725,6 +7073,7 @@ void unneeded_registers(int istart,int iend,int r)
     // Save it
     unneeded_reg[i]=u;
     unneeded_reg_upper[i]=uu;
+    gte_unneeded[i]=gte_u;
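
gte_rs[]/gte_rt[] carry 64-bit masks of the coprocessor-2 registers each instruction reads and writes, and gte_unneeded[] is propagated backwards through branches exactly like unneeded_reg[], so GTE writes whose value is never consumed (or MFC2/CFC2 into a dead MIPS register, handled just above) can later be dropped. The mask layout is assumed to be:

  /* assumption: bit r = cop2 data register r, bit 32+r = cop2 control register r */
  uint64_t reads_vz0   = 1ull << 1;          /* data reg 1 (VZ0)      */
  uint64_t writes_flag = 1ull << (32 + 31);  /* control reg 31 (FLAG) */
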
     /*
     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
     printf("U:");
@@ -7059,7 +7408,7 @@ static void provisional_r32()
         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
       }
     }
-    else if(itype[i]==SYSCALL||itype[i]==HLECALL)
+    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
     {
       // SYSCALL instruction (software interrupt)
       r32=0;
@@ -7317,6 +7666,10 @@ void clean_registers(int istart,int iend,int wr)
                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
                 }
+                if(branch_regs[i].regmap[r]>=0) {
+                  will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
+                  wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
+                }
               }
             }
           //}
@@ -7346,13 +7699,14 @@ void clean_registers(int istart,int iend,int wr)
           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
             for(r=0;r<HOST_REGS;r++) {
               if(r!=EXCLUDE_REG) {
-                if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
+                signed char target_reg=branch_regs[i].regmap[r];
+                if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
                 }
-                else
-                {
-                  will_dirty_i&=~(1<<r);
+                else if(target_reg>=0) {
+                  will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
+                  wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
                 }
                 // Treat delay slot as part of branch too
                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
@@ -7389,7 +7743,7 @@ void clean_registers(int istart,int iend,int wr)
               }
             }
           }
-          // Merge in delay slot
+          // Merge in delay slot (won't dirty)
           for(r=0;r<HOST_REGS;r++) {
             if(r!=EXCLUDE_REG) {
               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
@@ -7413,7 +7767,7 @@ void clean_registers(int istart,int iend,int wr)
         }
       }
     }
-    else if(itype[i]==SYSCALL||itype[i]==HLECALL)
+    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
     {
       // SYSCALL instruction (software interrupt)
       will_dirty_i=0;
@@ -7506,7 +7860,7 @@ void clean_registers(int istart,int iend,int wr)
             regs[i].wasdirty|=will_dirty_i&(1<<r);
           }
         }
-        else if((nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
+        else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
           // Register moved to a different register
           will_dirty_i&=~(1<<r);
           wont_dirty_i&=~(1<<r);
@@ -7535,6 +7889,7 @@ void clean_registers(int istart,int iend,int wr)
   }
 }
 
+#ifdef DISASM
   /* disassembly */
 void disassemble_inst(int i)
 {
@@ -7549,7 +7904,7 @@ void disassemble_inst(int i)
       case FJUMP:
         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
       case RJUMP:
-        if (rt1[i]!=31)
+        if (opcode[i]==0x9&&rt1[i]!=31)
           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
         else
           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
@@ -7615,50 +7970,77 @@ void disassemble_inst(int i)
       case C2LS:
         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
         break;
+      case INTCALL:
+        printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
+        break;
       default:
         //printf (" %s %8x\n",insn[i],source[i]);
         printf (" %x: %s\n",start+i*4,insn[i]);
     }
 }
+#else
+static void disassemble_inst(int i) {}
+#endif // DISASM
+
+// clear the state completely, instead of just marking
+// things invalid like invalidate_all_pages() does
+void new_dynarec_clear_full()
+{
+  int n;
+  out=(u_char *)BASE_ADDR;
+  memset(invalid_code,1,sizeof(invalid_code));
+  memset(hash_table,0xff,sizeof(hash_table));
+  memset(mini_ht,-1,sizeof(mini_ht));
+  memset(restore_candidate,0,sizeof(restore_candidate));
+  memset(shadow,0,sizeof(shadow));
+  copy=shadow;
+  expirep=16384; // Expiry pointer, +2 blocks
+  pending_exception=0;
+  literalcount=0;
+  stop_after_jal=0;
+  inv_code_start=inv_code_end=~0;
+  // TLB
+#ifndef DISABLE_TLB
+  using_tlb=0;
+  for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
+    memory_map[n]=-1;
+  for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
+    memory_map[n]=((u_int)rdram-0x80000000)>>2;
+  for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
+    memory_map[n]=-1;
+#endif
+  for(n=0;n<4096;n++) ll_clear(jump_in+n);
+  for(n=0;n<4096;n++) ll_clear(jump_out+n);
+  for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
+}
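
Unlike marking pages invalid, new_dynarec_clear_full() returns the translation cache, hash tables and block lists to their initial state, so the frontend can presumably call it whenever all previously compiled code becomes meaningless, for example:

  /* hypothetical frontend hook - the name is illustrative, not part of this patch */
  static void on_savestate_loaded(void)
  {
    new_dynarec_clear_full();  /* old blocks may have been compiled from different RAM contents */
  }
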
 
 void new_dynarec_init()
 {
   printf("Init new dynarec\n");
   out=(u_char *)BASE_ADDR;
+#if BASE_ADDR_FIXED
   if (mmap (out, 1<<TARGET_SIZE_2,
             PROT_READ | PROT_WRITE | PROT_EXEC,
             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
             -1, 0) <= 0) {printf("mmap() failed\n");}
+#else
+  // not all systems allow executing code in the data segment by default
+  if (mprotect(out, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
+    printf("mprotect() failed\n");
+#endif
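
When BASE_ADDR_FIXED is not set, the translation cache is expected to be a statically allocated buffer rather than a fixed-address mmap. Data segments are normally mapped non-executable, so mprotect() adds PROT_EXEC; since mprotect() wants page-aligned addresses, such a buffer would presumably be declared roughly as:

  /* sketch of the static-cache configuration this path assumes */
  static u_char translation_cache[1 << TARGET_SIZE_2]
    __attribute__((aligned(4096)));            /* page-aligned for mprotect() */
  #define BASE_ADDR ((u_int)translation_cache)
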
 #ifdef MUPEN64
   rdword=&readmem_dword;
   fake_pc.f.r.rs=&readmem_dword;
   fake_pc.f.r.rt=&readmem_dword;
   fake_pc.f.r.rd=&readmem_dword;
 #endif
-  int n;
-  for(n=0x80000;n<0x80800;n++)
-    invalid_code[n]=1;
-  for(n=0;n<65536;n++)
-    hash_table[n][0]=hash_table[n][2]=-1;
-  memset(mini_ht,-1,sizeof(mini_ht));
-  memset(restore_candidate,0,sizeof(restore_candidate));
-  copy=shadow;
-  expirep=16384; // Expiry pointer, +2 blocks
-  pending_exception=0;
-  literalcount=0;
+  int n;
+  cycle_multiplier=200;
+  new_dynarec_clear_full();
 #ifdef HOST_IMM8
   // Copy this into local area so we don't have to put it in every literal pool
   invc_ptr=invalid_code;
 #endif
-  stop_after_jal=0;
-  // TLB
-  using_tlb=0;
-  for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
-    memory_map[n]=-1;
-  for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
-    memory_map[n]=((u_int)rdram-0x80000000)>>2;
-  for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
-    memory_map[n]=-1;
 #ifdef MUPEN64
   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
     writemem[n] = write_nomem_new;
@@ -7699,12 +8081,17 @@ void new_dynarec_init()
 #endif
   tlb_hacks();
   arch_init();
+#ifndef RAM_FIXED
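+  // RAM is not mapped at a fixed host address: remember rdram's offset from guest 0x80000000 so generated code can translate addresses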
+  ram_offset=(u_int)rdram-0x80000000;
+#endif
 }
 
 void new_dynarec_cleanup()
 {
   int n;
+  #if BASE_ADDR_FIXED
   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
+  #endif
   for(n=0;n<4096;n++) ll_clear(jump_in+n);
   for(n=0;n<4096;n++) ll_clear(jump_out+n);
   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
@@ -7736,25 +8123,36 @@ int new_recompile_block(int addr)
   //rlist();
   start = (u_int)addr&~3;
   //assert(((u_int)addr&1)==0);
+  new_dynarec_did_compile=1;
 #ifdef PCSX
-  if (Config.HLE && start == 0x80001000) {
+  if (Config.HLE && start == 0x80001000) // hlecall
+  {
     // XXX: is this enough? Maybe check hleSoftCall?
     u_int beginning=(u_int)out;
     u_int page=get_page(start);
-    ll_add(jump_in+page,start,out);
     invalid_code[start>>12]=0;
     emit_movimm(start,0);
     emit_writeword(0,(int)&pcaddr);
     emit_jmp((int)new_dyna_leave);
+    literal_pool(0);
 #ifdef __arm__
     __clear_cache((void *)beginning,out);
 #endif
+    ll_add(jump_in+page,start,(void *)beginning);
     return 0;
   }
-  else if ((u_int)addr < 0x00200000) {
+  else if ((u_int)addr < 0x00200000 ||
+    (0xa0000000 <= addr && addr < 0xa0200000)) {
     // used for BIOS calls mostly?
-    source = (u_int *)((u_int)rdram+start-0);
-    pagelimit = 0x00200000;
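+    // RAM (2MB) is mirrored at 0x00000000/0x80000000/0xa0000000, so mask down to the offset within rdram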
+    source = (u_int *)((u_int)rdram+(start&0x1fffff));
+    pagelimit = (addr&0xa0000000)|0x00200000;
+  }
+  else if (!Config.HLE && (
+/*    (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
+    (0xbfc00000 <= addr && addr < 0xbfc80000))) {
+    // BIOS
+    source = (u_int *)((u_int)psxR+(start&0x7ffff));
+    pagelimit = (addr&0xfff00000)|0x80000;
   }
   else
 #endif
@@ -7789,7 +8187,7 @@ int new_recompile_block(int addr)
     else {
       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
-      return 1; // Caller will invoke exception handler
+      return -1; // Caller will invoke exception handler
     }
     //printf("source= %x\n",(int)source);
   }
@@ -7819,7 +8217,8 @@ int new_recompile_block(int addr)
   /* Pass 1 disassembly */
 
   for(i=0;!done;i++) {
-    bt[i]=0;likely[i]=0;op2=0;
+    bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
+    minimum_free_regs[i]=0;
     opcode[i]=op=source[i]>>26;
     switch(op)
     {
@@ -7842,17 +8241,10 @@ int new_recompile_block(int addr)
           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
-          case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
-          case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
-          case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
-          case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
-          case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
-          case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
-          case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
@@ -7863,22 +8255,31 @@ int new_recompile_block(int addr)
           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
-          case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
-          case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
-          case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
-          case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
+#ifndef FORCE32
+          case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
+          case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
+          case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
+          case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
+          case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
+          case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
+          case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
+          case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
+          case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
+          case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
+          case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
+#endif
         }
         break;
       case 0x01: strcpy(insn[i],"regimm"); type=NI;
@@ -7928,7 +8329,11 @@ int new_recompile_block(int addr)
             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
+#ifdef PCSX
+            case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
+#else
             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
+#endif
           }
         }
         break;
@@ -8047,11 +8452,11 @@ int new_recompile_block(int addr)
           break;
         }
         break;
+#ifndef FORCE32
       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
-#ifndef FORCE32
       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
@@ -8064,7 +8469,9 @@ int new_recompile_block(int addr)
       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
+#ifndef FORCE32
       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
+#endif
       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
@@ -8092,18 +8499,22 @@ int new_recompile_block(int addr)
 #ifdef PCSX
       case 0x12: strcpy(insn[i],"COP2"); type=NI;
         op2=(source[i]>>21)&0x1f;
-        switch(op2)
+        //if (op2 & 0x10) {
+        if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
+          if (gte_handlers[source[i]&0x3f]!=NULL) {
+            if (gte_regnames[source[i]&0x3f]!=NULL)
+              strcpy(insn[i],gte_regnames[source[i]&0x3f]);
+            else
+              snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
+            type=C2OP;
+          }
+        }
+        else switch(op2)
         {
           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
-          default:
-            if (gte_handlers[source[i]&0x3f]!=NULL) {
-              snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
-              type=C2OP;
-            }
-            break;
         }
         break;
       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
@@ -8122,6 +8533,7 @@ int new_recompile_block(int addr)
     us2[i]=0;
     dep1[i]=0;
     dep2[i]=0;
+    gte_rs[i]=gte_rt[i]=0;
     switch(type) {
       case LOAD:
         rs1[i]=(source[i]>>21)&0x1f;
@@ -8285,7 +8697,6 @@ int new_recompile_block(int addr)
         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
         break;
       case COP1:
-      case COP2:
         rs1[i]=0;
         rs2[i]=0;
         rt1[i]=0;
@@ -8295,6 +8706,23 @@ int new_recompile_block(int addr)
         if(op2==5) us1[i]=rs1[i]; // DMTC1
         rs2[i]=CSREG;
         break;
+      case COP2:
+        rs1[i]=0;
+        rs2[i]=0;
+        rt1[i]=0;
+        rt2[i]=0;
+        if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
+        if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
+        rs2[i]=CSREG;
+        int gr=(source[i]>>11)&0x1F;
+        switch(op2)
+        {
+          case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
+          case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
+          case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
+          case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
+        }
+        break;
       case C1LS:
         rs1[i]=(source[i]>>21)&0x1F;
         rs2[i]=CSREG;
@@ -8308,6 +8736,23 @@ int new_recompile_block(int addr)
         rt1[i]=0;
         rt2[i]=0;
         imm[i]=(short)source[i];
+        if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
+        else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
+        break;
+      case C2OP:
+        rs1[i]=0;
+        rs2[i]=0;
+        rt1[i]=0;
+        rt2[i]=0;
+        gte_rs[i]=gte_reg_reads[source[i]&0x3f];
+        gte_rt[i]=gte_reg_writes[source[i]&0x3f];
+        gte_rt[i]|=1ll<<63; // every op changes flags
+        if((source[i]&0x3f)==GTE_MVMVA) {
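+          // MVMVA reads only the vector selected by bits 15-16: V0/V1/V2 occupy
+          // data regs 0-5 (two each), IR1-IR3 are regs 9-11; drop the unused ones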
+          int v = (source[i] >> 15) & 3;
+          gte_rs[i]&=~0xe3fll;
+          if(v==3) gte_rs[i]|=0xe00ll;
+          else gte_rs[i]|=3ll<<(v*2);
+        }
         break;
       case FLOAT:
       case FCONV:
@@ -8324,6 +8769,7 @@ int new_recompile_block(int addr)
         break;
       case SYSCALL:
       case HLECALL:
+      case INTCALL:
         rs1[i]=CCREG;
         rs2[i]=0;
         rt1[i]=0;
@@ -8345,16 +8791,45 @@ int new_recompile_block(int addr)
     else if(type==CJUMP||type==SJUMP||type==FJUMP)
       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
     else ba[i]=-1;
+#ifdef PCSX
+    if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
+      int do_in_intrp=0;
+      // branch in delay slot?
+      if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
+        // don't handle first branch and call interpreter if it's hit
+        printf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
+        do_in_intrp=1;
+      }
+      // basic load delay detection
+      else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
+        int t=(ba[i-1]-start)/4;
+        if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
+          // jump target wants DS result - potential load delay effect
+          printf("load delay @%08x (%08x)\n", addr + i*4, addr);
+          do_in_intrp=1;
+          bt[t+1]=1; // expected return from interpreter
+        }
+        else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
+              !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
+          // v0 overwrite like this is a sign of trouble, bail out
+          printf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
+          do_in_intrp=1;
+        }
+      }
+      if(do_in_intrp) {
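+        // give up on this block: turn the branch into an INTCALL (interpreter fallback) and end the block before the delay slot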
+        rs1[i-1]=CCREG;
+        rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
+        ba[i-1]=-1;
+        itype[i-1]=INTCALL;
+        done=2;
+        i--; // don't compile the DS
+      }
+    }
+#endif
     /* Is this the end of the block? */
     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
-        done=1;
-        // Does the block continue due to a branch?
-        for(j=i-1;j>=0;j--)
-        {
-          if(ba[j]==start+i*4+4) done=j=0;
-          if(ba[j]==start+i*4+8) done=j=0;
-        }
+        done=2;
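+        // 2 = provisional end; cancelled below if an earlier branch targets the following instructions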
       }
       else {
         if(stop_after_jal) done=1;
@@ -8367,7 +8842,16 @@ int new_recompile_block(int addr)
       if(i>MAXBLOCK/2) done=1;
     }
     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
-    if(itype[i]==HLECALL) done=1;
+    if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
+    if(done==2) {
+      // Does the block continue due to a branch?
+      for(j=i-1;j>=0;j--)
+      {
+        if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
+        if(ba[j]==start+i*4+4) done=j=0;
+        if(ba[j]==start+i*4+8) done=j=0;
+      }
+    }
     //assert(i<MAXBLOCK-1);
     if(start+i*4==pagelimit-4) done=1;
     assert(start+i*4<pagelimit);
@@ -8402,12 +8886,14 @@ int new_recompile_block(int addr)
   dirty_reg(&current,CCREG);
   current.isconst=0;
   current.wasconst=0;
+  current.waswritten=0;
   int ds=0;
   int cc=0;
-  int hr;
-  
+  int hr=-1;
+
+#ifndef FORCE32
   provisional_32bit();
-  
+#endif
   if((u_int)addr&1) {
     // First instruction is delay slot
     cc=-1;
@@ -8429,6 +8915,7 @@ int new_recompile_block(int addr)
         if(current.regmap[hr]==0) current.regmap[hr]=-1;
       }
       current.isconst=0;
+      current.waswritten=0;
     }
     if(i>1)
     {
@@ -8449,6 +8936,7 @@ int new_recompile_block(int addr)
         }
       }
     }
+#ifndef FORCE32
     // If something jumps here with 64-bit values
     // then promote those registers to 64 bits
     if(bt[i])
@@ -8467,7 +8955,9 @@ int new_recompile_block(int addr)
       }
       if(temp_is32!=current.is32) {
         //printf("dumping 32-bit regs (%x)\n",start+i*4);
-        #ifdef DESTRUCTIVE_WRITEBACK
+        #ifndef DESTRUCTIVE_WRITEBACK
+        if(ds)
+        #endif
         for(hr=0;hr<HOST_REGS;hr++)
         {
           int r=current.regmap[hr];
@@ -8479,12 +8969,10 @@ int new_recompile_block(int addr)
             }
           }
         }
-        #endif
         current.is32=temp_is32;
       }
     }
-#ifdef FORCE32
-    memset(p32, 0xff, sizeof(p32));
+#else
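+    // PSX (FORCE32) has no 64-bit GPR state: treat every register as 32-bit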
     current.is32=-1LL;
 #endif
 
@@ -8492,7 +8980,8 @@ int new_recompile_block(int addr)
     regs[i].wasconst=current.isconst;
     regs[i].was32=current.is32;
     regs[i].wasdirty=current.dirty;
-    #ifdef DESTRUCTIVE_WRITEBACK
+    regs[i].loadedconst=0;
+    #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
     // To change a dirty register from 32 to 64 bits, we must write
     // it out during the previous cycle (for branches, 2 cycles)
     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
@@ -8669,12 +9158,14 @@ int new_recompile_block(int addr)
           if (rt1[i]==31) {
             alloc_reg(&current,i,31);
             dirty_reg(&current,31);
-            assert(rs1[i+1]!=31&&rs2[i+1]!=31);
+            //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
+            //assert(rt1[i+1]!=rt1[i]);
             #ifdef REG_PREFETCH
             alloc_reg(&current,i,PTEMP);
             #endif
             //current.is32|=1LL<<rt1[i];
           }
+          ooo[i]=1;
           delayslot_alloc(&current,i+1);
           //current.isconst=0; // DEBUG
           ds=1;
@@ -8693,7 +9184,8 @@ int new_recompile_block(int addr)
             if (rt1[i]!=0) {
               alloc_reg(&current,i,rt1[i]);
               dirty_reg(&current,rt1[i]);
-              assert(rs1[i+1]!=31&&rs2[i+1]!=31);
+              assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
+              assert(rt1[i+1]!=rt1[i]);
               #ifdef REG_PREFETCH
               alloc_reg(&current,i,PTEMP);
               #endif
@@ -8718,6 +9210,7 @@ int new_recompile_block(int addr)
             alloc_reg(&current,i,RTEMP);
           }
           //current.isconst=0; // DEBUG
+          ooo[i]=1;
           ds=1;
           break;
         case CJUMP:
@@ -8741,9 +9234,6 @@ int new_recompile_block(int addr)
                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
               // The delay slot overwrites one of our conditions.
               // Allocate the branch condition registers instead.
-              // Note that such a sequence of instructions could
-              // be considered a bug since the branch can not be
-              // re-executed if an exception occurs.
               current.isconst=0;
               current.wasconst=0;
               regs[i].wasconst=0;
@@ -8755,7 +9245,11 @@ int new_recompile_block(int addr)
                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
               }
             }
-            else delayslot_alloc(&current,i+1);
+            else
+            {
+              ooo[i]=1;
+              delayslot_alloc(&current,i+1);
+            }
           }
           else
           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
@@ -8770,9 +9264,6 @@ int new_recompile_block(int addr)
             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
               // The delay slot overwrites one of our conditions.
               // Allocate the branch condition registers instead.
-              // Note that such a sequence of instructions could
-              // be considered a bug since the branch can not be
-              // re-executed if an exception occurs.
               current.isconst=0;
               current.wasconst=0;
               regs[i].wasconst=0;
@@ -8782,7 +9273,11 @@ int new_recompile_block(int addr)
                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
               }
             }
-            else delayslot_alloc(&current,i+1);
+            else
+            {
+              ooo[i]=1;
+              delayslot_alloc(&current,i+1);
+            }
           }
           else
           // Don't alloc the delay slot yet because we might not execute it
@@ -8837,18 +9332,14 @@ int new_recompile_block(int addr)
             if (rt1[i]==31) { // BLTZAL/BGEZAL
               alloc_reg(&current,i,31);
               dirty_reg(&current,31);
-              assert(rs1[i+1]!=31&&rs2[i+1]!=31);
               //#ifdef REG_PREFETCH
               //alloc_reg(&current,i,PTEMP);
               //#endif
               //current.is32|=1LL<<rt1[i];
             }
-            if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
-              // The delay slot overwrites the branch condition.
+            if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
+               ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
               // Allocate the branch condition registers instead.
-              // Note that such a sequence of instructions could
-              // be considered a bug since the branch can not be
-              // re-executed if an exception occurs.
               current.isconst=0;
               current.wasconst=0;
               regs[i].wasconst=0;
@@ -8858,7 +9349,11 @@ int new_recompile_block(int addr)
                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
               }
             }
-            else delayslot_alloc(&current,i+1);
+            else
+            {
+              ooo[i]=1;
+              delayslot_alloc(&current,i+1);
+            }
           }
           else
           // Don't alloc the delay slot yet because we might not execute it
@@ -8896,15 +9391,13 @@ int new_recompile_block(int addr)
             if(itype[i+1]==FCOMP) {
               // The delay slot overwrites the branch condition.
               // Allocate the branch condition registers instead.
-              // Note that such a sequence of instructions could
-              // be considered a bug since the branch can not be
-              // re-executed if an exception occurs.
               alloc_cc(&current,i);
               dirty_reg(&current,CCREG);
               alloc_reg(&current,i,CSREG);
               alloc_reg(&current,i,FSREG);
             }
             else {
+              ooo[i]=1;
               delayslot_alloc(&current,i+1);
               alloc_reg(&current,i+1,CSREG);
             }
@@ -8974,6 +9467,7 @@ int new_recompile_block(int addr)
           break;
         case SYSCALL:
         case HLECALL:
+        case INTCALL:
           syscall_alloc(&current,i);
           break;
         case SPAN:
@@ -9049,6 +9543,14 @@ int new_recompile_block(int addr)
       }
       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
     }
+
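+    // waswritten tracks registers recently used as a store base with a small offset;
+    // it is cleared when the register is overwritten or used with a large offset,
+    // letting the store assembler skip a redundant self-modifying-code check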
+    if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
+      current.waswritten|=1<<rs1[i-1];
+    current.waswritten&=~(1<<rt1[i]);
+    current.waswritten&=~(1<<rt2[i]);
+    if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
+      current.waswritten&=~(1<<rs1[i]);
+
     /* Branch post-alloc */
     if(i>0)
     {
@@ -9069,7 +9571,7 @@ int new_recompile_block(int addr)
             branch_regs[i-1].is32|=1LL<<31;
           }
           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
-          memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
+          memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
           break;
         case RJUMP:
           memcpy(&branch_regs[i-1],&current,sizeof(current));
@@ -9094,7 +9596,7 @@ int new_recompile_block(int addr)
           }
           #endif
           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
-          memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
+          memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
           break;
         case CJUMP:
           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
@@ -9130,7 +9632,7 @@ int new_recompile_block(int addr)
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
           }
           else
           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
@@ -9163,7 +9665,7 @@ int new_recompile_block(int addr)
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
           }
           else
           // Alloc the delay slot in case the branch is taken
@@ -9229,7 +9731,7 @@ int new_recompile_block(int addr)
             branch_regs[i-1].isconst=0;
             branch_regs[i-1].wasconst=0;
             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
-            memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
+            memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
           }
           else
           // Alloc the delay slot in case the branch is taken
@@ -9343,6 +9845,21 @@ int new_recompile_block(int addr)
     {
       cc=0;
     }
+#if defined(PCSX) && !defined(DRC_DBG)
+    else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
+    {
+      // GTE runs in parallel until accessed, divide by 2 for a rough guess
+      cc+=gte_cycletab[source[i]&0x3f]/2;
+    }
+    else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
+    {
+      cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
+    }
+    else if(itype[i]==C2LS)
+    {
+      cc+=4;
+    }
+#endif
     else
     {
       cc++;
@@ -9353,7 +9870,7 @@ int new_recompile_block(int addr)
       regs[i].is32=current.is32;
       regs[i].dirty=current.dirty;
       regs[i].isconst=current.isconst;
-      memcpy(constmap[i],current.constmap,sizeof(current.constmap));
+      memcpy(constmap[i],current_constmap,sizeof(current_constmap));
     }
     for(hr=0;hr<HOST_REGS;hr++) {
       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
@@ -9363,6 +9880,7 @@ int new_recompile_block(int addr)
       }
     }
     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
+    regs[i].waswritten=current.waswritten;
   }
   
   /* Pass 4 - Cull unused host registers */
@@ -9405,8 +9923,8 @@ int new_recompile_block(int addr)
         }
       }
       // Don't need stuff which is overwritten
-      if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
-      if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
+      //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
+      //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
       // Merge in delay slot
       for(hr=0;hr<HOST_REGS;hr++)
       {
@@ -9438,7 +9956,7 @@ int new_recompile_block(int addr)
         }
       }
     }
-    else if(itype[i]==SYSCALL||itype[i]==HLECALL)
+    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
     {
       // SYSCALL instruction (software interrupt)
       nr=0;
@@ -9525,7 +10043,10 @@ int new_recompile_block(int addr)
             if(likely[i]) {
               regs[i].regmap[hr]=-1;
               regs[i].isconst&=~(1<<hr);
-              if(i<slen-2) regmap_pre[i+2][hr]=-1;
+              if(i<slen-2) {
+                regmap_pre[i+2][hr]=-1;
+                regs[i+2].wasconst&=~(1<<hr);
+              }
             }
           }
         }
@@ -9580,6 +10101,7 @@ int new_recompile_block(int addr)
               {
                 if(!likely[i]&&i<slen-2) {
                   regmap_pre[i+2][hr]=-1;
+                  regs[i+2].wasconst&=~(1<<hr);
                 }
               }
             }
@@ -9625,6 +10147,7 @@ int new_recompile_block(int addr)
                 }
                 regmap_pre[i+1][hr]=-1;
                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
+                regs[i+1].wasconst&=~(1<<hr);
               }
               regs[i].regmap[hr]=-1;
               regs[i].isconst&=~(1<<hr);
@@ -9640,7 +10163,7 @@ int new_recompile_block(int addr)
   // If a register is allocated during a loop, try to allocate it for the
   // entire loop, if possible.  This avoids loading/storing registers
   // inside of the loop.
-
+  
   signed char f_regmap[HOST_REGS];
   clear_all_regs(f_regmap);
   for(i=0;i<slen-1;i++)
@@ -9657,7 +10180,7 @@ int new_recompile_block(int addr)
       {
         int t=(ba[i]-start)>>2;
         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
-        if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
+        if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
         for(hr=0;hr<HOST_REGS;hr++)
         {
           if(regs[i].regmap[hr]>64) {
@@ -9665,29 +10188,55 @@ int new_recompile_block(int addr)
               f_regmap[hr]=regs[i].regmap[hr];
             else f_regmap[hr]=-1;
           }
-          else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
+          else if(regs[i].regmap[hr]>=0) {
+            if(f_regmap[hr]!=regs[i].regmap[hr]) {
+              // dealloc old register
+              int n;
+              for(n=0;n<HOST_REGS;n++)
+              {
+                if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
+              }
+              // and alloc new one
+              f_regmap[hr]=regs[i].regmap[hr];
+            }
+          }
           if(branch_regs[i].regmap[hr]>64) {
             if(!((branch_regs[i].dirty>>hr)&1))
               f_regmap[hr]=branch_regs[i].regmap[hr];
             else f_regmap[hr]=-1;
           }
-          else if(branch_regs[i].regmap[hr]>=0) f_regmap[hr]=branch_regs[i].regmap[hr];
-          if(itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
-          ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
-          ||itype[i+1]==FCOMP||itype[i+1]==FCONV
-          ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
-          {
-            // Test both in case the delay slot is ooo,
-            // could be done better...
-            if(count_free_regs(branch_regs[i].regmap)<2
-             ||count_free_regs(regs[i].regmap)<2) 
+          else if(branch_regs[i].regmap[hr]>=0) {
+            if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
+              // dealloc old register
+              int n;
+              for(n=0;n<HOST_REGS;n++)
+              {
+                if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
+              }
+              // and alloc new one
+              f_regmap[hr]=branch_regs[i].regmap[hr];
+            }
+          }
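+          // ooo[] marks branches whose delay slot was allocated together with the branch; check free regs in the matching regmap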
+          if(ooo[i]) {
+            if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) 
+              f_regmap[hr]=branch_regs[i].regmap[hr];
+          }else{
+            if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) 
               f_regmap[hr]=branch_regs[i].regmap[hr];
           }
           // Avoid dirty->clean transition
-          // #ifdef DESTRUCTIVE_WRITEBACK here?
+          #ifdef DESTRUCTIVE_WRITEBACK
           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
+          #endif
+          // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
+          // case above; however, it's always a good idea.  We can't hoist the
+          // load if the register was already allocated, so there's no point
+          // wasting time analyzing most of these cases.  It only "succeeds"
+          // when the mapping was different and the load can be replaced with
+          // a mov, which is of negligible benefit.  So such cases are
+          // skipped below.
           if(f_regmap[hr]>0) {
-            if(regs[t].regmap_entry[hr]<0) {
+            if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
               int r=f_regmap[hr];
               for(j=t;j<=i;j++)
               {
@@ -9699,6 +10248,7 @@ int new_recompile_block(int addr)
                   // register is lower numbered than the lower-half
                   // register.  Not sure if it's worth fixing...
                   if(get_reg(regs[j].regmap,r&63)<0) break;
+                  if(get_reg(regs[j].regmap_entry,r&63)<0) break;
                   if(regs[j].is32&(1LL<<(r&63))) break;
                 }
                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
@@ -9712,17 +10262,10 @@ int new_recompile_block(int addr)
                     }
                     k=i;
                     while(k>1&&regs[k-1].regmap[hr]==-1) {
-                      if(itype[k-1]==STORE||itype[k-1]==STORELR
-                      ||itype[k-1]==C1LS||itype[k-1]==SHIFT||itype[k-1]==COP1
-                      ||itype[k-1]==FLOAT||itype[k-1]==FCONV||itype[k-1]==FCOMP
-                      ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
-                        if(count_free_regs(regs[k-1].regmap)<2) {
-                          //printf("no free regs for store %x\n",start+(k-1)*4);
-                          break;
-                        }
+                      if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
+                        //printf("no free regs for store %x\n",start+(k-1)*4);
+                        break;
                       }
-                      else
-                      if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
                         //printf("no-match due to different register\n");
                         break;
@@ -9732,7 +10275,7 @@ int new_recompile_block(int addr)
                         break;
                       }
                       // call/ret fast path assumes no registers allocated
-                      if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
+                      if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
                         break;
                       }
                       if(r>63) {
@@ -9799,13 +10342,31 @@ int new_recompile_block(int addr)
                     }
                   }
                   for(k=t;k<j;k++) {
+                    // Alloc register clean at beginning of loop,
+                    // but may dirty it in pass 6
                     regs[k].regmap_entry[hr]=f_regmap[hr];
                     regs[k].regmap[hr]=f_regmap[hr];
-                    regmap_pre[k+1][hr]=f_regmap[hr];
-                    regs[k+1].wasdirty&=~(1<<hr);
                     regs[k].dirty&=~(1<<hr);
                     regs[k].wasconst&=~(1<<hr);
                     regs[k].isconst&=~(1<<hr);
+                    if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
+                      branch_regs[k].regmap_entry[hr]=f_regmap[hr];
+                      branch_regs[k].regmap[hr]=f_regmap[hr];
+                      branch_regs[k].dirty&=~(1<<hr);
+                      branch_regs[k].wasconst&=~(1<<hr);
+                      branch_regs[k].isconst&=~(1<<hr);
+                      if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
+                        regmap_pre[k+2][hr]=f_regmap[hr];
+                        regs[k+2].wasdirty&=~(1<<hr);
+                        assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
+                          (regs[k+2].was32&(1LL<<f_regmap[hr])));
+                      }
+                    }
+                    else
+                    {
+                      regmap_pre[k+1][hr]=f_regmap[hr];
+                      regs[k+1].wasdirty&=~(1<<hr);
+                    }
                   }
                   if(regs[j].regmap[hr]==f_regmap[hr])
                     regs[j].regmap_entry[hr]=f_regmap[hr];
@@ -9822,16 +10383,29 @@ int new_recompile_block(int addr)
                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
                   break;
                 }
-                if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
-                ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
-                ||itype[j]==FCOMP||itype[j]==FCONV
-                ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
-                  if(count_free_regs(regs[j].regmap)<2) {
-                    //printf("No free regs for store %x\n",start+j*4);
+                if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
+                {
+                  // Stop on unconditional branch
+                  break;
+                }
+                if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
+                {
+                  if(ooo[j]) {
+                    if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) 
+                      break;
+                  }else{
+                    if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) 
+                      break;
+                  }
+                  if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
+                    //printf("no-match due to different register (branch)\n");
                     break;
                   }
                 }
-                else if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
+                if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
+                  //printf("No free regs for store %x\n",start+j*4);
+                  break;
+                }
                 if(f_regmap[hr]>=64) {
                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
                     break;
@@ -9849,7 +10423,7 @@ int new_recompile_block(int addr)
         }
       }
     }else{
-      int count=0;
+      // Non-branch or undetermined branch target
       for(hr=0;hr<HOST_REGS;hr++)
       {
         if(hr!=EXCLUDE_REG) {
@@ -9857,25 +10431,28 @@ int new_recompile_block(int addr)
             if(!((regs[i].dirty>>hr)&1))
               f_regmap[hr]=regs[i].regmap[hr];
           }
-          else if(regs[i].regmap[hr]>=0) f_regmap[hr]=regs[i].regmap[hr];
-          else if(regs[i].regmap[hr]<0) count++;
+          else if(regs[i].regmap[hr]>=0) {
+            if(f_regmap[hr]!=regs[i].regmap[hr]) {
+              // dealloc old register
+              int n;
+              for(n=0;n<HOST_REGS;n++)
+              {
+                if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
+              }
+              // and alloc new one
+              f_regmap[hr]=regs[i].regmap[hr];
+            }
+          }
         }
       }
       // Try to restore cycle count at branch targets
       if(bt[i]) {
         for(j=i;j<slen-1;j++) {
           if(regs[j].regmap[HOST_CCREG]!=-1) break;
-          if(itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS
-          ||itype[j]==SHIFT||itype[j]==COP1||itype[j]==FLOAT
-          ||itype[j]==FCOMP||itype[j]==FCONV
-          ||itype[j]==COP2||itype[j]==C2LS||itype[j]==C2OP) {
-            if(count_free_regs(regs[j].regmap)<2) {
-              //printf("no free regs for store %x\n",start+j*4);
-              break;
-            }
+          if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
+            //printf("no free regs for store %x\n",start+j*4);
+            break;
           }
-          else
-          if(itype[j]!=NOP&&itype[j]!=MOV&&itype[j]!=ALU&&itype[j]!=SHIFTIMM&&itype[j]!=IMM16&&itype[j]!=LOAD) break;
         }
         if(regs[j].regmap[HOST_CCREG]==CCREG) {
           int k=i;
@@ -9899,17 +10476,10 @@ int new_recompile_block(int addr)
           int k;
           k=i;
           while(regs[k-1].regmap[HOST_CCREG]==-1) {
-            if(itype[k-1]==STORE||itype[k-1]==STORELR||itype[k-1]==C1LS
-            ||itype[k-1]==SHIFT||itype[k-1]==COP1||itype[k-1]==FLOAT
-            ||itype[k-1]==FCONV||itype[k-1]==FCOMP
-            ||itype[k-1]==COP2||itype[k-1]==C2LS||itype[k-1]==C2OP) {
-              if(count_free_regs(regs[k-1].regmap)<2) {
-                //printf("no free regs for store %x\n",start+(k-1)*4);
-                break;
-              }
+            if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
+              //printf("no free regs for store %x\n",start+(k-1)*4);
+              break;
             }
-            else
-            if(itype[k-1]!=NOP&&itype[k-1]!=MOV&&itype[k-1]!=ALU&&itype[k-1]!=SHIFTIMM&&itype[k-1]!=IMM16&&itype[k-1]!=LOAD) break;
             k--;
           }
           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
@@ -9933,14 +10503,193 @@ int new_recompile_block(int addr)
       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
-         itype[i]!=FCONV&&itype[i]!=FCOMP&&
-         itype[i]!=COP2&&itype[i]!=C2LS&&itype[i]!=C2OP)
+         itype[i]!=FCONV&&itype[i]!=FCOMP)
       {
         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
       }
     }
   }
   
+  // Cache memory offset or tlb map pointer if a register is available
+  #ifndef HOST_IMM_ADDR32
+  #ifndef RAM_OFFSET
+  if(using_tlb)
+  #endif
+  {
+    int earliest_available[HOST_REGS];
+    int loop_start[HOST_REGS];
+    int score[HOST_REGS];
+    int end[HOST_REGS];
+    int reg=using_tlb?MMREG:ROREG;
+
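+    // score how many upcoming memory accesses could share a host register preloaded
+    // with the RAM offset (ROREG) or memory map pointer (MMREG), then allocate the
+    // best-scoring candidate across that whole range if it is used more than once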
+    // Init
+    for(hr=0;hr<HOST_REGS;hr++) {
+      score[hr]=0;earliest_available[hr]=0;
+      loop_start[hr]=MAXBLOCK;
+    }
+    for(i=0;i<slen-1;i++)
+    {
+      // Can't do anything if no registers are available
+      if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          score[hr]=0;earliest_available[hr]=i+1;
+          loop_start[hr]=MAXBLOCK;
+        }
+      }
+      if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
+        if(!ooo[i]) {
+          if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
+            for(hr=0;hr<HOST_REGS;hr++) {
+              score[hr]=0;earliest_available[hr]=i+1;
+              loop_start[hr]=MAXBLOCK;
+            }
+          }
+        }else{
+          if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
+            for(hr=0;hr<HOST_REGS;hr++) {
+              score[hr]=0;earliest_available[hr]=i+1;
+              loop_start[hr]=MAXBLOCK;
+            }
+          }
+        }
+      }
+      // Mark unavailable registers
+      for(hr=0;hr<HOST_REGS;hr++) {
+        if(regs[i].regmap[hr]>=0) {
+          score[hr]=0;earliest_available[hr]=i+1;
+          loop_start[hr]=MAXBLOCK;
+        }
+        if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
+          if(branch_regs[i].regmap[hr]>=0) {
+            score[hr]=0;earliest_available[hr]=i+2;
+            loop_start[hr]=MAXBLOCK;
+          }
+        }
+      }
+      // No register allocations after unconditional jumps
+      if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
+      {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          score[hr]=0;earliest_available[hr]=i+2;
+          loop_start[hr]=MAXBLOCK;
+        }
+        i++; // Skip delay slot too
+        //printf("skip delay slot: %x\n",start+i*4);
+      }
+      else
+      // Possible match
+      if(itype[i]==LOAD||itype[i]==LOADLR||
+         itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
+        for(hr=0;hr<HOST_REGS;hr++) {
+          if(hr!=EXCLUDE_REG) {
+            end[hr]=i-1;
+            for(j=i;j<slen-1;j++) {
+              if(regs[j].regmap[hr]>=0) break;
+              if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
+                if(branch_regs[j].regmap[hr]>=0) break;
+                if(ooo[j]) {
+                  if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
+                }else{
+                  if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
+                }
+              }
+              else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
+              if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
+                int t=(ba[j]-start)>>2;
+                if(t<j&&t>=earliest_available[hr]) {
+                  if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
+                    // Score a point for hoisting loop invariant
+                    if(t<loop_start[hr]) loop_start[hr]=t;
+                    //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
+                    score[hr]++;
+                    end[hr]=j;
+                  }
+                }
+                else if(t<j) {
+                  if(regs[t].regmap[hr]==reg) {
+                    // Score a point if the branch target matches this register
+                    score[hr]++;
+                    end[hr]=j;
+                  }
+                }
+                if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
+                   itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
+                  score[hr]++;
+                  end[hr]=j;
+                }
+              }
+              if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
+              {
+                // Stop on unconditional branch
+                break;
+              }
+              else
+              if(itype[j]==LOAD||itype[j]==LOADLR||
+                 itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
+                score[hr]++;
+                end[hr]=j;
+              }
+            }
+          }
+        }
+        // Find highest score and allocate that register
+        int maxscore=0;
+        for(hr=0;hr<HOST_REGS;hr++) {
+          if(hr!=EXCLUDE_REG) {
+            if(score[hr]>score[maxscore]) {
+              maxscore=hr;
+              //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
+            }
+          }
+        }
+        if(score[maxscore]>1)
+        {
+          if(i<loop_start[maxscore]) loop_start[maxscore]=i;
+          for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
+            //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
+            assert(regs[j].regmap[maxscore]<0);
+            if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
+            regs[j].regmap[maxscore]=reg;
+            regs[j].dirty&=~(1<<maxscore);
+            regs[j].wasconst&=~(1<<maxscore);
+            regs[j].isconst&=~(1<<maxscore);
+            if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
+              branch_regs[j].regmap[maxscore]=reg;
+              branch_regs[j].wasdirty&=~(1<<maxscore);
+              branch_regs[j].dirty&=~(1<<maxscore);
+              branch_regs[j].wasconst&=~(1<<maxscore);
+              branch_regs[j].isconst&=~(1<<maxscore);
+              if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
+                regmap_pre[j+2][maxscore]=reg;
+                regs[j+2].wasdirty&=~(1<<maxscore);
+              }
+              // loop optimization (loop_preload)
+              int t=(ba[j]-start)>>2;
+              if(t==loop_start[maxscore]) {
+                if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
+                  regs[t].regmap_entry[maxscore]=reg;
+              }
+            }
+            else
+            {
+              if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
+                regmap_pre[j+1][maxscore]=reg;
+                regs[j+1].wasdirty&=~(1<<maxscore);
+              }
+            }
+          }
+          i=j-1;
+          if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
+          for(hr=0;hr<HOST_REGS;hr++) {
+            score[hr]=0;earliest_available[hr]=i+1;
+            loop_start[hr]=MAXBLOCK;
+          }
+        }
+      }
+    }
+  }
+  #endif
+  
   // This allocates registers (if possible) one instruction prior
   // to use, which can avoid a load-use penalty on certain CPUs.
   for(i=0;i<slen-1;i++)
@@ -9984,6 +10733,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Preload target address for load instruction (non-constant)
           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
             {
@@ -10000,6 +10750,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Load source into target register 
           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
             {
@@ -10016,6 +10767,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Preload map address
           #ifndef HOST_IMM_ADDR32
           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
             hr=get_reg(regs[i+1].regmap,TLREG);
@@ -10055,6 +10807,7 @@ int new_recompile_block(int addr)
             }
           }
           #endif
+          // Address for store instruction (non-constant)
           if(itype[i+1]==STORE||itype[i+1]==STORELR
              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
@@ -10140,7 +10893,7 @@ int new_recompile_block(int addr)
   clean_registers(0,slen-1,1);
   
   /* Pass 7 - Identify 32-bit registers */
-  
+#ifndef FORCE32
   provisional_r32();
 
   u_int r32=0;
@@ -10210,7 +10963,7 @@ int new_recompile_block(int addr)
         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
       }
     }
-    else if(itype[i]==SYSCALL||itype[i]==HLECALL)
+    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
     {
       // SYSCALL instruction (software interrupt)
       r32=0;
@@ -10254,13 +11007,27 @@ int new_recompile_block(int addr)
     }
     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
   }
+#else
+  for (i=slen-1;i>=0;i--)
+  {
+    if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
+    {
+      // Conditional branch
+      if((source[i]>>16)!=0x1000&&i<slen-2) {
+        // Mark this address as a branch target since it may be called
+        // upon return from interrupt
+        bt[i+2]=1;
+      }
+    }
+  }
+#endif
 
   if(itype[slen-1]==SPAN) {
     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
   }
-  
+
+#ifdef DISASM
   /* Debug/disassembly */
-  if((void*)assem_debug==(void*)printf) 
   for(i=0;i<slen;i++)
   {
     printf("U:");
@@ -10476,6 +11243,7 @@ int new_recompile_block(int addr)
 #endif
     }
   }
+#endif // DISASM
 
   /* Pass 8 - Assembly */
   linkcount=0;stubcount=0;
@@ -10488,15 +11256,33 @@ int new_recompile_block(int addr)
     ds=1;
     pagespan_ds();
   }
+  u_int instr_addr0_override=0;
+
+#ifdef PCSX
+  if (start == 0x80030000) {
+    // nasty hack for fastbios thing
+    // override block entry to this code
+    instr_addr0_override=(u_int)out;
+    emit_movimm(start,0);
+    // abuse io address var as a flag that we
+    // have already returned here once
+    emit_readword((int)&address,1);
+    emit_writeword(0,(int)&pcaddr);
+    emit_writeword(0,(int)&address);
+    emit_cmp(0,1);
+    emit_jne((int)new_dyna_leave);
+  }
+#endif
   for(i=0;i<slen;i++)
   {
     //if(ds) printf("ds: ");
-    if((void*)assem_debug==(void*)printf) disassemble_inst(i);
+    disassemble_inst(i);
     if(ds) {
       ds=0; // Skip delay slot
       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
       instr_addr[i]=0;
     } else {
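+      // speculate MIPS register values so memory accesses can be assembled for the region they are likely to hit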
+      speculate_register_values(i);
       #ifndef DESTRUCTIVE_WRITEBACK
       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
       {
@@ -10505,8 +11291,13 @@ int new_recompile_block(int addr)
         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
               unneeded_reg[i],unneeded_reg_upper[i]);
       }
-      is32_pre=regs[i].is32;
-      dirty_pre=regs[i].dirty;
+      if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
+        is32_pre=branch_regs[i].is32;
+        dirty_pre=branch_regs[i].dirty;
+      }else{
+        is32_pre=regs[i].is32;
+        dirty_pre=regs[i].dirty;
+      }
       #endif
       // write back
       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
@@ -10527,9 +11318,9 @@ int new_recompile_block(int addr)
       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
       {
         // Load the delay slot registers if necessary
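+        // (but don't reload a source the branch itself writes, e.g. $31 for JAL - the branch produces its value)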
-        if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
+        if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
-        if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
+        if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
@@ -10594,6 +11385,8 @@ int new_recompile_block(int addr)
           syscall_assemble(i,&regs[i]);break;
         case HLECALL:
           hlecall_assemble(i,&regs[i]);break;
+        case INTCALL:
+          intcall_assemble(i,&regs[i]);break;
         case UJUMP:
           ujump_assemble(i,&regs[i]);ds=1;break;
         case RJUMP:
@@ -10624,7 +11417,7 @@ int new_recompile_block(int addr)
         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
           emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
+        emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
       }
       else if(!likely[i-2])
       {
@@ -10647,7 +11440,7 @@ int new_recompile_block(int addr)
     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
       emit_loadreg(CCREG,HOST_CCREG);
-    emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
+    emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
     add_to_linker((int)out,start+i*4,0);
     emit_jmp(0);
   }
@@ -10681,6 +11474,9 @@ int new_recompile_block(int addr)
     }
   }
 
+  if (instr_addr0_override)
+    instr_addr[0] = instr_addr0_override;
+
   /* Pass 9 - Linker */
   for(i=0;i<linkcount;i++)
   {
@@ -10723,7 +11519,11 @@ int new_recompile_block(int addr)
         u_int vpage=get_vpage(vaddr);
         literal_pool(256);
         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
+#ifndef FORCE32
         if(!requires_32bit[i])
+#else
+        if(1)
+#endif
         {
           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
           assem_debug("jump_in: %x\n",start+i*4);
@@ -10779,7 +11579,7 @@ int new_recompile_block(int addr)
   
   // If we're within 256K of the end of the buffer,
   // start over from the beginning. (Is 256K enough?)
-  if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
+  if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
   
   // Trap writes to any of the pages we compiled
   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
@@ -10795,14 +11595,23 @@ int new_recompile_block(int addr)
     }
 #endif
   }
+  inv_code_start=inv_code_end=~0;
+#ifdef PCSX
+  // for PCSX we need to mark all mirrors too
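+  // (RAM is 2MB; 0x1ff masks the page index to 2MB, replicated at the three mirror bases)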
+  if(get_page(start)<(RAM_SIZE>>12))
+    for(i=start>>12;i<=(start+slen*4)>>12;i++)
+      invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
+      invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
+      invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
+#endif
   
   /* Pass 10 - Free memory by expiring oldest blocks */
   
-  int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
+  int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
   while(expirep!=end)
   {
     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
-    int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
+    int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
     inv_debug("EXP: Phase %d\n",expirep);
     switch((expirep>>11)&3)
     {
@@ -10838,6 +11647,10 @@ int new_recompile_block(int addr)
         break;
       case 3:
         // Clear jump_out
+        #ifdef __arm__
+        if((expirep&2047)==0) 
+          do_clear_cache();
+        #endif
         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
         break;