drc: add some hack options

Add a runtime cycle_multiplier (replacing the fixed CLOCK_DIVIDER), a
new_dynarec_hacks flag word (NDHACK_NO_SMC_CHECK, NDHACK_GTE_UNNEEDED),
GTE register liveness tracking (gte_rs/gte_rt/gte_unneeded), an
inv_code_start/inv_code_end window so RAM writes that hit no compiled code
are filtered cheaply, and an emit_fastpath_cmp_jump() helper that replaces
the old sp_in_mirror special casing.
pcsx_rearmed.git / libpcsxcore/new_dynarec/new_dynarec.c
index 74b64b7..b2eb21b 100644
 #include <stdlib.h>
 #include <stdint.h> //include for uint64_t
 #include <assert.h>
+#include <sys/mman.h>
 
 #include "emu_if.h" //emulator interface
 
-#include <sys/mman.h>
+//#define DISASM
+//#define assem_debug printf
+//#define inv_debug printf
+#define assem_debug(...)
+#define inv_debug(...)
 
 #ifdef __i386__
 #include "assem_x86.h"
@@ -38,7 +43,9 @@
 
 #define MAXBLOCK 4096
 #define MAX_OUTPUT_BLOCK_SIZE 262144
-#define CLOCK_DIVIDER 2
+
+int cycle_multiplier; // 100 for 1.0
+#define CLOCK_ADJUST(x) (((x) * cycle_multiplier + 50) / 100)
 
 struct regstat
 {
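/* Worked example (illustration only, not part of the patch): CLOCK_ADJUST()
   scales cycle accounting by a runtime percentage, the +50 giving
   round-to-nearest.  The default cycle_multiplier=200 set later in
   new_dynarec_init() reproduces the old CLOCK_DIVIDER=2 behaviour:
     CLOCK_ADJUST(1) == (1*200+50)/100 == 2
     CLOCK_ADJUST(3) == (3*200+50)/100 == 6
   while e.g. cycle_multiplier=150 gives CLOCK_ADJUST(3) == (3*150+50)/100 == 5. */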
@@ -52,6 +59,8 @@ struct regstat
   uint64_t uu;
   u_int wasconst;
   u_int isconst;
+  u_int loadedconst;             // host regs that have constants loaded
+  u_int waswritten;              // MIPS regs that were used as store base before
   uint64_t constmap[HOST_REGS];
 };
 
@@ -80,6 +89,14 @@ struct ll_entry
   u_char dep1[MAXBLOCK];
   u_char dep2[MAXBLOCK];
   u_char lt1[MAXBLOCK];
+  static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
+  static uint64_t gte_rt[MAXBLOCK];
+  static uint64_t gte_unneeded[MAXBLOCK];
+  static u_int smrv[32]; // speculated MIPS register values
+  static u_int smrv_strong; // mask of regs that are likely to have correct values
+  static u_int smrv_weak; // same, but somewhat less likely
+  static u_int smrv_strong_next; // same, but after current insn executes
+  static u_int smrv_weak_next;
   int imm[MAXBLOCK];
   u_int ba[MAXBLOCK];
   char likely[MAXBLOCK];
@@ -126,7 +143,8 @@ struct ll_entry
 #else
   static const u_int using_tlb=0;
 #endif
-  static u_int sp_in_mirror;
+  int new_dynarec_did_compile;
+  int new_dynarec_hacks;
   u_int stop_after_jal;
   extern u_char restore_candidate[512];
   extern int cycle_count;
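/* Sketch (assumption, not part of this patch): how a frontend could drive the
   new new_dynarec_hacks word.  The NDHACK_* bit definitions are expected to
   live in the dynarec header, not in this file, so only their names are used. */
extern int new_dynarec_hacks;
static void apply_speed_hacks(int no_smc_check, int gte_unneeded)
{
  new_dynarec_hacks = 0;
  if (no_smc_check)
    new_dynarec_hacks |= NDHACK_NO_SMC_CHECK;   /* skip SMC checks after stores */
  if (gte_unneeded)
    new_dynarec_hacks |= NDHACK_GTE_UNNEEDED;   /* treat GTE regs as dead at block exits */
}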
@@ -261,12 +279,6 @@ int tracedebug=0;
 
 //#define DEBUG_CYCLE_COUNT 1
 
-void nullf() {}
-//#define assem_debug printf
-//#define inv_debug printf
-#define assem_debug nullf
-#define inv_debug nullf
-
 static void tlb_hacks()
 {
 #ifndef DISABLE_TLB
@@ -371,7 +383,10 @@ void *get_addr(u_int vaddr)
       if(verify_dirty(head->addr)) {
         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
         invalid_code[vaddr>>12]=0;
+        inv_code_start=inv_code_end=~0;
+#ifndef DISABLE_TLB
         memory_map[vaddr>>12]|=0x40000000;
+#endif
         if(vpage<2048) {
 #ifndef DISABLE_TLB
           if(tlb_LUT_r[vaddr>>12]) {
@@ -463,6 +478,7 @@ void *get_addr_32(u_int vaddr,u_int flags)
       if(verify_dirty(head->addr)) {
         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
         invalid_code[vaddr>>12]=0;
+        inv_code_start=inv_code_end=~0;
         memory_map[vaddr>>12]|=0x40000000;
         if(vpage<2048) {
 #ifndef DISABLE_TLB
@@ -842,7 +858,7 @@ void alloc_all(struct regstat *cur,int i)
   }
 }
 
-
+#ifndef FORCE32
 void div64(int64_t dividend,int64_t divisor)
 {
   lo=dividend/divisor;
@@ -953,6 +969,7 @@ uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
   else original=loaded;
   return original;
 }
+#endif
 
 #ifdef __i386__
 #include "assem_x86.c"
@@ -1127,39 +1144,10 @@ void invalidate_page(u_int page)
     head=next;
   }
 }
-void invalidate_block(u_int block)
+
+static void invalidate_block_range(u_int block, u_int first, u_int last)
 {
   u_int page=get_page(block<<12);
-  u_int vpage=get_vpage(block<<12);
-  inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
-  //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
-  u_int first,last;
-  first=last=page;
-  struct ll_entry *head;
-  head=jump_dirty[vpage];
-  //printf("page=%d vpage=%d\n",page,vpage);
-  while(head!=NULL) {
-    u_int start,end;
-    if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
-      get_bounds((int)head->addr,&start,&end);
-      //printf("start: %x end: %x\n",start,end);
-      if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
-        if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
-          if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
-          if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
-        }
-      }
-#ifndef DISABLE_TLB
-      if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
-        if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
-          if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
-          if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
-        }
-      }
-#endif
-    }
-    head=head->next;
-  }
   //printf("first=%d last=%d\n",first,last);
   invalidate_page(page);
   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
@@ -1178,9 +1166,6 @@ void invalidate_block(u_int block)
   
   // Don't trap writes
   invalid_code[block]=1;
-#ifdef PCSX
-  invalid_code[((u_int)0x80000000>>12)|page]=1;
-#endif
 #ifndef DISABLE_TLB
   // If there is a valid TLB entry for this page, remove write protect
   if(tlb_LUT_w[block]) {
@@ -1198,10 +1183,98 @@ void invalidate_block(u_int block)
   memset(mini_ht,-1,sizeof(mini_ht));
   #endif
 }
+
+void invalidate_block(u_int block)
+{
+  u_int page=get_page(block<<12);
+  u_int vpage=get_vpage(block<<12);
+  inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
+  //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
+  u_int first,last;
+  first=last=page;
+  struct ll_entry *head;
+  head=jump_dirty[vpage];
+  //printf("page=%d vpage=%d\n",page,vpage);
+  while(head!=NULL) {
+    u_int start,end;
+    if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
+      get_bounds((int)head->addr,&start,&end);
+      //printf("start: %x end: %x\n",start,end);
+      if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
+        if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
+          if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
+          if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
+        }
+      }
+#ifndef DISABLE_TLB
+      if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
+        if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
+          if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
+          if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
+        }
+      }
+#endif
+    }
+    head=head->next;
+  }
+  invalidate_block_range(block,first,last);
+}
+
 void invalidate_addr(u_int addr)
 {
+#ifdef PCSX
+  //static int rhits;
+  // this check is done by the caller
+  //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
+  u_int page=get_page(addr);
+  if(page<2048) { // RAM
+    struct ll_entry *head;
+    u_int addr_min=~0, addr_max=0;
+    int mask=RAM_SIZE-1;
+    int pg1;
+    inv_code_start=addr&~0xfff;
+    inv_code_end=addr|0xfff;
+    pg1=page;
+    if (pg1>0) {
+      // must check previous page too because of spans..
+      pg1--;
+      inv_code_start-=0x1000;
+    }
+    for(;pg1<=page;pg1++) {
+      for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
+        u_int start,end;
+        get_bounds((int)head->addr,&start,&end);
+        if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
+          if(start<addr_min) addr_min=start;
+          if(end>addr_max) addr_max=end;
+        }
+        else if(addr<start) {
+          if(start<inv_code_end)
+            inv_code_end=start-1;
+        }
+        else {
+          if(end>inv_code_start)
+            inv_code_start=end;
+        }
+      }
+    }
+    if (addr_min!=~0) {
+      inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
+      inv_code_start=inv_code_end=~0;
+      invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
+      return;
+    }
+    else {
+      inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);//rhits);
+    }
+    //rhits=0;
+    if(page!=0) // FIXME: don't know what's up with page 0 (Klonoa)
+      return;
+  }
+#endif
   invalidate_block(addr>>12);
 }
+
 // This is called when loading a save state.
 // Anything could have changed, so invalidate everything.
 void invalidate_all_pages()
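/* Context sketch (assumption, based on the commented-out check above): the
   inv_code_start/inv_code_end window lets the memory-write path filter out
   addresses already known to contain no compiled code before it ever calls
   invalidate_addr().  Roughly:                                               */
extern u_int inv_code_start, inv_code_end;
static void notify_ram_write(u_int addr)    /* hypothetical caller-side helper */
{
  if (inv_code_start <= addr && addr <= inv_code_end)
    return;                   /* inside the known code-free window: cheap exit */
  invalidate_addr(addr);      /* otherwise do the full dirty-block lookup      */
}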
@@ -1240,6 +1313,8 @@ void add_link(u_int vaddr,void *src)
 {
   u_int page=get_page(vaddr);
   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
+  int *ptr=(int *)(src+4);
+  assert((*ptr&0x0fff0000)==0x059f0000);
   ll_add(jump_out+page,vaddr,src);
   //int ptr=get_pointer(src);
   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
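/* Note on the new assert in add_link() (a reading of the encoding, not stated
   in the patch): masking the word after 'src' with 0x0fff0000 and comparing
   against 0x059f0000 checks for an ARM pc-relative literal load,
   "ldr rD,[pc,#imm]", with the condition and destination-register fields
   ignored -- i.e. it verifies the link site has the expected instruction
   shape before it is recorded in jump_out.                                  */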
@@ -1269,11 +1344,13 @@ void clean_blocks(u_int page)
               inv|=invalid_code[i];
             }
           }
+#ifndef DISABLE_TLB
           if((signed int)head->vaddr>=(signed int)0xC0000000) {
             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
             //printf("addr=%x start=%x end=%x\n",addr,start,end);
             if(addr<start||addr>=end) inv=1;
           }
+#endif
           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
             inv=1;
           }
@@ -1326,8 +1403,6 @@ void mov_alloc(struct regstat *current,int i)
 
 void shiftimm_alloc(struct regstat *current,int i)
 {
-  clear_const(current,rs1[i]);
-  clear_const(current,rt1[i]);
   if(opcode2[i]<=0x3) // SLL/SRL/SRA
   {
     if(rt1[i]) {
@@ -1336,8 +1411,21 @@ void shiftimm_alloc(struct regstat *current,int i)
       alloc_reg(current,i,rt1[i]);
       current->is32|=1LL<<rt1[i];
       dirty_reg(current,rt1[i]);
+      if(is_const(current,rs1[i])) {
+        int v=get_const(current,rs1[i]);
+        if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
+        if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
+        if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
+      }
+      else clear_const(current,rt1[i]);
     }
   }
+  else
+  {
+    clear_const(current,rs1[i]);
+    clear_const(current,rt1[i]);
+  }
+
   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
   {
     if(rt1[i]) {
@@ -2623,7 +2711,7 @@ void shiftimm_assemble(int i,struct regstat *i_regs)
       t=get_reg(i_regs->regmap,rt1[i]);
       s=get_reg(i_regs->regmap,rs1[i]);
       //assert(t>=0);
-      if(t>=0){
+      if(t>=0&&!((i_regs->isconst>>t)&1)){
         if(rs1[i]==0)
         {
           emit_zeroreg(t);
@@ -2766,6 +2854,7 @@ void load_assemble(int i,struct regstat *i_regs)
   int offset;
   int jaddr=0;
   int memtarget=0,c=0;
+  int fastload_reg_override=0;
   u_int hr,reglist=0;
   th=get_reg(i_regs->regmap,rt1[i]|64);
   tl=get_reg(i_regs->regmap,rt1[i]);
@@ -2816,22 +2905,7 @@ void load_assemble(int i,struct regstat *i_regs)
       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
       #endif
       {
-        #ifdef PCSX
-        if(sp_in_mirror&&rs1[i]==29) {
-          emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
-          emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
-        }
-        else
-        #endif
-        emit_cmpimm(addr,RAM_SIZE);
-        jaddr=(int)out;
-        #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
-        // Hint to branch predictor that the branch is unlikely to be taken
-        if(rs1[i]>=28)
-          emit_jno_unlikely(0);
-        else
-        #endif
-        emit_jno(0);
+        jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
       }
     }
   }else{ // using tlb
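/* Sketch (assumption): emit_fastpath_cmp_jump() is not defined in this file --
   it would live in the per-arch assembler -- but from the code it replaces,
   its contract looks roughly like this: compare the address against RAM_SIZE,
   emit the slow-path branch and return its location for later patching, and
   optionally hand back a masked temporary register for the fast path to use. */
static int emit_fastpath_cmp_jump(int i, int addr, int *addr_reg_override)
{
  int jaddr;
  emit_cmpimm(addr, RAM_SIZE);   /* is the effective address inside RAM?         */
  jaddr = (int)out;              /* remember the branch so the stub can patch it */
  emit_jno(0);                   /* taken when the address falls outside RAM     */
  /* if a mirror-masking trick is applied (as the old sp_in_mirror code did),
     the masked register would be returned through *addr_reg_override           */
  return jaddr;
}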
@@ -2864,9 +2938,8 @@ void load_assemble(int i,struct regstat *i_regs)
 #else
           if(!c) a=addr;
 #endif
-#ifdef PCSX
-          if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+          if(fastload_reg_override) a=fastload_reg_override;
+
           emit_movsbl_indexed_tlb(x,a,map,tl);
         }
       }
@@ -2892,9 +2965,7 @@ void load_assemble(int i,struct regstat *i_regs)
 #else
           if(!c) a=addr;
 #endif
-#ifdef PCSX
-          if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+          if(fastload_reg_override) a=fastload_reg_override;
           //#ifdef
           //emit_movswl_indexed_tlb(x,tl,map,tl);
           //else
@@ -2920,9 +2991,7 @@ void load_assemble(int i,struct regstat *i_regs)
     if(!c||memtarget) {
       if(!dummy) {
         int a=addr;
-#ifdef PCSX
-        if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+        if(fastload_reg_override) a=fastload_reg_override;
         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
         #ifdef HOST_IMM_ADDR32
         if(c)
@@ -2956,9 +3025,8 @@ void load_assemble(int i,struct regstat *i_regs)
 #else
           if(!c) a=addr;
 #endif
-#ifdef PCSX
-          if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+          if(fastload_reg_override) a=fastload_reg_override;
+
           emit_movzbl_indexed_tlb(x,a,map,tl);
         }
       }
@@ -2984,9 +3052,7 @@ void load_assemble(int i,struct regstat *i_regs)
 #else
           if(!c) a=addr;
 #endif
-#ifdef PCSX
-          if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+          if(fastload_reg_override) a=fastload_reg_override;
           //#ifdef
           //emit_movzwl_indexed_tlb(x,tl,map,tl);
           //#else
@@ -3013,9 +3079,7 @@ void load_assemble(int i,struct regstat *i_regs)
     if(!c||memtarget) {
       if(!dummy) {
         int a=addr;
-#ifdef PCSX
-        if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+        if(fastload_reg_override) a=fastload_reg_override;
         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
         #ifdef HOST_IMM_ADDR32
         if(c)
@@ -3036,9 +3100,7 @@ void load_assemble(int i,struct regstat *i_regs)
     if(!c||memtarget) {
       if(!dummy) {
         int a=addr;
-#ifdef PCSX
-        if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+        if(fastload_reg_override) a=fastload_reg_override;
         //gen_tlb_addr_r(tl,map);
         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
@@ -3103,6 +3165,7 @@ void store_assemble(int i,struct regstat *i_regs)
   int jaddr=0,jaddr2,type;
   int memtarget=0,c=0;
   int agr=AGEN1+(i&1);
+  int faststore_reg_override=0;
   u_int hr,reglist=0;
   th=get_reg(i_regs->regmap,rs2[i]|64);
   tl=get_reg(i_regs->regmap,rs2[i]);
@@ -3127,13 +3190,7 @@ void store_assemble(int i,struct regstat *i_regs)
   else addr=s;
   if(!using_tlb) {
     if(!c) {
-      #ifdef PCSX
-      if(sp_in_mirror&&rs1[i]==29) {
-        emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
-        emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
-      }
-      else
-      #endif
+      #ifndef PCSX
       #ifdef R29_HACK
       // Strmnnrmn's speed hack
       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
@@ -3156,6 +3213,9 @@ void store_assemble(int i,struct regstat *i_regs)
         #endif
         emit_jno(0);
       }
+      #else
+        jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
+      #endif
     }
   }else{ // using tlb
     int x=0;
@@ -3177,9 +3237,7 @@ void store_assemble(int i,struct regstat *i_regs)
 #else
       if(!c) a=addr;
 #endif
-#ifdef PCSX
-      if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+      if(faststore_reg_override) a=faststore_reg_override;
       //gen_tlb_addr_w(temp,map);
       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
       emit_writebyte_indexed_tlb(tl,x,a,map,a);
@@ -3195,9 +3253,7 @@ void store_assemble(int i,struct regstat *i_regs)
 #else
       if(!c) a=addr;
 #endif
-#ifdef PCSX
-      if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+      if(faststore_reg_override) a=faststore_reg_override;
       //#ifdef
       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
       //#else
@@ -3212,9 +3268,7 @@ void store_assemble(int i,struct regstat *i_regs)
   if (opcode[i]==0x2B) { // SW
     if(!c||memtarget) {
       int a=addr;
-#ifdef PCSX
-      if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+      if(faststore_reg_override) a=faststore_reg_override;
       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
       emit_writeword_indexed_tlb(tl,0,a,map,temp);
     }
@@ -3223,9 +3277,7 @@ void store_assemble(int i,struct regstat *i_regs)
   if (opcode[i]==0x3F) { // SD
     if(!c||memtarget) {
       int a=addr;
-#ifdef PCSX
-      if(sp_in_mirror&&rs1[i]==29) a=HOST_TEMPREG;
-#endif
+      if(faststore_reg_override) a=faststore_reg_override;
       if(rs2[i]) {
         assert(th>=0);
         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
@@ -3240,7 +3292,15 @@ void store_assemble(int i,struct regstat *i_regs)
     }
     type=STORED_STUB;
   }
-  if(!using_tlb) {
+#ifdef PCSX
+  if(jaddr) {
+    // PCSX store handlers don't check invcode again
+    reglist|=1<<addr;
+    add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
+    jaddr=0;
+  }
+#endif
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
     if(!c||memtarget) {
       #ifdef DESTRUCTIVE_SHIFT
       // The x86 shift operation is 'destructive'; it overwrites the
@@ -3274,8 +3334,12 @@ void store_assemble(int i,struct regstat *i_regs)
   //if(opcode[i]==0x2B)
   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
   {
-    //emit_pusha();
+    #ifdef __i386__
+    emit_pusha();
+    #endif
+    #ifdef __arm__
     save_regs(0x100f);
+    #endif
         emit_readword((int)&last_count,ECX);
         #ifdef __i386__
         if(get_reg(i_regs->regmap,CCREG)<0)
@@ -3294,8 +3358,12 @@ void store_assemble(int i,struct regstat *i_regs)
         emit_writeword(0,(int)&Count);
         #endif
     emit_call((int)memdebug);
-    //emit_popa();
+    #ifdef __i386__
+    emit_popa();
+    #endif
+    #ifdef __arm__
     restore_regs(0x100f);
+    #endif
   }/**/
 }
 
@@ -3506,7 +3574,7 @@ void storelr_assemble(int i,struct regstat *i_regs)
   }
   if(!c||!memtarget)
     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
-  if(!using_tlb) {
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
     #ifdef RAM_OFFSET
     int map=get_reg(i_regs->regmap,ROREG);
     if(map<0) map=HOST_TEMPREG;
@@ -3684,7 +3752,7 @@ void c1ls_assemble(int i,struct regstat *i_regs)
     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
     type=STORED_STUB;
   }
-  if(!using_tlb) {
+  if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
       #ifndef DESTRUCTIVE_SHIFT
       temp=offset||c||s<0?ar:s;
@@ -3739,6 +3807,7 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   int memtarget=0,c=0;
   int jaddr2=0,jaddr3,type;
   int agr=AGEN1+(i&1);
+  int fastio_reg_override=0;
   u_int hr,reglist=0;
   u_int copr=(source[i]>>16)&0x1f;
   s=get_reg(i_regs->regmap,rs1[i]);
@@ -3780,27 +3849,30 @@ void c2ls_assemble(int i,struct regstat *i_regs)
   }
   else {
     if(!c) {
-      emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
-      jaddr2=(int)out;
-      emit_jno(0);
+      jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
     }
     if (opcode[i]==0x32) { // LWC2
       #ifdef HOST_IMM_ADDR32
       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
       else
       #endif
-      emit_readword_indexed(0,ar,tl);
+      int a=ar;
+      if(fastio_reg_override) a=fastio_reg_override;
+      emit_readword_indexed(0,a,tl);
     }
     if (opcode[i]==0x3a) { // SWC2
       #ifdef DESTRUCTIVE_SHIFT
       if(!offset&&!c&&s>=0) emit_mov(s,ar);
       #endif
-      emit_writeword_indexed(tl,0,ar);
+      int a=ar;
+      if(fastio_reg_override) a=fastio_reg_override;
+      emit_writeword_indexed(tl,0,a);
     }
   }
   if(jaddr2)
     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
-  if (opcode[i]==0x3a) { // SWC2
+  if(opcode[i]==0x3a) // SWC2
+  if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
 #if defined(HOST_IMM8)
     int ir=get_reg(i_regs->regmap,INVCP);
     assert(ir>=0);
@@ -3873,7 +3945,7 @@ void syscall_assemble(int i,struct regstat *i_regs)
   assert(ccreg==HOST_CCREG);
   assert(!is_delayslot);
   emit_movimm(start+i*4,EAX); // Get PC
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
   emit_jmp((int)jump_syscall_hle); // XXX
 }
 
@@ -3884,7 +3956,7 @@ void hlecall_assemble(int i,struct regstat *i_regs)
   assert(!is_delayslot);
   emit_movimm(start+i*4+4,0); // Get PC
   emit_movimm((int)psxHLEt[source[i]&7],1);
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
   emit_jmp((int)jump_hlecall);
 }
 
@@ -3894,12 +3966,13 @@ void intcall_assemble(int i,struct regstat *i_regs)
   assert(ccreg==HOST_CCREG);
   assert(!is_delayslot);
   emit_movimm(start+i*4,0); // Get PC
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
   emit_jmp((int)jump_intcall);
 }
 
 void ds_assemble(int i,struct regstat *i_regs)
 {
+  speculate_register_values(i);
   is_delayslot=1;
   switch(itype[i]) {
     case ALU:
@@ -4159,6 +4232,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
         //  printf("poor load scheduling!\n");
       }
       else if(c) {
+#ifndef DISABLE_TLB
         if(rm>=0) {
           if(!entry||entry[rm]!=mgr) {
             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
@@ -4173,6 +4247,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
             }
           }
         }
+#endif
         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
           if(!entry||entry[ra]!=agr) {
             if (opcode[i]==0x22||opcode[i]==0x26) {
@@ -4185,6 +4260,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
               #endif
               emit_movimm(constmap[i][rs]+offset,ra);
+              regs[i].loadedconst|=1<<ra;
             }
           } // else did it in the previous cycle
         } // else load_consts already did it
@@ -4201,7 +4277,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
   // Preload constants for next instruction
   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
     int agr,ra;
-    #ifndef HOST_IMM_ADDR32
+    #if !defined(HOST_IMM_ADDR32) && !defined(DISABLE_TLB)
     // Mapper entry
     agr=MGEN1+((i+1)&1);
     ra=get_reg(i_regs->regmap,agr);
@@ -4245,6 +4321,7 @@ void address_generation(int i,struct regstat *i_regs,signed char entry[])
              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
           #endif
           emit_movimm(constmap[i+1][rs]+offset,ra);
+          regs[i+1].loadedconst|=1<<ra;
         }
       }
       else if(rs1[i+1]==0) {
@@ -4313,22 +4390,51 @@ int get_final_value(int hr, int i, int *value)
 // Load registers with known constants
 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
 {
-  int hr;
+  int hr,hr2;
+  // propagate loaded constant flags
+  if(i==0||bt[i])
+    regs[i].loadedconst=0;
+  else {
+    for(hr=0;hr<HOST_REGS;hr++) {
+      if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
+         &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
+      {
+        regs[i].loadedconst|=1<<hr;
+      }
+    }
+  }
   // Load 32-bit regs
   for(hr=0;hr<HOST_REGS;hr++) {
     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
       //if(entry[hr]!=regmap[hr]) {
-      if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
+      if(!((regs[i].loadedconst>>hr)&1)) {
         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
-          int value;
+          int value,similar=0;
           if(get_final_value(hr,i,&value)) {
-            if(value==0) {
+            // see if some other register has similar value
+            for(hr2=0;hr2<HOST_REGS;hr2++) {
+              if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
+                if(is_similar_value(value,constmap[i][hr2])) {
+                  similar=1;
+                  break;
+                }
+              }
+            }
+            if(similar) {
+              int value2;
+              if(get_final_value(hr2,i,&value2)) // is this needed?
+                emit_movimm_from(value2,hr2,value,hr);
+              else
+                emit_movimm(value,hr);
+            }
+            else if(value==0) {
               emit_zeroreg(hr);
             }
             else {
               emit_movimm(value,hr);
             }
           }
+          regs[i].loadedconst|=1<<hr;
         }
       }
     }
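/* Sketch (assumption): is_similar_value() and emit_movimm_from() are provided
   by the per-arch assembler and are not part of this hunk.  The intent of the
   new path is that a constant close to one already sitting in another host
   register can be derived with one instruction instead of a fresh immediate
   load.  One plausible notion of "similar", for illustration only:           */
static int is_similar_value_sketch(u_int v1, u_int v2)
{
  int diff = v1 - v2;
  if (diff > -4096 && diff < 4096)
    return 1;                        /* reachable with one add/sub immediate */
  return 0;
}
/* emit_movimm_from(v2,hr2,value,hr) would then emit something like
   "add hr, hr2, #(value-v2)" rather than a full immediate sequence.          */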
@@ -4515,8 +4621,8 @@ void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
 void load_regs_entry(int t)
 {
   int hr;
-  if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
-  else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
+  if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
+  else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
     emit_storereg(CCREG,HOST_CCREG);
   }
@@ -4853,13 +4959,13 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
     emit_jmp(0);
   }
   else if(*adj==0||invert) {
-    emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
+    emit_addimm_and_set_flags(CLOCK_ADJUST(count+2),HOST_CCREG);
     jaddr=(int)out;
     emit_jns(0);
   }
   else
   {
-    emit_cmpimm(HOST_CCREG,-CLOCK_DIVIDER*(count+2));
+    emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
     jaddr=(int)out;
     emit_jns(0);
   }
@@ -5083,9 +5189,9 @@ void do_ccstub(int n)
   }
   // Update cycle count
   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
-  if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
+  if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
   emit_call((int)cc_interrupt);
-  if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
+  if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
   if(stubs[n][6]==TAKEN) {
     if(internal_branch(branch_regs[i].is32,ba[i]))
       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
@@ -5144,9 +5250,45 @@ add_to_linker(int addr,int target,int ext)
   linkcount++;
 }
 
+static void ujump_assemble_write_ra(int i)
+{
+  int rt;
+  unsigned int return_address;
+  rt=get_reg(branch_regs[i].regmap,31);
+  assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+  //assert(rt>=0);
+  return_address=start+i*4+8;
+  if(rt>=0) {
+    #ifdef USE_MINI_HT
+    if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
+      int temp=-1; // note: must be ds-safe
+      #ifdef HOST_TEMPREG
+      temp=HOST_TEMPREG;
+      #endif
+      if(temp>=0) do_miniht_insert(return_address,rt,temp);
+      else emit_movimm(return_address,rt);
+    }
+    else
+    #endif
+    {
+      #ifdef REG_PREFETCH
+      if(temp>=0) 
+      {
+        if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
+      }
+      #endif
+      emit_movimm(return_address,rt); // PC into link register
+      #ifdef IMM_PREFETCH
+      emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
+      #endif
+    }
+  }
+}
+
 void ujump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
+  int ra_done=0;
   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
   address_generation(i+1,i_regs,regs[i].regmap_entry);
   #ifdef REG_PREFETCH
@@ -5158,38 +5300,9 @@ void ujump_assemble(int i,struct regstat *i_regs)
     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
   }
   #endif
-  if(rt1[i]==31) {
-    int rt;
-    unsigned int return_address;
-    rt=get_reg(branch_regs[i].regmap,31);
-    assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-    //assert(rt>=0);
-    return_address=start+i*4+8;
-    if(rt>=0) {
-      #ifdef USE_MINI_HT
-      if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
-        int temp=-1; // note: must be ds-safe
-        #ifdef HOST_TEMPREG
-        temp=HOST_TEMPREG;
-        #endif
-        if(temp>=0) do_miniht_insert(return_address,rt,temp);
-        else emit_movimm(return_address,rt);
-      }
-      else
-      #endif
-      {
-        #ifdef REG_PREFETCH
-        if(temp>=0) 
-        {
-          if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
-        }
-        #endif
-        emit_movimm(return_address,rt); // PC into link register
-        #ifdef IMM_PREFETCH
-        emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
-        #endif
-      }
-    }
+  if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
+    ujump_assemble_write_ra(i); // writeback ra for DS
+    ra_done=1;
   }
   ds_assemble(i+1,i_regs);
   uint64_t bc_unneeded=branch_regs[i].u;
@@ -5199,6 +5312,8 @@ void ujump_assemble(int i,struct regstat *i_regs)
   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
                 bc_unneeded,bc_unneeded_upper);
   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
+  if(!ra_done&&rt1[i]==31)
+    ujump_assemble_write_ra(i);
   int cc,adj;
   cc=get_reg(branch_regs[i].regmap,CCREG);
   assert(cc==HOST_CCREG);
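/* Example (illustration) of the case the new ra_done path covers: when the
   delay slot reads r31, the return address must be written before the delay
   slot is assembled, e.g.
       jal   func
       sw    r31, 0(r29)   <- DS reads r31, so ujump_assemble_write_ra() runs first
   otherwise r31 is written after ds_assemble(), as before.                   */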
@@ -5207,7 +5322,7 @@ void ujump_assemble(int i,struct regstat *i_regs)
   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
   #endif
   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
-  if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+  if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
   if(internal_branch(branch_regs[i].is32,ba[i]))
     assem_debug("branch: internal\n");
@@ -5222,11 +5337,33 @@ void ujump_assemble(int i,struct regstat *i_regs)
   }
 }
 
+static void rjump_assemble_write_ra(int i)
+{
+  int rt,return_address;
+  assert(rt1[i+1]!=rt1[i]);
+  assert(rt2[i+1]!=rt1[i]);
+  rt=get_reg(branch_regs[i].regmap,rt1[i]);
+  assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
+  assert(rt>=0);
+  return_address=start+i*4+8;
+  #ifdef REG_PREFETCH
+  if(temp>=0) 
+  {
+    if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
+  }
+  #endif
+  emit_movimm(return_address,rt); // PC into link register
+  #ifdef IMM_PREFETCH
+  emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
+  #endif
+}
+
 void rjump_assemble(int i,struct regstat *i_regs)
 {
   signed char *i_regmap=i_regs->regmap;
   int temp;
   int rs,cc,adj;
+  int ra_done=0;
   rs=get_reg(branch_regs[i].regmap,rs1[i]);
   assert(rs>=0);
   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
@@ -5253,6 +5390,10 @@ void rjump_assemble(int i,struct regstat *i_regs)
     if(rh>=0) do_preload_rhash(rh);
   }
   #endif
+  if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
+    rjump_assemble_write_ra(i);
+    ra_done=1;
+  }
   ds_assemble(i+1,i_regs);
   uint64_t bc_unneeded=branch_regs[i].u;
   uint64_t bc_unneeded_upper=branch_regs[i].uu;
@@ -5262,25 +5403,8 @@ void rjump_assemble(int i,struct regstat *i_regs)
   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
                 bc_unneeded,bc_unneeded_upper);
   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
-  if(rt1[i]!=0) {
-    int rt,return_address;
-    assert(rt1[i+1]!=rt1[i]);
-    assert(rt2[i+1]!=rt1[i]);
-    rt=get_reg(branch_regs[i].regmap,rt1[i]);
-    assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
-    assert(rt>=0);
-    return_address=start+i*4+8;
-    #ifdef REG_PREFETCH
-    if(temp>=0) 
-    {
-      if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
-    }
-    #endif
-    emit_movimm(return_address,rt); // PC into link register
-    #ifdef IMM_PREFETCH
-    emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
-    #endif
-  }
+  if(!ra_done&&rt1[i]!=0)
+    rjump_assemble_write_ra(i);
   cc=get_reg(branch_regs[i].regmap,CCREG);
   assert(cc==HOST_CCREG);
   #ifdef USE_MINI_HT
@@ -5311,8 +5435,14 @@ void rjump_assemble(int i,struct regstat *i_regs)
   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
   //assert(adj==0);
-  emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+  emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
+#ifdef PCSX
+  if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
+    // special case for RFE
+    emit_jmp(0);
+  else
+#endif
   emit_jns(0);
   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
   #ifdef USE_MINI_HT
@@ -5442,7 +5572,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
-        if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+        if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
@@ -5461,7 +5591,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       }
     }
     else if(nop) {
-      emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+      emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       int jaddr=(int)out;
       emit_jns(0);
       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5469,7 +5599,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
     else {
       int taken=0,nottaken=0,nottaken1=0;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(!only32)
       {
         assert(s1h>=0);
@@ -5561,7 +5691,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
           if(adj) {
-            emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+            emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
             add_to_linker((int)out,ba[i],internal);
           }else{
             emit_addnop(13);
@@ -5571,7 +5701,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
         }else
         #endif
         {
-          if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+          if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           if(internal)
@@ -5591,7 +5721,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
 
       if(nottaken1) set_jump_target(nottaken1,(int)out);
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -5695,7 +5825,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
-      if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
@@ -5727,7 +5857,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5736,7 +5866,7 @@ void cjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -5827,7 +5957,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
     if(unconditional) {
       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
-        if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+        if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
         if(internal)
           assem_debug("branch: internal\n");
@@ -5846,7 +5976,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       }
     }
     else if(nevertaken) {
-      emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+      emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       int jaddr=(int)out;
       emit_jns(0);
       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -5854,7 +5984,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
     else {
       int nottaken=0;
       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(!only32)
       {
         assert(s1h>=0);
@@ -5912,7 +6042,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
           if(adj) {
-            emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+            emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
             add_to_linker((int)out,ba[i],internal);
           }else{
             emit_addnop(13);
@@ -5922,7 +6052,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
         }else
         #endif
         {
-          if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+          if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
           if(internal)
@@ -5941,7 +6071,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       }
 
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -6024,7 +6154,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
       assem_debug("cycle count (adj)\n");
-      if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
       if(internal)
         assem_debug("branch: internal\n");
@@ -6055,7 +6185,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -6064,7 +6194,7 @@ void sjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -6130,7 +6260,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
     assem_debug("cycle count (adj)\n");
     if(1) {
       int nottaken=0;
-      if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+      if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
       if(1) {
         assert(fs>=0);
         emit_testimm(fs,0x800000);
@@ -6157,7 +6287,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       } // if(!only32)
           
       if(invert) {
-        if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
+        if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
         else if(match) emit_addnop(13);
         #endif
@@ -6178,7 +6308,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       }
 
       if(adj) {
-        if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
+        if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
       }
     } // (!unconditional)
   } // if(ooo)
@@ -6230,7 +6360,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
     assem_debug("cycle count (adj)\n");
-    if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
+    if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
     if(internal)
       assem_debug("branch: internal\n");
@@ -6260,7 +6390,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       if(cc==-1&&!likely[i]) {
         // Cycle count isn't in a register, temporarily load it then write it out
         emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
@@ -6269,7 +6399,7 @@ void fjump_assemble(int i,struct regstat *i_regs)
       else{
         cc=get_reg(i_regmap,CCREG);
         assert(cc==HOST_CCREG);
-        emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
+        emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         int jaddr=(int)out;
         emit_jns(0);
         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
@@ -6342,7 +6472,7 @@ static void pagespan_assemble(int i,struct regstat *i_regs)
   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
   }
-  emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
+  emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
   if(opcode[i]==2) // J
   {
     unconditional=1;
@@ -6640,16 +6770,22 @@ static void pagespan_ds()
 void unneeded_registers(int istart,int iend,int r)
 {
   int i;
-  uint64_t u,uu,b,bu;
-  uint64_t temp_u,temp_uu;
+  uint64_t u,uu,gte_u,b,bu,gte_bu;
+  uint64_t temp_u,temp_uu,temp_gte_u=0;
   uint64_t tdep;
+  uint64_t gte_u_unknown=0;
+  if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
+    gte_u_unknown=~0ll;
   if(iend==slen-1) {
     u=1;uu=1;
+    gte_u=gte_u_unknown;
   }else{
     u=unneeded_reg[iend+1];
     uu=unneeded_reg_upper[iend+1];
     u=1;uu=1;
+    gte_u=gte_unneeded[iend+1];
   }
+
   for (i=iend;i>=istart;i--)
   {
     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
@@ -6663,6 +6799,7 @@ void unneeded_registers(int istart,int iend,int r)
         // Branch out of this block, flush all regs
         u=1;
         uu=1;
+        gte_u=gte_u_unknown;
         /* Hexagon hack 
         if(itype[i]==UJUMP&&rt1[i]==31)
         {
@@ -6694,17 +6831,21 @@ void unneeded_registers(int istart,int iend,int r)
         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
         u|=1;uu|=1;
+        gte_u|=gte_rt[i+1];
+        gte_u&=~gte_rs[i+1];
         // If branch is "likely" (and conditional)
         // then we skip the delay slot on the fall-thru path
         if(likely[i]) {
           if(i<slen-1) {
             u&=unneeded_reg[i+2];
             uu&=unneeded_reg_upper[i+2];
+            gte_u&=gte_unneeded[i+2];
           }
           else
           {
             u=1;
             uu=1;
+            gte_u=gte_u_unknown;
           }
         }
       }
@@ -6718,10 +6859,12 @@ void unneeded_registers(int istart,int iend,int r)
           {
             // Unconditional branch
             temp_u=1;temp_uu=1;
+            temp_gte_u=0;
           } else {
             // Conditional branch (not taken case)
             temp_u=unneeded_reg[i+2];
             temp_uu=unneeded_reg_upper[i+2];
+            temp_gte_u&=gte_unneeded[i+2];
           }
           // Merge in delay slot
           tdep=(~temp_uu>>rt1[i+1])&1;
@@ -6731,17 +6874,21 @@ void unneeded_registers(int istart,int iend,int r)
           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
           temp_u|=1;temp_uu|=1;
+          temp_gte_u|=gte_rt[i+1];
+          temp_gte_u&=~gte_rs[i+1];
           // If branch is "likely" (and conditional)
           // then we skip the delay slot on the fall-thru path
           if(likely[i]) {
             if(i<slen-1) {
               temp_u&=unneeded_reg[i+2];
               temp_uu&=unneeded_reg_upper[i+2];
+              temp_gte_u&=gte_unneeded[i+2];
             }
             else
             {
               temp_u=1;
               temp_uu=1;
+              temp_gte_u=gte_u_unknown;
             }
           }
           tdep=(~temp_uu>>rt1[i])&1;
@@ -6751,8 +6898,11 @@ void unneeded_registers(int istart,int iend,int r)
           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
           temp_u|=1;temp_uu|=1;
+          temp_gte_u|=gte_rt[i];
+          temp_gte_u&=~gte_rs[i];
           unneeded_reg[i]=temp_u;
           unneeded_reg_upper[i]=temp_uu;
+          gte_unneeded[i]=temp_gte_u;
           // Only go three levels deep.  This recursion can take an
           // excessive amount of time if there are a lot of nested loops.
           if(r<2) {
@@ -6760,6 +6910,7 @@ void unneeded_registers(int istart,int iend,int r)
           }else{
             unneeded_reg[(ba[i]-start)>>2]=1;
             unneeded_reg_upper[(ba[i]-start)>>2]=1;
+            gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
           }
         } /*else*/ if(1) {
           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
@@ -6767,6 +6918,7 @@ void unneeded_registers(int istart,int iend,int r)
             // Unconditional branch
             u=unneeded_reg[(ba[i]-start)>>2];
             uu=unneeded_reg_upper[(ba[i]-start)>>2];
+            gte_u=gte_unneeded[(ba[i]-start)>>2];
             branch_unneeded_reg[i]=u;
             branch_unneeded_reg_upper[i]=uu;
         //u=1;
@@ -6781,10 +6933,13 @@ void unneeded_registers(int istart,int iend,int r)
             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
             u|=1;uu|=1;
+            gte_u|=gte_rt[i+1];
+            gte_u&=~gte_rs[i+1];
           } else {
             // Conditional branch
             b=unneeded_reg[(ba[i]-start)>>2];
             bu=unneeded_reg_upper[(ba[i]-start)>>2];
+            gte_bu=gte_unneeded[(ba[i]-start)>>2];
             branch_unneeded_reg[i]=b;
             branch_unneeded_reg_upper[i]=bu;
         //b=1;
@@ -6799,20 +6954,25 @@ void unneeded_registers(int istart,int iend,int r)
             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
             b|=1;bu|=1;
+            gte_bu|=gte_rt[i+1];
+            gte_bu&=~gte_rs[i+1];
             // If branch is "likely" then we skip the
             // delay slot on the fall-thru path
             if(likely[i]) {
               u=b;
               uu=bu;
+              gte_u=gte_bu;
               if(i<slen-1) {
                 u&=unneeded_reg[i+2];
                 uu&=unneeded_reg_upper[i+2];
+                gte_u&=gte_unneeded[i+2];
         //u=1;
         //uu=1;
               }
             } else {
               u&=b;
               uu&=bu;
+              gte_u&=gte_bu;
         //u=1;
         //uu=1;
             }
@@ -6848,11 +7008,13 @@ void unneeded_registers(int istart,int iend,int r)
     u|=1LL<<rt2[i];
     uu|=1LL<<rt1[i];
     uu|=1LL<<rt2[i];
+    gte_u|=gte_rt[i];
     // Accessed registers are needed
     u&=~(1LL<<rs1[i]);
     u&=~(1LL<<rs2[i]);
     uu&=~(1LL<<us1[i]);
     uu&=~(1LL<<us2[i]);
+    gte_u&=~gte_rs[i];
     // Source-target dependencies
     uu&=~(tdep<<dep1[i]);
     uu&=~(tdep<<dep2[i]);
@@ -6861,6 +7023,7 @@ void unneeded_registers(int istart,int iend,int r)
     // Save it
     unneeded_reg[i]=u;
     unneeded_reg_upper[i]=uu;
+    gte_unneeded[i]=gte_u;
     /*
     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
     printf("U:");
@@ -7676,6 +7839,7 @@ void clean_registers(int istart,int iend,int wr)
   }
 }
 
+#ifdef DISASM
   /* disassembly */
 void disassemble_inst(int i)
 {
@@ -7764,6 +7928,9 @@ void disassemble_inst(int i)
         printf (" %x: %s\n",start+i*4,insn[i]);
     }
 }
+#else
+static void disassemble_inst(int i) {}
+#endif // DISASM
 
 // clear the state completely, instead of just marking
 // things invalid like invalidate_all_pages() does
@@ -7781,17 +7948,17 @@ void new_dynarec_clear_full()
   pending_exception=0;
   literalcount=0;
   stop_after_jal=0;
+  inv_code_start=inv_code_end=~0;
   // TLB
 #ifndef DISABLE_TLB
   using_tlb=0;
-#endif
-  sp_in_mirror=0;
   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
     memory_map[n]=-1;
   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
     memory_map[n]=((u_int)rdram-0x80000000)>>2;
   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
     memory_map[n]=-1;
+#endif
   for(n=0;n<4096;n++) ll_clear(jump_in+n);
   for(n=0;n<4096;n++) ll_clear(jump_out+n);
   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
@@ -7812,6 +7979,7 @@ void new_dynarec_init()
   fake_pc.f.r.rd=&readmem_dword;
 #endif
   int n;
+  cycle_multiplier=200;
   new_dynarec_clear_full();
 #ifdef HOST_IMM8
   // Copy this into local area so we don't have to put it in every literal pool
@@ -7894,12 +8062,8 @@ int new_recompile_block(int addr)
   //rlist();
   start = (u_int)addr&~3;
   //assert(((u_int)addr&1)==0);
+  new_dynarec_did_compile=1;
 #ifdef PCSX
-  if(!sp_in_mirror&&(signed int)(psxRegs.GPR.n.sp&0xffe00000)>0x80200000&&
-     0x10000<=psxRegs.GPR.n.sp&&(psxRegs.GPR.n.sp&~0xe0e00000)<RAM_SIZE) {
-    printf("SP hack enabled (%08x), @%08x\n", psxRegs.GPR.n.sp, psxRegs.pc);
-    sp_in_mirror=1;
-  }
   if (Config.HLE && start == 0x80001000) // hlecall
   {
     // XXX: is this enough? Maybe check hleSoftCall?
@@ -7909,6 +8073,7 @@ int new_recompile_block(int addr)
     emit_movimm(start,0);
     emit_writeword(0,(int)&pcaddr);
     emit_jmp((int)new_dyna_leave);
+    literal_pool(0);
 #ifdef __arm__
     __clear_cache((void *)beginning,out);
 #endif
@@ -8243,7 +8408,9 @@ int new_recompile_block(int addr)
       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
+#ifndef FORCE32
       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
+#endif
       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
@@ -8270,11 +8437,14 @@ int new_recompile_block(int addr)
 #endif
 #ifdef PCSX
       case 0x12: strcpy(insn[i],"COP2"); type=NI;
-        // note: COP MIPS-1 encoding differs from MIPS32
         op2=(source[i]>>21)&0x1f;
-        if (source[i]&0x3f) {
+        //if (op2 & 0x10) {
+        if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
           if (gte_handlers[source[i]&0x3f]!=NULL) {
-            snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
+            if (gte_regnames[source[i]&0x3f]!=NULL)
+              strcpy(insn[i],gte_regnames[source[i]&0x3f]);
+            else
+              snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
             type=C2OP;
           }
         }
@@ -8302,6 +8472,7 @@ int new_recompile_block(int addr)
     us2[i]=0;
     dep1[i]=0;
     dep2[i]=0;
+    gte_rs[i]=gte_rt[i]=0;
     switch(type) {
       case LOAD:
         rs1[i]=(source[i]>>21)&0x1f;
@@ -8465,7 +8636,6 @@ int new_recompile_block(int addr)
         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
         break;
       case COP1:
-      case COP2:
         rs1[i]=0;
         rs2[i]=0;
         rt1[i]=0;
@@ -8475,6 +8645,23 @@ int new_recompile_block(int addr)
         if(op2==5) us1[i]=rs1[i]; // DMTC1
         rs2[i]=CSREG;
         break;
+      case COP2:
+        rs1[i]=0;
+        rs2[i]=0;
+        rt1[i]=0;
+        rt2[i]=0;
+        if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
+        if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
+        rs2[i]=CSREG;
+        int gr=(source[i]>>11)&0x1F;
+        switch(op2)
+        {
+          case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
+          case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
+          case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
+          case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
+        }
+        break;
       case C1LS:
         rs1[i]=(source[i]>>21)&0x1F;
         rs2[i]=CSREG;
@@ -8488,6 +8675,17 @@ int new_recompile_block(int addr)
         rt1[i]=0;
         rt2[i]=0;
         imm[i]=(short)source[i];
+        if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
+        else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
+        break;
+      case C2OP:
+        rs1[i]=0;
+        rs2[i]=0;
+        rt1[i]=0;
+        rt2[i]=0;
+        gte_rs[i]=gte_reg_reads[source[i]&0x3f];
+        gte_rt[i]=gte_reg_writes[source[i]&0x3f];
+        gte_rt[i]|=1ll<<63; // every op changes flags
         break;
       case FLOAT:
       case FCONV:
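
The gte_rs/gte_rt masks added above track which GTE registers each instruction reads and writes: data registers occupy bits 0..31, control registers bits 32..63, and bit 63 (control reg 31, the FLAG register) is set for every C2OP since all GTE operations update the flags. A minimal sketch of that layout, not part of the patch (names are illustrative):

    /* how the 64-bit GTE dependency masks are laid out and queried */
    #include <stdint.h>

    #define GTE_DATA(n) (1ull << (n))         /* MFC2/MTC2/LWC2/SWC2 regs */
    #define GTE_CTRL(n) (1ull << ((n) + 32))  /* CFC2/CTC2 regs           */
    #define GTE_FLAG    GTE_CTRL(31)          /* written by every GTE op  */

    /* a later op depends on an earlier one if the earlier op's writes
       overlap the later op's reads */
    static int gte_overlap(uint64_t writes, uint64_t reads)
    {
      return (writes & reads) != 0;
    }
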
@@ -8621,6 +8819,7 @@ int new_recompile_block(int addr)
   dirty_reg(&current,CCREG);
   current.isconst=0;
   current.wasconst=0;
+  current.waswritten=0;
   int ds=0;
   int cc=0;
   int hr=-1;
@@ -8649,6 +8848,7 @@ int new_recompile_block(int addr)
         if(current.regmap[hr]==0) current.regmap[hr]=-1;
       }
       current.isconst=0;
+      current.waswritten=0;
     }
     if(i>1)
     {
@@ -8688,7 +8888,9 @@ int new_recompile_block(int addr)
       }
       if(temp_is32!=current.is32) {
         //printf("dumping 32-bit regs (%x)\n",start+i*4);
-        #ifdef DESTRUCTIVE_WRITEBACK
+        #ifndef DESTRUCTIVE_WRITEBACK
+        if(ds)
+        #endif
         for(hr=0;hr<HOST_REGS;hr++)
         {
           int r=current.regmap[hr];
@@ -8700,7 +8902,6 @@ int new_recompile_block(int addr)
             }
           }
         }
-        #endif
         current.is32=temp_is32;
       }
     }
@@ -8712,6 +8913,7 @@ int new_recompile_block(int addr)
     regs[i].wasconst=current.isconst;
     regs[i].was32=current.is32;
     regs[i].wasdirty=current.dirty;
+    regs[i].loadedconst=0;
     #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
     // To change a dirty register from 32 to 64 bits, we must write
     // it out during the previous cycle (for branches, 2 cycles)
@@ -8886,8 +9088,6 @@ int new_recompile_block(int addr)
           clear_const(&current,rt1[i]);
           alloc_cc(&current,i);
           dirty_reg(&current,CCREG);
-          ooo[i]=1;
-          delayslot_alloc(&current,i+1);
           if (rt1[i]==31) {
             alloc_reg(&current,i,31);
             dirty_reg(&current,31);
@@ -8898,6 +9098,8 @@ int new_recompile_block(int addr)
             #endif
             //current.is32|=1LL<<rt1[i];
           }
+          ooo[i]=1;
+          delayslot_alloc(&current,i+1);
           //current.isconst=0; // DEBUG
           ds=1;
           //printf("i=%d, isconst=%x\n",i,current.isconst);
@@ -9274,6 +9476,14 @@ int new_recompile_block(int addr)
       }
       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
     }
+
+    if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
+      current.waswritten|=1<<rs1[i-1];
+    current.waswritten&=~(1<<rt1[i]);
+    current.waswritten&=~(1<<rt2[i]);
+    if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
+      current.waswritten&=~(1<<rs1[i]);
+
     /* Branch post-alloc */
     if(i>0)
     {
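
The waswritten updates just added keep a bitmask of MIPS registers that were last used as the base of a store with a small (<0x800) offset and have not been overwritten since; presumably this lets later stores through the same, unchanged base skip repeated invalidation/self-modifying-code checks. A sketch of the update rules under that assumption (names are illustrative):

    /* restatement of the mask updates above; prev_* describe the previous
       instruction, cur_* the current one */
    static unsigned int update_waswritten(unsigned int mask,
                                          int prev_small_store, int prev_base,
                                          int cur_rt1, int cur_rt2,
                                          int cur_large_store, int cur_base)
    {
      if (prev_small_store) mask |= 1u << prev_base;  /* remember the base  */
      mask &= ~(1u << cur_rt1);            /* regs written by this insn are */
      mask &= ~(1u << cur_rt2);            /* no longer trusted as a base   */
      if (cur_large_store) mask &= ~(1u << cur_base); /* big offsets untracked */
      return mask;
    }
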
@@ -9569,6 +9779,11 @@ int new_recompile_block(int addr)
       cc=0;
     }
 #ifdef PCSX
+    else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
+    {
+      // GTE runs in parallel until accessed, so divide its cycle count by 2 as a rough guess
+      cc+=gte_cycletab[source[i]&0x3f]/2;
+    }
     else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
     {
       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
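
The halving above reflects that the GTE executes in parallel with the CPU and only stalls it when results are read back, so charging half of an operation's nominal cycle count serves as a rough average. A tiny illustration (the cycle figure is hypothetical, not taken from gte_cycletab):

    /* extra cycles charged for a GTE op with the given nominal cost */
    static int gte_cc_charge(int nominal_cycles)
    {
      /* cheap ops (<= 2 cycles) get no extra charge from the code above */
      return nominal_cycles > 2 ? nominal_cycles / 2 : 0;
    }
    /* e.g. a hypothetical 14-cycle operation adds 7 to the block's count */
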
@@ -9598,6 +9813,7 @@ int new_recompile_block(int addr)
       }
     }
     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
+    regs[i].waswritten=current.waswritten;
   }
   
   /* Pass 4 - Cull unused host registers */
@@ -9880,7 +10096,7 @@ int new_recompile_block(int addr)
   // If a register is allocated during a loop, try to allocate it for the
   // entire loop, if possible.  This avoids loading/storing registers
   // inside of the loop.
-
+  
   signed char f_regmap[HOST_REGS];
   clear_all_regs(f_regmap);
   for(i=0;i<slen-1;i++)
@@ -9897,7 +10113,7 @@ int new_recompile_block(int addr)
       {
         int t=(ba[i]-start)>>2;
         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
-        if(t<2||(itype[t-2]!=UJUMP)) // call/ret assumes no registers allocated
+        if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
         for(hr=0;hr<HOST_REGS;hr++)
         {
           if(regs[i].regmap[hr]>64) {
@@ -9953,7 +10169,7 @@ int new_recompile_block(int addr)
           // a mov, which is of negligible benefit.  So such cases are
           // skipped below.
           if(f_regmap[hr]>0) {
-            if(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0) {
+            if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
               int r=f_regmap[hr];
               for(j=t;j<=i;j++)
               {
@@ -9992,7 +10208,7 @@ int new_recompile_block(int addr)
                         break;
                       }
                       // call/ret fast path assumes no registers allocated
-                      if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)) {
+                      if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
                         break;
                       }
                       if(r>63) {
@@ -10140,7 +10356,7 @@ int new_recompile_block(int addr)
         }
       }
     }else{
-      int count=0;
+      // Non-branch or undetermined branch target
       for(hr=0;hr<HOST_REGS;hr++)
       {
         if(hr!=EXCLUDE_REG) {
@@ -10160,7 +10376,6 @@ int new_recompile_block(int addr)
               f_regmap[hr]=regs[i].regmap[hr];
             }
           }
-          else if(regs[i].regmap[hr]<0) count++;
         }
       }
       // Try to restore cycle count at branch targets
@@ -10262,6 +10477,13 @@ int new_recompile_block(int addr)
               loop_start[hr]=MAXBLOCK;
             }
           }
+        }else{
+          if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
+            for(hr=0;hr<HOST_REGS;hr++) {
+              score[hr]=0;earliest_available[hr]=i+1;
+              loop_start[hr]=MAXBLOCK;
+            }
+          }
         }
       }
       // Mark unavailable registers
@@ -10308,11 +10530,13 @@ int new_recompile_block(int addr)
               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
                 int t=(ba[j]-start)>>2;
                 if(t<j&&t>=earliest_available[hr]) {
-                  // Score a point for hoisting loop invariant
-                  if(t<loop_start[hr]) loop_start[hr]=t;
-                  //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
-                  score[hr]++;
-                  end[hr]=j;
+                  if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
+                    // Score a point for hoisting loop invariant
+                    if(t<loop_start[hr]) loop_start[hr]=t;
+                    //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
+                    score[hr]++;
+                    end[hr]=j;
+                  }
                 }
                 else if(t<j) {
                   if(regs[t].regmap[hr]==reg) {
@@ -10374,7 +10598,10 @@ int new_recompile_block(int addr)
               }
               // loop optimization (loop_preload)
               int t=(ba[j]-start)>>2;
-              if(t==loop_start[maxscore]) regs[t].regmap_entry[maxscore]=reg;
+              if(t==loop_start[maxscore]) {
+                if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
+                  regs[t].regmap_entry[maxscore]=reg;
+              }
             }
             else
             {
@@ -10439,6 +10666,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Preload target address for load instruction (non-constant)
           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
             {
@@ -10455,6 +10683,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Load source into target register 
           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
             {
@@ -10471,6 +10700,7 @@ int new_recompile_block(int addr)
               }
             }
           }
+          // Preload map address
           #ifndef HOST_IMM_ADDR32
           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
             hr=get_reg(regs[i+1].regmap,TLREG);
@@ -10510,6 +10740,7 @@ int new_recompile_block(int addr)
             }
           }
           #endif
+          // Address for store instruction (non-constant)
           if(itype[i+1]==STORE||itype[i+1]==STORELR
              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
@@ -10727,9 +10958,9 @@ int new_recompile_block(int addr)
   if(itype[slen-1]==SPAN) {
     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
   }
-  
+
+#ifdef DISASM
   /* Debug/disassembly */
-  if((void*)assem_debug==(void*)printf) 
   for(i=0;i<slen;i++)
   {
     printf("U:");
@@ -10945,6 +11176,7 @@ int new_recompile_block(int addr)
 #endif
     }
   }
+#endif // DISASM
 
   /* Pass 8 - Assembly */
   linkcount=0;stubcount=0;
@@ -10962,10 +11194,14 @@ int new_recompile_block(int addr)
 #ifdef PCSX
   if (start == 0x80030000) {
     // nasty hack for fastbios thing
+    // override block entry to this code
     instr_addr0_override=(u_int)out;
     emit_movimm(start,0);
-    emit_readword((int)&pcaddr,1);
+    // abuse the io address var as a flag that we
+    // have already returned here once
+    emit_readword((int)&address,1);
     emit_writeword(0,(int)&pcaddr);
+    emit_writeword(0,(int)&address);
     emit_cmp(0,1);
     emit_jne((int)new_dyna_leave);
   }
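
Reconstructed from the emit_* calls, the generated fastbios prologue is roughly equivalent to the C sketch below (not generated output); the global 'address' is reused purely as a "been here before" flag, and new_dyna_leave exits back to the emulator core:

    extern unsigned int pcaddr, address;

    /* returns nonzero when the emitted code would branch to new_dyna_leave */
    static int fastbios_should_leave(unsigned int start)
    {
      unsigned int seen = address;  /* emit_readword(&address,1)  */
      pcaddr  = start;              /* emit_writeword(0,&pcaddr)  */
      address = start;              /* emit_writeword(0,&address) */
      return start != seen;         /* emit_cmp + emit_jne        */
    }
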
@@ -10973,12 +11209,13 @@ int new_recompile_block(int addr)
   for(i=0;i<slen;i++)
   {
     //if(ds) printf("ds: ");
-    if((void*)assem_debug==(void*)printf) disassemble_inst(i);
+    disassemble_inst(i);
     if(ds) {
       ds=0; // Skip delay slot
       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
       instr_addr[i]=0;
     } else {
+      speculate_register_values(i);
       #ifndef DESTRUCTIVE_WRITEBACK
       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
       {
@@ -11113,7 +11350,7 @@ int new_recompile_block(int addr)
         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
           emit_loadreg(CCREG,HOST_CCREG);
-        emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
+        emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
       }
       else if(!likely[i-2])
       {
@@ -11136,7 +11373,7 @@ int new_recompile_block(int addr)
     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
       emit_loadreg(CCREG,HOST_CCREG);
-    emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
+    emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
     add_to_linker((int)out,start+i*4,0);
     emit_jmp(0);
   }
@@ -11291,11 +11528,14 @@ int new_recompile_block(int addr)
     }
 #endif
   }
+  inv_code_start=inv_code_end=~0;
 #ifdef PCSX
-  // PCSX maps all RAM mirror invalid_code tests to 0x80000000..0x80000000+RAM_SIZE
+  // for PCSX we need to mark all mirrors too
   if(get_page(start)<(RAM_SIZE>>12))
     for(i=start>>12;i<=(start+slen*4)>>12;i++)
-      invalid_code[((u_int)0x80000000>>12)|i]=0;
+      invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
+      invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
+      invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
 #endif
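
PSX RAM (2 MB, i.e. 512 four-kilobyte pages) is mirrored into KUSEG, KSEG0 and KSEG1, so a block compiled from any one mirror has to be marked as containing valid code at all three bases; the &0x1ff keeps the page index inside the 2 MB range. A sketch of the per-page marking, assuming the same invalid_code[] table:

    /* clear the "invalid" flag for one physical RAM page in all three
       address-space mirrors; page is the physical page index, 0..0x1ff */
    static void mark_page_valid_all_mirrors(unsigned char *invalid_code,
                                            unsigned int page)
    {
      invalid_code[(0x00000000u >> 12) | (page & 0x1ff)] = 0; /* KUSEG */
      invalid_code[(0x80000000u >> 12) | (page & 0x1ff)] = 0; /* KSEG0 */
      invalid_code[(0xa0000000u >> 12) | (page & 0x1ff)] = 0; /* KSEG1 */
    }
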
   
   /* Pass 10 - Free memory by expiring oldest blocks */