drc: arm64 wip
[pcsx_rearmed.git] libpcsxcore/new_dynarec/patches/trace_drc_chk
diff --git a/libpcsxcore/new_dynarec/linkage_arm.S b/libpcsxcore/new_dynarec/linkage_arm.S
index fcb4e1a..e2547c4 100644
--- a/libpcsxcore/new_dynarec/linkage_arm.S
+++ b/libpcsxcore/new_dynarec/linkage_arm.S
@@ -439,7 +439,7 @@ FUNCTION(cc_interrupt):
        str     r1, [fp, #LO_pending_exception]
        and     r2, r2, r10, lsr #17
        add     r3, fp, #LO_restore_candidate
-       str     r10, [fp, #LO_cycle]            /* PCSX cycles */
+@@@    str     r10, [fp, #LO_cycle]            /* PCSX cycles */
 @@     str     r10, [fp, #LO_reg_cop0+36]      /* Count */
        ldr     r4, [r2, r3]
        mov     r10, lr
@@ -519,7 +519,7 @@ FUNCTION(jump_syscall_hle):
        mov     r1, #0    /* in delay slot */
        add     r2, r2, r10
        mov     r0, #0x20 /* cause */
-       str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@    str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
        bl      psxException

        /* note: psxException might do recursive recompiler call from it's HLE code,
@@ -540,7 +540,7 @@ FUNCTION(jump_hlecall):
        str     r0, [fp, #LO_pcaddr]
        add     r2, r2, r10
        adr     lr, pcsx_return
-       str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@    str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
        bx      r1
        .size   jump_hlecall, .-jump_hlecall

@@ -550,7 +550,7 @@ FUNCTION(jump_intcall):
        str     r0, [fp, #LO_pcaddr]
        add     r2, r2, r10
        adr     lr, pcsx_return
-       str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
+@@@    str     r2, [fp, #LO_cycle] /* PCSX cycle counter */
        b       execI
        .size   jump_hlecall, .-jump_hlecall

@@ -559,7 +559,7 @@ FUNCTION(new_dyna_leave):
        ldr     r0, [fp, #LO_last_count]
        add     r12, fp, #28
        add     r10, r0, r10
-       str     r10, [fp, #LO_cycle]
+@@@    str     r10, [fp, #LO_cycle]
        ldmfd   sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
        .size   new_dyna_leave, .-new_dyna_leave

@@ -676,7 +676,7 @@ FUNCTION(new_dyna_start):
        \readop r0, [r1, r3, lsl #\tab_shift]
 .endif
        movcc   pc, lr
-       str     r2, [fp, #LO_cycle]
+@@@    str     r2, [fp, #LO_cycle]
        bx      r1
 .endm

@@ -711,7 +711,7 @@ FUNCTION(jump_handler_read32):
        mov     r0, r1
        add     r2, r2, r12
        push    {r2, lr}
-       str     r2, [fp, #LO_cycle]
+@@@    str     r2, [fp, #LO_cycle]
        blx     r3

        ldr     r0, [fp, #LO_next_interupt]
@@ -739,7 +739,7 @@ FUNCTION(jump_handler_write_h):
        add     r2, r2, r12
        mov     r0, r1
        push    {r2, lr}
-       str     r2, [fp, #LO_cycle]
+@@@    str     r2, [fp, #LO_cycle]
        blx     r3

        ldr     r0, [fp, #LO_next_interupt]
diff --git a/libpcsxcore/new_dynarec/linkage_arm64.S b/libpcsxcore/new_dynarec/linkage_arm64.S
index 060ac48..e3007d3 100644
--- a/libpcsxcore/new_dynarec/linkage_arm64.S
+++ b/libpcsxcore/new_dynarec/linkage_arm64.S
@@ -131,7 +131,7 @@ FUNCTION(cc_interrupt):
        str     wzr, [rFP, #LO_pending_exception]
        and     w2, w2, rCC, lsr #17
        add     x3, rFP, #LO_restore_candidate
-       str     rCC, [rFP, #LO_cycle]           /* PCSX cycles */
+##     str     rCC, [rFP, #LO_cycle]           /* PCSX cycles */
 #      str     rCC, [rFP, #LO_reg_cop0+36]     /* Count */
        ldr     w19, [x3, w2, uxtw]
        mov     x21, lr
@@ -254,7 +254,7 @@ FUNCTION(new_dyna_start):
 FUNCTION(new_dyna_leave):
        ldr     w0,  [rFP, #LO_last_count]
        add     rCC, rCC, w0
-       str     rCC, [rFP, #LO_cycle]
+##     str     rCC, [rFP, #LO_cycle]
        ldp     x19, x20, [sp, #16*1]
        ldp     x21, x22, [sp, #16*2]
        ldp     x23, x24, [sp, #16*3]
@@ -272,7 +272,7 @@ FUNCTION(new_dyna_leave):
        /* w0 = adddr/data, x1 = rhandler, w2 = cycles, x3 = whandler */
        ldr     w4, [rFP, #LO_last_count]
        add     w4, w4, w2
-       str     w4, [rFP, #LO_cycle]
+##     str     w4, [rFP, #LO_cycle]
 .endm

 .macro memhandler_post
diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index e7b55b6..caa06d0 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -43,10 +43,10 @@ static int sceBlock;
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #endif

-//#define DISASM
-//#define assem_debug printf
+#define DISASM
+#define assem_debug printf
 //#define inv_debug printf
-#define assem_debug(...)
+//#define assem_debug(...)
 #define inv_debug(...)

 #ifdef __i386__
@@ -424,6 +424,9 @@ static int doesnt_expire_soon(void *tcaddr)
 // This is called from the recompiled JR/JALR instructions
 void noinline *get_addr(u_int vaddr)
 {
+#ifdef DRC_DBG
+printf("get_addr %08x, pc=%08x\n", vaddr, psxRegs.pc);
+#endif
   u_int page=get_page(vaddr);
   u_int vpage=get_vpage(vaddr);
   struct ll_entry *head;
@@ -4221,13 +4224,15 @@ void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
     }
     emit_addimm_and_set_flags(cycles,HOST_CCREG);
     jaddr=out;
-    emit_jns(0);
+    //emit_jns(0);
+   emit_jmp(0);
   }
   else
   {
     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
     jaddr=out;
-    emit_jns(0);
+    //emit_jns(0);
+   emit_jmp(0);
   }
   add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
 }
@@ -4635,7 +4640,8 @@ static void rjump_assemble(int i,struct regstat *i_regs)
     // special case for RFE
     emit_jmp(0);
   else
-    emit_jns(0);
+    //emit_jns(0);
+   emit_jmp(0);
   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
   #ifdef USE_MINI_HT
   if(rs1[i]==31) {
@@ -4737,7 +4743,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
     else if(nop) {
       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       void *jaddr=out;
-      emit_jns(0);
+      //emit_jns(0);
+     emit_jmp(0);
       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
     }
     else {
@@ -4924,7 +4931,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
         emit_loadreg(CCREG,HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+       emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
         emit_storereg(CCREG,HOST_CCREG);
       }
@@ -4933,7 +4941,8 @@ static void cjump_assemble(int i,struct regstat *i_regs)
         assert(cc==HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+       emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
       }
     }
@@ -5032,7 +5041,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
     else if(nevertaken) {
       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
       void *jaddr=out;
-      emit_jns(0);
+      //emit_jns(0);
+     emit_jmp(0);
       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
     }
     else {
@@ -5188,7 +5198,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
         emit_loadreg(CCREG,HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+       emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
         emit_storereg(CCREG,HOST_CCREG);
       }
@@ -5197,7 +5208,8 @@ static void sjump_assemble(int i,struct regstat *i_regs)
         assert(cc==HOST_CCREG);
         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
         void *jaddr=out;
-        emit_jns(0);
+        //emit_jns(0);
+       emit_jmp(0);
         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
       }
     }
@@ -5685,7 +5697,7 @@ void unneeded_registers(int istart,int iend,int r)
     // R0 is always unneeded
     u|=1;
     // Save it
-    unneeded_reg[i]=u;
+    unneeded_reg[i]=1;//u;
     gte_unneeded[i]=gte_u;
     /*
     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
@@ -8209,6 +8221,7 @@ int new_recompile_block(int addr)

   // This allocates registers (if possible) one instruction prior
   // to use, which can avoid a load-use penalty on certain CPUs.
+#if 0
   for(i=0;i<slen-1;i++)
   {
     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP))
@@ -8365,6 +8378,7 @@ int new_recompile_block(int addr)
       }
     }
   }
+#endif

   /* Pass 6 - Optimize clean/dirty state */
   clean_registers(0,slen-1,1);
@@ -8659,6 +8673,11 @@ int new_recompile_block(int addr)
         case SPAN:
           pagespan_assemble(i,&regs[i]);break;
       }
+
+#ifdef DRC_DBG
+      if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP)
+        wb_dirtys(regs[i].regmap,regs[i].dirty);
+#endif
       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
         literal_pool(1024);
       else
@@ -8767,7 +8786,7 @@ int new_recompile_block(int addr)
     }
   }
   // External Branch Targets (jump_in)
-  if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
+  if(copy+slen*4>(void *)shadow+sizeof(shadow)) {copy=shadow;printf("shadow overflow\n");}
   for(i=0;i<slen;i++)
   {
     if(bt[i]||i==0)
@@ -8882,6 +8901,10 @@ int new_recompile_block(int addr)
     }
     expirep=(expirep+1)&65535;
   }
+#ifdef DRC_DBG
+printf("new_recompile_block done\n");
+fflush(stdout);
+#endif
   return 0;
 }