630b122b |
1 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * |
2 | * linkage_arm.s for PCSX * |
3 | * Copyright (C) 2009-2011 Ari64 * |
4 | * Copyright (C) 2021 notaz * |
5 | * * |
6 | * This program is free software; you can redistribute it and/or modify * |
7 | * it under the terms of the GNU General Public License as published by * |
8 | * the Free Software Foundation; either version 2 of the License, or * |
9 | * (at your option) any later version. * |
10 | * * |
11 | * This program is distributed in the hope that it will be useful, * |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * |
14 | * GNU General Public License for more details. * |
15 | * * |
16 | * You should have received a copy of the GNU General Public License * |
17 | * along with this program; if not, write to the * |
18 | * Free Software Foundation, Inc., * |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * |
20 | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
21 | |
22 | #include "arm_features.h" |
23 | #include "new_dynarec_config.h" |
24 | #include "assem_arm64.h" |
25 | #include "linkage_offsets.h" |
26 | |
27 | #if (LO_mem_wtab & 7) /* 64-bit table pointers below are loaded with ldr; offsets must be 8-byte aligned */
28 | #error misaligned pointers
29 | #endif
30 | |
/* Zero-initialized state block for the dynarec; every DRC_VAR symbol below
 * is an alias to a fixed offset (LO_*) inside this one object. */
31 | .bss
32 | .align 4
33 | .global dynarec_local
34 | .type dynarec_local, %object
35 | .size dynarec_local, LO_dynarec_local_size
36 | dynarec_local:
37 | .space LO_dynarec_local_size /* size comes from linkage_offsets.h */
38 | |
/* DRC_VAR_(name, vname, size_): export 'vname' as an object symbol placed at
 * dynarec_local + LO_<name>, so C code and this asm share the same storage. */
39 | #define DRC_VAR_(name, vname, size_) \
40 | vname = dynarec_local + LO_##name; \
41 | .global vname; \
42 | .type vname, %object; \
43 | .size vname, size_
44 | 
/* DRC_VAR: same, with the platform symbol decoration applied via ESYM() */
45 | #define DRC_VAR(name, size_) \
46 | DRC_VAR_(name, ESYM(name), size_)
47 | |
/* Shared dynarec state; offsets/sizes must match linkage_offsets.h.
 * Lines starting with #DRC_VAR are intentionally disabled entries. */
48 | DRC_VAR(next_interupt, 4)
49 | DRC_VAR(cycle_count, 4)
50 | DRC_VAR(last_count, 4)
51 | DRC_VAR(pending_exception, 4)
52 | DRC_VAR(stop, 4)
53 | DRC_VAR(branch_target, 4)
54 | DRC_VAR(address, 4)
55 | #DRC_VAR(align0, 16) /* unused/alignment */
56 | DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)
57 | 
58 | /* psxRegs */
59 | #DRC_VAR(reg, 128)
60 | DRC_VAR(lo, 4)
61 | DRC_VAR(hi, 4)
62 | DRC_VAR(reg_cop0, 128) /* accessed below as LO_reg_cop0+offset (Status/Cause/EPC) */
63 | DRC_VAR(reg_cop2d, 128)
64 | DRC_VAR(reg_cop2c, 128)
65 | DRC_VAR(pcaddr, 4)
66 | #DRC_VAR(code, 4)
67 | #DRC_VAR(cycle, 4)
68 | #DRC_VAR(interrupt, 4)
69 | #DRC_VAR(intCycle, 256)
70 | 
71 | DRC_VAR(rcnts, 7*4*4)
72 | DRC_VAR(inv_code_start, 4)
73 | DRC_VAR(inv_code_end, 4)
74 | DRC_VAR(mem_rtab, 8) /* 64-bit pointer: read handler table */
75 | DRC_VAR(mem_wtab, 8) /* 64-bit pointer: write handler table (alignment checked above) */
76 | DRC_VAR(psxH_ptr, 8)
77 | DRC_VAR(invc_ptr, 8)
78 | DRC_VAR(zeromem_ptr, 8)
79 | DRC_VAR(scratch_buf_ptr, 8)
80 | DRC_VAR(ram_offset, 8)
81 | DRC_VAR(mini_ht, 256)
82 | DRC_VAR(restore_candidate, 512) /* bitmap scanned by cc_interrupt to re-clean blocks */
83 | |
84 | |
85 | .text
86 | .align 2
87 | 
88 | /* r0 = virtual target address */
89 | /* r1 = instruction to patch */
/* Shared body of the dynamic linkers: resolve the target PC through the
 * hash-table lookup and jump straight to the returned host code pointer.
 * No actual call-site patching is done here (unlike the arm32 version). */
90 | .macro dyna_linker_main
91 | /* XXX TODO: should be able to do better than this... */
92 | bl get_addr_ht /* x0 = host code address for the PSX pc in w0 */
93 | br x0
94 | .endm
95 | |
96 | |
/* Entry used by compiled blocks when a branch target is not yet linked. */
97 | FUNCTION(dyna_linker):
98 | /* r0 = virtual target address */
99 | /* r1 = instruction to patch */
100 | dyna_linker_main
101 | .size dyna_linker, .-dyna_linker
102 | |
/* Not implemented on arm64: unconditionally aborts if ever reached. */
103 | FUNCTION(exec_pagefault):
104 | /* r0 = instruction pointer */
105 | /* r1 = fault address */
106 | /* r2 = cause */
107 | bl abort
108 | .size exec_pagefault, .-exec_pagefault
109 | |
110 | /* Special dynamic linker for the case where a page fault
111 |    may occur in a branch delay slot */
/* On arm64 this is identical to dyna_linker (same lookup-and-jump macro). */
112 | FUNCTION(dyna_linker_ds):
113 | /* r0 = virtual target address */
114 | /* r1 = instruction to patch */
115 | dyna_linker_main
116 | .size dyna_linker_ds, .-dyna_linker_ds
117 | |
118 | .align 2
/* Called from compiled code when the cycle counter (rCC) runs out.
 * Syncs psxRegs.cycle, runs pending events via gen_interupt(), then either
 * returns to the block, exits the dynarec, or re-dispatches to a new pc.
 * Uses callee-saved x19-x21 since it calls out to C. */
119 | FUNCTION(cc_interrupt):
120 | ldr w0, [rFP, #LO_last_count]
121 | mov w2, #0x1fc
122 | add rCC, w0, rCC /* rCC = absolute cycle count */
123 | str wzr, [rFP, #LO_pending_exception]
124 | and w2, w2, rCC, lsr #17 /* w2 = word offset into restore_candidate, derived from cycles */
125 | add x3, rFP, #LO_restore_candidate
126 | str rCC, [rFP, #LO_cycle] /* PCSX cycles */
127 | # str rCC, [rFP, #LO_reg_cop0+36] /* Count */
128 | ldr w19, [x3, w2, uxtw] /* w19 = candidate bitmap word */
129 | mov x21, lr /* preserve return address across C calls */
130 | cbnz w19, 4f /* any dirty-block candidates? clean them first */
131 | 1:
132 | bl gen_interupt
133 | mov lr, x21
134 | ldr rCC, [rFP, #LO_cycle] /* gen_interupt may have changed cycle/next_interupt */
135 | ldr w0, [rFP, #LO_next_interupt]
136 | ldr w1, [rFP, #LO_pending_exception]
137 | ldr w2, [rFP, #LO_stop]
138 | str w0, [rFP, #LO_last_count]
139 | sub rCC, rCC, w0 /* back to cycles-until-next-event form */
140 | cbnz w2, new_dyna_leave /* stop requested: exit the dynarec */
141 | cbnz w1, 2f /* exception pending: re-dispatch */
142 | ret
143 | 2:
144 | ldr w0, [rFP, #LO_pcaddr]
145 | bl get_addr_ht /* x0 = host code for the new pc */
146 | br x0
147 | 4:
148 | /* Move 'dirty' blocks to the 'clean' list */
149 | lsl w20, w2, #3 /* w20 = first block index covered by this bitmap word */
150 | str wzr, [x3, w2, uxtw] /* consume the bitmap word */
151 | 5:
152 | mov w0, w20
153 | add w20, w20, #1
154 | tbz w19, #0, 6f /* bit clear -> not a candidate */
155 | bl clean_blocks
156 | 6:
157 | lsr w19, w19, #1 /* next bit */
158 | tst w20, #31
159 | bne 5b /* loop over all 32 bits of the word */
160 | b 1b
161 | .size cc_interrupt, .-cc_interrupt
162 | |
163 | .align 2
/* Raise a MIPS coprocessor-unusable exception: w0 = faulting EPC on entry.
 * Sets EPC/Status/Cause in reg_cop0 and jumps to the compiled code for the
 * general exception vector 0x80000080. */
164 | FUNCTION(fp_exception):
165 | mov w2, #0x10000000 /* Cause.CE = 1 (coprocessor number field) */
166 | 0:
167 | ldr w1, [rFP, #LO_reg_cop0+48] /* Status */
168 | mov w3, #0x80000000
169 | str w0, [rFP, #LO_reg_cop0+56] /* EPC */
170 | orr w1, w1, #2 /* Status.EXL: now in exception mode */
171 | add w2, w2, #0x2c /* ExcCode 11 (CpU) << 2 */
172 | str w1, [rFP, #LO_reg_cop0+48] /* Status */
173 | str w2, [rFP, #LO_reg_cop0+52] /* Cause */
174 | add w0, w3, #0x80 /* w0 = 0x80000080 exception vector */
175 | bl get_addr_ht
176 | br x0
177 | .size fp_exception, .-fp_exception
178 | .align 2
/* Same as fp_exception but for a fault in a branch delay slot (Cause.BD). */
179 | FUNCTION(fp_exception_ds):
180 | mov w2, #0x90000000 /* Set high bit if delay slot */
181 | b 0b
182 | .size fp_exception_ds, .-fp_exception_ds
183 | |
184 | .align 2
/* Raise a MIPS syscall exception: w0 = EPC on entry. Mirrors fp_exception
 * but with Cause = ExcCode 8 (Syscall) << 2, then jumps to vector 0x80000080. */
185 | FUNCTION(jump_syscall):
186 | ldr w1, [rFP, #LO_reg_cop0+48] /* Status */
187 | mov w3, #0x80000000
188 | str w0, [rFP, #LO_reg_cop0+56] /* EPC */
189 | orr w1, w1, #2 /* Status.EXL */
190 | mov w2, #0x20 /* ExcCode 8 (Syscall) << 2 */
191 | str w1, [rFP, #LO_reg_cop0+48] /* Status */
192 | str w2, [rFP, #LO_reg_cop0+52] /* Cause */
193 | add w0, w3, #0x80 /* w0 = 0x80000080 exception vector */
194 | bl get_addr_ht
195 | br x0
196 | .size jump_syscall, .-jump_syscall
197 | .align 2
198 | 
199 | /* note: psxException might do recursive recompiler call from its HLE code,
200 |  * so be ready for this */
/* Resume execution at psxRegs.pcaddr: reload the cycle bookkeeping
 * (rCC = cycle - next_interupt) and jump into the compiled code. */
201 | FUNCTION(jump_to_new_pc):
202 | ldr w1, [rFP, #LO_next_interupt]
203 | ldr rCC, [rFP, #LO_cycle]
204 | ldr w0, [rFP, #LO_pcaddr]
205 | sub rCC, rCC, w1 /* cycles until the next scheduled event */
206 | str w1, [rFP, #LO_last_count]
207 | bl get_addr_ht
208 | br x0
209 | .size jump_to_new_pc, .-jump_to_new_pc
210 | |
211 | /* stack must be aligned by 16, and include space for save_regs() use */
212 | .align 2
/* Dynarec entry point. x0 = pointer to dynarec_local (becomes rFP).
 * Saves all AAPCS64 callee-saved GPRs (x19-x28, fp, lr) in an SSP_ALL-sized
 * frame that new_dyna_leave later pops, then dispatches to psxRegs.pcaddr. */
213 | FUNCTION(new_dyna_start):
214 | stp x29, x30, [sp, #-SSP_ALL]!
215 | ldr w1, [x0, #LO_next_interupt]
216 | ldr w2, [x0, #LO_cycle]
217 | stp x19, x20, [sp, #16*1]
218 | stp x21, x22, [sp, #16*2]
219 | stp x23, x24, [sp, #16*3]
220 | stp x25, x26, [sp, #16*4]
221 | stp x27, x28, [sp, #16*5]
222 | mov rFP, x0 /* rFP = dynarec_local base for all LO_* accesses */
223 | ldr w0, [rFP, #LO_pcaddr]
224 | str w1, [rFP, #LO_last_count]
225 | sub rCC, w2, w1 /* rCC = cycle - next_interupt */
226 | bl get_addr_ht
227 | br x0
228 | .size new_dyna_start, .-new_dyna_start
229 | |
230 | .align 2
/* Dynarec exit: write the absolute cycle count back to psxRegs.cycle,
 * restore the registers saved by new_dyna_start and return to its caller. */
231 | FUNCTION(new_dyna_leave):
232 | ldr w0, [rFP, #LO_last_count]
233 | add rCC, rCC, w0 /* back to absolute cycles */
234 | str rCC, [rFP, #LO_cycle]
235 | ldp x19, x20, [sp, #16*1]
236 | ldp x21, x22, [sp, #16*2]
237 | ldp x23, x24, [sp, #16*3]
238 | ldp x25, x26, [sp, #16*4]
239 | ldp x27, x28, [sp, #16*5]
240 | ldp x29, x30, [sp], #SSP_ALL /* pop the frame pushed in new_dyna_start */
241 | ret
242 | .size new_dyna_leave, .-new_dyna_leave
243 | |
244 | /* --------------------------------------- */ |
245 | |
246 | .align 2 |
247 | |
/* Before calling a C memory handler: sync the absolute cycle count
 * (last_count + elapsed cycles in w2) into psxRegs.cycle. Clobbers w4. */
248 | .macro memhandler_pre
249 | /* w0 = adddr/data, x1 = rhandler, w2 = cycles, x3 = whandler */
250 | ldr w4, [rFP, #LO_last_count]
251 | add w4, w4, w2
252 | str w4, [rFP, #LO_cycle]
253 | .endm
254 | 
/* After a handler returns: recompute w0 = cycle - next_interupt (handlers
 * may reschedule events / change cycle) and refresh last_count. */
255 | .macro memhandler_post
256 | ldr w0, [rFP, #LO_next_interupt]
257 | ldr w2, [rFP, #LO_cycle] // memhandlers can modify cc, like dma
258 | str w0, [rFP, #LO_last_count]
259 | sub w0, w2, w0
260 | .endm
261 | |
/* Callable wrappers around the two macros above, for use from emitted code. */
262 | FUNCTION(do_memhandler_pre):
263 | memhandler_pre
264 | ret
265 | 
266 | FUNCTION(do_memhandler_post):
267 | memhandler_post
268 | ret
269 | |
/* Memory-read dispatch. Each table entry encodes either a direct-memory base
 * (entry*2 = host base; load at base + page_offset) or, with the top bit set
 * (carry out of 'adds'), a C handler address to call. */
269 | 
270 | .macro pcsx_read_mem readop tab_shift
271 | /* w0 = address, x1 = handler_tab, w2 = cycles */
272 | ubfm w4, w0, #\tab_shift, #11 /* w4 = offset within the 4K page, scaled units */
273 | ldr x3, [x1, w4, uxtw #3]
274 | adds x3, x3, x3 /* entry*2; carry = was a handler entry */
275 | bcs 0f
276 | \readop w0, [x3, w4, uxtw #\tab_shift] /* direct memory read */
277 | ret
278 | 0:
279 | stp xzr, x30, [sp, #-16]! /* save lr; popped in handler_read_end */
280 | memhandler_pre
281 | blr x3 /* call C read handler, result in w0 */
282 | .endm
283 | 
284 | FUNCTION(jump_handler_read8):
285 | add x1, x1, #0x1000/4*8 + 0x1000/2*8 /* shift to r8 part */
286 | pcsx_read_mem ldrb, 0
287 | b handler_read_end
288 | 
289 | FUNCTION(jump_handler_read16):
290 | add x1, x1, #0x1000/4*8 /* shift to r16 part */
291 | pcsx_read_mem ldrh, 1
292 | b handler_read_end
293 | 
294 | FUNCTION(jump_handler_read32):
295 | pcsx_read_mem ldr, 2
296 | 
/* Common tail for the handler path: restore lr pushed by the macro. */
297 | handler_read_end:
298 | ldp xzr, x30, [sp], #16
299 | ret
300 | |
/* Memory-write dispatch, mirror of pcsx_read_mem: direct store on a
 * memory-backed entry, otherwise call the C write handler. On the direct
 * path w0 returns the unchanged cycle count; on the handler path
 * handler_write_end returns the recomputed one via memhandler_post. */
301 | .macro pcsx_write_mem wrtop movop tab_shift
302 | /* w0 = address, w1 = data, w2 = cycles, x3 = handler_tab */
303 | ubfm w4, w0, #\tab_shift, #11 /* w4 = offset within the 4K page, scaled units */
304 | ldr x3, [x3, w4, uxtw #3]
305 | adds x3, x3, x3 /* entry*2; carry = was a handler entry */
306 | bcs 0f
307 | mov w0, w2 /* cycle return */
308 | \wrtop w1, [x3, w4, uxtw #\tab_shift] /* direct memory write */
309 | ret
310 | 0:
311 | stp xzr, x30, [sp, #-16]! /* save lr; popped in handler_write_end */
312 | str w0, [rFP, #LO_address] /* some handlers still need it... */
313 | \movop w0, w1 /* handler takes the (extended) data in w0 */
314 | memhandler_pre
315 | blr x3
316 | .endm
317 | 
318 | FUNCTION(jump_handler_write8):
319 | add x3, x3, #0x1000/4*8 + 0x1000/2*8 /* shift to r8 part */
320 | pcsx_write_mem strb uxtb 0
321 | b handler_write_end
322 | 
323 | FUNCTION(jump_handler_write16):
324 | add x3, x3, #0x1000/4*8 /* shift to r16 part */
325 | pcsx_write_mem strh uxth 1
326 | 
327 | b handler_write_end
328 | 
329 | FUNCTION(jump_handler_write32):
330 | pcsx_write_mem str mov 2
331 | 
/* Common tail for the handler path: recompute cycles, restore lr. */
332 | handler_write_end:
333 | memhandler_post
334 | ldp xzr, x30, [sp], #16
335 | ret
335 | |
/* MIPS SWL (store word left, little-endian): store the most significant
 * 1..4 bytes of w1 down to the aligned word boundary, depending on addr&3.
 * Direct-memory pages only; handler-backed pages hit the abort at 4:.
 * Returns w0 = cycles (w2) unchanged. */
336 | FUNCTION(jump_handle_swl):
337 | /* w0 = address, w1 = data, w2 = cycles */
338 | ldr x3, [rFP, #LO_mem_wtab]
339 | orr w4, wzr, w0, lsr #12 /* w4 = 4K page index */
340 | ldr x3, [x3, w4, uxtw #3]
341 | adds x3, x3, x3 /* entry*2; carry = handler entry */
342 | bcs 4f
343 | add x3, x0, x3 /* x3 = host address for w0 */
344 | mov w0, w2
345 | tbz x3, #1, 10f // & 2
346 | tbz x3, #0, 2f // & 1
347 | 3: /* addr&3 == 3: whole word at the aligned base */
348 | stur w1, [x3, #-3]
349 | ret
350 | 2: /* addr&3 == 2: upper 3 bytes */
351 | lsr w2, w1, #8
352 | lsr w1, w1, #24
353 | sturh w2, [x3, #-2]
354 | strb w1, [x3]
355 | ret
356 | 10:
357 | tbz x3, #0, 0f // & 1
358 | 1: /* addr&3 == 1: upper 2 bytes */
359 | lsr w1, w1, #16
360 | sturh w1, [x3, #-1]
361 | ret
362 | 0: /* addr&3 == 0: top byte only */
363 | lsr w2, w1, #24
364 | strb w2, [x3]
365 | ret
366 | 4: /* handler-backed page: unimplemented */
367 | mov w0, w2 // todo
368 | bl abort
369 | ret
370 | |
/* MIPS SWR (store word right, little-endian): store the least significant
 * 4..1 bytes of w1 from the address up to the end of the word, depending on
 * addr&3. Direct-memory pages only; handler-backed pages abort at 4:.
 * Returns w0 = cycles (w2) unchanged. */
371 | FUNCTION(jump_handle_swr):
372 | /* w0 = address, w1 = data, w2 = cycles */
373 | ldr x3, [rFP, #LO_mem_wtab]
374 | orr w4, wzr, w0, lsr #12 /* w4 = 4K page index */
375 | ldr x3, [x3, w4, uxtw #3]
376 | adds x3, x3, x3 /* entry*2; carry = handler entry */
377 | bcs 4f
378 | add x3, x0, x3 /* x3 = host address for w0 */
379 | mov w0, w2
380 | tbz x3, #1, 10f // & 2
381 | tbz x3, #0, 2f // & 1
382 | 3: /* addr&3 == 3: low byte only */
383 | strb w1, [x3]
384 | ret
385 | 2: /* addr&3 == 2: low 2 bytes */
386 | strh w1, [x3]
387 | ret
388 | 10:
389 | tbz x3, #0, 0f // & 1
390 | 1: /* addr&3 == 1: low 3 bytes */
391 | lsr w2, w1, #8
392 | strb w1, [x3]
393 | sturh w2, [x3, #1]
394 | ret
395 | 0: /* addr&3 == 0: whole word */
396 | str w1, [x3]
397 | ret
398 | 4: /* handler-backed page: unimplemented */
399 | mov w0, w2 // todo
400 | bl abort
401 | ret
402 | |
/* GTE stall check from compiled code: sync psxRegs.cycle, call
 * gteCheckStallRaw(op_cycles, &psxRegs) and charge the returned stall
 * cycles to rCC. lr is preserved in the LO_saved_lr slot across the call. */
403 | FUNCTION(call_gteStall):
404 | /* w0 = op_cycles, w1 = cycles */
405 | ldr w2, [rFP, #LO_last_count]
406 | str lr, [rFP, #LO_saved_lr]
407 | add w1, w1, w2 /* absolute cycle count */
408 | str w1, [rFP, #LO_cycle]
409 | add x1, rFP, #LO_psxRegs /* arg1 = &psxRegs */
410 | bl gteCheckStallRaw
411 | ldr lr, [rFP, #LO_saved_lr]
412 | add rCC, rCC, w0 /* w0 = stall cycles to charge */
413 | ret
414 | |