/* pcsx_rearmed: libpcsxcore/new_dynarec/linkage_arm.s */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   linkage_arm.s for PCSX                                                 *
 *   Copyright (C) 2009-2011 Ari64                                          *
 *   Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas                      *
 *                                                                          *
 *   This program is free software; you can redistribute it and/or modify   *
 *   it under the terms of the GNU General Public License as published by   *
 *   the Free Software Foundation; either version 2 of the License, or      *
 *   (at your option) any later version.                                    *
 *                                                                          *
 *   This program is distributed in the hope that it will be useful,        *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of         *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           *
 *   GNU General Public License for more details.                           *
 *                                                                          *
 *   You should have received a copy of the GNU General Public License      *
 *   along with this program; if not, write to the                          *
 *   Free Software Foundation, Inc.,                                        *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.           *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* .equiv HAVE_ARMV7, 1 */

    .global rdram
rdram = 0x80000000
    .global dynarec_local
    .global reg
    .global hi
    .global lo
    .global reg_cop0
    .global reg_cop2d
    .global reg_cop2c
    .global FCR0
    .global FCR31
    .global next_interupt
    .global cycle_count
    .global last_count
    .global pending_exception
    .global pcaddr
    .global stop
    .global invc_ptr
    .global address
    .global branch_target
    .global PC
    .global mini_ht
    .global restore_candidate
    /* psx */
    .global psxRegs
    .global mem_rtab
    .global mem_wtab
    .global psxH_ptr
    .global inv_code_start
    .global inv_code_end

    .bss
    .align 4
    .type dynarec_local, %object
    .size dynarec_local, dynarec_local_end-dynarec_local
dynarec_local:
    .space dynarec_local_end-dynarec_local /*0x400630*/
next_interupt = dynarec_local + 64
    .type next_interupt, %object
    .size next_interupt, 4
cycle_count = next_interupt + 4
    .type cycle_count, %object
    .size cycle_count, 4
last_count = cycle_count + 4
    .type last_count, %object
    .size last_count, 4
pending_exception = last_count + 4
    .type pending_exception, %object
    .size pending_exception, 4
stop = pending_exception + 4
    .type stop, %object
    .size stop, 4
invc_ptr = stop + 4
    .type invc_ptr, %object
    .size invc_ptr, 4
address = invc_ptr + 4
    .type address, %object
    .size address, 4
psxRegs = address + 4

/* psxRegs */
    .type psxRegs, %object
    .size psxRegs, psxRegs_end-psxRegs
reg = psxRegs
    .type reg, %object
    .size reg, 128
lo = reg + 128
    .type lo, %object
    .size lo, 4
hi = lo + 4
    .type hi, %object
    .size hi, 4
reg_cop0 = hi + 4
    .type reg_cop0, %object
    .size reg_cop0, 128
reg_cop2d = reg_cop0 + 128
    .type reg_cop2d, %object
    .size reg_cop2d, 128
reg_cop2c = reg_cop2d + 128
    .type reg_cop2c, %object
    .size reg_cop2c, 128
PC = reg_cop2c + 128
pcaddr = PC
    .type PC, %object
    .size PC, 4
code = PC + 4
    .type code, %object
    .size code, 4
cycle = code + 4
    .type cycle, %object
    .size cycle, 4
interrupt = cycle + 4
    .type interrupt, %object
    .size interrupt, 4
intCycle = interrupt + 4
    .type intCycle, %object
    .size intCycle, 256
psxRegs_end = intCycle + 256

mem_rtab = psxRegs_end
    .type mem_rtab, %object
    .size mem_rtab, 4
mem_wtab = mem_rtab + 4
    .type mem_wtab, %object
    .size mem_wtab, 4
psxH_ptr = mem_wtab + 4
    .type psxH_ptr, %object
    .size psxH_ptr, 4
inv_code_start = psxH_ptr + 4
    .type inv_code_start, %object
    .size inv_code_start, 4
inv_code_end = inv_code_start + 4
    .type inv_code_end, %object
    .size inv_code_end, 4
branch_target = inv_code_end + 4
    .type branch_target, %object
    .size branch_target, 4
align0 = branch_target + 4 /* unused/alignment */
    .type align0, %object
    .size align0, 4
mini_ht = align0 + 4
    .type mini_ht, %object
    .size mini_ht, 256
restore_candidate = mini_ht + 256
    .type restore_candidate, %object
    .size restore_candidate, 512
dynarec_local_end = restore_candidate + 512
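
/* Everything above is part of the dynarec_local block: generated code and
 * the stubs below address it with fixed offsets from fp, e.g.
 * [fp, #cycle-dynarec_local].  The psxRegs part mirrors the layout of the
 * C-side psxRegisters struct, so the two must be kept in sync. */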

/* unused */
FCR0 = align0
    .type FCR0, %object
    .size FCR0, 4
FCR31 = align0
    .type FCR31, %object
    .size FCR31, 4

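/* load_var_adr reg, var: load the 32-bit address of 'var' into 'reg';
 * uses a movw/movt pair on ARMv7, a literal-pool load otherwise. */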
.macro load_var_adr reg var
.if HAVE_ARMV7
    movw \reg, #:lower16:\var
    movt \reg, #:upper16:\var
.else
    ldr \reg, =\var
.endif
.endm

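/* dyna_linker_main: common body of dyna_linker and dyna_linker_ds below.
 * r0 = virtual target address, r1 = address of the branch to patch.
 * It first searches jump_in for an already compiled block; on a hit the
 * calling branch is patched (via add_link) to jump there directly.  Failing
 * that it probes the hash table and the jump_dirty list, and if nothing is
 * found it falls through to the caller, which invokes the recompiler. */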
.macro dyna_linker_main
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    ldr r3, .jiptr
    /* get_page */
    lsr r2, r0, #12
    mov r6, #4096
    bic r2, r2, #0xe0000
    sub r6, r6, #1
    cmp r2, #0x1000
    ldr r7, [r1]
    biclt r2, #0x0e00
    and r6, r6, r2
    cmp r2, #2048
    add r12, r7, #2
    orrcs r2, r6, #2048
    ldr r5, [r3, r2, lsl #2]
    lsl r12, r12, #8
    add r6, r1, r12, asr #6
    mov r8, #0
    /* jump_in lookup */
1:
    movs r4, r5
    beq 2f
    ldr r3, [r5]
    ldr r5, [r4, #12]
    teq r3, r0
    bne 1b
    ldr r3, [r4, #4]
    ldr r4, [r4, #8]
    tst r3, r3
    bne 1b
    teq r4, r6
    moveq pc, r4 /* Stale i-cache */
    mov r8, r4
    b 1b /* jump_in may have dupes, continue search */
2:
    tst r8, r8
    beq 3f /* r0 not in jump_in */

    mov r5, r1
    mov r1, r6
    bl add_link
    sub r2, r8, r5
    and r1, r7, #0xff000000
    lsl r2, r2, #6
    sub r1, r1, #2
    add r1, r1, r2, lsr #8
    str r1, [r5]
    mov pc, r8
3:
    /* hash_table lookup */
    cmp r2, #2048
    ldr r3, .jdptr
    eor r4, r0, r0, lsl #16
    lslcc r2, r0, #9
    ldr r6, .htptr
    lsr r4, r4, #12
    lsrcc r2, r2, #21
    bic r4, r4, #15
    ldr r5, [r3, r2, lsl #2]
    ldr r7, [r6, r4]!
    teq r7, r0
    ldreq pc, [r6, #4]
    ldr r7, [r6, #8]
    teq r7, r0
    ldreq pc, [r6, #12]
    /* jump_dirty lookup */
6:
    movs r4, r5
    beq 8f
    ldr r3, [r5]
    ldr r5, [r4, #12]
    teq r3, r0
    bne 6b
7:
    ldr r1, [r4, #8]
    /* hash_table insert */
    ldr r2, [r6]
    ldr r3, [r6, #4]
    str r0, [r6]
    str r1, [r6, #4]
    str r2, [r6, #8]
    str r3, [r6, #12]
    mov pc, r1
8:
.endm

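/* dyna_linker is reached from a branch in generated code that has not been
 * linked yet.  When no compiled block exists for the target it calls
 * new_recompile_block() and retries; a non-zero return is treated as a
 * page fault and handed to exec_pagefault. */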
    .text
    .align 2
    .global dyna_linker
    .type dyna_linker, %function
dyna_linker:
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    mov r5, r1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker
    /* pagefault */
    mov r1, r0
    mov r2, #8
    .size dyna_linker, .-dyna_linker
    .global exec_pagefault
    .type exec_pagefault, %function
exec_pagefault:
    /* r0 = instruction pointer */
    /* r1 = fault address */
    /* r2 = cause */
    ldr r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
    mvn r6, #0xF000000F
    ldr r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
    bic r6, r6, #0x0F800000
    str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
    orr r3, r3, #2
    str r1, [fp, #reg_cop0+32-dynarec_local] /* BadVAddr */
    bic r4, r4, r6
    str r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
    and r5, r6, r1, lsr #9
    str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
    and r1, r1, r6, lsl #9
    str r1, [fp, #reg_cop0+40-dynarec_local] /* EntryHi */
    orr r4, r4, r5
    str r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
    mov r0, #0x80000000
    bl get_addr_ht
    mov pc, r0
    .size exec_pagefault, .-exec_pagefault

/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
    .global dyna_linker_ds
    .type dyna_linker_ds, %function
dyna_linker_ds:
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    bic r0, r0, #7
    mov r5, r1
    orr r0, r0, #1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker_ds
    /* pagefault */
    bic r1, r0, #7
    mov r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
    sub r0, r1, #4
    b exec_pagefault
    .size dyna_linker_ds, .-dyna_linker_ds
.jiptr:
    .word jump_in
.jdptr:
    .word jump_dirty
.htptr:
    .word hash_table

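/* jump_vaddr_rN: indirect-jump helpers, with the target virtual address in
 * rN.  Each variant moves the address to r0, seeds the hash in r2 and falls
 * into jump_vaddr, which probes the hash table and falls back to get_addr()
 * on a miss. */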
    .align 2
    .global jump_vaddr_r0
    .type jump_vaddr_r0, %function
jump_vaddr_r0:
    eor r2, r0, r0, lsl #16
    b jump_vaddr
    .size jump_vaddr_r0, .-jump_vaddr_r0
    .global jump_vaddr_r1
    .type jump_vaddr_r1, %function
jump_vaddr_r1:
    eor r2, r1, r1, lsl #16
    mov r0, r1
    b jump_vaddr
    .size jump_vaddr_r1, .-jump_vaddr_r1
    .global jump_vaddr_r2
    .type jump_vaddr_r2, %function
jump_vaddr_r2:
    mov r0, r2
    eor r2, r2, r2, lsl #16
    b jump_vaddr
    .size jump_vaddr_r2, .-jump_vaddr_r2
    .global jump_vaddr_r3
    .type jump_vaddr_r3, %function
jump_vaddr_r3:
    eor r2, r3, r3, lsl #16
    mov r0, r3
    b jump_vaddr
    .size jump_vaddr_r3, .-jump_vaddr_r3
    .global jump_vaddr_r4
    .type jump_vaddr_r4, %function
jump_vaddr_r4:
    eor r2, r4, r4, lsl #16
    mov r0, r4
    b jump_vaddr
    .size jump_vaddr_r4, .-jump_vaddr_r4
    .global jump_vaddr_r5
    .type jump_vaddr_r5, %function
jump_vaddr_r5:
    eor r2, r5, r5, lsl #16
    mov r0, r5
    b jump_vaddr
    .size jump_vaddr_r5, .-jump_vaddr_r5
    .global jump_vaddr_r6
    .type jump_vaddr_r6, %function
jump_vaddr_r6:
    eor r2, r6, r6, lsl #16
    mov r0, r6
    b jump_vaddr
    .size jump_vaddr_r6, .-jump_vaddr_r6
    .global jump_vaddr_r8
    .type jump_vaddr_r8, %function
jump_vaddr_r8:
    eor r2, r8, r8, lsl #16
    mov r0, r8
    b jump_vaddr
    .size jump_vaddr_r8, .-jump_vaddr_r8
    .global jump_vaddr_r9
    .type jump_vaddr_r9, %function
jump_vaddr_r9:
    eor r2, r9, r9, lsl #16
    mov r0, r9
    b jump_vaddr
    .size jump_vaddr_r9, .-jump_vaddr_r9
    .global jump_vaddr_r10
    .type jump_vaddr_r10, %function
jump_vaddr_r10:
    eor r2, r10, r10, lsl #16
    mov r0, r10
    b jump_vaddr
    .size jump_vaddr_r10, .-jump_vaddr_r10
    .global jump_vaddr_r12
    .type jump_vaddr_r12, %function
jump_vaddr_r12:
    eor r2, r12, r12, lsl #16
    mov r0, r12
    b jump_vaddr
    .size jump_vaddr_r12, .-jump_vaddr_r12
    .global jump_vaddr_r7
    .type jump_vaddr_r7, %function
jump_vaddr_r7:
    eor r2, r7, r7, lsl #16
    add r0, r7, #0
    .size jump_vaddr_r7, .-jump_vaddr_r7
    .global jump_vaddr
    .type jump_vaddr, %function
jump_vaddr:
    ldr r1, .htptr
    mvn r3, #15
    and r2, r3, r2, lsr #12
    ldr r2, [r1, r2]!
    teq r2, r0
    ldreq pc, [r1, #4]
    ldr r2, [r1, #8]
    teq r2, r0
    ldreq pc, [r1, #12]
    str r10, [fp, #cycle_count-dynarec_local]
    bl get_addr
    ldr r10, [fp, #cycle_count-dynarec_local]
    mov pc, r0
    .size jump_vaddr, .-jump_vaddr

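/* verify_code*: compare the code at r1 ("source") against the copy at r2
 * ("target"), r3 = length in bytes.  If the two still match, return to the
 * block; otherwise call get_addr() to obtain a valid copy of the block and
 * jump there.  verify_code_ds first saves r8 (the branch target) so it
 * survives the check. */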
    .align 2
    .global verify_code_ds
    .type verify_code_ds, %function
verify_code_ds:
    str r8, [fp, #branch_target-dynarec_local]
    .size verify_code_ds, .-verify_code_ds
    .global verify_code_vm
    .type verify_code_vm, %function
verify_code_vm:
    .global verify_code
    .type verify_code, %function
verify_code:
    /* r1 = source */
    /* r2 = target */
    /* r3 = length */
    tst r3, #4
    mov r4, #0
    add r3, r1, r3
    mov r5, #0
    ldrne r4, [r1], #4
    mov r12, #0
    ldrne r5, [r2], #4
    teq r1, r3
    beq .D3
.D2:
    ldr r7, [r1], #4
    eor r9, r4, r5
    ldr r8, [r2], #4
    orrs r9, r9, r12
    bne .D4
    ldr r4, [r1], #4
    eor r12, r7, r8
    ldr r5, [r2], #4
    cmp r1, r3
    bcc .D2
    teq r7, r8
.D3:
    teqeq r4, r5
.D4:
    ldr r8, [fp, #branch_target-dynarec_local]
    moveq pc, lr
.D5:
    bl get_addr
    mov pc, r0
    .size verify_code, .-verify_code
    .size verify_code_vm, .-verify_code_vm

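/* cc_interrupt: called from generated code when the cycle counter (r10,
 * relative to last_count) reaches next_interupt.  It updates psxRegs.cycle,
 * runs gen_interupt(), then either returns to the block, restarts from
 * pcaddr when an exception is pending, or pops back out of new_dyna_start
 * when 'stop' is set.  The restore_candidate bitmap of dirty blocks is also
 * serviced here (via clean_blocks). */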
    .align 2
    .global cc_interrupt
    .type cc_interrupt, %function
cc_interrupt:
    ldr r0, [fp, #last_count-dynarec_local]
    mov r1, #0
    mov r2, #0x1fc
    add r10, r0, r10
    str r1, [fp, #pending_exception-dynarec_local]
    and r2, r2, r10, lsr #17
    add r3, fp, #restore_candidate-dynarec_local
    str r10, [fp, #cycle-dynarec_local] /* PCSX cycles */
@@  str r10, [fp, #reg_cop0+36-dynarec_local] /* Count */
    ldr r4, [r2, r3]
    mov r10, lr
    tst r4, r4
    bne .E4
.E1:
    bl gen_interupt
    mov lr, r10
    ldr r10, [fp, #cycle-dynarec_local]
    ldr r0, [fp, #next_interupt-dynarec_local]
    ldr r1, [fp, #pending_exception-dynarec_local]
    ldr r2, [fp, #stop-dynarec_local]
    str r0, [fp, #last_count-dynarec_local]
    sub r10, r10, r0
    tst r2, r2
    ldmnefd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    tst r1, r1
    moveq pc, lr
.E2:
    ldr r0, [fp, #pcaddr-dynarec_local]
    bl get_addr_ht
    mov pc, r0
.E4:
    /* Move 'dirty' blocks to the 'clean' list */
    lsl r5, r2, #3
    str r1, [r2, r3]
.E5:
    lsrs r4, r4, #1
    mov r0, r5
    add r5, r5, #1
    blcs clean_blocks
    tst r5, #31
    bne .E5
    b .E1
    .size cc_interrupt, .-cc_interrupt

    .align 2
    .global do_interrupt
    .type do_interrupt, %function
do_interrupt:
    ldr r0, [fp, #pcaddr-dynarec_local]
    bl get_addr_ht
    add r10, r10, #2
    mov pc, r0
    .size do_interrupt, .-do_interrupt

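/* fp_exception / fp_exception_ds and jump_syscall below raise a guest
 * exception: r0 = EPC.  They update Status, store the Cause value (the _ds
 * variant sets the branch-delay bit) and EPC, then enter the exception
 * vector at 0x80000080 through get_addr_ht(). */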
    .align 2
    .global fp_exception
    .type fp_exception, %function
fp_exception:
    mov r2, #0x10000000
.E7:
    ldr r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
    orr r1, #2
    add r2, r2, #0x2c
    str r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
    str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size fp_exception, .-fp_exception
    .align 2
    .global fp_exception_ds
    .type fp_exception_ds, %function
fp_exception_ds:
    mov r2, #0x90000000 /* Set high bit if delay slot */
    b .E7
    .size fp_exception_ds, .-fp_exception_ds

    .align 2
    .global jump_syscall
    .type jump_syscall, %function
jump_syscall:
    ldr r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
    orr r1, #2
    mov r2, #0x20
    str r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
    str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size jump_syscall, .-jump_syscall
    .align 2

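/* jump_syscall_hle: SYSCALL handled on the C side.  The PCSX cycle counter
 * is synced (cycle = last_count + r10), psxException(0x20, 0) is called,
 * and execution resumes at whatever pcaddr the C code set, via pcsx_return. */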
    .align 2
    .global jump_syscall_hle
    .type jump_syscall_hle, %function
jump_syscall_hle:
    str r0, [fp, #pcaddr-dynarec_local] /* PC must be set to EPC for psxException */
    ldr r2, [fp, #last_count-dynarec_local]
    mov r1, #0 /* in delay slot */
    add r2, r2, r10
    mov r0, #0x20 /* cause */
    str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
    bl psxException

    /* note: psxException might make a recursive recompiler call from its
     * HLE code, so be ready for that */
pcsx_return:
    ldr r1, [fp, #next_interupt-dynarec_local]
    ldr r10, [fp, #cycle-dynarec_local]
    ldr r0, [fp, #pcaddr-dynarec_local]
    sub r10, r10, r1
    str r1, [fp, #last_count-dynarec_local]
    bl get_addr_ht
    mov pc, r0
    .size jump_syscall_hle, .-jump_syscall_hle

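/* jump_hlecall: r0 = PC to store in pcaddr, r1 = HLE BIOS handler to call.
 * jump_intcall below does the same but re-enters the interpreter (execI).
 * Both sync the cycle counter first and come back through pcsx_return. */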
    .align 2
    .global jump_hlecall
    .type jump_hlecall, %function
jump_hlecall:
    ldr r2, [fp, #last_count-dynarec_local]
    str r0, [fp, #pcaddr-dynarec_local]
    add r2, r2, r10
    adr lr, pcsx_return
    str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
    bx r1
    .size jump_hlecall, .-jump_hlecall

    .align 2
    .global jump_intcall
    .type jump_intcall, %function
jump_intcall:
    ldr r2, [fp, #last_count-dynarec_local]
    str r0, [fp, #pcaddr-dynarec_local]
    add r2, r2, r10
    adr lr, pcsx_return
    str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
    b execI
    .size jump_intcall, .-jump_intcall

    .align 2
    .global new_dyna_leave
    .type new_dyna_leave, %function
new_dyna_leave:
    ldr r0, [fp, #last_count-dynarec_local]
    add r12, fp, #28
    add r10, r0, r10
    str r10, [fp, #cycle-dynarec_local]
    ldmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    .size new_dyna_leave, .-new_dyna_leave

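/* invalidate_addr_rN: called from generated code when a store may have hit
 * translated code; the written address is in rN.  Caller-saved registers are
 * stashed at the base of dynarec_local, the address is moved to r0, and
 * invalidate_addr() is called unless the address lies in
 * [inv_code_start, inv_code_end], a range already known to contain no
 * translated code. */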
    .align 2
    .global invalidate_addr_r0
    .type invalidate_addr_r0, %function
invalidate_addr_r0:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    b invalidate_addr_call
    .size invalidate_addr_r0, .-invalidate_addr_r0
    .align 2
    .global invalidate_addr_r1
    .type invalidate_addr_r1, %function
invalidate_addr_r1:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r1
    b invalidate_addr_call
    .size invalidate_addr_r1, .-invalidate_addr_r1
    .align 2
    .global invalidate_addr_r2
    .type invalidate_addr_r2, %function
invalidate_addr_r2:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r2
    b invalidate_addr_call
    .size invalidate_addr_r2, .-invalidate_addr_r2
    .align 2
    .global invalidate_addr_r3
    .type invalidate_addr_r3, %function
invalidate_addr_r3:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r3
    b invalidate_addr_call
    .size invalidate_addr_r3, .-invalidate_addr_r3
    .align 2
    .global invalidate_addr_r4
    .type invalidate_addr_r4, %function
invalidate_addr_r4:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r4
    b invalidate_addr_call
    .size invalidate_addr_r4, .-invalidate_addr_r4
    .align 2
    .global invalidate_addr_r5
    .type invalidate_addr_r5, %function
invalidate_addr_r5:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r5
    b invalidate_addr_call
    .size invalidate_addr_r5, .-invalidate_addr_r5
    .align 2
    .global invalidate_addr_r6
    .type invalidate_addr_r6, %function
invalidate_addr_r6:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r6
    b invalidate_addr_call
    .size invalidate_addr_r6, .-invalidate_addr_r6
    .align 2
    .global invalidate_addr_r7
    .type invalidate_addr_r7, %function
invalidate_addr_r7:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r7
    b invalidate_addr_call
    .size invalidate_addr_r7, .-invalidate_addr_r7
    .align 2
    .global invalidate_addr_r8
    .type invalidate_addr_r8, %function
invalidate_addr_r8:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r8
    b invalidate_addr_call
    .size invalidate_addr_r8, .-invalidate_addr_r8
    .align 2
    .global invalidate_addr_r9
    .type invalidate_addr_r9, %function
invalidate_addr_r9:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r9
    b invalidate_addr_call
    .size invalidate_addr_r9, .-invalidate_addr_r9
    .align 2
    .global invalidate_addr_r10
    .type invalidate_addr_r10, %function
invalidate_addr_r10:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r10
    b invalidate_addr_call
    .size invalidate_addr_r10, .-invalidate_addr_r10
    .align 2
    .global invalidate_addr_r12
    .type invalidate_addr_r12, %function
invalidate_addr_r12:
    stmia fp, {r0, r1, r2, r3, r12, lr}
    mov r0, r12
    .size invalidate_addr_r12, .-invalidate_addr_r12
    .align 2
    .global invalidate_addr_call
    .type invalidate_addr_call, %function
invalidate_addr_call:
    ldr r12, [fp, #inv_code_start-dynarec_local]
    ldr lr, [fp, #inv_code_end-dynarec_local]
    cmp r0, r12
    cmpcs lr, r0
    blcc invalidate_addr
    ldmia fp, {r0, r1, r2, r3, r12, pc}
    .size invalidate_addr_call, .-invalidate_addr_call

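/* new_dyna_start: C entry point into translated code.  It saves the
 * callee-saved registers, points fp at dynarec_local and jumps to the block
 * for pcaddr with r10 = cycle - next_interupt, the cycle-count convention
 * used by the generated code.  Control returns through the ldmfd in
 * cc_interrupt (when 'stop' is set) or in new_dyna_leave. */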
    .align 2
    .global new_dyna_start
    .type new_dyna_start, %function
new_dyna_start:
    /* ip is stored to conform to EABI stack alignment */
    stmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
    load_var_adr fp, dynarec_local
    ldr r0, [fp, #pcaddr-dynarec_local]
    bl get_addr_ht
    ldr r1, [fp, #next_interupt-dynarec_local]
    ldr r10, [fp, #cycle-dynarec_local]
    str r1, [fp, #last_count-dynarec_local]
    sub r10, r10, r1
    mov pc, r0
    .size new_dyna_start, .-new_dyna_start

/* --------------------------------------- */

.align 2
.global jump_handler_read8
.global jump_handler_read16
.global jump_handler_read32
.global jump_handler_write8
.global jump_handler_write16
.global jump_handler_write32
.global jump_handler_write_h
.global jump_handle_swl
.global jump_handle_swr


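/* Memory access handlers.  mem_rtab/mem_wtab hold one entry per 4K page;
 * judging from the code below, an entry is a pointer/offset shifted right by
 * one with bit 31 acting as an "I/O handler" flag (the tables themselves are
 * filled in by the C-side memory mapping code).  'lsls #1' restores the
 * value and moves that flag into the carry: carry clear means a direct
 * load/store and immediate return, carry set means tail-calling the C
 * handler with the cycle counter synced first.  The 8- and 16-bit entry
 * points add an offset because the per-width handler entries live in
 * consecutive sections of the same table. */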
.macro pcsx_read_mem readop tab_shift
    /* r0 = address, r1 = handler_tab, r2 = cycles */
    lsl r3, r0, #20
    lsr r3, #(20+\tab_shift)
    ldr r12, [fp, #last_count-dynarec_local]
    ldr r1, [r1, r3, lsl #2]
    add r2, r2, r12
    lsls r1, #1
.if \tab_shift == 1
    lsl r3, #1
    \readop r0, [r1, r3]
.else
    \readop r0, [r1, r3, lsl #\tab_shift]
.endif
    movcc pc, lr
    str r2, [fp, #cycle-dynarec_local]
    bx r1
.endm

jump_handler_read8:
    add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_read_mem ldrccb, 0

jump_handler_read16:
    add r1, #0x1000/4*4 @ shift to r16 part
    pcsx_read_mem ldrcch, 1

jump_handler_read32:
    pcsx_read_mem ldrcc, 2


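/* Write path: same table encoding as the read path above.  A direct store
 * returns right away with r0 = cycle count unchanged; when a C handler is
 * called, the cycle counter is synced first and r0 is recomputed from
 * next_interupt on return, since the handler may have rescheduled events. */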
.macro pcsx_write_mem wrtop tab_shift
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
    lsl r12, r0, #20
    lsr r12, #(20+\tab_shift)
    ldr r3, [r3, r12, lsl #2]
    str r0, [fp, #address-dynarec_local] @ some handlers still need it..
    lsls r3, #1
    mov r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
    lsl r12, #1
    \wrtop r1, [r3, r12]
.else
    \wrtop r1, [r3, r12, lsl #\tab_shift]
.endif
    movcc pc, lr
    ldr r12, [fp, #last_count-dynarec_local]
    mov r0, r1
    add r2, r2, r12
    push {r2, lr}
    str r2, [fp, #cycle-dynarec_local]
    blx r3

    ldr r0, [fp, #next_interupt-dynarec_local]
    pop {r2, r3}
    str r0, [fp, #last_count-dynarec_local]
    sub r0, r2, r0
    bx r3
.endm

jump_handler_write8:
    add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_write_mem strccb, 0

jump_handler_write16:
    add r3, #0x1000/4*4 @ shift to r16 part
    pcsx_write_mem strcch, 1

jump_handler_write32:
    pcsx_write_mem strcc, 2

jump_handler_write_h:
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler */
    ldr r12, [fp, #last_count-dynarec_local]
    str r0, [fp, #address-dynarec_local] @ some handlers still need it..
    add r2, r2, r12
    mov r0, r1
    push {r2, lr}
    str r2, [fp, #cycle-dynarec_local]
    blx r3

    ldr r0, [fp, #next_interupt-dynarec_local]
    pop {r2, r3}
    str r0, [fp, #last_count-dynarec_local]
    sub r0, r2, r0
    bx r3

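/* jump_handle_swl / jump_handle_swr implement the unaligned SWL/SWR stores:
 * depending on address & 3 they write between one and four bytes of the
 * value around the word boundary, using mem_wtab to find the host address.
 * Stores that hit an I/O page (handler entries) are currently ignored, see
 * the 'TODO?' branches. */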
jump_handle_swl:
    /* r0 = address, r1 = data, r2 = cycles */
    ldr r3, [fp, #mem_wtab-dynarec_local]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    mov r0, r2
    tst r3, #2
    beq 101f
    tst r3, #1
    beq 2f
3:
    str r1, [r3, #-3]
    bx lr
2:
    lsr r2, r1, #8
    lsr r1, #24
    strh r2, [r3, #-2]
    strb r1, [r3]
    bx lr
101:
    tst r3, #1
    lsrne r1, #16 @ 1
    lsreq r12, r1, #24 @ 0
    strneh r1, [r3, #-1]
    streqb r12, [r3]
    bx lr
4:
    mov r0, r2
@ b abort
    bx lr @ TODO?


jump_handle_swr:
    /* r0 = address, r1 = data, r2 = cycles */
    ldr r3, [fp, #mem_wtab-dynarec_local]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    and r12, r3, #3
    mov r0, r2
    cmp r12, #2
    strgtb r1, [r3] @ 3
    streqh r1, [r3] @ 2
    cmp r12, #1
    strlt r1, [r3] @ 0
    bxne lr
    lsr r2, r1, #8 @ 1
    strb r1, [r3]
    strh r2, [r3, #1]
    bx lr
4:
    mov r0, r2
@ b abort
    bx lr @ TODO?


@ vim:filetype=armasm