/* libpcsxcore/new_dynarec/linkage_arm.S — "asm: use a macro for functions" */
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * linkage_arm.s for PCSX *
3 * Copyright (C) 2009-2011 Ari64 *
 4 * Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas                       *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
20 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
21
22#include "arm_features.h"
23
24
/* Symbols shared with the C side of the dynarec.  All of them live inside
 * the single 'dynarec_local' block defined below so generated code can
 * reach every field with a small offset from fp. */
	.global	dynarec_local
	.global	reg
	.global	hi
	.global	lo
	.global	reg_cop0
	.global	reg_cop2d
	.global	reg_cop2c
	.global	FCR0
	.global	FCR31
	.global	next_interupt
	.global	cycle_count
	.global	last_count
	.global	pending_exception
	.global	pcaddr
	.global	stop
	.global	invc_ptr
	.global	address
	.global	branch_target
	.global	PC
	.global	mini_ht
	.global	restore_candidate
	/* psx */
	.global	psxRegs
	.global	mem_rtab
	.global	mem_wtab
	.global	psxH_ptr
	.global	zeromem_ptr
	.global	inv_code_start
	.global	inv_code_end
	.global	rcnts
55
	.bss
	.align	4
/* dynarec_local: one contiguous state block.  While translated code runs,
 * fp points at its start (set in new_dyna_start), so every field below is
 * addressed as [fp, #field-dynarec_local]. */
	.type	dynarec_local, %object
	.size	dynarec_local, dynarec_local_end-dynarec_local
dynarec_local:
	.space	dynarec_local_end-dynarec_local
/* bytes 0..63 are scratch: invalidate_addr_* stashes r0-r3,r12,lr at fp */
next_interupt = dynarec_local + 64
	.type	next_interupt, %object
	.size	next_interupt, 4
cycle_count = next_interupt + 4
	.type	cycle_count, %object
	.size	cycle_count, 4
last_count = cycle_count + 4
	.type	last_count, %object
	.size	last_count, 4
pending_exception = last_count + 4
	.type	pending_exception, %object
	.size	pending_exception, 4
stop = pending_exception + 4
	.type	stop, %object
	.size	stop, 4
invc_ptr = stop + 4
	.type	invc_ptr, %object
	.size	invc_ptr, 4
address = invc_ptr + 4
	.type	address, %object
	.size	address, 4
psxRegs = address + 4

/* psxRegs — NOTE(review): this layout must mirror the C psxRegisters
 * struct; confirm against psxcommon headers before reordering anything. */
	.type	psxRegs, %object
	.size	psxRegs, psxRegs_end-psxRegs
reg = psxRegs			/* 32 GPRs * 4 bytes */
	.type	reg, %object
	.size	reg, 128
lo = reg + 128
	.type	lo, %object
	.size	lo, 4
hi = lo + 4
	.type	hi, %object
	.size	hi, 4
reg_cop0 = hi + 4		/* 32 COP0 regs * 4 bytes */
	.type	reg_cop0, %object
	.size	reg_cop0, 128
reg_cop2d = reg_cop0 + 128	/* GTE data regs */
	.type	reg_cop2d, %object
	.size	reg_cop2d, 128
reg_cop2c = reg_cop2d + 128	/* GTE control regs */
	.type	reg_cop2c, %object
	.size	reg_cop2c, 128
PC = reg_cop2c + 128
pcaddr = PC			/* alias: dynarec resume address */
	.type	PC, %object
	.size	PC, 4
code = PC + 4
	.type	code, %object
	.size	code, 4
cycle = code + 4		/* absolute PSX cycle counter */
	.type	cycle, %object
	.size	cycle, 4
interrupt = cycle + 4
	.type	interrupt, %object
	.size	interrupt, 4
intCycle = interrupt + 4
	.type	intCycle, %object
	.size	intCycle, 256
psxRegs_end = intCycle + 256

rcnts = psxRegs_end		/* root counter state, 7*4*4 bytes — layout from C side, keep in sync */
	.type	rcnts, %object
	.size	rcnts, 7*4*4
rcnts_end = rcnts + 7*4*4

mem_rtab = rcnts_end		/* read handler table pointer */
	.type	mem_rtab, %object
	.size	mem_rtab, 4
mem_wtab = mem_rtab + 4		/* write handler table pointer */
	.type	mem_wtab, %object
	.size	mem_wtab, 4
psxH_ptr = mem_wtab + 4
	.type	psxH_ptr, %object
	.size	psxH_ptr, 4
zeromem_ptr = psxH_ptr + 4
	.type	zeromem_ptr, %object
	.size	zeromem_ptr, 4
inv_code_start = zeromem_ptr + 4	/* cached already-invalidated range, see invalidate_addr_call */
	.type	inv_code_start, %object
	.size	inv_code_start, 4
inv_code_end = inv_code_start + 4
	.type	inv_code_end, %object
	.size	inv_code_end, 4
branch_target = inv_code_end + 4
	.type	branch_target, %object
	.size	branch_target, 4
align0 = branch_target + 4 /* unused/alignment */
	.type	align0, %object
	.size	align0, 16
mini_ht = align0 + 16
	.type	mini_ht, %object
	.size	mini_ht, 256
restore_candidate = mini_ht + 256	/* bitmap of dirty blocks to re-clean (see cc_interrupt) */
	.type	restore_candidate, %object
	.size	restore_candidate, 512
dynarec_local_end = restore_candidate + 512

/* unused (FPU leftovers from the N64 origin); both alias the padding area */
FCR0 = align0
	.type	FCR0, %object
	.size	FCR0, 4
FCR31 = align0
	.type	FCR31, %object
	.size	FCR31, 4
168
#ifndef HAVE_ARMV5
/* ARMv4 has no 'blx <reg>' instruction; this macro shadows the mnemonic
 * and synthesizes it.  'mov lr, pc' reads pc as '.'+8, so lr ends up
 * pointing at the instruction right after the bx. */
.macro blx rd
	mov	lr, pc
	bx	\rd
.endm
#endif
175
/* Load the address of symbol \var into \reg.
 * ARMv7 non-PIC builds use a movw/movt pair (no literal-pool load);
 * everything else falls back to an ldr from the literal pool. */
.macro load_varadr reg var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
	movw	\reg, #:lower16:\var
	movt	\reg, #:upper16:\var
#else
	ldr	\reg, =\var
#endif
.endm
184
/* \reg = 16-bit constant \imm.
 * Pre-v7 fallback builds it from the two 8-bit halves, so \imm must be a
 * known constant that fits in 16 bits. */
.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #\imm
#else
	mov	\reg, #(\imm & 0x00ff)
	orr	\reg, #(\imm & 0xff00)
#endif
.endm
193
/* \reg = 24-bit constant \imm.
 * Pre-v7 fallback assembles it from three 8-bit chunks. */
.macro mov_24 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #(\imm & 0xffff)
	movt	\reg, #(\imm >> 16)
#else
	mov	\reg, #(\imm & 0x0000ff)
	orr	\reg, #(\imm & 0x00ff00)
	orr	\reg, #(\imm & 0xff0000)
#endif
.endm
204
/* Shared body of dyna_linker / dyna_linker_ds.
 * in:  r0 = virtual target address, r1 = address of the ARM branch to patch
 * If a compiled block for r0 exists, patches the branch (or uses the hash
 * table / jump_dirty paths) and jumps to it — does not return.  Otherwise
 * execution falls out of the macro so the caller can invoke the recompiler.
 * regs: r2 = page index, r5 = list cursor, r7 = original branch insn,
 *       r6 = target currently encoded in that insn, r8 = matched entry
 *       point (0 = none yet). */
.macro dyna_linker_main
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	ldr	r3, .jiptr
	/* get_page */
	lsr	r2, r0, #12
	mov	r6, #4096
	bic	r2, r2, #0xe0000	@ strip segment bits from the page number
	sub	r6, r6, #1		@ r6 = 0xfff
	cmp	r2, #0x1000
	ldr	r7, [r1]		@ r7 = the branch instruction being patched
	biclt	r2, #0x0e00
	and	r6, r6, r2
	cmp	r2, #2048
	add	r12, r7, #2
	orrcs	r2, r6, #2048		@ fold high pages into the 2048+ "vpage" range
	ldr	r5, [r3, r2, lsl #2]	@ r5 = head of jump_in[page]
	lsl	r12, r12, #8
	add	r6, r1, r12, asr #6	@ r6 = address the branch currently targets
					@ (sign-extended imm24*4, pc+8 relative)
	mov	r8, #0
	/* jump_in lookup */
1:
	movs	r4, r5
	beq	2f
	ldr	r3, [r5]		@ entry vaddr (offset 0)
	ldr	r5, [r4, #12]		@ advance to next entry
	teq	r3, r0
	bne	1b
	ldr	r3, [r4, #4]		@ must be 0 to accept — TODO confirm field meaning
	ldr	r4, [r4, #8]		@ compiled code address
	tst	r3, r3
	bne	1b
	teq	r4, r6
	moveq	pc, r4 /* Stale i-cache */
	mov	r8, r4
	b	1b /* jump_in may have dupes, continue search */
2:
	tst	r8, r8
	beq	3f /* r0 not in jump_in */

	mov	r5, r1
	mov	r1, r6
	bl	add_link
	sub	r2, r8, r5		@ displacement from branch to found block
	and	r1, r7, #0xff000000	@ keep cond+opcode bits of the old insn
	lsl	r2, r2, #6
	sub	r1, r1, #2
	add	r1, r1, r2, lsr #8	@ insert new imm24 = (disp-8)>>2
	str	r1, [r5]		@ patch the branch in place
	mov	pc, r8			@ and take it
3:
	/* hash_table lookup */
	cmp	r2, #2048
	ldr	r3, .jdptr
	eor	r4, r0, r0, lsl #16	@ hash seed
	lslcc	r2, r0, #9
	ldr	r6, .htptr
	lsr	r4, r4, #12
	lsrcc	r2, r2, #21
	bic	r4, r4, #15		@ bucket offset (16-byte, 2-way buckets)
	ldr	r5, [r3, r2, lsl #2]	@ r5 = head of jump_dirty[page]
	ldr	r7, [r6, r4]!
	teq	r7, r0
	ldreq	pc, [r6, #4]		@ hash hit, way 0
	ldr	r7, [r6, #8]
	teq	r7, r0
	ldreq	pc, [r6, #12]		@ hash hit, way 1
	/* jump_dirty lookup */
6:
	movs	r4, r5
	beq	8f
	ldr	r3, [r5]
	ldr	r5, [r4, #12]
	teq	r3, r0
	bne	6b
7:
	ldr	r1, [r4, #8]		@ dirty block's code address
	/* hash_table insert */
	ldr	r2, [r6]		@ old way 0 entry moves to way 1
	ldr	r3, [r6, #4]
	str	r0, [r6]
	str	r1, [r6, #4]
	str	r2, [r6, #8]
	str	r3, [r6, #12]
	mov	pc, r1
8:
.endm
292
	.text
	.align	2

/* Resolve a jump from compiled code to virtual address r0, patching the
 * branch at r1.  If no block exists yet, compile one and retry; if the
 * recompiler fails, fall through into exec_pagefault. */
FUNCTION(dyna_linker):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main		@ returns here only when no block was found

	mov	r4, r0			@ preserve args across the C call
	mov	r5, r1
	bl	new_recompile_block
	tst	r0, r0			@ 0 = compiled successfully
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker		@ link again now that the block exists
	/* pagefault */
	mov	r1, r0			@ fault address = target
	mov	r2, #8			@ Cause: ExcCode 2 (TLBL) << 2
	.size	dyna_linker, .-dyna_linker
	@ falls through into exec_pagefault
312
/* Enter the guest exception handler for a memory fault.
 * in: r0 = faulting instruction PC, r1 = fault address, r2 = Cause value
 * Fills in COP0 EPC/BadVAddr/Status/Cause/EntryHi/Context, then jumps to
 * the compiled block at the exception vector 0x80000000. */
FUNCTION(exec_pagefault):
	/* r0 = instruction pointer */
	/* r1 = fault address */
	/* r2 = cause */
	ldr	r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
	mvn	r6, #0xF000000F
	ldr	r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
	bic	r6, r6, #0x0F800000	@ r6 = mask for the VPN field
	str	r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
	orr	r3, r3, #2		@ Status |= 2 (exception state)
	str	r1, [fp, #reg_cop0+32-dynarec_local] /* BadVAddr */
	bic	r4, r4, r6
	str	r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
	and	r5, r6, r1, lsr #9	@ Context.BadVPN2 from the fault address
	str	r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
	and	r1, r1, r6, lsl #9
	str	r1, [fp, #reg_cop0+40-dynarec_local] /* EntryHi */
	orr	r4, r4, r5
	str	r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
	mov	r0, #0x80000000		@ exception vector
	bl	get_addr_ht
	mov	pc, r0
	.size	exec_pagefault, .-exec_pagefault
336
/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0
	bic	r0, r0, #7
	mov	r5, r1
	orr	r0, r0, #1		@ bit 0 marks "delay slot" for the recompiler
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker_ds
	/* pagefault */
	bic	r1, r0, #7
	mov	r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
	sub	r0, r1, #4		@ EPC = the branch, not the slot itself
	b	exec_pagefault
	.size	dyna_linker_ds, .-dyna_linker_ds

/* literal pool: addresses of the C-side lookup tables */
.jiptr:
	.word	jump_in
.jdptr:
	.word	jump_dirty
.htptr:
	.word	hash_table
365
	.align	2

/* jump_vaddr_rN: jump to the virtual PSX address held in rN.
 * Each stub moves the address into r0, precomputes the hash seed
 * (vaddr ^ (vaddr << 16)) in r2 and joins jump_vaddr below. */
FUNCTION(jump_vaddr_r0):
	eor	r2, r0, r0, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
	eor	r2, r1, r1, lsl #16
	mov	r0, r1
	b	jump_vaddr
	.size	jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
	mov	r0, r2			@ move first: the eor clobbers r2
	eor	r2, r2, r2, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
	eor	r2, r3, r3, lsl #16
	mov	r0, r3
	b	jump_vaddr
	.size	jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
	eor	r2, r4, r4, lsl #16
	mov	r0, r4
	b	jump_vaddr
	.size	jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
	eor	r2, r5, r5, lsl #16
	mov	r0, r5
	b	jump_vaddr
	.size	jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
	eor	r2, r6, r6, lsl #16
	mov	r0, r6
	b	jump_vaddr
	.size	jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
	eor	r2, r8, r8, lsl #16
	mov	r0, r8
	b	jump_vaddr
	.size	jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
	eor	r2, r9, r9, lsl #16
	mov	r0, r9
	b	jump_vaddr
	.size	jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
	eor	r2, r10, r10, lsl #16
	mov	r0, r10
	b	jump_vaddr
	.size	jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
	eor	r2, r12, r12, lsl #16
	mov	r0, r12
	b	jump_vaddr
	.size	jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
	eor	r2, r7, r7, lsl #16
	add	r0, r7, #0
	.size	jump_vaddr_r7, .-jump_vaddr_r7	@ falls through into jump_vaddr
/* in: r0 = target vaddr, r2 = hash seed.  Probe the 2-way hash table; on
 * miss call get_addr (C), preserving r10 (cycle count) around the call. */
FUNCTION(jump_vaddr):
	ldr	r1, .htptr
	mvn	r3, #15
	and	r2, r3, r2, lsr #12	@ bucket offset = (seed >> 12) & ~15
	ldr	r2, [r1, r2]!
	teq	r2, r0
	ldreq	pc, [r1, #4]		@ way 0 hit
	ldr	r2, [r1, #8]
	teq	r2, r0
	ldreq	pc, [r1, #12]		@ way 1 hit
	str	r10, [fp, #cycle_count-dynarec_local]
	bl	get_addr
	ldr	r10, [fp, #cycle_count-dynarec_local]
	mov	pc, r0
	.size	jump_vaddr, .-jump_vaddr
441
	.align	2

/* Compare the current PSX code (r1) against the copy saved at compile
 * time (r2) over r3 bytes, two words per iteration with one leading word
 * when the length is not a multiple of 8.  Returns to lr if identical;
 * otherwise tail-calls get_addr to fetch/recompile a fresh block.
 * verify_code_ds additionally saves r8 (branch target) first. */
FUNCTION(verify_code_ds):
	str	r8, [fp, #branch_target-dynarec_local]
FUNCTION(verify_code_vm):
FUNCTION(verify_code):
	/* r1 = source */
	/* r2 = target */
	/* r3 = length */
	tst	r3, #4
	mov	r4, #0
	add	r3, r1, r3		@ r3 = end of source
	mov	r5, #0
	ldrne	r4, [r1], #4		@ odd word first, primes the pipeline
	mov	r12, #0
	ldrne	r5, [r2], #4
	teq	r1, r3
	beq	.D3
.D2:
	ldr	r7, [r1], #4
	eor	r9, r4, r5		@ compare the previous pair...
	ldr	r8, [r2], #4
	orrs	r9, r9, r12		@ ...together with the pair before it
	bne	.D4
	ldr	r4, [r1], #4
	eor	r12, r7, r8
	ldr	r5, [r2], #4
	cmp	r1, r3
	bcc	.D2
	teq	r7, r8			@ flush the last pending pairs
.D3:
	teqeq	r4, r5
.D4:
	ldr	r8, [fp, #branch_target-dynarec_local]
	moveq	pc, lr			@ all equal: code unchanged, keep running
.D5:
	bl	get_addr		@ modified: find a valid block for r0
	mov	pc, r0
	.size	verify_code, .-verify_code
	.size	verify_code_vm, .-verify_code_vm
482
	.align	2
/* Called by generated code when the cycle counter (r10) expires.
 * Writes back psxRegs.cycle, re-cleans any restore_candidate blocks,
 * calls gen_interupt(), then either returns to compiled code (lr),
 * exits the dynarec entirely (stop set — pops new_dyna_start's frame),
 * or jumps to the block at pcaddr (pending_exception set). */
FUNCTION(cc_interrupt):
	ldr	r0, [fp, #last_count-dynarec_local]
	mov	r1, #0
	mov	r2, #0x1fc
	add	r10, r0, r10		@ r10 = absolute cycle count
	str	r1, [fp, #pending_exception-dynarec_local]
	and	r2, r2, r10, lsr #17	@ rotate through restore_candidate words
	add	r3, fp, #restore_candidate-dynarec_local
	str	r10, [fp, #cycle-dynarec_local] /* PCSX cycles */
@@	str	r10, [fp, #reg_cop0+36-dynarec_local] /* Count */
	ldr	r4, [r2, r3]
	mov	r10, lr			@ keep lr across the C calls
	tst	r4, r4
	bne	.E4			@ dirty blocks pending: clean them first
.E1:
	bl	gen_interupt
	mov	lr, r10
	ldr	r10, [fp, #cycle-dynarec_local]
	ldr	r0, [fp, #next_interupt-dynarec_local]
	ldr	r1, [fp, #pending_exception-dynarec_local]
	ldr	r2, [fp, #stop-dynarec_local]
	str	r0, [fp, #last_count-dynarec_local]
	sub	r10, r10, r0		@ r10 back to event-relative form
	tst	r2, r2
	ldmnefd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}	@ stop: unwind to C caller
	tst	r1, r1
	moveq	pc, lr			@ nothing pending: resume compiled code
.E2:
	ldr	r0, [fp, #pcaddr-dynarec_local]
	bl	get_addr_ht
	mov	pc, r0
.E4:
	/* Move 'dirty' blocks to the 'clean' list */
	lsl	r5, r2, #3		@ 32 block indices per bitmap word
	str	r1, [r2, r3]		@ clear the bitmap word
.E5:
	lsrs	r4, r4, #1
	mov	r0, r5
	add	r5, r5, #1
	blcs	clean_blocks		@ bit was set: clean this block index
	tst	r5, #31
	bne	.E5
	b	.E1
	.size	cc_interrupt, .-cc_interrupt
528
	.align	2
/* Re-enter compiled code at pcaddr after an interrupt; charges 2 extra
 * cycles (presumably exception overhead — TODO confirm). */
FUNCTION(do_interrupt):
	ldr	r0, [fp, #pcaddr-dynarec_local]
	bl	get_addr_ht
	add	r10, r10, #2
	mov	pc, r0
	.size	do_interrupt, .-do_interrupt
536
	.align	2
/* Raise a coprocessor-unusable exception at EPC = r0 and jump to the
 * handler at 0x80000080.  Cause = 0x1000002c (ExcCode 11, CE=1);
 * fp_exception_ds is the delay-slot variant (high Cause bit set). */
FUNCTION(fp_exception):
	mov	r2, #0x10000000
.E7:
	ldr	r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
	orr	r1, #2			@ Status |= 2 (exception state)
	add	r2, r2, #0x2c
	str	r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
	str	r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
	add	r0, r3, #0x80		@ r0 = vector 0x80000080
	bl	get_addr_ht
	mov	pc, r0
	.size	fp_exception, .-fp_exception
	.align	2
FUNCTION(fp_exception_ds):
	mov	r2, #0x90000000 /* Set high bit if delay slot */
	b	.E7
	.size	fp_exception_ds, .-fp_exception_ds
557
	.align	2
/* Raise the Syscall exception (Cause 0x20 = ExcCode 8) at EPC = r0 and
 * jump to the compiled handler at vector 0x80000080. */
FUNCTION(jump_syscall):
	ldr	r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
	orr	r1, #2			@ Status |= 2 (exception state)
	mov	r2, #0x20
	str	r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
	str	r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
	add	r0, r3, #0x80		@ r0 = vector 0x80000080
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall, .-jump_syscall
	.align	2
572
	.align	2
/* HLE syscall: deliver exception 0x20 (Syscall) through the C
 * psxException, then re-enter the dynarec at the handler.
 * in: r0 = EPC, r10 = event-relative cycle count */
FUNCTION(jump_syscall_hle):
	str	r0, [fp, #pcaddr-dynarec_local] /* PC must be set to EPC for psxException */
	ldr	r2, [fp, #last_count-dynarec_local]
	mov	r1, #0 /* in delay slot */
	add	r2, r2, r10
	mov	r0, #0x20 /* cause */
	str	r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
	bl	psxException

	/* note: psxException might do a recursive recompiler call from its HLE code,
	 * so be ready for this */
pcsx_return:
	/* common re-entry path: reload cycle bookkeeping and jump to the
	 * block at pcaddr (also targeted via lr by jump_hlecall/jump_intcall) */
	ldr	r1, [fp, #next_interupt-dynarec_local]
	ldr	r10, [fp, #cycle-dynarec_local]
	ldr	r0, [fp, #pcaddr-dynarec_local]
	sub	r10, r10, r1
	str	r1, [fp, #last_count-dynarec_local]
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall_hle, .-jump_syscall_hle
594
	.align	2
/* Invoke an HLE BIOS handler.
 * in: r0 = guest PC (saved to pcaddr), r1 = handler, r10 = cycles.
 * lr is pointed at pcsx_return so the handler resumes the dynarec. */
FUNCTION(jump_hlecall):
	ldr	r2, [fp, #last_count-dynarec_local]
	str	r0, [fp, #pcaddr-dynarec_local]
	add	r2, r2, r10
	adr	lr, pcsx_return
	str	r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
	bx	r1
	.size	jump_hlecall, .-jump_hlecall
604
605 .align 2
606FUNCTION(jump_intcall):
607 ldr r2, [fp, #last_count-dynarec_local]
608 str r0, [fp, #pcaddr-dynarec_local]
609 add r2, r2, r10
610 adr lr, pcsx_return
611 str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
612 b execI
613 .size jump_hlecall, .-jump_hlecall
614
	.align	2
/* Leave the dynarec: write back the absolute cycle count and pop the
 * frame pushed by new_dyna_start, returning to the C caller. */
FUNCTION(new_dyna_leave):
	ldr	r0, [fp, #last_count-dynarec_local]
	add	r12, fp, #28		@ NOTE(review): ip is reloaded by the ldm below,
					@ so this value looks dead — confirm intent
	add	r10, r0, r10
	str	r10, [fp, #cycle-dynarec_local]
	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
	.size	new_dyna_leave, .-new_dyna_leave
623
	.align	2
/* invalidate_addr_rN: entered from generated code when a store may have
 * hit translated guest code; the effective address is in rN.  The
 * caller-volatile registers are stashed in the scratch words at fp,
 * the address is moved to r0, and control joins invalidate_addr_call. */
FUNCTION(invalidate_addr_r0):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	b	invalidate_addr_call
	.size	invalidate_addr_r0, .-invalidate_addr_r0
	.align	2
FUNCTION(invalidate_addr_r1):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r1
	b	invalidate_addr_call
	.size	invalidate_addr_r1, .-invalidate_addr_r1
	.align	2
FUNCTION(invalidate_addr_r2):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r2
	b	invalidate_addr_call
	.size	invalidate_addr_r2, .-invalidate_addr_r2
	.align	2
FUNCTION(invalidate_addr_r3):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r3
	b	invalidate_addr_call
	.size	invalidate_addr_r3, .-invalidate_addr_r3
	.align	2
FUNCTION(invalidate_addr_r4):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r4
	b	invalidate_addr_call
	.size	invalidate_addr_r4, .-invalidate_addr_r4
	.align	2
FUNCTION(invalidate_addr_r5):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r5
	b	invalidate_addr_call
	.size	invalidate_addr_r5, .-invalidate_addr_r5
	.align	2
FUNCTION(invalidate_addr_r6):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r6
	b	invalidate_addr_call
	.size	invalidate_addr_r6, .-invalidate_addr_r6
	.align	2
FUNCTION(invalidate_addr_r7):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r7
	b	invalidate_addr_call
	.size	invalidate_addr_r7, .-invalidate_addr_r7
	.align	2
FUNCTION(invalidate_addr_r8):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r8
	b	invalidate_addr_call
	.size	invalidate_addr_r8, .-invalidate_addr_r8
	.align	2
FUNCTION(invalidate_addr_r9):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r9
	b	invalidate_addr_call
	.size	invalidate_addr_r9, .-invalidate_addr_r9
	.align	2
FUNCTION(invalidate_addr_r10):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r10
	b	invalidate_addr_call
	.size	invalidate_addr_r10, .-invalidate_addr_r10
	.align	2
FUNCTION(invalidate_addr_r12):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r12
	.size	invalidate_addr_r12, .-invalidate_addr_r12	@ falls through
	.align	2
/* r0 = address.  Skip the C call when it lies inside the cached
 * already-invalidated range [inv_code_start, inv_code_end]; then
 * restore the stashed registers and return to generated code. */
FUNCTION(invalidate_addr_call):
	ldr	r12, [fp, #inv_code_start-dynarec_local]
	ldr	lr, [fp, #inv_code_end-dynarec_local]
	cmp	r0, r12
	cmpcs	lr, r0
	blcc	invalidate_addr		@ outside the cached range: do the work
	ldmia	fp, {r0, r1, r2, r3, r12, pc}
	.size	invalidate_addr_call, .-invalidate_addr_call
703
	.align	2
/* C entry point into the dynarec.  Saves the callee-saved registers
 * (plus ip, solely to keep the frame 8-byte aligned per EABI), points fp
 * at dynarec_local, loads r10 with the cycle count relative to
 * next_interupt, and jumps into the compiled block for pcaddr.
 * Exits via new_dyna_leave / cc_interrupt's stop path, which pop the
 * same frame. */
FUNCTION(new_dyna_start):
	/* ip is stored to conform EABI alignment */
	stmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
	load_varadr	fp, dynarec_local
	ldr	r0, [fp, #pcaddr-dynarec_local]
	bl	get_addr_ht
	ldr	r1, [fp, #next_interupt-dynarec_local]
	ldr	r10, [fp, #cycle-dynarec_local]
	str	r1, [fp, #last_count-dynarec_local]
	sub	r10, r10, r1		@ r10 = cycle - next_interupt
	mov	pc, r0
	.size	new_dyna_start, .-new_dyna_start
717
718/* --------------------------------------- */
719
.align 2

/* Generic memory-read trampoline used by jump_handler_read* below.
 * in: r0 = address, r1 = handler table base, r2 = event-relative cycles
 * Table entries are stored shifted right by one; after 'lsls #1' the
 * carry flag distinguishes a direct memory pointer (carry clear: do the
 * conditional load and return with the value in r0) from an I/O handler
 * (carry set: write back the cycle count and tail-call it). */
.macro pcsx_read_mem readop tab_shift
	/* r0 = address, r1 = handler_tab, r2 = cycles */
	lsl	r3, r0, #20
	lsr	r3, #(20+\tab_shift)	@ r3 = scaled offset within the 4k page
	ldr	r12, [fp, #last_count-dynarec_local]
	ldr	r1, [r1, r3, lsl #2]
	add	r2, r2, r12		@ r2 = absolute cycle count
	lsls	r1, #1			@ unpack entry; carry = "is handler" flag
.if \tab_shift == 1
	lsl	r3, #1
	\readop	r0, [r1, r3]
.else
	\readop	r0, [r1, r3, lsl #\tab_shift]
.endif
	movcc	pc, lr			@ direct memory: done
	str	r2, [fp, #cycle-dynarec_local]
	bx	r1			@ I/O handler(r0 = address)
.endm
740
/* 8-bit read: skip past the 32-bit and 16-bit sub-tables first. */
FUNCTION(jump_handler_read8):
	add	r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_read_mem	ldrccb, 0
744
/* 16-bit read: skip past the 32-bit sub-table first. */
FUNCTION(jump_handler_read16):
	add	r1, #0x1000/4*4 @ shift to r16 part
	pcsx_read_mem	ldrcch, 1
748
/* 32-bit read: table base is already at the 32-bit sub-table. */
FUNCTION(jump_handler_read32):
	pcsx_read_mem	ldrcc, 2
751
752
/* Generic memory-write trampoline used by jump_handler_write* below.
 * in: r0 = address, r1 = data, r2 = cycles, r3 = handler table base
 * Direct stores return immediately with r0 = cycles unchanged; handler
 * pages call into C and return r0 = cycles rebased against the
 * (possibly updated) next_interupt.  Entry encoding as in pcsx_read_mem:
 * carry after 'lsls #1' selects handler vs direct memory. */
.macro pcsx_write_mem wrtop tab_shift
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
	lsl	r12,r0, #20
	lsr	r12, #(20+\tab_shift)	@ r12 = scaled offset within the page
	ldr	r3, [r3, r12, lsl #2]
	str	r0, [fp, #address-dynarec_local] @ some handlers still need it..
	lsls	r3, #1			@ carry set = I/O handler
	mov	r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
	lsl	r12, #1
	\wrtop	r1, [r3, r12]
.else
	\wrtop	r1, [r3, r12, lsl #\tab_shift]
.endif
	movcc	pc, lr			@ direct memory: done
	ldr	r12, [fp, #last_count-dynarec_local]
	mov	r0, r1			@ handler takes the data in r0
	add	r2, r2, r12		@ absolute cycle count
	push	{r2, lr}
	str	r2, [fp, #cycle-dynarec_local]
	blx	r3

	ldr	r0, [fp, #next_interupt-dynarec_local]
	pop	{r2, r3}
	str	r0, [fp, #last_count-dynarec_local]
	sub	r0, r2, r0		@ return rebased cycle count
	bx	r3
.endm
781
/* 8-bit write: skip past the 32-bit and 16-bit sub-tables first. */
FUNCTION(jump_handler_write8):
	add	r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_write_mem	strccb, 0
785
/* 16-bit write: skip past the 32-bit sub-table first. */
FUNCTION(jump_handler_write16):
	add	r3, #0x1000/4*4 @ shift to r16 part
	pcsx_write_mem	strcch, 1
789
/* 32-bit write: table base is already at the 32-bit sub-table. */
FUNCTION(jump_handler_write32):
	pcsx_write_mem	strcc, 2
792
/* Write that always goes through a C handler (no direct-memory case).
 * in: r0 = address, r1 = data, r2 = cycles, r3 = handler
 * out: r0 = cycles rebased against the updated next_interupt. */
FUNCTION(jump_handler_write_h):
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler */
	ldr	r12, [fp, #last_count-dynarec_local]
	str	r0, [fp, #address-dynarec_local] @ some handlers still need it..
	add	r2, r2, r12		@ absolute cycle count
	mov	r0, r1			@ handler takes the data in r0
	push	{r2, lr}
	str	r2, [fp, #cycle-dynarec_local]
	blx	r3

	ldr	r0, [fp, #next_interupt-dynarec_local]
	pop	{r2, r3}
	str	r0, [fp, #last_count-dynarec_local]
	sub	r0, r2, r0		@ return rebased cycle count
	bx	r3
808
/* MIPS SWL helper: store the high-order bytes of r1 at an unaligned
 * address.  in: r0 = address, r1 = data, r2 = cycles; out: r0 = cycles.
 * Pages backed by I/O handlers are not handled (see label 4). */
FUNCTION(jump_handle_swl):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #mem_wtab-dynarec_local]
	mov	r12,r0,lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1			@ carry set = handler page
	bcs	4f
	add	r3, r0, r3		@ r3 = host address
	mov	r0, r2
	tst	r3, #2
	beq	101f
	tst	r3, #1
	beq	2f
3:	@ addr&3 == 3: store all four bytes
	str	r1, [r3, #-3]
	bx	lr
2:	@ addr&3 == 2: store the top three bytes
	lsr	r2, r1, #8
	lsr	r1, #24
	strh	r2, [r3, #-2]
	strb	r1, [r3]
	bx	lr
101:	@ addr&3 == 0 or 1: store one or two bytes
	tst	r3, #1
	lsrne	r1, #16 @ 1
	lsreq	r12, r1, #24 @ 0
	strneh	r1, [r3, #-1]
	streqb	r12, [r3]
	bx	lr
4:	@ handler-backed page: silently dropped for now
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?
842
843
/* MIPS SWR helper: store the low-order bytes of r1 at an unaligned
 * address.  in: r0 = address, r1 = data, r2 = cycles; out: r0 = cycles.
 * Pages backed by I/O handlers are not handled (see label 4). */
FUNCTION(jump_handle_swr):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #mem_wtab-dynarec_local]
	mov	r12,r0,lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1			@ carry set = handler page
	bcs	4f
	add	r3, r0, r3		@ r3 = host address
	and	r12,r3, #3		@ alignment selects how many low bytes to store
	mov	r0, r2
	cmp	r12,#2
	strgtb	r1, [r3] @ 3
	streqh	r1, [r3] @ 2
	cmp	r12,#1
	strlt	r1, [r3] @ 0
	bxne	lr
	lsr	r2, r1, #8 @ 1: three bytes, split as byte + halfword
	strb	r1, [r3]
	strh	r2, [r3, #1]
	bx	lr
4:	@ handler-backed page: silently dropped for now
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?
868
869
/* Root counter read, mode 0: count = (cycles - cycleStart) & 0xffff.
 * in: r0 = address (unused), r2 = cycles; out: r0 = counter value. */
.macro rcntx_read_mode0 num
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #rcnts-dynarec_local+6*4+7*4*\num] @ cycleStart
	mov	r0, r2, lsl #16
	sub	r0, r3, lsl #16		@ subtract in the top half to wrap at 16 bits
	lsr	r0, #16
	bx	lr
.endm
878
/* Mode-0 counter reads for root counters 0..2. */
FUNCTION(rcnt0_read_count_m0):
	rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
	rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
	rcntx_read_mode0 2
887
/* Counter 0, mode 1: count = (cycles - cycleStart) / 5, computed as a
 * fixed-point multiply by 0x3334 (~1/5 in 16.16) and >>16. */
FUNCTION(rcnt0_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #rcnts-dynarec_local+6*4+7*4*0] @ cycleStart
	mov_16	r1, 0x3334
	sub	r2, r2, r3
	mul	r0, r1, r2 @ /= 5
	lsr	r0, #16
	bx	lr
896
/* Counter 1, mode 1: approximate divide by hsync_cycles via a
 * reciprocal multiply; the result is the high word of the product. */
FUNCTION(rcnt1_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #rcnts-dynarec_local+6*4+7*4*1] @ cycleStart
	mov_24	r1, 0x1e6cde
	sub	r2, r2, r3
	umull	r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
	bx	lr
904
/* Counter 2, mode 1: count = ((cycles - cycleStart) / 8) & 0xffff,
 * done with the same top-half wrap trick as mode 0, pre-shifted by 3. */
FUNCTION(rcnt2_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #rcnts-dynarec_local+6*4+7*4*2] @ cycleStart
	mov	r0, r2, lsl #16-3
	sub	r0, r3, lsl #16-3
	lsr	r0, #16 @ /= 8
	bx	lr
912
913@ vim:filetype=armasm