drc: rework linkage_arm for better assembler compatibility
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / linkage_arm.S
... / ...
CommitLineData
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * linkage_arm.s for PCSX *
3 * Copyright (C) 2009-2011 Ari64 *
 * Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                       *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
20 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
21
22#include "arm_features.h"
23#include "linkage_offsets.h"
24
25
26#ifdef __MACH__
27#define dynarec_local ESYM(dynarec_local)
28#define add_link ESYM(add_link)
29#define new_recompile_block ESYM(new_recompile_block)
30#define get_addr ESYM(get_addr)
31#define get_addr_ht ESYM(get_addr_ht)
32#define clean_blocks ESYM(clean_blocks)
33#define gen_interupt ESYM(gen_interupt)
34#define psxException ESYM(psxException)
35#define execI ESYM(execI)
36#define invalidate_addr ESYM(invalidate_addr)
37#endif
38
	/* Zero-initialized scratch area shared between this asm and the C
	 * recompiler core; its size and internal layout come from
	 * linkage_offsets.h (LO_* constants).  Code below keeps fp pointing
	 * at this block and addresses everything as [fp, #LO_xxx]. */
	.bss
	.align	4
	.global	dynarec_local
	.type	dynarec_local, %object
	.size	dynarec_local, LO_dynarec_local_size
dynarec_local:
	.space	LO_dynarec_local_size
46
/* DRC_VAR_(name, vname, size_): export 'vname' as a global object symbol
 * aliased to dynarec_local + LO_<name>, so C code and this file address
 * the very same storage without separate definitions. */
#define DRC_VAR_(name, vname, size_) \
	vname = dynarec_local + LO_##name; \
	.global vname; \
	.type vname, %object; \
	.size vname, size_

/* DRC_VAR(name, size_): same, with the platform symbol decoration
 * (ESYM adds a leading underscore on Mach-O). */
#define DRC_VAR(name, size_) \
	DRC_VAR_(name, ESYM(name), size_)

DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(address, 4)
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs sub-fields */
DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(branch_target, 4)
@DRC_VAR(align0, 16) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)

/* unused */
DRC_VAR(FCR0, 4)
DRC_VAR(FCR31, 4)
93
#ifndef HAVE_ARMV5
/* Pre-ARMv5 compatibility: emulate 'blx reg' (call through register).
 * mov lr, pc captures the return address (pc reads as insn+8 in ARM
 * state, i.e. the instruction after the bx), then bx jumps to \rd. */
.macro blx rd
	mov	lr, pc
	bx	\rd
.endm
#endif
100
/* Load the address of \var into \reg.
 * On ARMv7-A non-PIC builds use a movw/movt pair (no literal pool load);
 * otherwise fall back to an ldr from the literal pool (ldr =var). */
.macro load_varadr reg var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
	movw	\reg, #:lower16:\var
	movt	\reg, #:upper16:\var
#else
	ldr	\reg, =\var
#endif
.endm
109
/* Load a 16-bit immediate into \reg.
 * Pre-v7 path builds it from two 8-bit chunks (each chunk is a valid
 * ARM rotated-immediate), since plain mov can't encode all 16-bit values. */
.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #\imm
#else
	mov	\reg, #(\imm & 0x00ff)
	orr	\reg, #(\imm & 0xff00)
#endif
.endm
118
/* Load a 24-bit immediate into \reg.
 * v7 uses movw/movt; the fallback composes three byte-sized rotated
 * immediates with mov + two orr instructions. */
.macro mov_24 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #(\imm & 0xffff)
	movt	\reg, #(\imm >> 16)
#else
	mov	\reg, #(\imm & 0x0000ff)
	orr	\reg, #(\imm & 0x00ff00)
	orr	\reg, #(\imm & 0xff0000)
#endif
.endm
129
/* Common body of dyna_linker/dyna_linker_ds: try to resolve the target
 * virtual address to already-compiled code before falling back to the
 * recompiler (which follows this macro in the caller).
 *   - scan the jump_in list for the page; if found, patch the branch at
 *     r1 via add_link and jump to the block,
 *   - otherwise probe the mini hash table, then the jump_dirty list,
 *     inserting a hash entry on a dirty hit.
 * Falls out the bottom (label 8) when nothing was found. */
.macro dyna_linker_main
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	ldr	r3, .jiptr
	/* get_page: fold r0's page number into a jump_in index in r2 */
	lsr	r2, r0, #12
	mov	r6, #4096
	bic	r2, r2, #0xe0000
	sub	r6, r6, #1
	cmp	r2, #0x1000
	ldr	r7, [r1]		/* r7 = branch instruction being patched */
	biclt	r2, #0x0e00
	and	r6, r6, r2
	cmp	r2, #2048
	add	r12, r7, #2
	orrcs	r2, r6, #2048
	ldr	r5, [r3, r2, lsl #2]	/* r5 = jump_in list head for the page */
	lsl	r12, r12, #8
	add	r6, r1, r12, asr #6	/* r6 = current branch target of the insn */
	mov	r8, #0
	/* jump_in lookup: linked list of (vaddr, ?, code ptr, next) records */
1:
	movs	r4, r5
	beq	2f
	ldr	r3, [r5]
	ldr	r5, [r4, #12]		/* advance to next entry early */
	teq	r3, r0
	bne	1b
	ldr	r3, [r4, #4]
	ldr	r4, [r4, #8]
	tst	r3, r3
	bne	1b
	teq	r4, r6
	moveq	pc, r4 /* Stale i-cache */
	mov	r8, r4
	b	1b /* jump_in may have dupes, continue search */
2:
	tst	r8, r8
	beq	3f /* r0 not in jump_in */

	/* found compiled code at r8: register the link and patch the
	 * branch instruction at r5 (old r1) to jump there directly */
	mov	r5, r1
	mov	r1, r6
	bl	add_link
	sub	r2, r8, r5
	and	r1, r7, #0xff000000	/* keep condition/opcode bits of the branch */
	lsl	r2, r2, #6
	sub	r1, r1, #2
	add	r1, r1, r2, lsr #8	/* splice in the new 24-bit offset */
	str	r1, [r5]
	mov	pc, r8
3:
	/* hash_table lookup: two-way bucket of (vaddr, code) pairs */
	cmp	r2, #2048
	ldr	r3, .jdptr
	eor	r4, r0, r0, lsl #16
	lslcc	r2, r0, #9
	ldr	r6, .htptr
	lsr	r4, r4, #12
	lsrcc	r2, r2, #21
	bic	r4, r4, #15		/* r4 = 16-byte-aligned bucket offset */
	ldr	r5, [r3, r2, lsl #2]	/* r5 = jump_dirty list head */
	ldr	r7, [r6, r4]!
	teq	r7, r0
	ldreq	pc, [r6, #4]		/* way 0 hit */
	ldr	r7, [r6, #8]
	teq	r7, r0
	ldreq	pc, [r6, #12]		/* way 1 hit */
	/* jump_dirty lookup */
6:
	movs	r4, r5
	beq	8f
	ldr	r3, [r5]
	ldr	r5, [r4, #12]
	teq	r3, r0
	bne	6b
7:
	ldr	r1, [r4, #8]
	/* hash_table insert: new entry into way 0, old way 0 into way 1 */
	ldr	r2, [r6]
	ldr	r3, [r6, #4]
	str	r0, [r6]
	str	r1, [r6, #4]
	str	r2, [r6, #8]
	str	r3, [r6, #12]
	mov	pc, r1
8:
.endm
217
218 .text
219 .align 2
220
/* Resolve a branch target: try existing blocks via dyna_linker_main,
 * else recompile.  new_recompile_block returning 0 means success, so
 * loop back and link again; non-zero means a bad address, in which case
 * r1/r2 are set up and execution FALLS THROUGH into exec_pagefault. */
FUNCTION(dyna_linker):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0			/* save args across the C call */
	mov	r5, r1
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker		/* compiled ok - retry the link */
	/* pagefault */
	mov	r1, r0			/* fault address = target address */
	mov	r2, #8			/* cause code; falls through to exec_pagefault */
	.size	dyna_linker, .-dyna_linker
237
/* Raise a MIPS address-error style exception: fill in the COP0 registers
 * (EPC, Status, Cause, BadVAddr, Context, EntryHi) and jump to the
 * compiled exception vector at 0x80000000. */
FUNCTION(exec_pagefault):
	/* r0 = instruction pointer */
	/* r1 = fault address */
	/* r2 = cause */
	ldr	r3, [fp, #LO_reg_cop0+48] /* Status */
	mvn	r6, #0xF000000F		/* build Context BadVPN2 field mask */
	ldr	r4, [fp, #LO_reg_cop0+16] /* Context */
	bic	r6, r6, #0x0F800000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r3, r3, #2		/* Status |= EXL */
	str	r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
	bic	r4, r4, r6
	str	r3, [fp, #LO_reg_cop0+48] /* Status */
	and	r5, r6, r1, lsr #9
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	and	r1, r1, r6, lsl #9
	str	r1, [fp, #LO_reg_cop0+40] /* EntryHi */
	orr	r4, r4, r5
	str	r4, [fp, #LO_reg_cop0+16] /* Context */
	mov	r0, #0x80000000		/* exception vector */
	bl	get_addr_ht
	mov	pc, r0
	.size	exec_pagefault, .-exec_pagefault
261
/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0
	bic	r0, r0, #7
	mov	r5, r1
	orr	r0, r0, #1		/* low bit flags delay-slot compilation */
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker_ds		/* compiled ok - retry the link */
	/* pagefault */
	bic	r1, r0, #7
	mov	r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
	sub	r0, r1, #4		/* EPC = branch, not the delay slot */
	b	exec_pagefault
	.size	dyna_linker_ds, .-dyna_linker_ds

/* literal pool: addresses of the C-side lookup structures */
.jiptr:
	.word	jump_in
.jdptr:
	.word	jump_dirty
.htptr:
	.word	hash_table
290
291 .align 2
292
/* Per-register entry points for indirect jumps: the generated code holds
 * the target vaddr in some register rN; each stub moves it to r0,
 * computes the hash seed in r2 (vaddr ^ (vaddr << 16)) and continues at
 * the common jump_vaddr.  jump_vaddr_r7 falls through (no branch). */
FUNCTION(jump_vaddr_r0):
	eor	r2, r0, r0, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
	eor	r2, r1, r1, lsl #16
	mov	r0, r1
	b	jump_vaddr
	.size	jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
	mov	r0, r2
	eor	r2, r2, r2, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
	eor	r2, r3, r3, lsl #16
	mov	r0, r3
	b	jump_vaddr
	.size	jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
	eor	r2, r4, r4, lsl #16
	mov	r0, r4
	b	jump_vaddr
	.size	jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
	eor	r2, r5, r5, lsl #16
	mov	r0, r5
	b	jump_vaddr
	.size	jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
	eor	r2, r6, r6, lsl #16
	mov	r0, r6
	b	jump_vaddr
	.size	jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
	eor	r2, r8, r8, lsl #16
	mov	r0, r8
	b	jump_vaddr
	.size	jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
	eor	r2, r9, r9, lsl #16
	mov	r0, r9
	b	jump_vaddr
	.size	jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
	eor	r2, r10, r10, lsl #16
	mov	r0, r10
	b	jump_vaddr
	.size	jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
	eor	r2, r12, r12, lsl #16
	mov	r0, r12
	b	jump_vaddr
	.size	jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
	eor	r2, r7, r7, lsl #16
	add	r0, r7, #0
	/* falls through into jump_vaddr */
	.size	jump_vaddr_r7, .-jump_vaddr_r7
/* Common tail: probe the two-way hash table; on miss call get_addr
 * (preserving r10 = cycle count in dynarec_local) and jump to the result. */
FUNCTION(jump_vaddr):
	ldr	r1, .htptr
	mvn	r3, #15
	and	r2, r3, r2, lsr #12	/* bucket offset, 16-byte aligned */
	ldr	r2, [r1, r2]!
	teq	r2, r0
	ldreq	pc, [r1, #4]		/* way 0 hit */
	ldr	r2, [r1, #8]
	teq	r2, r0
	ldreq	pc, [r1, #12]		/* way 1 hit */
	str	r10, [fp, #LO_cycle_count]
	bl	get_addr
	ldr	r10, [fp, #LO_cycle_count]
	mov	pc, r0
	.size	jump_vaddr, .-jump_vaddr
366
367 .align 2
368
/* Compare a compiled block's source bytes against current memory.
 * verify_code_ds additionally spills r8 (branch target) first;
 * verify_code_vm/verify_code share the same body.  On match, return to
 * lr; on mismatch, call get_addr for r0 and jump to fresh code.
 * Two words are compared per iteration, with the xor results staggered
 * through r9/r12 so loads and compares overlap. */
FUNCTION(verify_code_ds):
	str	r8, [fp, #LO_branch_target]
FUNCTION(verify_code_vm):
FUNCTION(verify_code):
	/* r1 = source */
	/* r2 = target */
	/* r3 = length */
	tst	r3, #4			/* odd number of words? pre-consume one */
	mov	r4, #0
	add	r3, r1, r3		/* r3 = end of source */
	mov	r5, #0
	ldrne	r4, [r1], #4
	mov	r12, #0
	ldrne	r5, [r2], #4
	teq	r1, r3
	beq	.D3
.D2:
	ldr	r7, [r1], #4
	eor	r9, r4, r5
	ldr	r8, [r2], #4
	orrs	r9, r9, r12		/* accumulate previous diffs */
	bne	.D4
	ldr	r4, [r1], #4
	eor	r12, r7, r8
	ldr	r5, [r2], #4
	cmp	r1, r3
	bcc	.D2
	teq	r7, r8			/* check the pair loaded on the last lap */
.D3:
	teqeq	r4, r5
.D4:
	ldr	r8, [fp, #LO_branch_target]
	moveq	pc, lr			/* all words matched */
.D5:
	bl	get_addr		/* stale block: recompile/look up r0 */
	mov	pc, r0
	.size	verify_code, .-verify_code
	.size	verify_code_vm, .-verify_code_vm
407
408 .align 2
/* Called from generated code when the cycle counter expires.  Syncs the
 * PCSX cycle counter, optionally moves 'dirty' blocks to the clean list
 * (restore_candidate bitmap), calls gen_interupt, and either returns to
 * the caller, exits the dynarec (stop set), or re-enters at pcaddr. */
FUNCTION(cc_interrupt):
	ldr	r0, [fp, #LO_last_count]
	mov	r1, #0
	mov	r2, #0x1fc
	add	r10, r0, r10		/* r10 = absolute cycle count */
	str	r1, [fp, #LO_pending_exception]
	and	r2, r2, r10, lsr #17	/* restore_candidate word offset */
	add	r3, fp, #LO_restore_candidate
	str	r10, [fp, #LO_cycle] /* PCSX cycles */
@@	str	r10, [fp, #LO_reg_cop0+36] /* Count */
	ldr	r4, [r2, r3]
	mov	r10, lr			/* keep return address across C call */
	tst	r4, r4
	bne	.E4			/* candidates pending - clean them first */
.E1:
	bl	gen_interupt
	mov	lr, r10
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_next_interupt]
	ldr	r1, [fp, #LO_pending_exception]
	ldr	r2, [fp, #LO_stop]
	str	r0, [fp, #LO_last_count]
	sub	r10, r10, r0		/* back to relative cycle count */
	tst	r2, r2
	ldmnefd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc} /* stop: unwind new_dyna_start's frame */
	tst	r1, r1
	moveq	pc, lr			/* no exception: resume caller */
.E2:
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	mov	pc, r0
.E4:
	/* Move 'dirty' blocks to the 'clean' list */
	lsl	r5, r2, #3		/* block index base = word offset * 8 */
	str	r1, [r2, r3]		/* clear the bitmap word */
.E5:
	lsrs	r4, r4, #1		/* carry = this block's candidate bit */
	mov	r0, r5
	add	r5, r5, #1
	blcs	clean_blocks
	tst	r5, #31
	bne	.E5
	b	.E1
	.size	cc_interrupt, .-cc_interrupt
453
454 .align 2
/* Resume execution at pcaddr after an interrupt; the +2 cycle adjustment
 * compensates the dynarec cycle counter (NOTE(review): exact reason for
 * the constant is not visible here - inherited from the core). */
FUNCTION(do_interrupt):
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	add	r10, r10, #2
	mov	pc, r0
	.size	do_interrupt, .-do_interrupt
461
462 .align 2
/* Raise a coprocessor-unusable style exception: set EPC/Status/Cause
 * (Cause = r2 + 0x2c) and enter the vector at 0x80000080.
 * fp_exception_ds is the delay-slot variant (BD bit set in Cause). */
FUNCTION(fp_exception):
	mov	r2, #0x10000000
.E7:
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2			/* Status |= EXL */
	add	r2, r2, #0x2c
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80		/* vector = 0x80000080 */
	bl	get_addr_ht
	mov	pc, r0
	.size	fp_exception, .-fp_exception
	.align	2
FUNCTION(fp_exception_ds):
	mov	r2, #0x90000000 /* Set high bit if delay slot */
	b	.E7
	.size	fp_exception_ds, .-fp_exception_ds
482
483 .align 2
/* Raise a SYSCALL exception (Cause = 0x20): set EPC/Status/Cause and
 * enter the compiled vector at 0x80000080.  r0 = faulting PC. */
FUNCTION(jump_syscall):
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2			/* Status |= EXL */
	mov	r2, #0x20		/* ExcCode = Syscall */
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80		/* vector = 0x80000080 */
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall, .-jump_syscall
496 .align 2
497
498 .align 2
/* HLE syscall: sync PC and cycle counter into psxRegs, then hand the
 * exception to the C core (psxException).  pcsx_return is the shared
 * re-entry point used here and by jump_hlecall/jump_intcall. */
FUNCTION(jump_syscall_hle):
	str	r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */
	ldr	r2, [fp, #LO_last_count]
	mov	r1, #0 /* in delay slot */
	add	r2, r2, r10		/* absolute cycles */
	mov	r0, #0x20 /* cause */
	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
	bl	psxException

	/* note: psxException might do recursive recompiler call from it's HLE code,
	 * so be ready for this */
pcsx_return:
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_pcaddr]
	sub	r10, r10, r1		/* back to relative cycles */
	str	r1, [fp, #LO_last_count]
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall_hle, .-jump_syscall_hle
519
520 .align 2
/* Call an HLE handler: r0 = PC to record, r1 = handler to invoke.
 * Cycle counter is synced first; the handler returns via pcsx_return. */
FUNCTION(jump_hlecall):
	ldr	r2, [fp, #LO_last_count]
	str	r0, [fp, #LO_pcaddr]
	add	r2, r2, r10
	adr	lr, pcsx_return		/* handler returns into the dynarec loop */
	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
	bx	r1
	.size	jump_hlecall, .-jump_hlecall
529
530 .align 2
/* Fall back to the interpreter for one instruction: r0 = PC to record.
 * Cycle counter is synced first; execI returns via pcsx_return.
 * Fix: the .size directive previously named jump_hlecall (copy-paste),
 * re-declaring the previous symbol's size and leaving jump_intcall
 * unsized in the ELF symbol table. */
FUNCTION(jump_intcall):
	ldr	r2, [fp, #LO_last_count]
	str	r0, [fp, #LO_pcaddr]
	add	r2, r2, r10
	adr	lr, pcsx_return		/* execI returns into the dynarec loop */
	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
	b	execI
	.size	jump_intcall, .-jump_intcall
539
540 .align 2
/* Exit the dynarec: sync the absolute cycle count back to psxRegs and
 * restore the registers saved by new_dyna_start.
 * NOTE(review): the 'add r12, fp, #28' result appears unused before the
 * ldm reloads ip - purpose unclear from this file; confirm before removal. */
FUNCTION(new_dyna_leave):
	ldr	r0, [fp, #LO_last_count]
	add	r12, fp, #28
	add	r10, r0, r10
	str	r10, [fp, #LO_cycle]
	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
	.size	new_dyna_leave, .-new_dyna_leave
548
549 .align 2
/* Per-register invalidation stubs: generated code keeps the written
 * address in some register rN.  Each stub spills the caller-saved regs
 * into dynarec_local (fp), moves the address to r0 and continues at
 * invalidate_addr_call.  invalidate_addr_r12 falls through (no branch). */
FUNCTION(invalidate_addr_r0):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	b	invalidate_addr_call
	.size	invalidate_addr_r0, .-invalidate_addr_r0
	.align	2
FUNCTION(invalidate_addr_r1):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r1
	b	invalidate_addr_call
	.size	invalidate_addr_r1, .-invalidate_addr_r1
	.align	2
FUNCTION(invalidate_addr_r2):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r2
	b	invalidate_addr_call
	.size	invalidate_addr_r2, .-invalidate_addr_r2
	.align	2
FUNCTION(invalidate_addr_r3):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r3
	b	invalidate_addr_call
	.size	invalidate_addr_r3, .-invalidate_addr_r3
	.align	2
FUNCTION(invalidate_addr_r4):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r4
	b	invalidate_addr_call
	.size	invalidate_addr_r4, .-invalidate_addr_r4
	.align	2
FUNCTION(invalidate_addr_r5):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r5
	b	invalidate_addr_call
	.size	invalidate_addr_r5, .-invalidate_addr_r5
	.align	2
FUNCTION(invalidate_addr_r6):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r6
	b	invalidate_addr_call
	.size	invalidate_addr_r6, .-invalidate_addr_r6
	.align	2
FUNCTION(invalidate_addr_r7):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r7
	b	invalidate_addr_call
	.size	invalidate_addr_r7, .-invalidate_addr_r7
	.align	2
FUNCTION(invalidate_addr_r8):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r8
	b	invalidate_addr_call
	.size	invalidate_addr_r8, .-invalidate_addr_r8
	.align	2
FUNCTION(invalidate_addr_r9):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r9
	b	invalidate_addr_call
	.size	invalidate_addr_r9, .-invalidate_addr_r9
	.align	2
FUNCTION(invalidate_addr_r10):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r10
	b	invalidate_addr_call
	.size	invalidate_addr_r10, .-invalidate_addr_r10
	.align	2
FUNCTION(invalidate_addr_r12):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r12
	/* falls through into invalidate_addr_call */
	.size	invalidate_addr_r12, .-invalidate_addr_r12
	.align	2
/* Common tail: skip the C call if r0 lies inside the already-invalidated
 * [inv_code_start, inv_code_end] range, then restore the spilled regs
 * and return (lr was stored in the stmia, reloaded into pc). */
invalidate_addr_call:
	ldr	r12, [fp, #LO_inv_code_start]
	ldr	lr, [fp, #LO_inv_code_end]
	cmp	r0, r12
	cmpcs	lr, r0			/* cs: r0 >= start; then test end >= r0 */
	blcc	invalidate_addr		/* cc: outside the known-invalid range */
	ldmia	fp, {r0, r1, r2, r3, r12, pc}
	.size	invalidate_addr_call, .-invalidate_addr_call
628
629 .align 2
/* Enter the dynarec from C: save callee-saved registers, point fp at
 * dynarec_local, set up the relative cycle counter in r10 and jump to
 * the compiled code for pcaddr.  Exited via new_dyna_leave. */
FUNCTION(new_dyna_start):
	/* ip is stored to conform EABI alignment */
	stmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
	load_varadr	fp, dynarec_local
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	str	r1, [fp, #LO_last_count]
	sub	r10, r10, r1		/* r10 = cycles relative to next_interupt */
	mov	pc, r0
	.size	new_dyna_start, .-new_dyna_start
642
643/* --------------------------------------- */
644
645.align 2
646
/* Memory-read dispatch: index the handler table by the page-offset bits
 * of the address.  Table entries are doubled pointers: shifting left by
 * one yields carry-clear for a direct-RAM pointer (do the \readop load
 * and return) or carry-set for a handler (sync cycles and tail-call it).
 * The conditional \readop (ldrcc*) only executes on the direct path. */
.macro pcsx_read_mem readop tab_shift
	/* r0 = address, r1 = handler_tab, r2 = cycles */
	lsl	r3, r0, #20
	lsr	r3, #(20+\tab_shift)	/* r3 = in-page index, scaled by access size */
	ldr	r12, [fp, #LO_last_count]
	ldr	r1, [r1, r3, lsl #2]
	add	r2, r2, r12		/* absolute cycle count */
	lsls	r1, #1			/* carry = handler flag, r1 = real pointer */
.if \tab_shift == 1
	lsl	r3, #1			/* ldrh has no scaled-register form */
	\readop	r0, [r1, r3]
.else
	\readop	r0, [r1, r3, lsl #\tab_shift]
.endif
	movcc	pc, lr			/* direct memory: done */
	str	r2, [fp, #LO_cycle]	/* handler path: sync cycles first */
	bx	r1
.endm
665
/* Read dispatchers per access size.  The 8/16-bit entries first offset
 * r1 into the byte/halfword sub-table that follows the word table. */
FUNCTION(jump_handler_read8):
	add	r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_read_mem ldrccb, 0

FUNCTION(jump_handler_read16):
	add	r1, #0x1000/4*4 @ shift to r16 part
	pcsx_read_mem ldrcch, 1

FUNCTION(jump_handler_read32):
	pcsx_read_mem ldrcc, 2
676
677
/* Memory-write dispatch, mirroring pcsx_read_mem: doubled table entries,
 * carry-clear = direct store (conditional \wrtop) and return with the
 * cycle count in r0; carry-set = call the C handler with cycles synced,
 * then recompute the relative cycle count for the generated code. */
.macro pcsx_write_mem wrtop tab_shift
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
	lsl	r12, r0, #20
	lsr	r12, #(20+\tab_shift)	/* in-page index, scaled by access size */
	ldr	r3, [r3, r12, lsl #2]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	lsls	r3, #1			/* carry = handler flag, r3 = real pointer */
	mov	r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
	lsl	r12, #1			/* strh has no scaled-register form */
	\wrtop	r1, [r3, r12]
.else
	\wrtop	r1, [r3, r12, lsl #\tab_shift]
.endif
	movcc	pc, lr			/* direct memory: done */
	ldr	r12, [fp, #LO_last_count]
	mov	r0, r1			/* handler takes the data in r0 */
	add	r2, r2, r12
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]	/* sync absolute cycles for the handler */
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, r3}
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0		/* return updated relative cycle count */
	bx	r3
.endm
706
/* Write dispatchers per access size.  The 8/16-bit entries first offset
 * r3 into the byte/halfword sub-table that follows the word table. */
FUNCTION(jump_handler_write8):
	add	r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_write_mem strccb, 0

FUNCTION(jump_handler_write16):
	add	r3, #0x1000/4*4 @ shift to r16 part
	pcsx_write_mem strcch, 1

FUNCTION(jump_handler_write32):
	pcsx_write_mem strcc, 2
717
/* Write path when the handler is already known (r3): no table lookup,
 * just sync cycles, call the handler, and return the updated relative
 * cycle count in r0 (same tail as pcsx_write_mem's handler path). */
FUNCTION(jump_handler_write_h):
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler */
	ldr	r12, [fp, #LO_last_count]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	add	r2, r2, r12		/* absolute cycle count */
	mov	r0, r1			/* handler takes the data in r0 */
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, r3}
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0		/* return updated relative cycle count */
	bx	r3
733
/* MIPS SWL (store word left, little-endian): store the upper bytes of
 * r1 into memory depending on address alignment.  The write table gives
 * a doubled RAM offset; carry-set entries are I/O handlers, which this
 * path does not support (see label 4).  Returns cycles in r0. */
FUNCTION(jump_handle_swl):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1			/* carry = handler flag */
	bcs	4f
	add	r3, r0, r3		/* r3 = host address */
	mov	r0, r2			/* cycle return value */
	tst	r3, #2
	beq	101f
	tst	r3, #1
	beq	2f
3:	/* offset 3: whole word, stored unaligned at addr-3 */
	str	r1, [r3, #-3]
	bx	lr
2:	/* offset 2: low half at addr-2, byte 3 at addr */
	lsr	r2, r1, #8
	lsr	r1, #24
	strh	r2, [r3, #-2]
	strb	r1, [r3]
	bx	lr
101:	/* offset 0 or 1 */
	tst	r3, #1
	lsrne	r1, #16 @ 1
	lsreq	r12, r1, #24 @ 0
	strneh	r1, [r3, #-1]
	streqb	r12, [r3]
	bx	lr
4:	/* I/O area: unhandled, silently ignore the store */
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?
767
768
/* MIPS SWR (store word right, little-endian): store the lower bytes of
 * r1 depending on address alignment.  Same table convention as
 * jump_handle_swl; I/O handlers unsupported (label 4).  Returns cycles
 * in r0. */
FUNCTION(jump_handle_swr):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1			/* carry = handler flag */
	bcs	4f
	add	r3, r0, r3		/* r3 = host address */
	and	r12, r3, #3		/* alignment within the word */
	mov	r0, r2			/* cycle return value */
	cmp	r12, #2
	strgtb	r1, [r3] @ 3
	streqh	r1, [r3] @ 2
	cmp	r12, #1
	strlt	r1, [r3] @ 0
	bxne	lr
	lsr	r2, r1, #8 @ 1		/* offset 1: byte + halfword */
	strb	r1, [r3]
	strh	r2, [r3, #1]
	bx	lr
4:	/* I/O area: unhandled, silently ignore the store */
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?
793
794
/* Root counter read, mode 0: count = (cycles - cycleStart) & 0xffff.
 * The lsl #16 / lsr #16 pair performs the 16-bit truncation. */
.macro rcntx_read_mode0 num
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
	mov	r0, r2, lsl #16
	sub	r0, r3, lsl #16
	lsr	r0, #16
	bx	lr
.endm
803
/* Mode-0 read entry points for root counters 0..2. */
FUNCTION(rcnt0_read_count_m0):
	rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
	rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
	rcntx_read_mode0 2
812
/* Root counter 0, mode 1: count = (cycles - cycleStart) / 5,
 * approximated by multiplying with 0x3334 and taking the high 16 bits
 * (0x3334/0x10000 ~= 1/5). */
FUNCTION(rcnt0_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
	mov_16	r1, 0x3334		/* fixed-point reciprocal of 5 */
	sub	r2, r2, r3
	mul	r0, r1, r2 @ /= 5
	lsr	r0, #16
	bx	lr
821
/* Root counter 1, mode 1: approximate division by hsync_cycles via a
 * 32x32->64 multiply with a fixed-point reciprocal; the high word of
 * the product (r0) is the quotient. */
FUNCTION(rcnt1_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*1]
	mov_24	r1, 0x1e6cde		/* fixed-point reciprocal */
	sub	r2, r2, r3
	umull	r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
	bx	lr
829
/* Root counter 2, mode 1: count = ((cycles - cycleStart) / 8) & 0xffff.
 * The /8 is folded into the 16-bit truncation by shifting left only 13
 * (16-3) before the final lsr #16. */
FUNCTION(rcnt2_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*2]
	mov	r0, r2, lsl #16-3
	sub	r0, r3, lsl #16-3
	lsr	r0, #16 @ /= 8
	bx	lr
837
838@ vim:filetype=armasm