fix clang warnings
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / linkage_arm.S
... / ...
CommitLineData
1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * linkage_arm.s for PCSX *
3 * Copyright (C) 2009-2011 Ari64 *
 4 * Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                        *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
20 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
21
#include "arm_features.h"
#include "linkage_offsets.h"


/* Mach-O (iOS/macOS) prefixes C symbols with an underscore; remap the
 * external names used below through ESYM() so this file links there too.
 * On ELF targets the names are used as-is. */
#ifdef __MACH__
#define dynarec_local           ESYM(dynarec_local)
#define add_link                ESYM(add_link)
#define new_recompile_block     ESYM(new_recompile_block)
#define get_addr                ESYM(get_addr)
#define get_addr_ht             ESYM(get_addr_ht)
#define clean_blocks            ESYM(clean_blocks)
#define gen_interupt            ESYM(gen_interupt)
#define psxException            ESYM(psxException)
#define execI                   ESYM(execI)
#define invalidate_addr         ESYM(invalidate_addr)
#endif
38
	/* Zero-initialized state block for the dynarec.  At runtime fp is
	 * kept pointing at its base, so every member is addressed as
	 * [fp, #LO_*] (offsets come from linkage_offsets.h).  The DRC_VAR
	 * aliases below carve named, sized symbols out of this area. */
	.bss
	.align	4              /* power-of-two on ARM ELF: 16-byte alignment */
	.global	dynarec_local
	.type	dynarec_local, %object
	.size	dynarec_local, LO_dynarec_local_size
dynarec_local:
	.space	LO_dynarec_local_size
46
/* DRC_VAR_(name, vname, size_): define symbol `vname` as an alias at
 * offset LO_<name> inside dynarec_local, exported with object type and
 * an explicit size so the C side and debuggers can see it. */
#define DRC_VAR_(name, vname, size_) \
	vname = dynarec_local + LO_##name; \
	.global vname; \
	.type vname, %object; \
	.size vname, size_

/* DRC_VAR(name, size_): same, with the platform-decorated symbol name. */
#define DRC_VAR(name, size_) \
	DRC_VAR_(name, ESYM(name), size_)
55
/* Named views into dynarec_local.  Sizes are in bytes; commented-out
 * entries (@-prefixed) are members that exist in the layout but are not
 * referenced by name from assembly. */
DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(address, 4)
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs */
DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)          /* root counter state: 7 words x 4 counters */
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(scratch_buf_ptr, 4)
@DRC_VAR(align0, 12) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)

/* unused */
DRC_VAR(FCR0, 4)
DRC_VAR(FCR31, 4)
94
#ifndef HAVE_ARMV5
/* ARMv4 has no blx instruction: emulate "blx reg" by loading lr manually.
 * Reading pc yields the address of the current instruction + 8, which is
 * exactly the instruction after the bx, so lr ends up correct. */
.macro blx rd
	mov	lr, pc
	bx	\rd
.endm
#endif
101
/* Load the address of \var into \reg.  On non-PIC ARMv7 use a movw/movt
 * pair (no literal pool access); otherwise fall back to a literal-pool
 * load via the ldr= pseudo-instruction. */
.macro load_varadr reg var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
	movw	\reg, #:lower16:\var
	movt	\reg, #:upper16:\var
#else
	ldr	\reg, =\var
#endif
.endm
110
/* Load a 16-bit constant \imm into \reg.  Pre-v7 path builds it from the
 * two byte halves (each must be an encodable ARM immediate, which any
 * 8-bit-aligned chunk is). */
.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #\imm
#else
	mov	\reg, #(\imm & 0x00ff)
	orr	\reg, #(\imm & 0xff00)
#endif
.endm
119
/* Load a 24-bit constant \imm into \reg, one byte chunk at a time on
 * pre-v7 (movw/movt pair on ARMv7). */
.macro mov_24 reg imm
#ifdef __ARM_ARCH_7A__
	movw	\reg, #(\imm & 0xffff)
	movt	\reg, #(\imm >> 16)
#else
	mov	\reg, #(\imm & 0x0000ff)
	orr	\reg, #(\imm & 0x00ff00)
	orr	\reg, #(\imm & 0xff0000)
#endif
.endm
130
/* Common body of dyna_linker / dyna_linker_ds.
 * Tries to resolve the virtual target r0 to already-compiled code:
 *   1) search the jump_in list for the target's page; on a hit, call
 *      add_link and patch the branch instruction at r1 to go there
 *      directly, then jump in;
 *   2) otherwise try the hash table, then the jump_dirty list (on a
 *      dirty hit the entry is inserted into the hash table first);
 *   3) if all of that fails, fall through past the macro (label 8)
 *      so the caller can invoke the recompiler.
 * Register use within: r2 = page index, r5 = list cursor, r6 = branch
 * return address derived from the instruction at r1, r7 = original
 * branch instruction word, r8 = matched code address (0 = none). */
.macro dyna_linker_main
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	ldr	r3, .jiptr
	/* get_page: fold the address into a page index (RAM is mirrored,
	 * high pages are remapped; 0x1000+ means non-RAM region) */
	lsr	r2, r0, #12
	mov	r6, #4096
	bic	r2, r2, #0xe0000
	sub	r6, r6, #1
	cmp	r2, #0x1000
	ldr	r7, [r1]                /* r7 = branch insn being patched */
	biclt	r2, #0x0e00
	and	r6, r6, r2
	cmp	r2, #2048
	add	r12, r7, #2
	orrcs	r2, r6, #2048
	ldr	r5, [r3, r2, lsl #2]    /* r5 = jump_in[page] list head */
	lsl	r12, r12, #8
	add	r6, r1, r12, asr #6     /* r6 = branch target encoded in insn */
	mov	r8, #0
	/* jump_in lookup */
1:
	movs	r4, r5
	beq	2f                      /* end of list */
	ldr	r3, [r5]                /* entry vaddr */
	ldr	r5, [r4, #12]           /* next */
	teq	r3, r0
	bne	1b
	ldr	r3, [r4, #4]            /* reg_sv_flags: must be 0 to link */
	ldr	r4, [r4, #8]            /* compiled code address */
	tst	r3, r3
	bne	1b
	teq	r4, r6
	moveq	pc, r4 /* Stale i-cache */
	mov	r8, r4
	b	1b /* jump_in may have dupes, continue search */
2:
	tst	r8, r8
	beq	3f /* r0 not in jump_in */

	/* found: register the link, then rewrite the branch at r5(=r1)
	 * so its 24-bit offset targets r8 directly, keeping the original
	 * condition/opcode bits from r7 */
	mov	r5, r1
	mov	r1, r6
	bl	add_link
	sub	r2, r8, r5
	and	r1, r7, #0xff000000
	lsl	r2, r2, #6
	sub	r1, r1, #2
	add	r1, r1, r2, lsr #8
	str	r1, [r5]
	mov	pc, r8
3:
	/* hash_table lookup: two-way bucket of (vaddr, code) pairs */
	cmp	r2, #2048
	ldr	r3, .jdptr
	eor	r4, r0, r0, lsl #16
	lslcc	r2, r0, #9
	ldr	r6, .htptr
	lsr	r4, r4, #12
	lsrcc	r2, r2, #21
	bic	r4, r4, #15
	ldr	r5, [r3, r2, lsl #2]    /* r5 = jump_dirty[page] list head */
	ldr	r7, [r6, r4]!           /* r6 -> hash bucket; r7 = way-0 vaddr */
	teq	r7, r0
	ldreq	pc, [r6, #4]            /* way-0 hit */
	ldr	r7, [r6, #8]
	teq	r7, r0
	ldreq	pc, [r6, #12]           /* way-1 hit */
	/* jump_dirty lookup */
6:
	movs	r4, r5
	beq	8f                      /* nothing found: caller recompiles */
	ldr	r3, [r5]
	ldr	r5, [r4, #12]
	teq	r3, r0
	bne	6b
7:
	ldr	r1, [r4, #8]            /* dirty code address */
	/* hash_table insert: new pair in way 0, old way 0 demoted to way 1 */
	ldr	r2, [r6]
	ldr	r3, [r6, #4]
	str	r0, [r6]
	str	r1, [r6, #4]
	str	r2, [r6, #8]
	str	r3, [r6, #12]
	mov	pc, r1
8:
.endm
218
	.text
	.align	2

/* Runtime linker stub: called (from generated code) when a branch to an
 * uncompiled target is taken.  If dyna_linker_main cannot resolve the
 * target, compile it with new_recompile_block and retry; a nonzero
 * return from the recompiler indicates a fault, in which case we set up
 * the arguments and FALL THROUGH into exec_pagefault below (no branch —
 * exec_pagefault must stay immediately after this function). */
FUNCTION(dyna_linker):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0
	mov	r5, r1
	bl	new_recompile_block
	tst	r0, r0                  /* 0 = compiled OK */
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker             /* retry lookup, will hit this time */
	/* pagefault */
	mov	r1, r0                  /* fault address = target */
	mov	r2, #8                  /* cause = 8 (AdEL-style load fault) — TODO confirm */
	.size	dyna_linker, .-dyna_linker
238
/* Raise a MIPS address-error exception: update COP0 state (EPC, Status,
 * Cause, BadVAddr, Context, EntryHi) and jump to the exception vector
 * at 0x80000000 via the hash-table lookup. */
FUNCTION(exec_pagefault):
	/* r0 = instruction pointer */
	/* r1 = fault address */
	/* r2 = cause */
	ldr	r3, [fp, #LO_reg_cop0+48] /* Status */
	mvn	r6, #0xF000000F         /* build Context/EntryHi field mask */
	ldr	r4, [fp, #LO_reg_cop0+16] /* Context */
	bic	r6, r6, #0x0F800000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r3, r3, #2              /* Status: enter exception level (EXL) */
	str	r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
	bic	r4, r4, r6
	str	r3, [fp, #LO_reg_cop0+48] /* Status */
	and	r5, r6, r1, lsr #9      /* BadVPN into Context.BadVPN2 field */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	and	r1, r1, r6, lsl #9
	str	r1, [fp, #LO_reg_cop0+40] /* EntryHi */
	orr	r4, r4, r5
	str	r4, [fp, #LO_reg_cop0+16] /* Context */
	mov	r0, #0x80000000         /* general exception vector */
	bl	get_addr_ht
	mov	pc, r0
	.size	exec_pagefault, .-exec_pagefault
262
/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	/* not found: compile.  Bit 0 of the address passed to the
	 * recompiler flags "delay slot" (low bits cleared first). */
	mov	r4, r0
	bic	r0, r0, #7
	mov	r5, r1
	orr	r0, r0, #1
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker_ds          /* compiled OK: retry lookup */
	/* pagefault */
	bic	r1, r0, #7
	mov	r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
	sub	r0, r1, #4              /* EPC = branch, one insn before the slot */
	b	exec_pagefault
	.size	dyna_linker_ds, .-dyna_linker_ds
/* Literal pool: addresses of the recompiler's lookup tables, loaded
 * pc-relative by dyna_linker_main and jump_vaddr. */
.jiptr:
	.word	jump_in
.jdptr:
	.word	jump_dirty
.htptr:
	.word	hash_table
291
	.align	2

/* Per-register entry stubs for indirect jumps from generated code: the
 * target vaddr lives in rN; each stub moves it to r0, precomputes the
 * hash input in r2 (addr ^ (addr << 16)), and enters jump_vaddr.
 * jump_vaddr_r7 is placed last and FALLS THROUGH into jump_vaddr (no
 * branch needed).  There is no _r11 stub (r11 = fp) and none for
 * r13-r15 (sp/lr/pc). */
FUNCTION(jump_vaddr_r0):
	eor	r2, r0, r0, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
	eor	r2, r1, r1, lsl #16
	mov	r0, r1
	b	jump_vaddr
	.size	jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
	mov	r0, r2
	eor	r2, r2, r2, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
	eor	r2, r3, r3, lsl #16
	mov	r0, r3
	b	jump_vaddr
	.size	jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
	eor	r2, r4, r4, lsl #16
	mov	r0, r4
	b	jump_vaddr
	.size	jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
	eor	r2, r5, r5, lsl #16
	mov	r0, r5
	b	jump_vaddr
	.size	jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
	eor	r2, r6, r6, lsl #16
	mov	r0, r6
	b	jump_vaddr
	.size	jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
	eor	r2, r8, r8, lsl #16
	mov	r0, r8
	b	jump_vaddr
	.size	jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
	eor	r2, r9, r9, lsl #16
	mov	r0, r9
	b	jump_vaddr
	.size	jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
	eor	r2, r10, r10, lsl #16
	mov	r0, r10
	b	jump_vaddr
	.size	jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
	eor	r2, r12, r12, lsl #16
	mov	r0, r12
	b	jump_vaddr
	.size	jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
	eor	r2, r7, r7, lsl #16
	add	r0, r7, #0              /* mov r0, r7; falls through to jump_vaddr */
	.size	jump_vaddr_r7, .-jump_vaddr_r7
/* Common tail: probe the two-way hash table for r0; on a miss, call
 * get_addr (saving the cycle counter r10 across the C call) and jump
 * to the returned code. */
FUNCTION(jump_vaddr):
	ldr	r1, .htptr
	mvn	r3, #15
	and	r2, r3, r2, lsr #12     /* bucket offset = hash & ~15 */
	ldr	r2, [r1, r2]!           /* r1 -> bucket; r2 = way-0 vaddr */
	teq	r2, r0
	ldreq	pc, [r1, #4]            /* way-0 hit */
	ldr	r2, [r1, #8]
	teq	r2, r0
	ldreq	pc, [r1, #12]           /* way-1 hit */
	str	r10, [fp, #LO_cycle_count]
	bl	get_addr
	ldr	r10, [fp, #LO_cycle_count]
	mov	pc, r0
	.size	jump_vaddr, .-jump_vaddr
367
	.align	2

/* Verify that a compiled block's source is unchanged: compare the saved
 * copy (r1) against current memory (r2) for r3 bytes.  On match, return
 * to the block (lr); on mismatch, fall into get_addr to re-resolve.
 * verify_code_ds is the delay-slot variant: it saves r8 to
 * branch_target first (restored in .D4 either way). */
FUNCTION(verify_code_ds):
	str	r8, [fp, #LO_branch_target]
FUNCTION(verify_code_vm):
FUNCTION(verify_code):
	/* r1 = source */
	/* r2 = target */
	/* r3 = length */
	tst	r3, #4                  /* odd word count? pre-load one pair */
	mov	r4, #0
	add	r3, r1, r3              /* r3 = end of source */
	mov	r5, #0
	ldrne	r4, [r1], #4
	mov	r12, #0
	ldrne	r5, [r2], #4
	teq	r1, r3
	beq	.D3
.D2:
	/* 2x-unrolled: compare the previous pair while loading the next,
	 * carrying one comparison in r12 to overlap loads and ALU ops */
	ldr	r7, [r1], #4
	eor	r9, r4, r5
	ldr	r8, [r2], #4
	orrs	r9, r9, r12
	bne	.D4                     /* mismatch found */
	ldr	r4, [r1], #4
	eor	r12, r7, r8
	ldr	r5, [r2], #4
	cmp	r1, r3
	bcc	.D2
	teq	r7, r8                  /* drain the pipelined comparisons */
.D3:
	teqeq	r4, r5
.D4:
	ldr	r8, [fp, #LO_branch_target]
	moveq	pc, lr                  /* everything matched: block is valid */
.D5:
	bl	get_addr                /* stale: look up / recompile (r0 = vaddr) */
	mov	pc, r0
	.size	verify_code, .-verify_code
	.size	verify_code_vm, .-verify_code_vm
408
	.align	2
/* Called from generated code when the downcount (r10) expires: convert
 * r10 back to an absolute cycle count, run gen_interupt, then either
 * unwind out of the dynarec (stop set), re-enter at pcaddr (exception
 * pending), or return to the interrupted block.  Also flushes any
 * blocks flagged in the restore_candidate bitmap to the clean list. */
FUNCTION(cc_interrupt):
	ldr	r0, [fp, #LO_last_count]
	mov	r1, #0
	mov	r2, #0x1fc
	add	r10, r0, r10            /* r10 = absolute cycles */
	str	r1, [fp, #LO_pending_exception]
	and	r2, r2, r10, lsr #17    /* pick a restore_candidate word by time */
	add	r3, fp, #LO_restore_candidate
	str	r10, [fp, #LO_cycle] /* PCSX cycles */
@@	str	r10, [fp, #LO_reg_cop0+36] /* Count */
	ldr	r4, [r2, r3]            /* r4 = candidate bitmap word */
	mov	r10, lr                 /* stash return addr; r10 reloaded below */
	tst	r4, r4
	bne	.E4                     /* some dirty blocks to promote first */
.E1:
	bl	gen_interupt
	mov	lr, r10
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_next_interupt]
	ldr	r1, [fp, #LO_pending_exception]
	ldr	r2, [fp, #LO_stop]
	str	r0, [fp, #LO_last_count]
	sub	r10, r10, r0            /* back to downcount form */
	tst	r2, r2
	ldmnefd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc} /* stop: pop new_dyna_start's frame */
	tst	r1, r1
	moveq	pc, lr                  /* nothing pending: resume block */
.E2:
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht             /* exception: re-enter at new pc */
	mov	pc, r0
.E4:
	/* Move 'dirty' blocks to the 'clean' list */
	lsl	r5, r2, #3              /* first block index for this word */
	str	r1, [r2, r3]            /* clear bitmap word (r1 == 0) */
.E5:
	lsrs	r4, r4, #1              /* shift bit into carry */
	mov	r0, r5
	add	r5, r5, #1
	blcs	clean_blocks            /* bit was set: clean this block */
	tst	r5, #31
	bne	.E5
	b	.E1
	.size	cc_interrupt, .-cc_interrupt
454
	.align	2
/* Re-enter translated code at pcaddr after an interrupt, charging two
 * extra cycles to the downcount. */
FUNCTION(do_interrupt):
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	add	r10, r10, #2
	mov	pc, r0
	.size	do_interrupt, .-do_interrupt
462
	.align	2
/* Raise a coprocessor-unusable exception for r0 = faulting EPC:
 * Cause = 0x1000002c (CE=1, ExcCode=11), Status gets EXL set, then
 * jump to the exception vector at 0x80000080. */
FUNCTION(fp_exception):
	mov	r2, #0x10000000         /* CE field */
.E7:
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2                  /* set EXL */
	add	r2, r2, #0x2c           /* ExcCode = 11 (CpU) */
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80           /* vector 0x80000080 */
	bl	get_addr_ht
	mov	pc, r0
	.size	fp_exception, .-fp_exception
	.align	2
/* Same, but for a fault in a branch delay slot (Cause.BD set). */
FUNCTION(fp_exception_ds):
	mov	r2, #0x90000000 /* Set high bit if delay slot */
	b	.E7
	.size	fp_exception_ds, .-fp_exception_ds
483
	.align	2
/* Raise a SYSCALL exception (Cause = 0x20, ExcCode 8) for r0 = EPC and
 * jump to the exception vector at 0x80000080. */
FUNCTION(jump_syscall):
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2                  /* set EXL */
	mov	r2, #0x20               /* ExcCode = 8 (Syscall) << 2 */
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80           /* vector 0x80000080 */
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall, .-jump_syscall
	.align	2
498
	.align	2
/* HLE syscall path: flush pc/cycles to psxRegs and let the C-side
 * psxException handle it, then resume through pcsx_return. */
FUNCTION(jump_syscall_hle):
	str	r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */
	ldr	r2, [fp, #LO_last_count]
	mov	r1, #0                  /* in delay slot */
	add	r2, r2, r10
	mov	r0, #0x20               /* cause */
	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
	bl	psxException

	/* note: psxException might do recursive recompiler call from it's HLE code,
	 * so be ready for this */
/* Common re-entry point after a C-side call that may have changed
 * pcaddr/cycle: rebuild the downcount and jump to code for pcaddr.
 * Also used as the return address by jump_hlecall / jump_intcall. */
pcsx_return:
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_pcaddr]
	sub	r10, r10, r1
	str	r1, [fp, #LO_last_count]
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall_hle, .-jump_syscall_hle
520
	.align	2
/* Call an HLE BIOS handler: r0 = pc to record, r1 = handler to invoke.
 * Flushes the cycle counter, sets lr to pcsx_return so the handler's
 * return resumes translated code. */
FUNCTION(jump_hlecall):
	ldr	r2, [fp, #LO_last_count]
	str	r0, [fp, #LO_pcaddr]
	add	r2, r2, r10
	adr	lr, pcsx_return
	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
	bx	r1
	.size	jump_hlecall, .-jump_hlecall
530
531 .align 2
532FUNCTION(jump_intcall):
533 ldr r2, [fp, #LO_last_count]
534 str r0, [fp, #LO_pcaddr]
535 add r2, r2, r10
536 adr lr, pcsx_return
537 str r2, [fp, #LO_cycle] /* PCSX cycle counter */
538 b execI
539 .size jump_hlecall, .-jump_hlecall
540
	.align	2
/* Exit the dynarec: write the absolute cycle count back to psxRegs and
 * pop the frame pushed by new_dyna_start (restores callee-saved regs
 * and returns to new_dyna_start's caller). */
FUNCTION(new_dyna_leave):
	ldr	r0, [fp, #LO_last_count]
	add	r12, fp, #28            /* purpose unclear from here — r12 is
	                                 * caller-saved; presumably scratch for
	                                 * generated code.  TODO confirm */
	add	r10, r0, r10            /* absolute cycles */
	str	r10, [fp, #LO_cycle]
	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
	.size	new_dyna_leave, .-new_dyna_leave
549
	.align	2
/* Per-register stubs called from generated code on a store that may hit
 * compiled code: each saves the live caller-saved registers into the
 * start of dynarec_local (stmia fp), moves the written address into r0,
 * and enters invalidate_addr_call.  The r12 stub is placed last and
 * FALLS THROUGH (no branch).  No stub exists for r11 (fp) or r13-r15. */
FUNCTION(invalidate_addr_r0):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	b	invalidate_addr_call
	.size	invalidate_addr_r0, .-invalidate_addr_r0
	.align	2
FUNCTION(invalidate_addr_r1):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r1
	b	invalidate_addr_call
	.size	invalidate_addr_r1, .-invalidate_addr_r1
	.align	2
FUNCTION(invalidate_addr_r2):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r2
	b	invalidate_addr_call
	.size	invalidate_addr_r2, .-invalidate_addr_r2
	.align	2
FUNCTION(invalidate_addr_r3):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r3
	b	invalidate_addr_call
	.size	invalidate_addr_r3, .-invalidate_addr_r3
	.align	2
FUNCTION(invalidate_addr_r4):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r4
	b	invalidate_addr_call
	.size	invalidate_addr_r4, .-invalidate_addr_r4
	.align	2
FUNCTION(invalidate_addr_r5):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r5
	b	invalidate_addr_call
	.size	invalidate_addr_r5, .-invalidate_addr_r5
	.align	2
FUNCTION(invalidate_addr_r6):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r6
	b	invalidate_addr_call
	.size	invalidate_addr_r6, .-invalidate_addr_r6
	.align	2
FUNCTION(invalidate_addr_r7):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r7
	b	invalidate_addr_call
	.size	invalidate_addr_r7, .-invalidate_addr_r7
	.align	2
FUNCTION(invalidate_addr_r8):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r8
	b	invalidate_addr_call
	.size	invalidate_addr_r8, .-invalidate_addr_r8
	.align	2
FUNCTION(invalidate_addr_r9):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r9
	b	invalidate_addr_call
	.size	invalidate_addr_r9, .-invalidate_addr_r9
	.align	2
FUNCTION(invalidate_addr_r10):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r10
	b	invalidate_addr_call
	.size	invalidate_addr_r10, .-invalidate_addr_r10
	.align	2
FUNCTION(invalidate_addr_r12):
	stmia	fp, {r0, r1, r2, r3, r12, lr}
	mov	r0, r12                 /* falls through to invalidate_addr_call */
	.size	invalidate_addr_r12, .-invalidate_addr_r12
	.align	2
/* Common tail: call the C invalidate_addr(r0) only when r0 lies outside
 * the known-safe [inv_code_start, inv_code_end] range, then restore the
 * saved registers and return to the generated code (lr was saved by the
 * stmia, reloaded into pc by the ldmia). */
invalidate_addr_call:
	ldr	r12, [fp, #LO_inv_code_start]
	ldr	lr, [fp, #LO_inv_code_end]
	cmp	r0, r12
	cmpcs	lr, r0                  /* carry stays set iff start <= r0 <= end */
	blcc	invalidate_addr
	ldmia	fp, {r0, r1, r2, r3, r12, pc}
	.size	invalidate_addr_call, .-invalidate_addr_call
629
	.align	2
/* Enter the dynarec from C: save callee-saved registers (the matching
 * pop is in new_dyna_leave / cc_interrupt's stop path), point fp at
 * dynarec_local, compute the downcount in r10, and jump to the code
 * for pcaddr. */
FUNCTION(new_dyna_start):
	/* ip is stored to conform EABI alignment */
	stmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
	load_varadr fp, dynarec_local
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	str	r1, [fp, #LO_last_count]
	sub	r10, r10, r1            /* r10 = cycles until next event */
	mov	pc, r0
	.size	new_dyna_start, .-new_dyna_start
643
644/* --------------------------------------- */
645
.align 2

/* Memory-read dispatcher body.  The handler table entry (indexed by the
 * address bits selected by \tab_shift) is shifted left one: if the top
 * bit was clear (carry clear after lsls) the entry is a shifted direct
 * memory base and the conditional \readop (ldrcc*) loads the value and
 * we return; if carry is set the entry is a handler, so flush cycles
 * and tail-call it via bx. */
.macro pcsx_read_mem readop tab_shift
	/* r0 = address, r1 = handler_tab, r2 = cycles */
	lsl	r3, r0, #20
	lsr	r3, #(20+\tab_shift)    /* r3 = table index from low addr bits */
	ldr	r12, [fp, #LO_last_count]
	ldr	r1, [r1, r3, lsl #2]
	add	r2, r2, r12             /* absolute cycle count */
	lsls	r1, #1                  /* carry = handler flag; r1 = base<<1 */
.if \tab_shift == 1
	lsl	r3, #1                  /* halfword table: scale index manually */
	\readop	r0, [r1, r3]
.else
	\readop	r0, [r1, r3, lsl #\tab_shift]
.endif
	movcc	pc, lr                  /* direct memory: done */
	str	r2, [fp, #LO_cycle]
	bx	r1                      /* call C handler (address in r1) */
.endm
666
/* Read dispatchers for 8/16/32-bit loads.  The 8- and 16-bit entries
 * first offset r1 to the sub-table holding their handlers (the table
 * is laid out as 32-bit, then 16-bit, then 8-bit sections). */
FUNCTION(jump_handler_read8):
	add	r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_read_mem ldrccb, 0

FUNCTION(jump_handler_read16):
	add	r1, #0x1000/4*4 @ shift to r16 part
	pcsx_read_mem ldrcch, 1

FUNCTION(jump_handler_read32):
	pcsx_read_mem ldrcc, 2
677
678
/* Memory-write dispatcher body, mirroring pcsx_read_mem: carry clear
 * after "lsls r3, #1" means direct memory (the conditional \wrtop
 * strcc* stores and we return with the cycle count in r0); carry set
 * means a C handler must run.  Around the handler call the cycle
 * counter is flushed, and on return r0 is rebuilt as cycles minus
 * next_interupt (the downcount the generated code expects). */
.macro pcsx_write_mem wrtop tab_shift
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
	lsl	r12, r0, #20
	lsr	r12, #(20+\tab_shift)   /* r12 = table index */
	ldr	r3, [r3, r12, lsl #2]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	lsls	r3, #1                  /* carry = handler flag */
	mov	r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
	lsl	r12, #1
	\wrtop	r1, [r3, r12]
.else
	\wrtop	r1, [r3, r12, lsl #\tab_shift]
.endif
	movcc	pc, lr                  /* direct store done */
	ldr	r12, [fp, #LO_last_count]
	mov	r0, r1                  /* handler takes the data in r0 */
	add	r2, r2, r12
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, r3}                /* r2 = saved cycles, r3 = return addr */
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0              /* new downcount for generated code */
	bx	r3
.endm
707
/* Write dispatchers for 8/16/32-bit stores; 8- and 16-bit entries
 * offset r3 into their sub-table first (same layout as the read side). */
FUNCTION(jump_handler_write8):
	add	r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
	pcsx_write_mem strccb, 0

FUNCTION(jump_handler_write16):
	add	r3, #0x1000/4*4 @ shift to r16 part
	pcsx_write_mem strcch, 1

FUNCTION(jump_handler_write32):
	pcsx_write_mem strcc, 2
718
/* Write path when the handler is already known (r3 = handler, no table
 * lookup): flush cycles, call it with the data in r0, then rebuild the
 * downcount as in pcsx_write_mem. */
FUNCTION(jump_handler_write_h):
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler */
	ldr	r12, [fp, #LO_last_count]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	add	r2, r2, r12             /* absolute cycle count */
	mov	r0, r1                  /* handler takes the data in r0 */
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, r3}                /* r2 = saved cycles, r3 = return addr */
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0              /* new downcount */
	bx	r3
734
/* Emulate the MIPS SWL (store word left) unaligned store directly into
 * RAM.  The write table maps the page; carry set after "lsls r3, #1"
 * means an I/O handler page, which this path cannot service (label 4).
 * Otherwise dispatch on the low two address bits and store the
 * appropriate high bytes of r1.  Returns cycles in r0. */
FUNCTION(jump_handle_swl):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1                  /* carry = handler page */
	bcs	4f
	add	r3, r0, r3              /* r3 = host address (byte-exact) */
	mov	r0, r2
	tst	r3, #2
	beq	101f
	tst	r3, #1
	beq	2f
3:
	str	r1, [r3, #-3]           /* addr&3 == 3: full word */
	bx	lr
2:
	lsr	r2, r1, #8              /* addr&3 == 2: three high bytes */
	lsr	r1, #24
	strh	r2, [r3, #-2]
	strb	r1, [r3]
	bx	lr
101:
	tst	r3, #1
	lsrne	r1, #16 @ 1
	lsreq	r12, r1, #24 @ 0
	strneh	r1, [r3, #-1]           /* addr&3 == 1: two high bytes */
	streqb	r12, [r3]               /* addr&3 == 0: highest byte only */
	bx	lr
4:
	mov	r0, r2                  /* handler page: silently ignored */
@	b	abort
	bx	lr @ TODO?
768
769
/* Emulate the MIPS SWR (store word right) unaligned store directly into
 * RAM; same page handling as jump_handle_swl.  Dispatches on addr&3 and
 * stores the appropriate low bytes of r1.  Returns cycles in r0. */
FUNCTION(jump_handle_swr):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1                  /* carry = handler page */
	bcs	4f
	add	r3, r0, r3              /* r3 = host address */
	and	r12, r3, #3
	mov	r0, r2
	cmp	r12, #2
	strgtb	r1, [r3] @ 3
	streqh	r1, [r3] @ 2
	cmp	r12, #1
	strlt	r1, [r3] @ 0
	bxne	lr                      /* cases 0,2,3 handled above */
	lsr	r2, r1, #8 @ 1
	strb	r1, [r3]                /* addr&3 == 1: three low bytes */
	strh	r2, [r3, #1]
	bx	lr
4:
	mov	r0, r2                  /* handler page: silently ignored */
@	b	abort
	bx	lr @ TODO?
794
795
/* Root counter \num, mode 0: count = (cycles - cycleStart) & 0xffff.
 * The lsl/lsr #16 pair performs the 16-bit wraparound. */
.macro rcntx_read_mode0 num
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
	mov	r0, r2, lsl #16
	sub	r0, r3, lsl #16
	lsr	r0, #16
	bx	lr
.endm
804
/* Mode-0 count readers for root counters 0-2 (plain cycle delta). */
FUNCTION(rcnt0_read_count_m0):
	rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
	rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
	rcntx_read_mode0 2
813
/* Counter 0, mode 1: count = (cycles - cycleStart) / 5, approximated
 * with a fixed-point multiply (0x3334/0x10000 ~= 1/5) and a 16-bit
 * shift-down. */
FUNCTION(rcnt0_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
	mov_16	r1, 0x3334
	sub	r2, r2, r3
	mul	r0, r1, r2 @ /= 5
	lsr	r0, #16
	bx	lr
822
/* Counter 1, mode 1: approximate division by hsync_cycles via a 64-bit
 * multiply by the fixed-point reciprocal 0x1e6cde; the high word of the
 * product (umull's r0) is the result. */
FUNCTION(rcnt1_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*1]
	mov_24	r1, 0x1e6cde
	sub	r2, r2, r3
	umull	r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
	bx	lr
830
/* Counter 2, mode 1: count = ((cycles - cycleStart) / 8) & 0xffff,
 * folding the divide-by-8 into the wraparound shifts (#16-3 = 13). */
FUNCTION(rcnt2_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*2]
	mov	r0, r2, lsl #16-3
	sub	r0, r3, lsl #16-3
	lsr	r0, #16 @ /= 8
	bx	lr
838
839@ vim:filetype=armasm