drc: add a timing hack for Internal Section
libpcsxcore/new_dynarec/linkage_arm.S
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   linkage_arm.s for PCSX                                                *
 *   Copyright (C) 2009-2011 Ari64                                         *
 *   Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                     *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include "arm_features.h"
#include "new_dynarec_config.h"
#include "linkage_offsets.h"


#ifdef __MACH__
#define dynarec_local		ESYM(dynarec_local)
#define add_link		ESYM(add_link)
#define new_recompile_block	ESYM(new_recompile_block)
#define get_addr		ESYM(get_addr)
#define get_addr_ht		ESYM(get_addr_ht)
#define clean_blocks		ESYM(clean_blocks)
#define gen_interupt		ESYM(gen_interupt)
#define invalidate_addr		ESYM(invalidate_addr)
#endif

	.bss
	.align	4
	.global	dynarec_local
	.type	dynarec_local, %object
	.size	dynarec_local, LO_dynarec_local_size
dynarec_local:
	.space	LO_dynarec_local_size

#define DRC_VAR_(name, vname, size_) \
	vname = dynarec_local + LO_##name; \
	.global vname; \
	.type vname, %object; \
	.size vname, size_

#define DRC_VAR(name, size_) \
	DRC_VAR_(name, ESYM(name), size_)

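/* DRC_VAR carves C-visible symbols out of the dynarec_local blob at the
   byte offsets generated into linkage_offsets.h, so the C side and this
   asm agree on a single layout.  Roughly equivalent, from the C side
   (a sketch, not the real generator):

     extern char dynarec_local[];
     #define next_interupt (*(u32 *)(dynarec_local + LO_next_interupt))

   which is why everything below is addressed as [fp, #LO_...], with fp
   permanently holding &dynarec_local while inside translated code. */
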
DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(address, 4)
@DRC_VAR(align0, 4) /* unused/alignment */
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs */
@DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(scratch_buf_ptr, 4)
@DRC_VAR(align1, 8) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)


#ifdef TEXRELS_FORBIDDEN
	.data
	.align 2
ptr_jump_in:
	.word	ESYM(jump_in)
ptr_jump_dirty:
	.word	ESYM(jump_dirty)
ptr_hash_table:
	.word	ESYM(hash_table)
#endif


	.syntax unified
	.text
	.align	2

#ifndef HAVE_ARMV5
.macro blx rd
	mov	lr, pc
	bx	\rd
.endm
#endif

.macro load_varadr reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
	movw	\reg, #:lower16:(\var-(1678f+8))
	movt	\reg, #:upper16:(\var-(1678f+8))
1678:
	add	\reg, pc
#elif defined(HAVE_ARMV7) && !defined(__PIC__)
	movw	\reg, #:lower16:\var
	movt	\reg, #:upper16:\var
#else
	ldr	\reg, =\var
#endif
.endm
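
/* Note on the `-(1678f+8)` bias used above and in load_varadr_ext below:
   in ARM state, reading pc yields the current instruction's address + 8,
   so the add/ldr at local label 1678 lands exactly on the target while
   keeping the sequence free of text relocations. */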

.macro load_varadr_ext reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
	movw	\reg, #:lower16:(ptr_\var-(1678f+8))
	movt	\reg, #:upper16:(ptr_\var-(1678f+8))
1678:
	ldr	\reg, [pc, \reg]
#else
	load_varadr \reg \var
#endif
.endm

.macro mov_16 reg imm
#ifdef HAVE_ARMV7
	movw	\reg, #\imm
#else
	mov	\reg, #(\imm & 0x00ff)
	orr	\reg, #(\imm & 0xff00)
#endif
.endm

.macro mov_24 reg imm
#ifdef HAVE_ARMV7
	movw	\reg, #(\imm & 0xffff)
	movt	\reg, #(\imm >> 16)
#else
	mov	\reg, #(\imm & 0x0000ff)
	orr	\reg, #(\imm & 0x00ff00)
	orr	\reg, #(\imm & 0xff0000)
#endif
.endm

/* r0 = virtual target address */
/* r1 = instruction to patch */
.macro dyna_linker_main
#ifndef NO_WRITE_EXEC
	load_varadr_ext r3, jump_in
	/* get_page */
	lsr	r2, r0, #12
	mov	r6, #4096
	bic	r2, r2, #0xe0000
	sub	r6, r6, #1
	cmp	r2, #0x1000
	ldr	r7, [r1]
	biclt	r2, #0x0e00
	and	r6, r6, r2
	cmp	r2, #2048
	add	r12, r7, #2
	orrcs	r2, r6, #2048
	ldr	r5, [r3, r2, lsl #2]
	lsl	r12, r12, #8
	add	r6, r1, r12, asr #6
	mov	r8, #0
	/* jump_in lookup */
1:
	movs	r4, r5
	beq	2f
	ldr	r3, [r5]         /* ll_entry .vaddr */
	ldrd	r4, r5, [r4, #8] /* ll_entry .addr, .next */
	teq	r3, r0
	bne	1b
	teq	r4, r6
	moveq	pc, r4 /* Stale i-cache */
	mov	r8, r4
	b	1b /* jump_in may have dupes, continue search */
2:
	tst	r8, r8
	beq	3f /* r0 not in jump_in */

	mov	r5, r1
	mov	r1, r6
	bl	add_link
	sub	r2, r8, r5
	and	r1, r7, #0xff000000
	lsl	r2, r2, #6
	sub	r1, r1, #2
	add	r1, r1, r2, lsr #8
	str	r1, [r5]
	mov	pc, r8
3:
	/* hash_table lookup */
	cmp	r2, #2048
	load_varadr_ext r3, jump_dirty
	eor	r4, r0, r0, lsl #16
	lslcc	r2, r0, #9
	load_varadr_ext r6, hash_table
	lsr	r4, r4, #12
	lsrcc	r2, r2, #21
	bic	r4, r4, #15
	ldr	r5, [r3, r2, lsl #2]
	ldr	r7, [r6, r4]!
	teq	r7, r0
	ldreq	pc, [r6, #8]
	ldr	r7, [r6, #4]
	teq	r7, r0
	ldreq	pc, [r6, #12]
	/* jump_dirty lookup */
6:
	movs	r4, r5
	beq	8f
	ldr	r3, [r5]
	ldr	r5, [r4, #12]
	teq	r3, r0
	bne	6b
7:
	ldr	r1, [r4, #8]
	/* hash_table insert */
	ldr	r2, [r6]
	ldr	r3, [r6, #8]
	str	r0, [r6]
	str	r1, [r6, #8]
	str	r2, [r6, #4]
	str	r3, [r6, #12]
	mov	pc, r1
8:
#else
	/* XXX: should be able to do better than this... */
	bl	get_addr_ht
	mov	pc, r0
#endif
.endm
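
/* For reference, the inline get_page computation above in C; it mirrors
   get_page() in new_dynarec.c (a sketch, written from the shifts used
   here):

     static u_int get_page(u_int vaddr)
     {
       u_int page = (vaddr >> 12) & ~0xe0000;  // strip KSEG0/KSEG1 bits
       if (page < 0x1000)
         page &= ~0x0e00;                      // fold the RAM mirrors
       if (page >= 2048)
         page = 2048 + (page & 2047);          // everything else hashes
       return page;                            // index into jump_in[]
     }

   jump_in[page] is then walked as a linked list of {vaddr, flags, addr,
   next} records until .vaddr matches r0. */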


FUNCTION(dyna_linker):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0
	mov	r5, r1
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker
	/* pagefault */
	mov	r1, r0
	mov	r2, #8
	.size	dyna_linker, .-dyna_linker

FUNCTION(exec_pagefault):
	/* r0 = instruction pointer */
	/* r1 = fault address */
	/* r2 = cause */
	ldr	r3, [fp, #LO_reg_cop0+48] /* Status */
	mvn	r6, #0xF000000F
	ldr	r4, [fp, #LO_reg_cop0+16] /* Context */
	bic	r6, r6, #0x0F800000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r3, r3, #2
	str	r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
	bic	r4, r4, r6
	str	r3, [fp, #LO_reg_cop0+48] /* Status */
	and	r5, r6, r1, lsr #9
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	and	r1, r1, r6, lsl #9
	str	r1, [fp, #LO_reg_cop0+40] /* EntryHi */
	orr	r4, r4, r5
	str	r4, [fp, #LO_reg_cop0+16] /* Context */
	mov	r0, #0x80000000
	bl	get_addr_ht
	mov	pc, r0
	.size	exec_pagefault, .-exec_pagefault
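
/* What the interleaved stores above compute, in C terms (the offsets are
   COP0 registers: +16 Context, +32 BadVAddr, +40 EntryHi, +48 Status,
   +52 Cause, +56 EPC; m = 0x007ffff0 is built by the mvn/bic pair --
   a sketch):

     Status  |= 2;                              // enter exception mode
     EPC      = epc;
     BadVAddr = vaddr;
     Cause    = cause;
     EntryHi  = vaddr & (m << 9);               // VPN field
     Context  = (Context & ~m) | ((vaddr >> 9) & m);
     jump to get_addr_ht(0x80000000);           // exception vector

   This is largely R4300-era inheritance; the PSX has no TLB, so the path
   is rarely, if ever, taken here. */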

/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
	/* r0 = virtual target address */
	/* r1 = instruction to patch */
	dyna_linker_main

	mov	r4, r0
	bic	r0, r0, #7
	mov	r5, r1
	orr	r0, r0, #1
	bl	new_recompile_block
	tst	r0, r0
	mov	r0, r4
	mov	r1, r5
	beq	dyna_linker_ds
	/* pagefault */
	bic	r1, r0, #7
	mov	r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
	sub	r0, r1, #4
	b	exec_pagefault
	.size	dyna_linker_ds, .-dyna_linker_ds

	.align	2

FUNCTION(jump_vaddr_r0):
	eor	r2, r0, r0, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
	eor	r2, r1, r1, lsl #16
	mov	r0, r1
	b	jump_vaddr
	.size	jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
	mov	r0, r2
	eor	r2, r2, r2, lsl #16
	b	jump_vaddr
	.size	jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
	eor	r2, r3, r3, lsl #16
	mov	r0, r3
	b	jump_vaddr
	.size	jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
	eor	r2, r4, r4, lsl #16
	mov	r0, r4
	b	jump_vaddr
	.size	jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
	eor	r2, r5, r5, lsl #16
	mov	r0, r5
	b	jump_vaddr
	.size	jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
	eor	r2, r6, r6, lsl #16
	mov	r0, r6
	b	jump_vaddr
	.size	jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
	eor	r2, r8, r8, lsl #16
	mov	r0, r8
	b	jump_vaddr
	.size	jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
	eor	r2, r9, r9, lsl #16
	mov	r0, r9
	b	jump_vaddr
	.size	jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
	eor	r2, r10, r10, lsl #16
	mov	r0, r10
	b	jump_vaddr
	.size	jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
	eor	r2, r12, r12, lsl #16
	mov	r0, r12
	b	jump_vaddr
	.size	jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
	eor	r2, r7, r7, lsl #16
	add	r0, r7, #0
	.size	jump_vaddr_r7, .-jump_vaddr_r7
FUNCTION(jump_vaddr):
	load_varadr_ext r1, hash_table
	mvn	r3, #15
	and	r2, r3, r2, lsr #12
	ldr	r2, [r1, r2]!
	teq	r2, r0
	ldreq	pc, [r1, #8]
	ldr	r2, [r1, #4]
	teq	r2, r0
	ldreq	pc, [r1, #12]
	str	r10, [fp, #LO_cycle_count]
	bl	get_addr
	ldr	r10, [fp, #LO_cycle_count]
	mov	pc, r0
	.size	jump_vaddr, .-jump_vaddr
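
/* The hash above matches the one new_dynarec.c uses for hash_table; each
   16-byte bucket holds two {vaddr, tcaddr} pairs (layout assumed from the
   offsets used above).  A C sketch of the lookup:

     struct ht_bucket { u32 vaddr[2]; void *tcaddr[2]; };
     u32 h = ((vaddr ^ (vaddr << 16)) >> 12) & ~15u;   // byte offset
     struct ht_bucket *b = (void *)((char *)hash_table + h);
     if      (b->vaddr[0] == vaddr) goto_tc(b->tcaddr[0]);
     else if (b->vaddr[1] == vaddr) goto_tc(b->tcaddr[1]);
     else    goto_tc(get_addr(vaddr));   // slow path, may recompile

   which is the same as indexing bucket ((vaddr >> 16) ^ vaddr) & 0xffff.
   r10 (the live cycle counter) is spilled to cycle_count around the C
   call, since get_addr may clobber it. */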

	.align	2

FUNCTION(verify_code_ds):
	str	r8, [fp, #LO_branch_target] @ preserve HOST_BTREG?
FUNCTION(verify_code):
	/* r1 = source */
	/* r2 = target */
	/* r3 = length */
	tst	r3, #4
	mov	r4, #0
	add	r3, r1, r3
	mov	r5, #0
	ldrne	r4, [r1], #4
	mov	r12, #0
	ldrne	r5, [r2], #4
	teq	r1, r3
	beq	.D3
.D2:
	ldr	r7, [r1], #4
	eor	r9, r4, r5
	ldr	r8, [r2], #4
	orrs	r9, r9, r12
	bne	.D4
	ldr	r4, [r1], #4
	eor	r12, r7, r8
	ldr	r5, [r2], #4
	cmp	r1, r3
	bcc	.D2
	teq	r7, r8
.D3:
	teqeq	r4, r5
.D4:
	ldr	r8, [fp, #LO_branch_target]
	moveq	pc, lr
.D5:
	bl	get_addr
	mov	pc, r0
	.size	verify_code, .-verify_code
	.size	verify_code_ds, .-verify_code_ds
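
/* In C terms, the unrolled compare loop above is simply:

     if (memcmp(source, copy, length) == 0)
         return;                  // block unmodified, keep running it
     resume_at(get_addr(start));  // code changed: revalidate/recompile

   where source is the game's RAM and copy is the snapshot taken when the
   block was compiled; `resume_at`/`start` are illustrative names (r0 is
   untouched here and presumably still holds the block's start address
   when get_addr is called). */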

	.align	2
FUNCTION(cc_interrupt):
	ldr	r0, [fp, #LO_last_count]
	mov	r1, #0
	mov	r2, #0x1fc
	add	r10, r0, r10
	str	r1, [fp, #LO_pending_exception]
	and	r2, r2, r10, lsr #17
	add	r3, fp, #LO_restore_candidate
	str	r10, [fp, #LO_cycle] /* PCSX cycles */
@@	str	r10, [fp, #LO_reg_cop0+36] /* Count */
	ldr	r4, [r2, r3]
	mov	r10, lr
	tst	r4, r4
	bne	.E4
.E1:
	bl	gen_interupt
	mov	lr, r10
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_next_interupt]
	ldr	r1, [fp, #LO_pending_exception]
	ldr	r2, [fp, #LO_stop]
	str	r0, [fp, #LO_last_count]
	sub	r10, r10, r0
	tst	r2, r2
	ldmfdne	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
	tst	r1, r1
	moveq	pc, lr
.E2:
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	mov	pc, r0
.E4:
	/* Move 'dirty' blocks to the 'clean' list */
	lsl	r5, r2, #3
	str	r1, [r2, r3]
.E5:
	lsrs	r4, r4, #1
	mov	r0, r5
	add	r5, r5, #1
	blcs	clean_blocks
	tst	r5, #31
	bne	.E5
	b	.E1
	.size	cc_interrupt, .-cc_interrupt
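
/* The .E4/.E5 path, roughly, in C (a sketch): r2 selects one 32-bit word
   of the restore_candidate bitmap, each set bit marking a page whose
   blocks were patched dirty and may now be eligible to be marked clean:

     u32 bits = restore_candidate[word];
     restore_candidate[word] = 0;
     for (int i = 0; i < 32; i++)
         if (bits & (1u << i))
             clean_blocks(word * 32 + i);   // page number

   run just before gen_interupt, i.e. at a safe synchronization point. */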

	.align	2
FUNCTION(fp_exception):
	mov	r2, #0x10000000
.E7:
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2
	add	r2, r2, #0x2c
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80
	bl	get_addr_ht
	mov	pc, r0
	.size	fp_exception, .-fp_exception
	.align	2
FUNCTION(fp_exception_ds):
	mov	r2, #0x90000000 /* Set high bit if delay slot */
	b	.E7
	.size	fp_exception_ds, .-fp_exception_ds

	.align	2
FUNCTION(jump_syscall):
	ldr	r1, [fp, #LO_reg_cop0+48] /* Status */
	mov	r3, #0x80000000
	str	r0, [fp, #LO_reg_cop0+56] /* EPC */
	orr	r1, #2
	mov	r2, #0x20
	str	r1, [fp, #LO_reg_cop0+48] /* Status */
	str	r2, [fp, #LO_reg_cop0+52] /* Cause */
	add	r0, r3, #0x80
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_syscall, .-jump_syscall
	.align	2

	/* note: psxException may make a recursive recompiler call from its
	 * HLE code, so be ready for that */
FUNCTION(jump_to_new_pc):
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	ldr	r0, [fp, #LO_pcaddr]
	sub	r10, r10, r1
	str	r1, [fp, #LO_last_count]
	bl	get_addr_ht
	mov	pc, r0
	.size	jump_to_new_pc, .-jump_to_new_pc

	.align	2
FUNCTION(new_dyna_leave):
	ldr	r0, [fp, #LO_last_count]
	add	r12, fp, #28
	add	r10, r0, r10
	str	r10, [fp, #LO_cycle]
	ldmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
	.size	new_dyna_leave, .-new_dyna_leave

	.align	2
FUNCTION(invalidate_addr_r0):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	b	invalidate_addr_call
	.size	invalidate_addr_r0, .-invalidate_addr_r0
	.align	2
FUNCTION(invalidate_addr_r1):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r1
	b	invalidate_addr_call
	.size	invalidate_addr_r1, .-invalidate_addr_r1
	.align	2
FUNCTION(invalidate_addr_r2):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r2
	b	invalidate_addr_call
	.size	invalidate_addr_r2, .-invalidate_addr_r2
	.align	2
FUNCTION(invalidate_addr_r3):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r3
	b	invalidate_addr_call
	.size	invalidate_addr_r3, .-invalidate_addr_r3
	.align	2
FUNCTION(invalidate_addr_r4):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r4
	b	invalidate_addr_call
	.size	invalidate_addr_r4, .-invalidate_addr_r4
	.align	2
FUNCTION(invalidate_addr_r5):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r5
	b	invalidate_addr_call
	.size	invalidate_addr_r5, .-invalidate_addr_r5
	.align	2
FUNCTION(invalidate_addr_r6):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r6
	b	invalidate_addr_call
	.size	invalidate_addr_r6, .-invalidate_addr_r6
	.align	2
FUNCTION(invalidate_addr_r7):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r7
	b	invalidate_addr_call
	.size	invalidate_addr_r7, .-invalidate_addr_r7
	.align	2
FUNCTION(invalidate_addr_r8):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r8
	b	invalidate_addr_call
	.size	invalidate_addr_r8, .-invalidate_addr_r8
	.align	2
FUNCTION(invalidate_addr_r9):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r9
	b	invalidate_addr_call
	.size	invalidate_addr_r9, .-invalidate_addr_r9
	.align	2
FUNCTION(invalidate_addr_r10):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r10
	b	invalidate_addr_call
	.size	invalidate_addr_r10, .-invalidate_addr_r10
	.align	2
FUNCTION(invalidate_addr_r12):
	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
	mov	r0, r12
	.size	invalidate_addr_r12, .-invalidate_addr_r12
	.align	2
invalidate_addr_call:
	ldr	r12, [fp, #LO_inv_code_start]
	ldr	lr, [fp, #LO_inv_code_end]
	cmp	r0, r12
	cmpcs	lr, r0
	blcc	invalidate_addr
	ldmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
	.size	invalidate_addr_call, .-invalidate_addr_call
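
/* The cmp/cmpcs/blcc trio above is a branchless range check; in C:

     if (addr < inv_code_start || addr > inv_code_end)
         invalidate_addr(addr);
     // else: addr lies in a range known to hold no translated code,
     // so the comparatively expensive C call is skipped

   inv_code_start/end act as a cached "no code here" range maintained by
   the C side. */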

	.align	2
FUNCTION(new_dyna_start):
	/* ip is saved only to keep the stack 8-byte aligned per the EABI */
	stmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
	mov	fp, r0 /* dynarec_local */
	ldr	r0, [fp, #LO_pcaddr]
	bl	get_addr_ht
	ldr	r1, [fp, #LO_next_interupt]
	ldr	r10, [fp, #LO_cycle]
	str	r1, [fp, #LO_last_count]
	sub	r10, r10, r1
	mov	pc, r0
	.size	new_dyna_start, .-new_dyna_start
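
/* Called from the C side roughly as follows (prototype assumed from the
   register use above; r0 = &dynarec_local, which then lives in fp for the
   whole translated-code session):

     extern void new_dyna_start(void *dynarec_local);
     // caller stages the entry PC in pcaddr, then:
     new_dyna_start(dynarec_local);  // runs translated code until `stop`
                                     // is set, returning via new_dyna_leave

   While inside translated code, r10 holds the cycle count relative to
   last_count; new_dyna_leave folds it back into `cycle` on exit. */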

/* --------------------------------------- */

.align	2

.macro pcsx_read_mem readop tab_shift
	/* r0 = address, r1 = handler_tab, r2 = cycles */
	lsl	r3, r0, #20
	lsr	r3, #(20+\tab_shift)
	ldr	r12, [fp, #LO_last_count]
	ldr	r1, [r1, r3, lsl #2]
	add	r2, r2, r12
	lsls	r1, #1
.if \tab_shift == 1
	lsl	r3, #1
	\readop	r0, [r1, r3]
.else
	\readop	r0, [r1, r3, lsl #\tab_shift]
.endif
	movcc	pc, lr
	str	r2, [fp, #LO_cycle]
	bx	r1
.endm

FUNCTION(jump_handler_read8):
	add	r1, #0x1000/4*4 + 0x1000/2*4 @ step to the 8-bit handler part
	pcsx_read_mem ldrbcc, 0

FUNCTION(jump_handler_read16):
	add	r1, #0x1000/4*4              @ step to the 16-bit handler part
	pcsx_read_mem ldrhcc, 1

FUNCTION(jump_handler_read32):
	pcsx_read_mem ldrcc, 2
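
/* Table encoding assumed by pcsx_read_mem (matching the map entries the C
   side builds): each entry is a host pointer shifted right by one, with
   bit 31 set when the entry is an I/O handler.  `lsls r1, #1` undoes the
   shift and moves the tag bit into the carry flag, so in C terms:

     u32 ent = handler_tab[offs];       // offs = in-page offset for width
     if (!(ent & 0x80000000))           // direct memory
         return *(T *)(2 * ent + (offs << tab_shift));
     cycle = last_count + cycle_count;  // sync cycles before calling out
     return ((T (*)(void))(2 * ent))(); // tail-call the I/O handler

   The read8/read16 entry points first step r1 past the 32-bit part of the
   per-page table to their own sub-tables. */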


.macro pcsx_write_mem wrtop tab_shift
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
	lsl	r12, r0, #20
	lsr	r12, #(20+\tab_shift)
	ldr	r3, [r3, r12, lsl #2]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	lsls	r3, #1
	mov	r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
	lsl	r12, #1
	\wrtop	r1, [r3, r12]
.else
	\wrtop	r1, [r3, r12, lsl #\tab_shift]
.endif
	movcc	pc, lr
	ldr	r12, [fp, #LO_last_count]
	mov	r0, r1
	add	r2, r2, r12
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, lr}
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0
	bx	lr
.endm
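
/* The write path returns the current cycle count in r0 so generated code
   can resynchronize: a store may raise an interrupt and move
   next_interupt, so after a handler runs the count is re-based.  In C
   (a sketch):

     cycle = last_count + cycles_in;   // absolute cycle count
     handler(data);                    // may reschedule events
     last_count = next_interupt;       // new re-base point
     return cycle - next_interupt;     // relative count back in r0

   Direct stores skip all of this and return cycles_in unchanged. */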

FUNCTION(jump_handler_write8):
	add	r3, #0x1000/4*4 + 0x1000/2*4 @ step to the 8-bit handler part
	pcsx_write_mem strbcc, 0

FUNCTION(jump_handler_write16):
	add	r3, #0x1000/4*4              @ step to the 16-bit handler part
	pcsx_write_mem strhcc, 1

FUNCTION(jump_handler_write32):
	pcsx_write_mem strcc, 2

FUNCTION(jump_handler_write_h):
	/* r0 = address, r1 = data, r2 = cycles, r3 = handler */
	ldr	r12, [fp, #LO_last_count]
	str	r0, [fp, #LO_address] @ some handlers still need it..
	add	r2, r2, r12
	mov	r0, r1
	push	{r2, lr}
	str	r2, [fp, #LO_cycle]
	blx	r3

	ldr	r0, [fp, #LO_next_interupt]
	pop	{r2, lr}
	str	r0, [fp, #LO_last_count]
	sub	r0, r2, r0
	bx	lr

FUNCTION(jump_handle_swl):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1
	bcs	4f
	add	r3, r0, r3
	mov	r0, r2
	tst	r3, #2
	beq	101f
	tst	r3, #1
	beq	2f
3:
	str	r1, [r3, #-3]
	bx	lr
2:
	lsr	r2, r1, #8
	lsr	r1, #24
	strh	r2, [r3, #-2]
	strb	r1, [r3]
	bx	lr
101:
	tst	r3, #1
	lsrne	r1, #16 @ 1
	lsreq	r12, r1, #24 @ 0
	strhne	r1, [r3, #-1]
	strbeq	r12, [r3]
	bx	lr
4:
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?


FUNCTION(jump_handle_swr):
	/* r0 = address, r1 = data, r2 = cycles */
	ldr	r3, [fp, #LO_mem_wtab]
	mov	r12, r0, lsr #12
	ldr	r3, [r3, r12, lsl #2]
	lsls	r3, #1
	bcs	4f
	add	r3, r0, r3
	and	r12, r3, #3
	mov	r0, r2
	cmp	r12, #2
	strbgt	r1, [r3] @ 3
	strheq	r1, [r3] @ 2
	cmp	r12, #1
	strlt	r1, [r3] @ 0
	bxne	lr
	lsr	r2, r1, #8 @ 1
	strb	r1, [r3]
	strh	r2, [r3, #1]
	bx	lr
4:
	mov	r0, r2
@	b	abort
	bx	lr @ TODO?

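/* MIPS SWL/SWR semantics on a little-endian host, for reference (p is
   the resolved host pointer to the addressed byte; a sketch):

     switch (addr & 3) {              // SWL: store the high part downward
     case 0: p[0]  = data >> 24;                              break;
     case 1: p[-1] = data >> 16; p[0]  = data >> 24;          break;
     case 2: p[-2] = data >> 8;  p[-1] = data >> 16;
             p[0]  = data >> 24;                              break;
     case 3: memcpy(p - 3, &data, 4);                         break;
     }

   SWR mirrors it upward from the addressed byte (case 0 stores the whole
   word, case 3 a single low byte).  The `bcs 4f` path is taken when the
   page entry is an I/O handler; an unaligned store straight into an I/O
   region is not really expected, hence the bare return (the `TODO?`). */
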
.macro rcntx_read_mode0 num
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
	mov	r0, r2, lsl #16
	sub	r0, r0, r3, lsl #16
	lsr	r0, #16
	bx	lr
.endm

FUNCTION(rcnt0_read_count_m0):
	rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
	rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
	rcntx_read_mode0 2

FUNCTION(rcnt0_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
	mov_16	r1, 0x3334
	sub	r2, r2, r3
	mul	r0, r1, r2 @ /= 5
	lsr	r0, #16
	bx	lr

FUNCTION(rcnt1_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*1]
	mov_24	r1, 0x1e6cde
	sub	r2, r2, r3
	umull	r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
	bx	lr
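
/* Both m1 readers divide by a fixed-point reciprocal multiply rather than
   a real division: 0x3334 / 2^16 = 13108 / 65536 ~= 1/5, so
   (x * 0x3334) >> 16 == x / 5 over the elapsed-cycle ranges a root
   counter can see; likewise (x * 0x1e6cde) >> 32 (the umull high word)
   approximates x / hsync_cycles, since 2^32 / 0x1e6cde ~= 2154. */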

FUNCTION(rcnt2_read_count_m1):
	/* r0 = address, r2 = cycles */
	ldr	r3, [fp, #LO_rcnts+6*4+7*4*2]
	mov	r0, r2, lsl #16-3
	sub	r0, r0, r3, lsl #16-3
	lsr	r0, #16 @ /= 8
	bx	lr

@ vim:filetype=armasm