drc: replace unused reg32 with new reg_sv_flags
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / linkage_arm.S
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   linkage_arm.s for PCSX                                                *
 *   Copyright (C) 2009-2011 Ari64                                         *
 *   Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                     *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include "arm_features.h"
#include "linkage_offsets.h"


#ifdef __MACH__
#define dynarec_local ESYM(dynarec_local)
#define add_link ESYM(add_link)
#define new_recompile_block ESYM(new_recompile_block)
#define get_addr ESYM(get_addr)
#define get_addr_ht ESYM(get_addr_ht)
#define clean_blocks ESYM(clean_blocks)
#define gen_interupt ESYM(gen_interupt)
#define psxException ESYM(psxException)
#define execI ESYM(execI)
#define invalidate_addr ESYM(invalidate_addr)
#endif

    .bss
    .align 4
    .global dynarec_local
    .type dynarec_local, %object
    .size dynarec_local, LO_dynarec_local_size
dynarec_local:
    .space LO_dynarec_local_size

#define DRC_VAR_(name, vname, size_) \
    vname = dynarec_local + LO_##name; \
    .global vname; \
    .type vname, %object; \
    .size vname, size_

#define DRC_VAR(name, size_) \
    DRC_VAR_(name, ESYM(name), size_)

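/* DRC_VAR carves C-visible symbols out of the single dynarec_local block:
 * each name is just dynarec_local + its LO_* offset from linkage_offsets.h,
 * so C code sees ordinary globals while generated code reaches the same
 * storage through one base register (fp).  A rough C-side view (a sketch
 * only; the real declarations live in the C sources):
 *   extern char dynarec_local[];
 *   #define next_interupt (*(u32 *)(dynarec_local + LO_next_interupt))
 */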
DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(address, 4)
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs */
DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(scratch_buf_ptr, 4)
@DRC_VAR(align0, 12) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)

/* unused */
DRC_VAR(FCR0, 4)
DRC_VAR(FCR31, 4)
#ifdef __MACH__
    .data
    .align 2
ptr_jump_in:
    .word ESYM(jump_in)
ptr_jump_dirty:
    .word ESYM(jump_dirty)
ptr_hash_table:
    .word ESYM(hash_table)
#endif


    .syntax unified
    .text
    .align 2

#ifndef HAVE_ARMV5
/* ARMv4 has no blx instruction; emulate it (ARM-state callees only) */
.macro blx rd
    mov lr, pc
    bx \rd
.endm
#endif

.macro load_varadr reg var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
    movw \reg, #:lower16:\var
    movt \reg, #:upper16:\var
#elif defined(__ARM_ARCH_7A__) && defined(__MACH__)
    movw \reg, #:lower16:(\var-(1678f+8))
    movt \reg, #:upper16:(\var-(1678f+8))
1678:
    add \reg, pc
#else
    ldr \reg, =\var
#endif
.endm

.macro load_varadr_ext reg var
#if defined(__ARM_ARCH_7A__) && defined(__MACH__) && defined(__PIC__)
    movw \reg, #:lower16:(ptr_\var-(1678f+8))
    movt \reg, #:upper16:(ptr_\var-(1678f+8))
1678:
    ldr \reg, [pc, \reg]
#else
    load_varadr \reg \var
#endif
.endm
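/* Why the +8: in ARM state, reading pc yields the current instruction's
 * address plus 8, so the PIC variants bias the movw/movt offset by the
 * distance from local label 1678 plus that pipeline offset.
 * load_varadr_ext adds one indirection for Mach-O PIC builds: it loads the
 * final address from the ptr_* literal table above instead of materializing
 * the external symbol directly. */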

.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
    movw \reg, #\imm
#else
    mov \reg, #(\imm & 0x00ff)
    orr \reg, #(\imm & 0xff00)
#endif
.endm

.macro mov_24 reg imm
#ifdef __ARM_ARCH_7A__
    movw \reg, #(\imm & 0xffff)
    movt \reg, #(\imm >> 16)
#else
    mov \reg, #(\imm & 0x0000ff)
    orr \reg, #(\imm & 0x00ff00)
    orr \reg, #(\imm & 0xff0000)
#endif
.endm
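/* Classic ARM data-processing immediates are an 8-bit value rotated by an
 * even amount, so an arbitrary 16- or 24-bit constant cannot be encoded in
 * one mov.  Pre-ARMv7 these macros build the constant bytewise, e.g.
 *   mov r1, #0x34        @ 0x3334 & 0x00ff
 *   orr r1, #0x3300      @ 0x3334 & 0xff00
 * while ARMv7 gets it in one movw (plus a movt for 24-bit values). */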

.macro dyna_linker_main
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    load_varadr_ext r3, jump_in
    /* get_page */
    lsr r2, r0, #12
    mov r6, #4096
    bic r2, r2, #0xe0000
    sub r6, r6, #1
    cmp r2, #0x1000
    ldr r7, [r1]
    biclt r2, #0x0e00
    and r6, r6, r2
    cmp r2, #2048
    add r12, r7, #2
    orrcs r2, r6, #2048
    ldr r5, [r3, r2, lsl #2]
    lsl r12, r12, #8
    add r6, r1, r12, asr #6
    mov r8, #0
    /* jump_in lookup */
1:
    movs r4, r5
    beq 2f
    ldr r3, [r5] /* ll_entry .vaddr */
    ldrd r4, r5, [r4, #8] /* ll_entry .next, .addr */
    teq r3, r0
    bne 1b
    teq r4, r6
    moveq pc, r4 /* Stale i-cache */
    mov r8, r4
    b 1b /* jump_in may have dupes, continue search */
2:
    tst r8, r8
    beq 3f /* r0 not in jump_in */

    mov r5, r1
    mov r1, r6
    bl add_link
    sub r2, r8, r5
    and r1, r7, #0xff000000
    lsl r2, r2, #6
    sub r1, r1, #2
    add r1, r1, r2, lsr #8
    str r1, [r5]
    mov pc, r8
3:
    /* hash_table lookup */
    cmp r2, #2048
    load_varadr_ext r3, jump_dirty
    eor r4, r0, r0, lsl #16
    lslcc r2, r0, #9
    load_varadr_ext r6, hash_table
    lsr r4, r4, #12
    lsrcc r2, r2, #21
    bic r4, r4, #15
    ldr r5, [r3, r2, lsl #2]
    ldr r7, [r6, r4]!
    teq r7, r0
    ldreq pc, [r6, #4]
    ldr r7, [r6, #8]
    teq r7, r0
    ldreq pc, [r6, #12]
    /* jump_dirty lookup */
6:
    movs r4, r5
    beq 8f
    ldr r3, [r5]
    ldr r5, [r4, #12]
    teq r3, r0
    bne 6b
7:
    ldr r1, [r4, #8]
    /* hash_table insert */
    ldr r2, [r6]
    ldr r3, [r6, #4]
    str r0, [r6]
    str r1, [r6, #4]
    str r2, [r6, #8]
    str r3, [r6, #12]
    mov pc, r1
8:
.endm
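/* Notes on dyna_linker_main, derived from the code above:
 * - get_page folds away mirrored address bits: bic #0xe0000 strips the
 *   KSEG segment bits, biclt #0x0e00 folds the mirrored RAM pages, and
 *   pages past 2048 are hashed into the upper half of the page list.
 * - r6 recovers the current branch target from the instruction being
 *   patched: ((insn + 2) << 8) >> 6 sign-extends the 24-bit offset, scales
 *   it by 4 and adds the +8 pipeline bias in one go.  After add_link, the
 *   same encoding is applied in reverse to rewrite the BL at r5 so it
 *   points straight at the found block (r8).
 * - each hash_table bucket is 16 bytes caching two {vaddr, tcaddr} pairs,
 *   hence the probes at [r6]/[r6, #8] and the tail jumps through
 *   [r6, #4]/[r6, #12]; roughly, in C:
 *     struct ht_entry { u32 vaddr0; void *tcaddr0; u32 vaddr1; void *tcaddr1; };
 */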


FUNCTION(dyna_linker):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    mov r5, r1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker
    /* pagefault: fall through to exec_pagefault */
    mov r1, r0
    mov r2, #8
    .size dyna_linker, .-dyna_linker

FUNCTION(exec_pagefault):
    /* r0 = instruction pointer */
    /* r1 = fault address */
    /* r2 = cause */
    ldr r3, [fp, #LO_reg_cop0+48] /* Status */
    mvn r6, #0xF000000F
    ldr r4, [fp, #LO_reg_cop0+16] /* Context */
    bic r6, r6, #0x0F800000
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r3, r3, #2
    str r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
    bic r4, r4, r6
    str r3, [fp, #LO_reg_cop0+48] /* Status */
    and r5, r6, r1, lsr #9
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    and r1, r1, r6, lsl #9
    str r1, [fp, #LO_reg_cop0+40] /* EntryHi */
    orr r4, r4, r5
    str r4, [fp, #LO_reg_cop0+16] /* Context */
    mov r0, #0x80000000
    bl get_addr_ht
    mov pc, r0
    .size exec_pagefault, .-exec_pagefault
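/* The reg_cop0 offsets above are MIPS COP0 register index * 4:
 * 16 = Context (4), 32 = BadVAddr (8), 40 = EntryHi (10),
 * 48 = Status (12), 52 = Cause (13), 56 = EPC (14). */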

/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    bic r0, r0, #7
    mov r5, r1
    orr r0, r0, #1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker_ds
    /* pagefault */
    bic r1, r0, #7
    mov r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
    sub r0, r1, #4
    b exec_pagefault
    .size dyna_linker_ds, .-dyna_linker_ds

    .align 2

FUNCTION(jump_vaddr_r0):
    eor r2, r0, r0, lsl #16
    b jump_vaddr
    .size jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
    eor r2, r1, r1, lsl #16
    mov r0, r1
    b jump_vaddr
    .size jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
    mov r0, r2
    eor r2, r2, r2, lsl #16
    b jump_vaddr
    .size jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
    eor r2, r3, r3, lsl #16
    mov r0, r3
    b jump_vaddr
    .size jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
    eor r2, r4, r4, lsl #16
    mov r0, r4
    b jump_vaddr
    .size jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
    eor r2, r5, r5, lsl #16
    mov r0, r5
    b jump_vaddr
    .size jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
    eor r2, r6, r6, lsl #16
    mov r0, r6
    b jump_vaddr
    .size jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
    eor r2, r8, r8, lsl #16
    mov r0, r8
    b jump_vaddr
    .size jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
    eor r2, r9, r9, lsl #16
    mov r0, r9
    b jump_vaddr
    .size jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
    eor r2, r10, r10, lsl #16
    mov r0, r10
    b jump_vaddr
    .size jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
    eor r2, r12, r12, lsl #16
    mov r0, r12
    b jump_vaddr
    .size jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
    eor r2, r7, r7, lsl #16
    add r0, r7, #0 /* falls through into jump_vaddr */
    .size jump_vaddr_r7, .-jump_vaddr_r7
FUNCTION(jump_vaddr):
    load_varadr_ext r1, hash_table
    mvn r3, #15
    and r2, r3, r2, lsr #12
    ldr r2, [r1, r2]!
    teq r2, r0
    ldreq pc, [r1, #4]
    ldr r2, [r1, #8]
    teq r2, r0
    ldreq pc, [r1, #12]
    str r10, [fp, #LO_cycle_count]
    bl get_addr
    ldr r10, [fp, #LO_cycle_count]
    mov pc, r0
    .size jump_vaddr, .-jump_vaddr
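/* The bucket index is hash(vaddr) = ((vaddr ^ (vaddr << 16)) >> 12) & ~15,
 * i.e. the same two-entry, 16-byte buckets probed by dyna_linker_main.
 * The cycle count (r10) is spilled to LO_cycle_count around get_addr
 * because the C code is free to clobber it. */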

    .align 2

FUNCTION(verify_code_ds):
    str r8, [fp, #LO_branch_target]
FUNCTION(verify_code_vm):
FUNCTION(verify_code):
    /* r1 = source */
    /* r2 = target */
    /* r3 = length */
    tst r3, #4
    mov r4, #0
    add r3, r1, r3
    mov r5, #0
    ldrne r4, [r1], #4
    mov r12, #0
    ldrne r5, [r2], #4
    teq r1, r3
    beq .D3
.D2:
    ldr r7, [r1], #4
    eor r9, r4, r5
    ldr r8, [r2], #4
    orrs r9, r9, r12
    bne .D4
    ldr r4, [r1], #4
    eor r12, r7, r8
    ldr r5, [r2], #4
    cmp r1, r3
    bcc .D2
    teq r7, r8
.D3:
    teqeq r4, r5
.D4:
    ldr r8, [fp, #LO_branch_target]
    moveq pc, lr
.D5:
    bl get_addr
    mov pc, r0
    .size verify_code, .-verify_code
    .size verify_code_vm, .-verify_code_vm
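/* verify_code compares the recorded source (r1) against current RAM (r2)
 * one word pair per iteration; the tst #4 prologue handles a leftover word
 * when the length is not a multiple of 8.  On a match it returns to lr and
 * the still-clean block keeps running; on a mismatch it falls into get_addr
 * to recompile.  verify_code_ds additionally preserves the branch target
 * register (r8) across the check. */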

    .align 2
FUNCTION(cc_interrupt):
    ldr r0, [fp, #LO_last_count]
    mov r1, #0
    mov r2, #0x1fc
    add r10, r0, r10
    str r1, [fp, #LO_pending_exception]
    and r2, r2, r10, lsr #17
    add r3, fp, #LO_restore_candidate
    str r10, [fp, #LO_cycle] /* PCSX cycles */
@@  str r10, [fp, #LO_reg_cop0+36] /* Count */
    ldr r4, [r2, r3]
    mov r10, lr
    tst r4, r4
    bne .E4
.E1:
    bl gen_interupt
    mov lr, r10
    ldr r10, [fp, #LO_cycle]
    ldr r0, [fp, #LO_next_interupt]
    ldr r1, [fp, #LO_pending_exception]
    ldr r2, [fp, #LO_stop]
    str r0, [fp, #LO_last_count]
    sub r10, r10, r0
    tst r2, r2
    ldmfdne sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    tst r1, r1
    moveq pc, lr
.E2:
    ldr r0, [fp, #LO_pcaddr]
    bl get_addr_ht
    mov pc, r0
.E4:
    /* Move 'dirty' blocks to the 'clean' list */
    lsl r5, r2, #3
    str r1, [r2, r3]
.E5:
    lsrs r4, r4, #1
    mov r0, r5
    add r5, r5, #1
    blcs clean_blocks
    tst r5, #31
    bne .E5
    b .E1
    .size cc_interrupt, .-cc_interrupt
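/* Cycle bookkeeping: inside translated code r10 holds
 * psxRegs.cycle - last_count, so last_count + r10 is written back to
 * LO_cycle before calling into C, and r10 is rebased against the new
 * next_interupt afterwards.  restore_candidate is a bitmap scanned one
 * 32-bit word at a time ((r10 >> 17) & 0x1fc picks the word, so the whole
 * 512-byte map is swept gradually as cycles accumulate); every set bit is
 * handed to clean_blocks. */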

    .align 2
FUNCTION(do_interrupt):
    ldr r0, [fp, #LO_pcaddr]
    bl get_addr_ht
    add r10, r10, #2
    mov pc, r0
    .size do_interrupt, .-do_interrupt

    .align 2
FUNCTION(fp_exception):
    mov r2, #0x10000000
.E7:
    ldr r1, [fp, #LO_reg_cop0+48] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r1, #2
    add r2, r2, #0x2c
    str r1, [fp, #LO_reg_cop0+48] /* Status */
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size fp_exception, .-fp_exception
    .align 2
FUNCTION(fp_exception_ds):
    mov r2, #0x90000000 /* Set high bit if delay slot */
    b .E7
    .size fp_exception_ds, .-fp_exception_ds

    .align 2
FUNCTION(jump_syscall):
    ldr r1, [fp, #LO_reg_cop0+48] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r1, #2
    mov r2, #0x20
    str r1, [fp, #LO_reg_cop0+48] /* Status */
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size jump_syscall, .-jump_syscall
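/* Cause codes above carry the MIPS ExcCode in bits 2..6: 0x20 >> 2 = 8
 * (Syscall) here, 0x2c >> 2 = 11 (coprocessor unusable) in fp_exception,
 * where 0x10000000 sets the CE field (coprocessor 1) and the 0x90000000
 * variant additionally sets the branch-delay (BD) bit 31. */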

    .align 2
FUNCTION(jump_syscall_hle):
    str r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */
    ldr r2, [fp, #LO_last_count]
    mov r1, #0 /* in delay slot */
    add r2, r2, r10
    mov r0, #0x20 /* cause */
    str r2, [fp, #LO_cycle] /* PCSX cycle counter */
    bl psxException

    /* note: psxException may recursively re-enter the recompiler
     * from its HLE code, so be ready for that */
pcsx_return:
    ldr r1, [fp, #LO_next_interupt]
    ldr r10, [fp, #LO_cycle]
    ldr r0, [fp, #LO_pcaddr]
    sub r10, r10, r1
    str r1, [fp, #LO_last_count]
    bl get_addr_ht
    mov pc, r0
    .size jump_syscall_hle, .-jump_syscall_hle
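/* pcsx_return is the common re-entry path after C-side emulation: it
 * rebases r10 against the (possibly updated) next_interupt and jumps to
 * the translated block for the current pcaddr. */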

    .align 2
FUNCTION(jump_hlecall):
    ldr r2, [fp, #LO_last_count]
    str r0, [fp, #LO_pcaddr]
    add r2, r2, r10
    adr lr, pcsx_return
    str r2, [fp, #LO_cycle] /* PCSX cycle counter */
    bx r1
    .size jump_hlecall, .-jump_hlecall

    .align 2
FUNCTION(jump_intcall):
    ldr r2, [fp, #LO_last_count]
    str r0, [fp, #LO_pcaddr]
    add r2, r2, r10
    adr lr, pcsx_return
    str r2, [fp, #LO_cycle] /* PCSX cycle counter */
    b execI
    .size jump_intcall, .-jump_intcall

    .align 2
FUNCTION(new_dyna_leave):
    ldr r0, [fp, #LO_last_count]
    add r12, fp, #28
    add r10, r0, r10
    str r10, [fp, #LO_cycle]
    ldmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    .size new_dyna_leave, .-new_dyna_leave

    .align 2
FUNCTION(invalidate_addr_r0):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    b invalidate_addr_call
    .size invalidate_addr_r0, .-invalidate_addr_r0
    .align 2
FUNCTION(invalidate_addr_r1):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r1
    b invalidate_addr_call
    .size invalidate_addr_r1, .-invalidate_addr_r1
    .align 2
FUNCTION(invalidate_addr_r2):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r2
    b invalidate_addr_call
    .size invalidate_addr_r2, .-invalidate_addr_r2
    .align 2
FUNCTION(invalidate_addr_r3):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r3
    b invalidate_addr_call
    .size invalidate_addr_r3, .-invalidate_addr_r3
    .align 2
FUNCTION(invalidate_addr_r4):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r4
    b invalidate_addr_call
    .size invalidate_addr_r4, .-invalidate_addr_r4
    .align 2
FUNCTION(invalidate_addr_r5):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r5
    b invalidate_addr_call
    .size invalidate_addr_r5, .-invalidate_addr_r5
    .align 2
FUNCTION(invalidate_addr_r6):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r6
    b invalidate_addr_call
    .size invalidate_addr_r6, .-invalidate_addr_r6
    .align 2
FUNCTION(invalidate_addr_r7):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r7
    b invalidate_addr_call
    .size invalidate_addr_r7, .-invalidate_addr_r7
    .align 2
FUNCTION(invalidate_addr_r8):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r8
    b invalidate_addr_call
    .size invalidate_addr_r8, .-invalidate_addr_r8
    .align 2
FUNCTION(invalidate_addr_r9):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r9
    b invalidate_addr_call
    .size invalidate_addr_r9, .-invalidate_addr_r9
    .align 2
FUNCTION(invalidate_addr_r10):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r10
    b invalidate_addr_call
    .size invalidate_addr_r10, .-invalidate_addr_r10
    .align 2
FUNCTION(invalidate_addr_r12):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r12 /* falls through into invalidate_addr_call */
    .size invalidate_addr_r12, .-invalidate_addr_r12
    .align 2
invalidate_addr_call:
    ldr r12, [fp, #LO_inv_code_start]
    ldr lr, [fp, #LO_inv_code_end]
    cmp r0, r12
    cmpcs lr, r0
    blcc invalidate_addr
    ldmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
    .size invalidate_addr_call, .-invalidate_addr_call
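/* These trampolines spill the caller-saved state into the scratch slots at
 * the start of dynarec_local (stmia fp, {...}), so invalidate_addr can be
 * called from arbitrary points in translated code.  The
 * inv_code_start/inv_code_end range check skips the C call entirely while
 * stores stay inside a range already known to contain no translated code;
 * the final ldmia restores everything and returns in one instruction. */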

    .align 2
FUNCTION(new_dyna_start):
    /* ip is pushed only to keep the stack 8-byte aligned per the EABI */
    stmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
    load_varadr fp, dynarec_local
    ldr r0, [fp, #LO_pcaddr]
    bl get_addr_ht
    ldr r1, [fp, #LO_next_interupt]
    ldr r10, [fp, #LO_cycle]
    str r1, [fp, #LO_last_count]
    sub r10, r10, r1
    mov pc, r0
    .size new_dyna_start, .-new_dyna_start
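/* Entry/exit protocol: ten registers (r4-r9, sl, fp, ip, lr) are pushed so
 * the stack stays 8-byte aligned; fp then serves as the base for all
 * dynarec_local accesses and r10 as the running cycle count
 * (psxRegs.cycle - last_count) until new_dyna_leave writes it back. */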

/* --------------------------------------- */

.align 2

.macro pcsx_read_mem readop tab_shift
    /* r0 = address, r1 = handler_tab, r2 = cycles */
    lsl r3, r0, #20
    lsr r3, #(20+\tab_shift)
    ldr r12, [fp, #LO_last_count]
    ldr r1, [r1, r3, lsl #2]
    add r2, r2, r12
    lsls r1, #1
.if \tab_shift == 1
    lsl r3, #1
    \readop r0, [r1, r3]
.else
    \readop r0, [r1, r3, lsl #\tab_shift]
.endif
    movcc pc, lr
    str r2, [fp, #LO_cycle]
    bx r1
.endm
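/* Handler-table encoding (as the lsls/carry test implies): each entry is a
 * pointer stored shifted right by one, with bit 31 flagging an I/O handler.
 * lsls #1 restores the pointer and moves the flag into the carry, so the
 * conditional load runs only for direct memory; otherwise control branches
 * into the handler.  Roughly, in C pseudocode (names illustrative):
 *   u32 e = tab[idx];
 *   if (!(e & 0x80000000)) return *(type *)((e << 1) + scaled_offs);
 *   psxRegs.cycle = cycles + last_count; ((handler_t)(e << 1))();
 * The read8/read16 entry points first advance r1 past the 32-bit and
 * 16-bit parts of the per-page table. */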

FUNCTION(jump_handler_read8):
    add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_read_mem ldrbcc, 0

FUNCTION(jump_handler_read16):
    add r1, #0x1000/4*4 @ shift to r16 part
    pcsx_read_mem ldrhcc, 1

FUNCTION(jump_handler_read32):
    pcsx_read_mem ldrcc, 2


.macro pcsx_write_mem wrtop tab_shift
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
    lsl r12, r0, #20
    lsr r12, #(20+\tab_shift)
    ldr r3, [r3, r12, lsl #2]
    str r0, [fp, #LO_address] @ some handlers still need it..
    lsls r3, #1
    mov r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
    lsl r12, #1
    \wrtop r1, [r3, r12]
.else
    \wrtop r1, [r3, r12, lsl #\tab_shift]
.endif
    movcc pc, lr
    ldr r12, [fp, #LO_last_count]
    mov r0, r1
    add r2, r2, r12
    push {r2, lr}
    str r2, [fp, #LO_cycle]
    blx r3

    ldr r0, [fp, #LO_next_interupt]
    pop {r2, r3}
    str r0, [fp, #LO_last_count]
    sub r0, r2, r0
    bx r3
.endm
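/* The write path mirrors the read path: a direct store returns immediately
 * with the unchanged cycle count in r0, while the handler path commits
 * LO_cycle, calls the handler, then returns the cycles rebased against
 * next_interupt (handlers may reschedule events), which becomes the new
 * r10 in the caller. */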

FUNCTION(jump_handler_write8):
    add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_write_mem strbcc, 0

FUNCTION(jump_handler_write16):
    add r3, #0x1000/4*4 @ shift to r16 part
    pcsx_write_mem strhcc, 1

FUNCTION(jump_handler_write32):
    pcsx_write_mem strcc, 2

FUNCTION(jump_handler_write_h):
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler */
    ldr r12, [fp, #LO_last_count]
    str r0, [fp, #LO_address] @ some handlers still need it..
    add r2, r2, r12
    mov r0, r1
    push {r2, lr}
    str r2, [fp, #LO_cycle]
    blx r3

    ldr r0, [fp, #LO_next_interupt]
    pop {r2, r3}
    str r0, [fp, #LO_last_count]
    sub r0, r2, r0
    bx r3

FUNCTION(jump_handle_swl):
    /* r0 = address, r1 = data, r2 = cycles */
    ldr r3, [fp, #LO_mem_wtab]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    mov r0, r2
    tst r3, #2
    beq 101f
    tst r3, #1
    beq 2f
3:
    str r1, [r3, #-3]
    bx lr
2:
    lsr r2, r1, #8
    lsr r1, #24
    strh r2, [r3, #-2]
    strb r1, [r3]
    bx lr
101:
    tst r3, #1
    lsrne r1, #16 @ 1
    lsreq r12, r1, #24 @ 0
    strhne r1, [r3, #-1]
    strbeq r12, [r3]
    bx lr
4:
    mov r0, r2
@   b abort
    bx lr @ TODO?


FUNCTION(jump_handle_swr):
    /* r0 = address, r1 = data, r2 = cycles */
    ldr r3, [fp, #LO_mem_wtab]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    and r12, r3, #3
    mov r0, r2
    cmp r12, #2
    strbgt r1, [r3] @ 3
    strheq r1, [r3] @ 2
    cmp r12, #1
    strlt r1, [r3] @ 0
    bxne lr
    lsr r2, r1, #8 @ 1
    strb r1, [r3]
    strh r2, [r3, #1]
    bx lr
4:
    mov r0, r2
@   b abort
    bx lr @ TODO?

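/* These emulate the MIPS SWL/SWR unaligned-store pair for a little-endian
 * PSX: SWL writes the top (addr & 3) + 1 bytes of the register down at the
 * aligned word base, SWR the bottom 4 - (addr & 3) bytes starting at the
 * addressed byte, so an SWL/SWR pair covers a full misaligned word.  For
 * example, SWL with addr & 3 == 2 stores three bytes of (data >> 8) at the
 * word base.  Pages whose table entry is an I/O handler (carry set after
 * lsls) are currently skipped, as the TODO notes. */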

.macro rcntx_read_mode0 num
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
    mov r0, r2, lsl #16
    sub r0, r0, r3, lsl #16
    lsr r0, #16
    bx lr
.endm

FUNCTION(rcnt0_read_count_m0):
    rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
    rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
    rcntx_read_mode0 2

FUNCTION(rcnt0_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
    mov_16 r1, 0x3334
    sub r2, r2, r3
    mul r0, r1, r2 @ /= 5
    lsr r0, #16
    bx lr

FUNCTION(rcnt1_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*1]
    mov_24 r1, 0x1e6cde
    sub r2, r2, r3
    umull r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
    bx lr

FUNCTION(rcnt2_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*2]
    mov r0, r2, lsl #16-3
    sub r0, r0, r3, lsl #16-3
    lsr r0, #16 @ /= 8
    bx lr
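/* Arithmetic notes: mode 0 returns (cycles - cycleStart) & 0xffff via the
 * lsl #16 / lsr #16 pair.  The mode-1 dividers are fixed-point reciprocals:
 * 0x3334 / 2^16 ~= 1/5 (13108 * 5 = 65540), so mul + lsr #16 divides the
 * cycle delta by 5; 0x1e6cde takes the high word of a 32x32 multiply, i.e.
 * (delta * 0x1e6cde) >> 32 ~= delta / 2154, roughly one hsync period in
 * PSX clocks; rcnt2's lsl #13 / lsr #16 is a plain /8 truncated to 16 bits,
 * matching the mode-0 wraparound behaviour. */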

@ vim:filetype=armasm