drc/gte: add some stall handling
libpcsxcore/new_dynarec/linkage_arm.S
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   linkage_arm.s for PCSX                                                *
 *   Copyright (C) 2009-2011 Ari64                                         *
 *   Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                     *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include "arm_features.h"
#include "new_dynarec_config.h"
#include "linkage_offsets.h"


#ifdef __MACH__
#define dynarec_local ESYM(dynarec_local)
#define add_link ESYM(add_link)
#define new_recompile_block ESYM(new_recompile_block)
#define get_addr ESYM(get_addr)
#define get_addr_ht ESYM(get_addr_ht)
#define clean_blocks ESYM(clean_blocks)
#define gen_interupt ESYM(gen_interupt)
#define invalidate_addr ESYM(invalidate_addr)
#define gteCheckStallRaw ESYM(gteCheckStallRaw)
#endif

    .bss
    .align  4
    .global dynarec_local
    .type   dynarec_local, %object
    .size   dynarec_local, LO_dynarec_local_size
dynarec_local:
    .space  LO_dynarec_local_size

#define DRC_VAR_(name, vname, size_) \
    vname = dynarec_local + LO_##name; \
    .global vname; \
    .type vname, %object; \
    .size vname, size_

#define DRC_VAR(name, size_) \
    DRC_VAR_(name, ESYM(name), size_)

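/* A rough picture of what DRC_VAR gives the C side (a sketch under the
 * assumption that each LO_* offset in linkage_offsets.h mirrors the C
 * struct layout; these are not the actual declarations):
 *
 *   extern char dynarec_local[];
 *   #define stop (*(int *)(dynarec_local + LO_stop))  // DRC_VAR(stop, 4)
 *
 * i.e. every variable below is just a named, sized slice of dynarec_local,
 * so the dynarec can reach all of its state off one base register (fp).
 */
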
DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(address, 4)
@DRC_VAR(align0, 4) /* unused/alignment */
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs */
@DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(scratch_buf_ptr, 4)
@DRC_VAR(align1, 8) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)


#ifdef TEXRELS_FORBIDDEN
    .data
    .align 2
ptr_jump_in:
    .word   ESYM(jump_in)
ptr_jump_dirty:
    .word   ESYM(jump_dirty)
ptr_hash_table:
    .word   ESYM(hash_table)
#endif


    .syntax unified
    .text
    .align  2

#ifndef HAVE_ARMV5
/* ARMv4 has no blx; in ARM mode "mov lr, pc" reads pc as the address of
 * the instruction after the bx, so this pair emulates it. */
.macro blx rd
    mov     lr, pc
    bx      \rd
.endm
#endif

.macro load_varadr reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
    movw    \reg, #:lower16:(\var-(1678f+8))
    movt    \reg, #:upper16:(\var-(1678f+8))
1678:
    add     \reg, pc
#elif defined(HAVE_ARMV7) && !defined(__PIC__)
    movw    \reg, #:lower16:\var
    movt    \reg, #:upper16:\var
#else
    ldr     \reg, =\var
#endif
.endm

.macro load_varadr_ext reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
    movw    \reg, #:lower16:(ptr_\var-(1678f+8))
    movt    \reg, #:upper16:(ptr_\var-(1678f+8))
1678:
    ldr     \reg, [pc, \reg]
#else
    load_varadr \reg \var
#endif
.endm

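/* Why the 1678 label dance: in ARM mode, reading pc yields the address of
 * the current instruction + 8, so with movw/movt materializing
 * (var - (1678 + 8)), the "add \reg, pc" at 1678 reconstructs the absolute
 * address without any text relocation.  Informally:
 *
 *   reg = (var - (label_1678 + 8)) + (label_1678 + 8) = var
 *
 * load_varadr_ext instead resolves through a pointer kept in .data
 * (ptr_jump_in and friends above) so the text segment itself stays
 * relocation-free for external symbols.
 */
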
.macro mov_16 reg imm
#ifdef HAVE_ARMV7
    movw    \reg, #\imm
#else
    mov     \reg, #(\imm & 0x00ff)
    orr     \reg, #(\imm & 0xff00)
#endif
.endm

.macro mov_24 reg imm
#ifdef HAVE_ARMV7
    movw    \reg, #(\imm & 0xffff)
    movt    \reg, #(\imm >> 16)
#else
    mov     \reg, #(\imm & 0x0000ff)
    orr     \reg, #(\imm & 0x00ff00)
    orr     \reg, #(\imm & 0xff0000)
#endif
.endm

/* r0 = virtual target address */
/* r1 = instruction to patch */
.macro dyna_linker_main
#ifndef NO_WRITE_EXEC
    load_varadr_ext r3, jump_in
    /* get_page */
    lsr     r2, r0, #12
    mov     r6, #4096
    bic     r2, r2, #0xe0000
    sub     r6, r6, #1
    cmp     r2, #0x1000
    ldr     r7, [r1]
    biclt   r2, #0x0e00
    and     r6, r6, r2
    cmp     r2, #2048
    add     r12, r7, #2
    orrcs   r2, r6, #2048
    ldr     r5, [r3, r2, lsl #2]
    lsl     r12, r12, #8
    add     r6, r1, r12, asr #6
    mov     r8, #0
    /* jump_in lookup */
1:
    movs    r4, r5
    beq     2f
    ldr     r3, [r5]         /* ll_entry .vaddr */
    ldrd    r4, r5, [r4, #8] /* ll_entry .next, .addr */
    teq     r3, r0
    bne     1b
    teq     r4, r6
    moveq   pc, r4 /* Stale i-cache */
    mov     r8, r4
    b       1b /* jump_in may have dupes, continue search */
2:
    tst     r8, r8
    beq     3f /* r0 not in jump_in */

    mov     r5, r1
    mov     r1, r6
    bl      add_link
    sub     r2, r8, r5
    and     r1, r7, #0xff000000
    lsl     r2, r2, #6
    sub     r1, r1, #2
    add     r1, r1, r2, lsr #8
    str     r1, [r5]
    mov     pc, r8
3:
    /* hash_table lookup */
    cmp     r2, #2048
    load_varadr_ext r3, jump_dirty
    eor     r4, r0, r0, lsl #16
    lslcc   r2, r0, #9
    load_varadr_ext r6, hash_table
    lsr     r4, r4, #12
    lsrcc   r2, r2, #21
    bic     r4, r4, #15
    ldr     r5, [r3, r2, lsl #2]
    ldr     r7, [r6, r4]!
    teq     r7, r0
    ldreq   pc, [r6, #8]
    ldr     r7, [r6, #4]
    teq     r7, r0
    ldreq   pc, [r6, #12]
    /* jump_dirty lookup */
6:
    movs    r4, r5
    beq     8f
    ldr     r3, [r5]
    ldr     r5, [r4, #12]
    teq     r3, r0
    bne     6b
7:
    ldr     r1, [r4, #8]
    /* hash_table insert */
    ldr     r2, [r6]
    ldr     r3, [r6, #8]
    str     r0, [r6]
    str     r1, [r6, #8]
    str     r2, [r6, #4]
    str     r3, [r6, #12]
    mov     pc, r1
8:
#else
    /* XXX: should be able to do better than this... */
    bl      get_addr_ht
    mov     pc, r0
#endif
.endm
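
/* For reference, the page computation above matches my reading of the
 * recompiler's get_page(); as a C sketch (illustrative, not a quote from
 * new_dynarec.c):
 *
 *   unsigned int get_page(unsigned int vaddr)
 *   {
 *     unsigned int page = (vaddr >> 12) & ~0xe0000u; // fold the
 *                                                    // 0x80000000/0xa0000000
 *                                                    // segment mirrors
 *     if (page < 0x1000)
 *       page &= ~0x0e00u;                // fold the 2MB RAM mirrors
 *     if (page >= 2048)
 *       page = 2048 + (page & 2047);     // everything else shares pages
 *     return page;
 *   }
 *
 * On a jump_in hit, add_link() records the patch site and the branch at
 * [r1] is rewritten (same condition/opcode bits, new offset) so the next
 * pass jumps straight into the compiled block.
 */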


FUNCTION(dyna_linker):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov     r4, r0
    mov     r5, r1
    bl      new_recompile_block
    tst     r0, r0
    mov     r0, r4
    mov     r1, r5
    beq     dyna_linker
    /* pagefault: falls through into exec_pagefault below */
    mov     r1, r0
    mov     r2, #8
    .size   dyna_linker, .-dyna_linker

FUNCTION(exec_pagefault):
    /* r0 = instruction pointer */
    /* r1 = fault address */
    /* r2 = cause */
    ldr     r3, [fp, #LO_reg_cop0+48] /* Status */
    mvn     r6, #0xF000000F
    ldr     r4, [fp, #LO_reg_cop0+16] /* Context */
    bic     r6, r6, #0x0F800000
    str     r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr     r3, r3, #2
    str     r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
    bic     r4, r4, r6
    str     r3, [fp, #LO_reg_cop0+48] /* Status */
    and     r5, r6, r1, lsr #9
    str     r2, [fp, #LO_reg_cop0+52] /* Cause */
    and     r1, r1, r6, lsl #9
    str     r1, [fp, #LO_reg_cop0+40] /* EntryHi */
    orr     r4, r4, r5
    str     r4, [fp, #LO_reg_cop0+16] /* Context */
    mov     r0, #0x80000000
    bl      get_addr_ht
    mov     pc, r0
    .size   exec_pagefault, .-exec_pagefault

/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov     r4, r0
    bic     r0, r0, #7
    mov     r5, r1
    orr     r0, r0, #1 @ low bit marks a delay-slot block for the recompiler
    bl      new_recompile_block
    tst     r0, r0
    mov     r0, r4
    mov     r1, r5
    beq     dyna_linker_ds
    /* pagefault */
    bic     r1, r0, #7
    mov     r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
    sub     r0, r1, #4
    b       exec_pagefault
    .size   dyna_linker_ds, .-dyna_linker_ds

    .align  2

FUNCTION(jump_vaddr_r0):
    eor     r2, r0, r0, lsl #16
    b       jump_vaddr
    .size   jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
    eor     r2, r1, r1, lsl #16
    mov     r0, r1
    b       jump_vaddr
    .size   jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
    mov     r0, r2
    eor     r2, r2, r2, lsl #16
    b       jump_vaddr
    .size   jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
    eor     r2, r3, r3, lsl #16
    mov     r0, r3
    b       jump_vaddr
    .size   jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
    eor     r2, r4, r4, lsl #16
    mov     r0, r4
    b       jump_vaddr
    .size   jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
    eor     r2, r5, r5, lsl #16
    mov     r0, r5
    b       jump_vaddr
    .size   jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
    eor     r2, r6, r6, lsl #16
    mov     r0, r6
    b       jump_vaddr
    .size   jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
    eor     r2, r8, r8, lsl #16
    mov     r0, r8
    b       jump_vaddr
    .size   jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
    eor     r2, r9, r9, lsl #16
    mov     r0, r9
    b       jump_vaddr
    .size   jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
    eor     r2, r10, r10, lsl #16
    mov     r0, r10
    b       jump_vaddr
    .size   jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
    eor     r2, r12, r12, lsl #16
    mov     r0, r12
    b       jump_vaddr
    .size   jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
    eor     r2, r7, r7, lsl #16
    add     r0, r7, #0 @ falls through into jump_vaddr
    .size   jump_vaddr_r7, .-jump_vaddr_r7
FUNCTION(jump_vaddr):
    load_varadr_ext r1, hash_table
    mvn     r3, #15
    and     r2, r3, r2, lsr #12
    ldr     r2, [r1, r2]!
    teq     r2, r0
    ldreq   pc, [r1, #8]
    ldr     r2, [r1, #4]
    teq     r2, r0
    ldreq   pc, [r1, #12]
    str     r10, [fp, #LO_cycle_count]
    bl      get_addr
    ldr     r10, [fp, #LO_cycle_count]
    mov     pc, r0
    .size   jump_vaddr, .-jump_vaddr
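
/* hash_table bucket layout as the lookups here and in dyna_linker_main
 * use it: 16 bytes per bucket, two entries each.  A sketch under that
 * reading of the code (the struct is illustrative):
 *
 *   struct ht_entry { unsigned int vaddr[2]; void *tcaddr[2]; };
 *   unsigned int ofs = ((vaddr ^ (vaddr << 16)) >> 12) & ~15u; // byte offset
 *   struct ht_entry *e = (struct ht_entry *)((char *)hash_table + ofs);
 *   if (e->vaddr[0] == vaddr) goto *e->tcaddr[0];
 *   if (e->vaddr[1] == vaddr) goto *e->tcaddr[1];
 *   // miss: fall back to get_addr(vaddr)
 */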

    .align  2

FUNCTION(verify_code_ds):
    str     r8, [fp, #LO_branch_target] @ preserve HOST_BTREG?
FUNCTION(verify_code):
    /* r1 = source */
    /* r2 = target */
    /* r3 = length */
    tst     r3, #4
    mov     r4, #0
    add     r3, r1, r3
    mov     r5, #0
    ldrne   r4, [r1], #4
    mov     r12, #0
    ldrne   r5, [r2], #4
    teq     r1, r3
    beq     .D3
.D2:
    ldr     r7, [r1], #4
    eor     r9, r4, r5
    ldr     r8, [r2], #4
    orrs    r9, r9, r12
    bne     .D4
    ldr     r4, [r1], #4
    eor     r12, r7, r8
    ldr     r5, [r2], #4
    cmp     r1, r3
    bcc     .D2
    teq     r7, r8
.D3:
    teqeq   r4, r5
.D4:
    ldr     r8, [fp, #LO_branch_target]
    moveq   pc, lr
.D5:
    bl      get_addr
    mov     pc, r0
    .size   verify_code, .-verify_code
    .size   verify_code_ds, .-verify_code_ds
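
/* verify_code is in essence a fast inline memcmp: it compares the code a
 * block was compiled from against its saved copy and returns if nothing
 * changed, otherwise it re-resolves the block via get_addr().  Roughly:
 *
 *   if (memcmp((void *)r1, (void *)r2, len) == 0)
 *     return;              // block still matches, keep executing
 *   goto *get_addr(pc);    // code was modified, find/recompile
 *
 * The loop compares two words per iteration, which is why the entry code
 * peels off one leading word when the length is not a multiple of 8.
 */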

    .align  2
FUNCTION(cc_interrupt):
    ldr     r0, [fp, #LO_last_count]
    mov     r1, #0
    mov     r2, #0x1fc
    add     r10, r0, r10
    str     r1, [fp, #LO_pending_exception]
    and     r2, r2, r10, lsr #17
    add     r3, fp, #LO_restore_candidate
    str     r10, [fp, #LO_cycle] /* PCSX cycles */
@@  str     r10, [fp, #LO_reg_cop0+36] /* Count */
    ldr     r4, [r2, r3]
    mov     r10, lr
    tst     r4, r4
    bne     .E4
.E1:
    bl      gen_interupt
    mov     lr, r10
    ldr     r10, [fp, #LO_cycle]
    ldr     r0, [fp, #LO_next_interupt]
    ldr     r1, [fp, #LO_pending_exception]
    ldr     r2, [fp, #LO_stop]
    str     r0, [fp, #LO_last_count]
    sub     r10, r10, r0
    tst     r2, r2
    ldmfdne sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    tst     r1, r1
    moveq   pc, lr
.E2:
    ldr     r0, [fp, #LO_pcaddr]
    bl      get_addr_ht
    mov     pc, r0
.E4:
    /* Move 'dirty' blocks to the 'clean' list */
    lsl     r5, r2, #3
    str     r1, [r2, r3]
.E5:
    lsrs    r4, r4, #1
    mov     r0, r5
    add     r5, r5, #1
    blcs    clean_blocks
    tst     r5, #31
    bne     .E5
    b       .E1
    .size   cc_interrupt, .-cc_interrupt
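
/* restore_candidate is a 512-byte bitmap, one bit per page of potentially
 * restorable (dirty but unmodified) blocks.  Each cc_interrupt visits one
 * 32-bit word of it, picked from the cycle counter, and feeds set bits to
 * clean_blocks().  A sketch of .E4/.E5 above, as I read them:
 *
 *   unsigned int off  = (cycle >> 17) & 0x1fc;  // word-aligned byte offset
 *   unsigned int word = *(unsigned int *)((char *)restore_candidate + off);
 *   *(unsigned int *)((char *)restore_candidate + off) = 0;
 *   for (unsigned int bit = 0; bit < 32; bit++)
 *     if (word & (1u << bit))
 *       clean_blocks(off * 8 + bit);
 */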

    .align  2
FUNCTION(fp_exception):
    mov     r2, #0x10000000           @ Cause.CE = 1 (coprocessor 1)
.E7:
    ldr     r1, [fp, #LO_reg_cop0+48] /* Status */
    mov     r3, #0x80000000
    str     r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr     r1, #2
    add     r2, r2, #0x2c             @ ExcCode 11: coprocessor unusable
    str     r1, [fp, #LO_reg_cop0+48] /* Status */
    str     r2, [fp, #LO_reg_cop0+52] /* Cause */
    add     r0, r3, #0x80             @ exception vector 0x80000080
    bl      get_addr_ht
    mov     pc, r0
    .size   fp_exception, .-fp_exception
    .align  2
FUNCTION(fp_exception_ds):
    mov     r2, #0x90000000 /* Set high bit if delay slot */
    b       .E7
    .size   fp_exception_ds, .-fp_exception_ds

    .align  2
FUNCTION(jump_syscall):
    ldr     r1, [fp, #LO_reg_cop0+48] /* Status */
    mov     r3, #0x80000000
    str     r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr     r1, #2
    mov     r2, #0x20                 @ ExcCode 8: syscall
    str     r1, [fp, #LO_reg_cop0+48] /* Status */
    str     r2, [fp, #LO_reg_cop0+52] /* Cause */
    add     r0, r3, #0x80
    bl      get_addr_ht
    mov     pc, r0
    .size   jump_syscall, .-jump_syscall
    .align  2

    /* note: psxException might make a recursive recompiler call from
     * its HLE code, so be ready for this */
FUNCTION(jump_to_new_pc):
    ldr     r1, [fp, #LO_next_interupt]
    ldr     r10, [fp, #LO_cycle]
    ldr     r0, [fp, #LO_pcaddr]
    sub     r10, r10, r1
    str     r1, [fp, #LO_last_count]
    bl      get_addr_ht
    mov     pc, r0
    .size   jump_to_new_pc, .-jump_to_new_pc

    .align  2
FUNCTION(new_dyna_leave):
    ldr     r0, [fp, #LO_last_count]
    add     r12, fp, #28
    add     r10, r0, r10
    str     r10, [fp, #LO_cycle]
    ldmfd   sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    .size   new_dyna_leave, .-new_dyna_leave

    .align  2
FUNCTION(invalidate_addr_r0):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    b       invalidate_addr_call
    .size   invalidate_addr_r0, .-invalidate_addr_r0
    .align  2
FUNCTION(invalidate_addr_r1):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r1
    b       invalidate_addr_call
    .size   invalidate_addr_r1, .-invalidate_addr_r1
    .align  2
FUNCTION(invalidate_addr_r2):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r2
    b       invalidate_addr_call
    .size   invalidate_addr_r2, .-invalidate_addr_r2
    .align  2
FUNCTION(invalidate_addr_r3):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r3
    b       invalidate_addr_call
    .size   invalidate_addr_r3, .-invalidate_addr_r3
    .align  2
FUNCTION(invalidate_addr_r4):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r4
    b       invalidate_addr_call
    .size   invalidate_addr_r4, .-invalidate_addr_r4
    .align  2
FUNCTION(invalidate_addr_r5):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r5
    b       invalidate_addr_call
    .size   invalidate_addr_r5, .-invalidate_addr_r5
    .align  2
FUNCTION(invalidate_addr_r6):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r6
    b       invalidate_addr_call
    .size   invalidate_addr_r6, .-invalidate_addr_r6
    .align  2
FUNCTION(invalidate_addr_r7):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r7
    b       invalidate_addr_call
    .size   invalidate_addr_r7, .-invalidate_addr_r7
    .align  2
FUNCTION(invalidate_addr_r8):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r8
    b       invalidate_addr_call
    .size   invalidate_addr_r8, .-invalidate_addr_r8
    .align  2
FUNCTION(invalidate_addr_r9):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r9
    b       invalidate_addr_call
    .size   invalidate_addr_r9, .-invalidate_addr_r9
    .align  2
FUNCTION(invalidate_addr_r10):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r10
    b       invalidate_addr_call
    .size   invalidate_addr_r10, .-invalidate_addr_r10
    .align  2
FUNCTION(invalidate_addr_r12):
    stmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov     r0, r12 @ falls through into invalidate_addr_call
    .size   invalidate_addr_r12, .-invalidate_addr_r12
    .align  2
invalidate_addr_call:
    ldr     r12, [fp, #LO_inv_code_start]
    ldr     lr, [fp, #LO_inv_code_end]
    cmp     r0, r12
    cmpcs   lr, r0
    blcc    invalidate_addr
    ldmia   fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
    .size   invalidate_addr_call, .-invalidate_addr_call
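
/* inv_code_start/inv_code_end cache one address range known to contain no
 * compiled blocks, so stores landing there can skip invalidation entirely.
 * The flag chaining above boils down to (sketch):
 *
 *   if (addr < inv_code_start || inv_code_end < addr)
 *     invalidate_addr(addr);   // outside the known-safe range
 */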

    .align  2
FUNCTION(new_dyna_start):
    /* ip is pushed as well so the stack stays 8-byte aligned (EABI) */
    stmfd   sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
    mov     fp, r0 /* dynarec_local */
    ldr     r0, [fp, #LO_pcaddr]
    bl      get_addr_ht
    ldr     r1, [fp, #LO_next_interupt]
    ldr     r10, [fp, #LO_cycle]
    str     r1, [fp, #LO_last_count]
    sub     r10, r10, r1
    mov     pc, r0
    .size   new_dyna_start, .-new_dyna_start

/* --------------------------------------- */

.align  2

.macro pcsx_read_mem readop tab_shift
    /* r0 = address, r1 = handler_tab, r2 = cycles */
    lsl     r3, r0, #20
    lsr     r3, #(20+\tab_shift)
    ldr     r12, [fp, #LO_last_count]
    ldr     r1, [r1, r3, lsl #2]
    add     r2, r2, r12
    lsls    r1, #1
.if \tab_shift == 1
    lsl     r3, #1
    \readop r0, [r1, r3]
.else
    \readop r0, [r1, r3, lsl #\tab_shift]
.endif
    movcc   pc, lr
    str     r2, [fp, #LO_cycle]
    bx      r1
.endm
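
/* The read (and, below, write) handler tables pack two cases into one word
 * per page; this is my reading of how the C side fills mem_rtab/mem_wtab:
 *
 *   direct memory: entry = (unsigned int)mem_ptr >> 1;             // MSB clear
 *   io handler:    entry = ((unsigned int)func >> 1) | 0x80000000; // MSB set
 *
 * "lsls rN, #1" then both restores the original pointer and moves the MSB
 * into the carry flag, so the conditional (cc) access runs only for direct
 * memory while handlers fall through to the indirect call.
 */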

FUNCTION(jump_handler_read8):
    add     r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_read_mem ldrbcc, 0

FUNCTION(jump_handler_read16):
    add     r1, #0x1000/4*4              @ shift to r16 part
    pcsx_read_mem ldrhcc, 1

FUNCTION(jump_handler_read32):
    pcsx_read_mem ldrcc, 2


.macro pcsx_write_mem wrtop tab_shift
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
    lsl     r12, r0, #20
    lsr     r12, #(20+\tab_shift)
    ldr     r3, [r3, r12, lsl #2]
    str     r0, [fp, #LO_address] @ some handlers still need it..
    lsls    r3, #1
    mov     r0, r2                @ cycle return in case of direct store
.if \tab_shift == 1
    lsl     r12, #1
    \wrtop  r1, [r3, r12]
.else
    \wrtop  r1, [r3, r12, lsl #\tab_shift]
.endif
    movcc   pc, lr
    ldr     r12, [fp, #LO_last_count]
    mov     r0, r1
    add     r2, r2, r12
    push    {r2, lr}
    str     r2, [fp, #LO_cycle]
    blx     r3

    ldr     r0, [fp, #LO_next_interupt]
    pop     {r2, lr}
    str     r0, [fp, #LO_last_count]
    sub     r0, r2, r0
    bx      lr
.endm

FUNCTION(jump_handler_write8):
    add     r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_write_mem strbcc, 0

FUNCTION(jump_handler_write16):
    add     r3, #0x1000/4*4              @ shift to r16 part
    pcsx_write_mem strhcc, 1

FUNCTION(jump_handler_write32):
    pcsx_write_mem strcc, 2

FUNCTION(jump_handler_write_h):
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler */
    ldr     r12, [fp, #LO_last_count]
    str     r0, [fp, #LO_address] @ some handlers still need it..
    add     r2, r2, r12
    mov     r0, r1
    push    {r2, lr}
    str     r2, [fp, #LO_cycle]
    blx     r3

    ldr     r0, [fp, #LO_next_interupt]
    pop     {r2, lr}
    str     r0, [fp, #LO_last_count]
    sub     r0, r2, r0
    bx      lr

FUNCTION(jump_handle_swl):
    /* r0 = address, r1 = data, r2 = cycles */
    ldr     r3, [fp, #LO_mem_wtab]
    mov     r12, r0, lsr #12
    ldr     r3, [r3, r12, lsl #2]
    lsls    r3, #1
    bcs     4f
    add     r3, r0, r3
    mov     r0, r2
    tst     r3, #2
    beq     101f
    tst     r3, #1
    beq     2f
3:
    str     r1, [r3, #-3]
    bx      lr
2:
    lsr     r2, r1, #8
    lsr     r1, #24
    strh    r2, [r3, #-2]
    strb    r1, [r3]
    bx      lr
101:
    tst     r3, #1
    lsrne   r1, #16      @ 1
    lsreq   r12, r1, #24 @ 0
    strhne  r1, [r3, #-1]
    strbeq  r12, [r3]
    bx      lr
4:
    mov     r0, r2
@   b       abort
    bx      lr           @ TODO?


FUNCTION(jump_handle_swr):
    /* r0 = address, r1 = data, r2 = cycles */
    ldr     r3, [fp, #LO_mem_wtab]
    mov     r12, r0, lsr #12
    ldr     r3, [r3, r12, lsl #2]
    lsls    r3, #1
    bcs     4f
    add     r3, r0, r3
    and     r12, r3, #3
    mov     r0, r2
    cmp     r12, #2
    strbgt  r1, [r3]     @ 3
    strheq  r1, [r3]     @ 2
    cmp     r12, #1
    strlt   r1, [r3]     @ 0
    bxne    lr
    lsr     r2, r1, #8   @ 1
    strb    r1, [r3]
    strh    r2, [r3, #1]
    bx      lr
4:
    mov     r0, r2
@   b       abort
    bx      lr           @ TODO?
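
/* These two implement the MIPS SWL/SWR unaligned-store pair against the
 * write map.  For orientation, the SWL cases handled above correspond to
 * (a little-endian C sketch, with addr already translated to host memory):
 *
 *   switch (addr & 3) {
 *   case 0: *(unsigned char  *)addr = data >> 24;       break;
 *   case 1: *(unsigned short *)(addr - 1) = data >> 16; break;
 *   case 2: *(unsigned short *)(addr - 2) = data >> 8;
 *           *(unsigned char  *)addr = data >> 24;       break;
 *   case 3: *(unsigned int   *)(addr - 3) = data;       break;
 *   }
 *
 * SWR mirrors this from the other end of the word.
 */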


.macro rcntx_read_mode0 num
    /* r0 = address, r2 = cycles */
    ldr     r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
    mov     r0, r2, lsl #16
    sub     r0, r0, r3, lsl #16
    lsr     r0, #16
    bx      lr
.endm

FUNCTION(rcnt0_read_count_m0):
    rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
    rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
    rcntx_read_mode0 2

FUNCTION(rcnt0_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr     r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
    mov_16  r1, 0x3334
    sub     r2, r2, r3
    mul     r0, r1, r2 @ /= 5
    lsr     r0, #16
    bx      lr

FUNCTION(rcnt1_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr     r3, [fp, #LO_rcnts+6*4+7*4*1]
    mov_24  r1, 0x1e6cde
    sub     r2, r2, r3
    umull   r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
    bx      lr

FUNCTION(rcnt2_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr     r3, [fp, #LO_rcnts+6*4+7*4*2]
    mov     r0, r2, lsl #16-3
    sub     r0, r0, r3, lsl #16-3
    lsr     r0, #16 @ /= 8
    bx      lr
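
/* The mode-1 readers above trade divisions for fixed-point multiplies
 * (my reading of the constants; small rounding error is acceptable here):
 *
 *   rcnt0: (diff * 0x3334) >> 16   ~= diff / 5      (0x3334 ~= 2^16 / 5)
 *   rcnt1: (diff * 0x1e6cde) >> 32 ~= diff / ~2153, i.e. cycles per hsync
 *   rcnt2: (diff << 13) >> 16      == (diff / 8) & 0xffff
 */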

FUNCTION(call_gteStall):
    /* r0 = op_cycles, r1 = cycles */
    ldr     r2, [fp, #LO_last_count]
    str     lr, [fp, #LO_saved_lr]
    add     r1, r1, r2
    str     r1, [fp, #LO_cycle]
    add     r1, fp, #LO_psxRegs
    bl      gteCheckStallRaw
    ldr     lr, [fp, #LO_saved_lr]
    add     r10, r10, r0
    bx      lr
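
/* This is the entry point for the commit's GTE stall handling: compiled
 * code calls here before a GTE operation that may overlap a still-running
 * one.  The wrapper publishes the up-to-date cycle count, asks the C-side
 * helper for the remaining busy time, and charges it to the dynarec cycle
 * register (r10).  In effect (a sketch; gteCheckStallRaw is defined on the
 * C side):
 *
 *   psxRegs.cycle = cycles + last_count;
 *   r10 += gteCheckStallRaw(op_cycles, &psxRegs);
 */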

@ vim:filetype=armasm