notaz.gp2x.de
/
pcsx_rearmed.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
(iOS) Add min SDK version for compatibility
[pcsx_rearmed.git]
/
libpcsxcore
/
new_dynarec
/
linkage_arm.S
diff --git
a/libpcsxcore/new_dynarec/linkage_arm.S
b/libpcsxcore/new_dynarec/linkage_arm.S
index
e31b9b4
..
942b492
100644
(file)
--- a/
libpcsxcore/new_dynarec/linkage_arm.S
+++ b/
libpcsxcore/new_dynarec/linkage_arm.S
@@
-83,7
+83,8
@@
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(branch_target, 4)
-@DRC_VAR(align0, 16) /* unused/alignment */
+DRC_VAR(scratch_buf_ptr, 4)
+@DRC_VAR(align0, 12) /* unused/alignment */
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)
@@
-91,6
+92,22
@@
DRC_VAR(restore_candidate, 512)
DRC_VAR(FCR0, 4)
DRC_VAR(FCR31, 4)
DRC_VAR(FCR0, 4)
DRC_VAR(FCR31, 4)
+#ifdef __MACH__
+ .data
+ .align 2
+ptr_jump_in:
+ .word ESYM(jump_in)
+ptr_jump_dirty:
+ .word ESYM(jump_dirty)
+ptr_hash_table:
+ .word ESYM(hash_table)
+#endif
+
+
+ .syntax unified
+ .text
+ .align 2
+
#ifndef HAVE_ARMV5
.macro blx rd
mov lr, pc
#ifndef HAVE_ARMV5
.macro blx rd
mov lr, pc
@@
-102,11
+119,27
@@
DRC_VAR(FCR31, 4)
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
movw \reg, #:lower16:\var
movt \reg, #:upper16:\var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
movw \reg, #:lower16:\var
movt \reg, #:upper16:\var
+#elif defined(__ARM_ARCH_7A__) && defined(__MACH__)
+ movw \reg, #:lower16:(\var-(1678f+8))
+ movt \reg, #:upper16:(\var-(1678f+8))
+1678:
+ add \reg, pc
#else
ldr \reg, =\var
#endif
.endm
#else
ldr \reg, =\var
#endif
.endm
+.macro load_varadr_ext reg var
+#if defined(__ARM_ARCH_7A__) && defined(__MACH__) && defined(__PIC__)
+ movw \reg, #:lower16:(ptr_\var-(1678f+8))
+ movt \reg, #:upper16:(ptr_\var-(1678f+8))
+1678:
+ ldr \reg, [pc, \reg]
+#else
+ load_varadr \reg \var
+#endif
+.endm
+
.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
movw \reg, #\imm
.macro mov_16 reg imm
#ifdef __ARM_ARCH_7A__
movw \reg, #\imm
@@
-130,7
+163,7
@@
DRC_VAR(FCR31, 4)
.macro dyna_linker_main
/* r0 = virtual target address */
/* r1 = instruction to patch */
.macro dyna_linker_main
/* r0 = virtual target address */
/* r1 = instruction to patch */
-	ldr	r3, .jiptr
+	load_varadr_ext r3, jump_in
/* get_page */
lsr r2, r0, #12
mov r6, #4096
/* get_page */
lsr r2, r0, #12
mov r6, #4096
@@
-180,10
+213,10
@@
DRC_VAR(FCR31, 4)
3:
/* hash_table lookup */
cmp r2, #2048
3:
/* hash_table lookup */
cmp r2, #2048
-	ldr	r3, .jdptr
+	load_varadr_ext r3, jump_dirty
eor r4, r0, r0, lsl #16
lslcc r2, r0, #9
eor r4, r0, r0, lsl #16
lslcc r2, r0, #9
-	ldr	r6, .htptr
+	load_varadr_ext r6, hash_table
lsr r4, r4, #12
lsrcc r2, r2, #21
bic r4, r4, #15
lsr r4, r4, #12
lsrcc r2, r2, #21
bic r4, r4, #15
@@
-215,8
+248,6
@@
DRC_VAR(FCR31, 4)
8:
.endm
8:
.endm
- .text
- .align 2
FUNCTION(dyna_linker):
/* r0 = virtual target address */
FUNCTION(dyna_linker):
/* r0 = virtual target address */
@@
-281,12
+312,6
@@
FUNCTION(dyna_linker_ds):
sub r0, r1, #4
b exec_pagefault
.size dyna_linker_ds, .-dyna_linker_ds
sub r0, r1, #4
b exec_pagefault
.size dyna_linker_ds, .-dyna_linker_ds
-.jiptr:
- .word jump_in
-.jdptr:
- .word jump_dirty
-.htptr:
- .word hash_table
.align 2
.align 2
@@
-349,7
+374,7
@@
FUNCTION(jump_vaddr_r7):
add r0, r7, #0
.size jump_vaddr_r7, .-jump_vaddr_r7
FUNCTION(jump_vaddr):
add r0, r7, #0
.size jump_vaddr_r7, .-jump_vaddr_r7
FUNCTION(jump_vaddr):
-	ldr	r1, .htptr
+	load_varadr_ext r1, hash_table
mvn r3, #15
and r2, r3, r2, lsr #12
ldr r2, [r1, r2]!
mvn r3, #15
and r2, r3, r2, lsr #12
ldr r2, [r1, r2]!
@@
-430,7
+455,7
@@
FUNCTION(cc_interrupt):
str r0, [fp, #LO_last_count]
sub r10, r10, r0
tst r2, r2
str r0, [fp, #LO_last_count]
sub r10, r10, r0
tst r2, r2
-	ldmnefd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
+	ldmfdne	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
tst r1, r1
moveq pc, lr
.E2:
tst r1, r1
moveq pc, lr
.E2:
@@
-548,72
+573,72
@@
FUNCTION(new_dyna_leave):
.align 2
FUNCTION(invalidate_addr_r0):
.align 2
FUNCTION(invalidate_addr_r0):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
b invalidate_addr_call
.size invalidate_addr_r0, .-invalidate_addr_r0
.align 2
FUNCTION(invalidate_addr_r1):
b invalidate_addr_call
.size invalidate_addr_r0, .-invalidate_addr_r0
.align 2
FUNCTION(invalidate_addr_r1):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r1
b invalidate_addr_call
.size invalidate_addr_r1, .-invalidate_addr_r1
.align 2
FUNCTION(invalidate_addr_r2):
mov r0, r1
b invalidate_addr_call
.size invalidate_addr_r1, .-invalidate_addr_r1
.align 2
FUNCTION(invalidate_addr_r2):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r2
b invalidate_addr_call
.size invalidate_addr_r2, .-invalidate_addr_r2
.align 2
FUNCTION(invalidate_addr_r3):
mov r0, r2
b invalidate_addr_call
.size invalidate_addr_r2, .-invalidate_addr_r2
.align 2
FUNCTION(invalidate_addr_r3):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r3
b invalidate_addr_call
.size invalidate_addr_r3, .-invalidate_addr_r3
.align 2
FUNCTION(invalidate_addr_r4):
mov r0, r3
b invalidate_addr_call
.size invalidate_addr_r3, .-invalidate_addr_r3
.align 2
FUNCTION(invalidate_addr_r4):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r4
b invalidate_addr_call
.size invalidate_addr_r4, .-invalidate_addr_r4
.align 2
FUNCTION(invalidate_addr_r5):
mov r0, r4
b invalidate_addr_call
.size invalidate_addr_r4, .-invalidate_addr_r4
.align 2
FUNCTION(invalidate_addr_r5):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r5
b invalidate_addr_call
.size invalidate_addr_r5, .-invalidate_addr_r5
.align 2
FUNCTION(invalidate_addr_r6):
mov r0, r5
b invalidate_addr_call
.size invalidate_addr_r5, .-invalidate_addr_r5
.align 2
FUNCTION(invalidate_addr_r6):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r6
b invalidate_addr_call
.size invalidate_addr_r6, .-invalidate_addr_r6
.align 2
FUNCTION(invalidate_addr_r7):
mov r0, r6
b invalidate_addr_call
.size invalidate_addr_r6, .-invalidate_addr_r6
.align 2
FUNCTION(invalidate_addr_r7):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r7
b invalidate_addr_call
.size invalidate_addr_r7, .-invalidate_addr_r7
.align 2
FUNCTION(invalidate_addr_r8):
mov r0, r7
b invalidate_addr_call
.size invalidate_addr_r7, .-invalidate_addr_r7
.align 2
FUNCTION(invalidate_addr_r8):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r8
b invalidate_addr_call
.size invalidate_addr_r8, .-invalidate_addr_r8
.align 2
FUNCTION(invalidate_addr_r9):
mov r0, r8
b invalidate_addr_call
.size invalidate_addr_r8, .-invalidate_addr_r8
.align 2
FUNCTION(invalidate_addr_r9):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r9
b invalidate_addr_call
.size invalidate_addr_r9, .-invalidate_addr_r9
.align 2
FUNCTION(invalidate_addr_r10):
mov r0, r9
b invalidate_addr_call
.size invalidate_addr_r9, .-invalidate_addr_r9
.align 2
FUNCTION(invalidate_addr_r10):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r10
b invalidate_addr_call
.size invalidate_addr_r10, .-invalidate_addr_r10
.align 2
FUNCTION(invalidate_addr_r12):
mov r0, r10
b invalidate_addr_call
.size invalidate_addr_r10, .-invalidate_addr_r10
.align 2
FUNCTION(invalidate_addr_r12):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
mov r0, r12
.size invalidate_addr_r12, .-invalidate_addr_r12
.align 2
mov r0, r12
.size invalidate_addr_r12, .-invalidate_addr_r12
.align 2
@@
-623,7
+648,7
@@
invalidate_addr_call:
cmp r0, r12
cmpcs lr, r0
blcc invalidate_addr
cmp r0, r12
cmpcs lr, r0
blcc invalidate_addr
-	ldmia	fp, {r0, r1, r2, r3, r12, pc}
+	ldmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
.size invalidate_addr_call, .-invalidate_addr_call
.align 2
.size invalidate_addr_call, .-invalidate_addr_call
.align 2
@@
-665,11
+690,11
@@
FUNCTION(new_dyna_start):
FUNCTION(jump_handler_read8):
add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
FUNCTION(jump_handler_read8):
add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
-	pcsx_read_mem ldrccb, 0
+	pcsx_read_mem ldrbcc, 0
FUNCTION(jump_handler_read16):
add r1, #0x1000/4*4 @ shift to r16 part
FUNCTION(jump_handler_read16):
add r1, #0x1000/4*4 @ shift to r16 part
-	pcsx_read_mem ldrcch, 1
+	pcsx_read_mem ldrhcc, 1
FUNCTION(jump_handler_read32):
pcsx_read_mem ldrcc, 2
FUNCTION(jump_handler_read32):
pcsx_read_mem ldrcc, 2
@@
-706,11
+731,11
@@
FUNCTION(jump_handler_read32):
FUNCTION(jump_handler_write8):
add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
FUNCTION(jump_handler_write8):
add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
-	pcsx_write_mem strccb, 0
+	pcsx_write_mem strbcc, 0
FUNCTION(jump_handler_write16):
add r3, #0x1000/4*4 @ shift to r16 part
FUNCTION(jump_handler_write16):
add r3, #0x1000/4*4 @ shift to r16 part
-	pcsx_write_mem strcch, 1
+	pcsx_write_mem strhcc, 1
FUNCTION(jump_handler_write32):
pcsx_write_mem strcc, 2
FUNCTION(jump_handler_write32):
pcsx_write_mem strcc, 2
@@
-757,8
+782,8
@@
FUNCTION(jump_handle_swl):
tst r3, #1
lsrne r1, #16 @ 1
lsreq r12, r1, #24 @ 0
tst r3, #1
lsrne r1, #16 @ 1
lsreq r12, r1, #24 @ 0
-	strneh	r1, [r3, #-1]
-	streqb	r12, [r3]
+	strhne	r1, [r3, #-1]
+	strbeq	r12, [r3]
bx lr
4:
mov r0, r2
bx lr
4:
mov r0, r2
@@
-777,8
+802,8
@@
FUNCTION(jump_handle_swr):
and r12,r3, #3
mov r0, r2
cmp r12,#2
and r12,r3, #3
mov r0, r2
cmp r12,#2
-	strgtb	r1, [r3]	@ 3
-	streqh	r1, [r3]	@ 2
+	strbgt	r1, [r3]	@ 3
+	strheq	r1, [r3]	@ 2
cmp r12,#1
strlt r1, [r3] @ 0
bxne lr
cmp r12,#1
strlt r1, [r3] @ 0
bxne lr
@@
-796,7
+821,7
@@
FUNCTION(jump_handle_swr):
/* r0 = address, r2 = cycles */
ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
mov r0, r2, lsl #16
/* r0 = address, r2 = cycles */
ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
mov r0, r2, lsl #16
-	sub	r0, r3, lsl #16
+	sub	r0, r0, r3, lsl #16
lsr r0, #16
bx lr
.endm
lsr r0, #16
bx lr
.endm
@@
-831,7
+856,7
@@
FUNCTION(rcnt2_read_count_m1):
/* r0 = address, r2 = cycles */
ldr r3, [fp, #LO_rcnts+6*4+7*4*2]
mov r0, r2, lsl #16-3
/* r0 = address, r2 = cycles */
ldr r3, [fp, #LO_rcnts+6*4+7*4*2]
mov r0, r2, lsl #16-3
-	sub	r0, r3, lsl #16-3
+	sub	r0, r0, r3, lsl #16-3
lsr r0, #16 @ /= 8
bx lr
lsr r0, #16 @ /= 8
bx lr