/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* linkage_arm.s for PCSX *
* Copyright (C) 2009-2011 Ari64 *
- * Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas *
+ * Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "arm_features.h"
-
-
- .global dynarec_local
- .global reg
- .global hi
- .global lo
- .global reg_cop0
- .global reg_cop2d
- .global reg_cop2c
- .global FCR0
- .global FCR31
- .global next_interupt
- .global cycle_count
- .global last_count
- .global pending_exception
- .global pcaddr
- .global stop
- .global invc_ptr
- .global address
- .global branch_target
- .global PC
- .global mini_ht
- .global restore_candidate
- /* psx */
- .global psxRegs
- .global mem_rtab
- .global mem_wtab
- .global psxH_ptr
- .global zeromem_ptr
- .global inv_code_start
- .global inv_code_end
- .global rcnts
+#include "linkage_offsets.h"
+
+
+#ifdef __MACH__
+#define dynarec_local ESYM(dynarec_local)
+#define add_link ESYM(add_link)
+#define new_recompile_block ESYM(new_recompile_block)
+#define get_addr ESYM(get_addr)
+#define get_addr_ht ESYM(get_addr_ht)
+#define clean_blocks ESYM(clean_blocks)
+#define gen_interupt ESYM(gen_interupt)
+#define psxException ESYM(psxException)
+#define execI ESYM(execI)
+#define invalidate_addr ESYM(invalidate_addr)
+#endif
.bss
.align 4
+ .global dynarec_local
.type dynarec_local, %object
- .size dynarec_local, dynarec_local_end-dynarec_local
+ .size dynarec_local, LO_dynarec_local_size
dynarec_local:
- .space dynarec_local_end-dynarec_local
-next_interupt = dynarec_local + 64
- .type next_interupt, %object
- .size next_interupt, 4
-cycle_count = next_interupt + 4
- .type cycle_count, %object
- .size cycle_count, 4
-last_count = cycle_count + 4
- .type last_count, %object
- .size last_count, 4
-pending_exception = last_count + 4
- .type pending_exception, %object
- .size pending_exception, 4
-stop = pending_exception + 4
- .type stop, %object
- .size stop, 4
-invc_ptr = stop + 4
- .type invc_ptr, %object
- .size invc_ptr, 4
-address = invc_ptr + 4
- .type address, %object
- .size address, 4
-psxRegs = address + 4
+ .space LO_dynarec_local_size
+
+#define DRC_VAR_(name, vname, size_) \
+ vname = dynarec_local + LO_##name; \
+ .global vname; \
+ .type vname, %object; \
+ .size vname, size_
+
+#define DRC_VAR(name, size_) \
+ DRC_VAR_(name, ESYM(name), size_)
+
+DRC_VAR(next_interupt, 4)
+DRC_VAR(cycle_count, 4)
+DRC_VAR(last_count, 4)
+DRC_VAR(pending_exception, 4)
+DRC_VAR(stop, 4)
+DRC_VAR(invc_ptr, 4)
+DRC_VAR(address, 4)
+DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)
/* psxRegs */
- .type psxRegs, %object
- .size psxRegs, psxRegs_end-psxRegs
-reg = psxRegs
- .type reg, %object
- .size reg, 128
-lo = reg + 128
- .type lo, %object
- .size lo, 4
-hi = lo + 4
- .type hi, %object
- .size hi, 4
-reg_cop0 = hi + 4
- .type reg_cop0, %object
- .size reg_cop0, 128
-reg_cop2d = reg_cop0 + 128
- .type reg_cop2d, %object
- .size reg_cop2d, 128
-reg_cop2c = reg_cop2d + 128
- .type reg_cop2c, %object
- .size reg_cop2c, 128
-PC = reg_cop2c + 128
-pcaddr = PC
- .type PC, %object
- .size PC, 4
-code = PC + 4
- .type code, %object
- .size code, 4
-cycle = code + 4
- .type cycle, %object
- .size cycle, 4
-interrupt = cycle + 4
- .type interrupt, %object
- .size interrupt, 4
-intCycle = interrupt + 4
- .type intCycle, %object
- .size intCycle, 256
-psxRegs_end = intCycle + 256
-
-rcnts = psxRegs_end
- .type rcnts, %object
- .size rcnts, 7*4*4
-rcnts_end = rcnts + 7*4*4
-
-mem_rtab = rcnts_end
- .type mem_rtab, %object
- .size mem_rtab, 4
-mem_wtab = mem_rtab + 4
- .type mem_wtab, %object
- .size mem_wtab, 4
-psxH_ptr = mem_wtab + 4
- .type psxH_ptr, %object
- .size psxH_ptr, 4
-zeromem_ptr = psxH_ptr + 4
- .type zeromem_ptr, %object
- .size zeromem_ptr, 4
-inv_code_start = zeromem_ptr + 4
- .type inv_code_start, %object
- .size inv_code_start, 4
-inv_code_end = inv_code_start + 4
- .type inv_code_end, %object
- .size inv_code_end, 4
-branch_target = inv_code_end + 4
- .type branch_target, %object
- .size branch_target, 4
-align0 = branch_target + 4 /* unused/alignment */
- .type align0, %object
- .size align0, 16
-mini_ht = align0 + 16
- .type mini_ht, %object
- .size mini_ht, 256
-restore_candidate = mini_ht + 256
- .type restore_candidate, %object
- .size restore_candidate, 512
-dynarec_local_end = restore_candidate + 512
+DRC_VAR(reg, 128)
+DRC_VAR(lo, 4)
+DRC_VAR(hi, 4)
+DRC_VAR(reg_cop0, 128)
+DRC_VAR(reg_cop2d, 128)
+DRC_VAR(reg_cop2c, 128)
+DRC_VAR(pcaddr, 4)
+@DRC_VAR(code, 4)
+@DRC_VAR(cycle, 4)
+@DRC_VAR(interrupt, 4)
+@DRC_VAR(intCycle, 256)
+
+DRC_VAR(rcnts, 7*4*4)
+DRC_VAR(mem_rtab, 4)
+DRC_VAR(mem_wtab, 4)
+DRC_VAR(psxH_ptr, 4)
+DRC_VAR(zeromem_ptr, 4)
+DRC_VAR(inv_code_start, 4)
+DRC_VAR(inv_code_end, 4)
+DRC_VAR(branch_target, 4)
+@DRC_VAR(align0, 16) /* unused/alignment */
+DRC_VAR(mini_ht, 256)
+DRC_VAR(restore_candidate, 512)
/* unused */
-FCR0 = align0
- .type FCR0, %object
- .size FCR0, 4
-FCR31 = align0
- .type FCR31, %object
- .size FCR31, 4
+DRC_VAR(FCR0, 4)
+DRC_VAR(FCR31, 4)
#ifndef HAVE_ARMV5
.macro blx rd
/* r0 = instruction pointer */
/* r1 = fault address */
/* r2 = cause */
- ldr r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
+ ldr r3, [fp, #LO_reg_cop0+48] /* Status */
mvn r6, #0xF000000F
- ldr r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
+ ldr r4, [fp, #LO_reg_cop0+16] /* Context */
bic r6, r6, #0x0F800000
- str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
+ str r0, [fp, #LO_reg_cop0+56] /* EPC */
orr r3, r3, #2
- str r1, [fp, #reg_cop0+32-dynarec_local] /* BadVAddr */
+ str r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
bic r4, r4, r6
- str r3, [fp, #reg_cop0+48-dynarec_local] /* Status */
+ str r3, [fp, #LO_reg_cop0+48] /* Status */
and r5, r6, r1, lsr #9
- str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
+ str r2, [fp, #LO_reg_cop0+52] /* Cause */
and r1, r1, r6, lsl #9
- str r1, [fp, #reg_cop0+40-dynarec_local] /* EntryHi */
+ str r1, [fp, #LO_reg_cop0+40] /* EntryHi */
orr r4, r4, r5
- str r4, [fp, #reg_cop0+16-dynarec_local] /* Context */
+ str r4, [fp, #LO_reg_cop0+16] /* Context */
mov r0, #0x80000000
bl get_addr_ht
mov pc, r0
ldr r2, [r1, #8]
teq r2, r0
ldreq pc, [r1, #12]
- str r10, [fp, #cycle_count-dynarec_local]
+ str r10, [fp, #LO_cycle_count]
bl get_addr
- ldr r10, [fp, #cycle_count-dynarec_local]
+ ldr r10, [fp, #LO_cycle_count]
mov pc, r0
.size jump_vaddr, .-jump_vaddr
.align 2
FUNCTION(verify_code_ds):
- str r8, [fp, #branch_target-dynarec_local]
+ str r8, [fp, #LO_branch_target]
FUNCTION(verify_code_vm):
FUNCTION(verify_code):
/* r1 = source */
.D3:
teqeq r4, r5
.D4:
- ldr r8, [fp, #branch_target-dynarec_local]
+ ldr r8, [fp, #LO_branch_target]
moveq pc, lr
.D5:
bl get_addr
.align 2
FUNCTION(cc_interrupt):
- ldr r0, [fp, #last_count-dynarec_local]
+ ldr r0, [fp, #LO_last_count]
mov r1, #0
mov r2, #0x1fc
add r10, r0, r10
- str r1, [fp, #pending_exception-dynarec_local]
+ str r1, [fp, #LO_pending_exception]
and r2, r2, r10, lsr #17
- add r3, fp, #restore_candidate-dynarec_local
- str r10, [fp, #cycle-dynarec_local] /* PCSX cycles */
-@@ str r10, [fp, #reg_cop0+36-dynarec_local] /* Count */
+ add r3, fp, #LO_restore_candidate
+ str r10, [fp, #LO_cycle] /* PCSX cycles */
+@@ str r10, [fp, #LO_reg_cop0+36] /* Count */
ldr r4, [r2, r3]
mov r10, lr
tst r4, r4
.E1:
bl gen_interupt
mov lr, r10
- ldr r10, [fp, #cycle-dynarec_local]
- ldr r0, [fp, #next_interupt-dynarec_local]
- ldr r1, [fp, #pending_exception-dynarec_local]
- ldr r2, [fp, #stop-dynarec_local]
- str r0, [fp, #last_count-dynarec_local]
+ ldr r10, [fp, #LO_cycle]
+ ldr r0, [fp, #LO_next_interupt]
+ ldr r1, [fp, #LO_pending_exception]
+ ldr r2, [fp, #LO_stop]
+ str r0, [fp, #LO_last_count]
sub r10, r10, r0
tst r2, r2
ldmnefd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
tst r1, r1
moveq pc, lr
.E2:
- ldr r0, [fp, #pcaddr-dynarec_local]
+ ldr r0, [fp, #LO_pcaddr]
bl get_addr_ht
mov pc, r0
.E4:
.align 2
FUNCTION(do_interrupt):
- ldr r0, [fp, #pcaddr-dynarec_local]
+ ldr r0, [fp, #LO_pcaddr]
bl get_addr_ht
add r10, r10, #2
mov pc, r0
FUNCTION(fp_exception):
mov r2, #0x10000000
.E7:
- ldr r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
+ ldr r1, [fp, #LO_reg_cop0+48] /* Status */
mov r3, #0x80000000
- str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
+ str r0, [fp, #LO_reg_cop0+56] /* EPC */
orr r1, #2
add r2, r2, #0x2c
- str r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
- str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
+ str r1, [fp, #LO_reg_cop0+48] /* Status */
+ str r2, [fp, #LO_reg_cop0+52] /* Cause */
add r0, r3, #0x80
bl get_addr_ht
mov pc, r0
.align 2
FUNCTION(jump_syscall):
- ldr r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
+ ldr r1, [fp, #LO_reg_cop0+48] /* Status */
mov r3, #0x80000000
- str r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
+ str r0, [fp, #LO_reg_cop0+56] /* EPC */
orr r1, #2
mov r2, #0x20
- str r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
- str r2, [fp, #reg_cop0+52-dynarec_local] /* Cause */
+ str r1, [fp, #LO_reg_cop0+48] /* Status */
+ str r2, [fp, #LO_reg_cop0+52] /* Cause */
add r0, r3, #0x80
bl get_addr_ht
mov pc, r0
.align 2
FUNCTION(jump_syscall_hle):
- str r0, [fp, #pcaddr-dynarec_local] /* PC must be set to EPC for psxException */
- ldr r2, [fp, #last_count-dynarec_local]
+ str r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */
+ ldr r2, [fp, #LO_last_count]
mov r1, #0 /* in delay slot */
add r2, r2, r10
mov r0, #0x20 /* cause */
- str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
+ str r2, [fp, #LO_cycle] /* PCSX cycle counter */
bl psxException
- /* note: psxException might do recorsive recompiler call from it's HLE code,
+ /* note: psxException might do recursive recompiler call from its HLE code,
* so be ready for this */
pcsx_return:
- ldr r1, [fp, #next_interupt-dynarec_local]
- ldr r10, [fp, #cycle-dynarec_local]
- ldr r0, [fp, #pcaddr-dynarec_local]
+ ldr r1, [fp, #LO_next_interupt]
+ ldr r10, [fp, #LO_cycle]
+ ldr r0, [fp, #LO_pcaddr]
sub r10, r10, r1
- str r1, [fp, #last_count-dynarec_local]
+ str r1, [fp, #LO_last_count]
bl get_addr_ht
mov pc, r0
.size jump_syscall_hle, .-jump_syscall_hle
.align 2
FUNCTION(jump_hlecall):
- ldr r2, [fp, #last_count-dynarec_local]
- str r0, [fp, #pcaddr-dynarec_local]
+ ldr r2, [fp, #LO_last_count]
+ str r0, [fp, #LO_pcaddr]
add r2, r2, r10
adr lr, pcsx_return
- str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
+ str r2, [fp, #LO_cycle] /* PCSX cycle counter */
bx r1
.size jump_hlecall, .-jump_hlecall
.align 2
FUNCTION(jump_intcall):
- ldr r2, [fp, #last_count-dynarec_local]
- str r0, [fp, #pcaddr-dynarec_local]
+ ldr r2, [fp, #LO_last_count]
+ str r0, [fp, #LO_pcaddr]
 add r2, r2, r10
 adr lr, pcsx_return
- str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
+ str r2, [fp, #LO_cycle] /* PCSX cycle counter */
 b execI
- .size jump_hlecall, .-jump_hlecall
+ .size jump_intcall, .-jump_intcall @ was a copy-paste of jump_hlecall's .size
.align 2
FUNCTION(new_dyna_leave):
- ldr r0, [fp, #last_count-dynarec_local]
+ ldr r0, [fp, #LO_last_count]
add r12, fp, #28
add r10, r0, r10
- str r10, [fp, #cycle-dynarec_local]
+ str r10, [fp, #LO_cycle]
ldmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
.size new_dyna_leave, .-new_dyna_leave
mov r0, r12
.size invalidate_addr_r12, .-invalidate_addr_r12
.align 2
-FUNCTION(invalidate_addr_call):
- ldr r12, [fp, #inv_code_start-dynarec_local]
- ldr lr, [fp, #inv_code_end-dynarec_local]
+invalidate_addr_call:
+ ldr r12, [fp, #LO_inv_code_start]
+ ldr lr, [fp, #LO_inv_code_end]
cmp r0, r12
cmpcs lr, r0
blcc invalidate_addr
/* ip is stored to conform EABI alignment */
stmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
load_varadr fp, dynarec_local
- ldr r0, [fp, #pcaddr-dynarec_local]
+ ldr r0, [fp, #LO_pcaddr]
bl get_addr_ht
- ldr r1, [fp, #next_interupt-dynarec_local]
- ldr r10, [fp, #cycle-dynarec_local]
- str r1, [fp, #last_count-dynarec_local]
+ ldr r1, [fp, #LO_next_interupt]
+ ldr r10, [fp, #LO_cycle]
+ str r1, [fp, #LO_last_count]
sub r10, r10, r1
mov pc, r0
.size new_dyna_start, .-new_dyna_start
/* r0 = address, r1 = handler_tab, r2 = cycles */
lsl r3, r0, #20
lsr r3, #(20+\tab_shift)
- ldr r12, [fp, #last_count-dynarec_local]
+ ldr r12, [fp, #LO_last_count]
ldr r1, [r1, r3, lsl #2]
add r2, r2, r12
lsls r1, #1
\readop r0, [r1, r3, lsl #\tab_shift]
.endif
movcc pc, lr
- str r2, [fp, #cycle-dynarec_local]
+ str r2, [fp, #LO_cycle]
bx r1
.endm
lsl r12,r0, #20
lsr r12, #(20+\tab_shift)
ldr r3, [r3, r12, lsl #2]
- str r0, [fp, #address-dynarec_local] @ some handlers still need it..
+ str r0, [fp, #LO_address] @ some handlers still need it..
lsls r3, #1
mov r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
\wrtop r1, [r3, r12, lsl #\tab_shift]
.endif
movcc pc, lr
- ldr r12, [fp, #last_count-dynarec_local]
+ ldr r12, [fp, #LO_last_count]
mov r0, r1
add r2, r2, r12
push {r2, lr}
- str r2, [fp, #cycle-dynarec_local]
+ str r2, [fp, #LO_cycle]
blx r3
- ldr r0, [fp, #next_interupt-dynarec_local]
+ ldr r0, [fp, #LO_next_interupt]
pop {r2, r3}
- str r0, [fp, #last_count-dynarec_local]
+ str r0, [fp, #LO_last_count]
sub r0, r2, r0
bx r3
.endm
FUNCTION(jump_handler_write_h):
/* r0 = address, r1 = data, r2 = cycles, r3 = handler */
- ldr r12, [fp, #last_count-dynarec_local]
- str r0, [fp, #address-dynarec_local] @ some handlers still need it..
+ ldr r12, [fp, #LO_last_count]
+ str r0, [fp, #LO_address] @ some handlers still need it..
add r2, r2, r12
mov r0, r1
push {r2, lr}
- str r2, [fp, #cycle-dynarec_local]
+ str r2, [fp, #LO_cycle]
blx r3
- ldr r0, [fp, #next_interupt-dynarec_local]
+ ldr r0, [fp, #LO_next_interupt]
pop {r2, r3}
- str r0, [fp, #last_count-dynarec_local]
+ str r0, [fp, #LO_last_count]
sub r0, r2, r0
bx r3
FUNCTION(jump_handle_swl):
/* r0 = address, r1 = data, r2 = cycles */
- ldr r3, [fp, #mem_wtab-dynarec_local]
+ ldr r3, [fp, #LO_mem_wtab]
mov r12,r0,lsr #12
ldr r3, [r3, r12, lsl #2]
lsls r3, #1
FUNCTION(jump_handle_swr):
/* r0 = address, r1 = data, r2 = cycles */
- ldr r3, [fp, #mem_wtab-dynarec_local]
+ ldr r3, [fp, #LO_mem_wtab]
mov r12,r0,lsr #12
ldr r3, [r3, r12, lsl #2]
lsls r3, #1
.macro rcntx_read_mode0 num
/* r0 = address, r2 = cycles */
- ldr r3, [fp, #rcnts-dynarec_local+6*4+7*4*\num] @ cycleStart
+ ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
mov r0, r2, lsl #16
sub r0, r3, lsl #16
lsr r0, #16
FUNCTION(rcnt0_read_count_m1):
/* r0 = address, r2 = cycles */
- ldr r3, [fp, #rcnts-dynarec_local+6*4+7*4*0] @ cycleStart
+ ldr r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
mov_16 r1, 0x3334
sub r2, r2, r3
mul r0, r1, r2 @ /= 5
FUNCTION(rcnt1_read_count_m1):
/* r0 = address, r2 = cycles */
- ldr r3, [fp, #rcnts-dynarec_local+6*4+7*4*1]
+ ldr r3, [fp, #LO_rcnts+6*4+7*4*1]
mov_24 r1, 0x1e6cde
sub r2, r2, r3
umull r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
FUNCTION(rcnt2_read_count_m1):
/* r0 = address, r2 = cycles */
- ldr r3, [fp, #rcnts-dynarec_local+6*4+7*4*2]
+ ldr r3, [fp, #LO_rcnts+6*4+7*4*2]
mov r0, r2, lsl #16-3
sub r0, r3, lsl #16-3
lsr r0, #16 @ /= 8