/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* linkage_arm.s for PCSX *
- * Copyright (C) 2009-2010 Ari64 *
- * Copyright (C) 2010 Gražvydas "notaz" Ignotas *
+ * Copyright (C) 2009-2011 Ari64 *
+ * Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
.size interrupt, 4
intCycle = interrupt + 4
.type intCycle, %object
- .size intCycle, 128
-psxRegs_end = intCycle + 128
+ .size intCycle, 256
+psxRegs_end = intCycle + 256
/* nd_pcsx_io */
nd_pcsx_io = psxRegs_end
.global do_interrupt
.type do_interrupt, %function
do_interrupt:
- /* FIXME: cycles already calculated, not needed? */
ldr r0, [fp, #pcaddr-dynarec_local]
bl get_addr_ht
- ldr r1, [fp, #next_interupt-dynarec_local]
- ldr r10, [fp, #cycle-dynarec_local]
- str r1, [fp, #last_count-dynarec_local]
- sub r10, r10, r1
+ @ NOTE(review): the cycle recalculation above was removed, so r10 is now
+ @ assumed to already hold the adjusted cycle count on entry — confirm
+ @ every caller of do_interrupt establishes that invariant
add r10, r10, #2
mov pc, r0
.size do_interrupt, .-do_interrupt
+
.align 2
.global fp_exception
.type fp_exception, %function
ldr r2, [fp, #last_count-dynarec_local]
str r0, [fp, #pcaddr-dynarec_local]
add r2, r2, r10
- str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
adr lr, pcsx_return
+ str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
bx r1
.size jump_hlecall, .-jump_hlecall
+ .align 2
+ .global jump_intcall
+ .type jump_intcall, %function
+/* flush PC and cycle count back to psxRegs, then enter the interpreter */
+jump_intcall:
+ ldr r2, [fp, #last_count-dynarec_local]
+ str r0, [fp, #pcaddr-dynarec_local]
+ add r2, r2, r10
+ adr lr, pcsx_return
+ str r2, [fp, #cycle-dynarec_local] /* PCSX cycle counter */
+ b execI
+ .size jump_intcall, .-jump_intcall
+
new_dyna_leave:
.align 2
.global new_dyna_leave
.size indirect_jump_indexed, .-indirect_jump_indexed
.align 2
- .global jump_eret
- .type jump_eret, %function
-jump_eret:
- ldr r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
- ldr r0, [fp, #last_count-dynarec_local]
- bic r1, r1, #2
- add r10, r0, r10
- str r1, [fp, #reg_cop0+48-dynarec_local] /* Status */
- str r10, [fp, #cycle-dynarec_local]
- bl check_interupt
- ldr r1, [fp, #next_interupt-dynarec_local]
- ldr r0, [fp, #reg_cop0+56-dynarec_local] /* EPC */
- str r1, [fp, #last_count-dynarec_local]
- subs r10, r10, r1
- bpl .E11
-.E8:
- bl get_addr
- mov pc, r0
-.E11:
- str r0, [fp, #pcaddr-dynarec_local]
- bl cc_interrupt
- ldr r0, [fp, #pcaddr-dynarec_local]
- b .E8
- .size jump_eret, .-jump_eret
+ @ Per-register entry stubs for block invalidation: each saves the
+ @ caller-saved regs at [fp] (dynarec_local scratch), extracts the page
+ @ number (addr >> 12) from its designated register into r0, and jumps
+ @ to the shared invalidate_addr_call tail.  Generated by macro instead
+ @ of hand-unrolling; expansion is identical to the explicit stubs.
+ .macro invalidate_addr reg
+ .align 2
+ .global invalidate_addr_r\reg
+ .type invalidate_addr_r\reg, %function
+invalidate_addr_r\reg:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r\reg, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r\reg, .-invalidate_addr_r\reg
+ .endm
+
+ invalidate_addr 0
+ invalidate_addr 1
+ invalidate_addr 2
+ invalidate_addr 3
+ invalidate_addr 4
+ invalidate_addr 5
+ invalidate_addr 6
+ invalidate_addr 7
+ invalidate_addr 8
+ invalidate_addr 9
+ invalidate_addr 10
+ .align 2
+ .global invalidate_addr_r12
+ .type invalidate_addr_r12, %function
+invalidate_addr_r12:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r12, #12
+ @ deliberately no branch: falls straight through into
+ @ invalidate_addr_call below (the intervening .align 2 inserts no
+ @ padding here since ARM code is already 4-byte aligned)
+ .size invalidate_addr_r12, .-invalidate_addr_r12
+ .align 2
+ .global invalidate_addr_call
+ .type invalidate_addr_call, %function
+invalidate_addr_call:
+ @ shared tail: r0 = page number (addr >> 12); the entry stub stashed
+ @ r0-r3/r12/lr at [fp], restored here with lr loaded into pc to return
+ bl invalidate_block
+ ldmia fp, {r0, r1, r2, r3, r12, pc}
+ .size invalidate_addr_call, .-invalidate_addr_call
.align 2
.global new_dyna_start
.global ari_write_ram_mirror8
.global ari_write_ram_mirror16
.global ari_write_ram_mirror32
+.global ari_read_bios8
+.global ari_read_bios16
+.global ari_read_bios32
.global ari_read_io8
.global ari_read_io16
.global ari_read_io32
ari_read_ram_mirror (3<<11), ldr
/* invalidation is already taken care of by the caller */
-.macro ari_write_ram bic_const var op
+@ store \var (from dynarec_local) to the RAM address held in 'address';
+@ \pf is the access-size suffix (b / h / empty) applied to both the load
+@ and the store, \bic_const masks low address bits to force alignment
+.macro ari_write_ram bic_const var pf
ldr r0, [fp, #address-dynarec_local]
- ldr r1, [fp, #\var-dynarec_local]
+ ldr\pf r1, [fp, #\var-dynarec_local]
.if \bic_const
bic r0, r0, #\bic_const
.endif
- \op r1, [r0]
+ str\pf r1, [r0]
mov pc, lr
.endm
ari_write_ram8:
- ari_write_ram 0, byte, strb
+ ari_write_ram 0, byte, b
ari_write_ram16:
- ari_write_ram 1, hword, strh
+ ari_write_ram 1, hword, h
ari_write_ram32:
- ari_write_ram 3, word, str
+ ari_write_ram 3, word,
-.macro ari_write_ram_mirror mvn_const var pf
+@ store \var to a mirrored RAM address, then check invc_ptr to decide
+@ whether the written page holds translated code (\pf = size suffix as
+@ in ari_write_ram, \mvn_const builds the mirror address mask)
+.macro ari_write_ram_mirror mvn_const var pf
ldr r0, [fp, #address-dynarec_local]
mvn r3, #\mvn_const
- ldr r1, [fp, #\var-dynarec_local]
+ ldr\pf r1, [fp, #\var-dynarec_local]
+ @ NOTE(review): 'and r0, r3, lsr #11' below looks malformed — an AND
+ @ needs a second source operand (expected something like
+ @ 'and r0, r0, r3, lsr #11'); verify against the original file, this
+ @ may be truncated context in the patch
and r0, r3, lsr #11
ldr r2, [fp, #invc_ptr-dynarec_local]
orr r0, r0, #1<<31
ldrb r2, [r2, r0, lsr #12]
- \op r1, [r0]
+ str\pf r1, [r0]
tst r2, r2
movne pc, lr
lsr r0, r0, #12
.endm
ari_write_ram_mirror8:
- ari_write_ram_mirror 0, byte, b
+ ari_write_ram_mirror 0, byte, b
ari_write_ram_mirror16:
- ari_write_ram_mirror (1<<11), hword, strh
+ ari_write_ram_mirror (1<<11), hword, h
ari_write_ram_mirror32:
- ari_write_ram_mirror (3<<11), word, str
+ ari_write_ram_mirror (3<<11), word,
+
+
+@ read through the BIOS mirror: force the address into the cached
+@ 0x9fcxxxxx view (set bit31, clear bit29) and load directly; the
+@ result is parked in readmem_dword for the dynarec to pick up.
+@ \bic_const additionally clears low bits to force access alignment.
+.macro ari_read_bios_mirror bic_const op
+ ldr r0, [fp, #address-dynarec_local]
+ orr r0, r0, #0x80000000
+ bic r0, r0, #(0x20000000|\bic_const) @ map to 0x9fc...
+ \op r0, [r0]
+ str r0, [fp, #readmem_dword-dynarec_local]
+ mov pc, lr
+.endm
+
+ari_read_bios8:
+ ari_read_bios_mirror 0, ldrb
+
+ari_read_bios16:
+ ari_read_bios_mirror 1, ldrh @ bic_const 1: halfword-align
+
+ari_read_bios32:
+ ari_read_bios_mirror 3, ldr @ bic_const 3: word-align
@ for testing
.endif
.endm
-.macro ari_write_io opvl opst var mem_tab tab_shift
+.macro ari_write_io pf var mem_tab tab_shift
ldr r0, [fp, #address-dynarec_local]
- \opvl r1, [fp, #\var-dynarec_local]
+ ldr\pf r1, [fp, #\var-dynarec_local]
.if \tab_shift == 0
bic r0, r0, #3
.endif
bxne r12
0:
ldr r3, [fp, #psxH_ptr-dynarec_local]
- \opst r1, [r2, r3]
+ str\pf r1, [r2, r3]
mov pc, lr
1:
-.if \tab_shift == 1 @ write16
cmp r2, #0x1c00
blo 0b
cmp r2, #0x1e00
+.if \tab_shift != 0
ldrlo pc, [fp, #spu_writef-dynarec_local]
- nop
+.else
+ @ write32 to SPU - very rare case (is this correct?)
+ bhs 0b
+ add r2, r0, #2
+ mov r3, r1, lsr #16
+ push {r2,r3,lr}
+ mov lr, pc
+ ldr pc, [fp, #spu_writef-dynarec_local]
+ pop {r0,r1,lr}
+ ldr pc, [fp, #spu_writef-dynarec_local]
.endif
+ nop
b 0b
.endm
ari_write_io8:
- ari_write_io ldrb, strb, byte, tab_write8, 2
+ @ PCSX always writes to psxH, so do we for consistency
+ ldr r0, [fp, #address-dynarec_local]
+ ldr r3, [fp, #psxH_ptr-dynarec_local]
+ ldrb r1, [fp, #byte-dynarec_local]
+ bic r2, r0, #0x1f800000
+ ldr r12,[fp, #tab_write8-dynarec_local]
+ strb r1, [r2, r3]
+ @ r2 = offset within the 0x1f80xxxx region; below 0x1000 it is a plain
+ @ scratchpad/psxH write and we are done (presumably — confirm the
+ @ 0x1f801000 I/O boundary against PCSX's memory map)
+ subs r3, r2, #0x1000
+ movlo pc, lr
+@ ari_write_io_old 2
+ @ offsets at/above 0x880 past the I/O base have no handler table entry
+ cmp r3, #0x880
+ movhs pc, lr
+ @ dispatch to the per-port write handler if one is registered
+ ldr r12,[r12, r3, lsl #2]
+ mov r0, r1
+ tst r12,r12
+ bxne r12
+ mov pc, lr
ari_write_io16:
- ari_write_io ldrh, strh, hword, tab_write16, 1
+ ari_write_io h, hword, tab_write16, 1
ari_write_io32:
- ari_write_io ldr, str, word, tab_write32, 0
+ ari_write_io , word, tab_write32, 0
@ vim:filetype=armasm