X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=libpcsxcore%2Fnew_dynarec%2Flinkage_arm.S;h=970d91c70d38d264f9ca90fb727d2fc71b7487fd;hp=0c5b20557123bf4e405aa883cdd66bc80a18c9d3;hb=3d680478922d5f28e3dbe471308cc27a70e31fdf;hpb=b861c0a92c13df8a8b3c59b87ad7924a3861a5ac

diff --git a/libpcsxcore/new_dynarec/linkage_arm.S b/libpcsxcore/new_dynarec/linkage_arm.S
index 0c5b2055..970d91c7 100644
--- a/libpcsxcore/new_dynarec/linkage_arm.S
+++ b/libpcsxcore/new_dynarec/linkage_arm.S
@@ -20,20 +20,20 @@
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 #include "arm_features.h"
+#include "new_dynarec_config.h"
 #include "linkage_offsets.h"
 
 
 #ifdef __MACH__
 #define dynarec_local           ESYM(dynarec_local)
-#define add_link                ESYM(add_link)
+#define add_jump_out            ESYM(add_jump_out)
 #define new_recompile_block     ESYM(new_recompile_block)
 #define get_addr                ESYM(get_addr)
 #define get_addr_ht             ESYM(get_addr_ht)
 #define clean_blocks            ESYM(clean_blocks)
 #define gen_interupt            ESYM(gen_interupt)
-#define psxException            ESYM(psxException)
-#define execI                   ESYM(execI)
 #define invalidate_addr         ESYM(invalidate_addr)
+#define gteCheckStallRaw        ESYM(gteCheckStallRaw)
 #endif
 
 	.bss
@@ -58,12 +58,13 @@ DRC_VAR(cycle_count, 4)
 DRC_VAR(last_count, 4)
 DRC_VAR(pending_exception, 4)
 DRC_VAR(stop, 4)
-DRC_VAR(invc_ptr, 4)
+DRC_VAR(branch_target, 4)
 DRC_VAR(address, 4)
+@DRC_VAR(align0, 4) /* unused/alignment */
 DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)
 
 /* psxRegs */
-DRC_VAR(reg, 128)
+@DRC_VAR(reg, 128)
 DRC_VAR(lo, 4)
 DRC_VAR(hi, 4)
 DRC_VAR(reg_cop0, 128)
@@ -76,23 +77,20 @@ DRC_VAR(pcaddr, 4)
 @DRC_VAR(intCycle, 256)
 
 DRC_VAR(rcnts, 7*4*4)
+DRC_VAR(inv_code_start, 4)
+DRC_VAR(inv_code_end, 4)
 DRC_VAR(mem_rtab, 4)
 DRC_VAR(mem_wtab, 4)
 DRC_VAR(psxH_ptr, 4)
 DRC_VAR(zeromem_ptr, 4)
-DRC_VAR(inv_code_start, 4)
-DRC_VAR(inv_code_end, 4)
-DRC_VAR(branch_target, 4)
+DRC_VAR(invc_ptr, 4)
 DRC_VAR(scratch_buf_ptr, 4)
-@DRC_VAR(align0, 12) /* unused/alignment */
+@DRC_VAR(align1, 8) /* unused/alignment */
 DRC_VAR(mini_ht, 256)
 DRC_VAR(restore_candidate, 512)
 
-/* unused */
-DRC_VAR(FCR0, 4)
-DRC_VAR(FCR31, 4)
 
-#ifdef __MACH__
+#ifdef TEXRELS_FORBIDDEN
 	.data
 	.align 2
 ptr_jump_in:
@@ -116,23 +114,23 @@ ptr_hash_table:
 #endif
 
 .macro load_varadr reg var
-#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
-	movw	\reg, #:lower16:\var
-	movt	\reg, #:upper16:\var
-#elif defined(__ARM_ARCH_7A__) && defined(__MACH__)
-	movw	\reg, #:lower16:(\var-(1678f+4))
-	movt	\reg, #:upper16:(\var-(1678f+4))
+#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
+	movw	\reg, #:lower16:(\var-(1678f+8))
+	movt	\reg, #:upper16:(\var-(1678f+8))
 1678:
 	add	\reg, pc
+#elif defined(HAVE_ARMV7) && !defined(__PIC__)
+	movw	\reg, #:lower16:\var
+	movt	\reg, #:upper16:\var
 #else
 	ldr	\reg, =\var
 #endif
 .endm
 
 .macro load_varadr_ext reg var
-#if defined(__ARM_ARCH_7A__) && defined(__MACH__) && defined(__PIC__)
-	movw	\reg, #:lower16:(ptr_\var-(1678f+4))
-	movt	\reg, #:upper16:(ptr_\var-(1678f+4))
+#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
+	movw	\reg, #:lower16:(ptr_\var-(1678f+8))
+	movt	\reg, #:upper16:(ptr_\var-(1678f+8))
 1678:
 	ldr	\reg, [pc, \reg]
 #else
@@ -141,7 +139,7 @@ ptr_hash_table:
 .endm
 
 .macro mov_16 reg imm
-#ifdef __ARM_ARCH_7A__
+#ifdef HAVE_ARMV7
 	movw	\reg, #\imm
 #else
 	mov	\reg, #(\imm & 0x00ff)
@@ -150,7 +148,7 @@ ptr_hash_table:
 .endm
 
 .macro mov_24 reg imm
-#ifdef __ARM_ARCH_7A__
+#ifdef HAVE_ARMV7
 	movw	\reg, #(\imm & 0xffff)
 	movt	\reg, #(\imm >> 16)
 #else
@@ -160,9 +158,10 @@ ptr_hash_table:
 #endif
 .endm
 
+/* r0 = virtual target address */
+/* r1 = instruction to patch */
 .macro dyna_linker_main
-	/* r0 = virtual target address */
-	/* r1 = instruction to patch */
+#ifndef NO_WRITE_EXEC
 	load_varadr_ext r3, jump_in
 	/* get_page */
 	lsr	r2, r0, #12
@@ -178,20 +177,16 @@ ptr_hash_table:
 	orrcs	r2, r6, #2048
 	ldr	r5, [r3, r2, lsl #2]
 	lsl	r12, r12, #8
-	add	r6, r1, r12, asr #6
+	add	r6, r1, r12, asr #6  /* old target */
 	mov	r8, #0
 	/* jump_in lookup */
 1:
 	movs	r4, r5
 	beq	2f
-	ldr	r3, [r5]
-	ldr	r5, [r4, #12]
+	ldr	r3, [r5]         /* ll_entry .vaddr */
+	ldrd	r4, r5, [r4, #8] /* ll_entry .next, .addr */
 	teq	r3, r0
 	bne	1b
-	ldr	r3, [r4, #4]
-	ldr	r4, [r4, #8]
-	tst	r3, r3
-	bne	1b
 	teq	r4, r6
 	moveq	pc, r4 /* Stale i-cache */
 	mov	r8, r4
@@ -202,7 +197,7 @@ ptr_hash_table:
 
 	mov	r5, r1
 	mov	r1, r6
-	bl	add_link
+	bl	add_jump_out
 	sub	r2, r8, r5
 	and	r1, r7, #0xff000000
 	lsl	r2, r2, #6
@@ -223,8 +218,8 @@ ptr_hash_table:
 	ldr	r5, [r3, r2, lsl #2]
 	ldr	r7, [r6, r4]!
 	teq	r7, r0
-	ldreq	pc, [r6, #4]
-	ldr	r7, [r6, #8]
+	ldreq	pc, [r6, #8]
+	ldr	r7, [r6, #4]
 	teq	r7, r0
 	ldreq	pc, [r6, #12]
 	/* jump_dirty lookup */
@@ -239,13 +234,18 @@ ptr_hash_table:
 	ldr	r1, [r4, #8]
 	/* hash_table insert */
 	ldr	r2, [r6]
-	ldr	r3, [r6, #4]
+	ldr	r3, [r6, #8]
 	str	r0, [r6]
-	str	r1, [r6, #4]
-	str	r2, [r6, #8]
+	str	r1, [r6, #8]
+	str	r2, [r6, #4]
 	str	r3, [r6, #12]
 	mov	pc, r1
 8:
+#else
+	/* XXX: should be able to do better than this... */
+	bl	get_addr_ht
+	mov	pc, r0
+#endif
 
 .endm
 
@@ -379,8 +379,8 @@ FUNCTION(jump_vaddr):
 	and	r2, r3, r2, lsr #12
 	ldr	r2, [r1, r2]!
 	teq	r2, r0
-	ldreq	pc, [r1, #4]
-	ldr	r2, [r1, #8]
+	ldreq	pc, [r1, #8]
+	ldr	r2, [r1, #4]
 	teq	r2, r0
 	ldreq	pc, [r1, #12]
 	str	r10, [fp, #LO_cycle_count]
@@ -392,8 +392,7 @@ FUNCTION(jump_vaddr):
 
 	.align	2
 FUNCTION(verify_code_ds):
-	str	r8, [fp, #LO_branch_target]
-FUNCTION(verify_code_vm):
+	str	r8, [fp, #LO_branch_target] @ preserve HOST_BTREG?
 FUNCTION(verify_code):
 	/* r1 = source */
 	/* r2 = target */
@@ -428,7 +427,7 @@ FUNCTION(verify_code):
 	bl	get_addr
 	mov	pc, r0
 	.size	verify_code, .-verify_code
-	.size	verify_code_vm, .-verify_code_vm
+	.size	verify_code_ds, .-verify_code_ds
 
 	.align	2
 FUNCTION(cc_interrupt):
@@ -476,14 +475,6 @@ FUNCTION(cc_interrupt):
 	b	.E1
 	.size	cc_interrupt, .-cc_interrupt
 
-	.align	2
-FUNCTION(do_interrupt):
-	ldr	r0, [fp, #LO_pcaddr]
-	bl	get_addr_ht
-	add	r10, r10, #2
-	mov	pc, r0
-	.size	do_interrupt, .-do_interrupt
-
 	.align	2
 FUNCTION(fp_exception):
 	mov	r2, #0x10000000
@@ -520,19 +511,9 @@ FUNCTION(jump_syscall):
 	.size	jump_syscall, .-jump_syscall
 
 	.align	2
-	.align	2
-FUNCTION(jump_syscall_hle):
-	str	r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */
-	ldr	r2, [fp, #LO_last_count]
-	mov	r1, #0 /* in delay slot */
-	add	r2, r2, r10
-	mov	r0, #0x20 /* cause */
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
-	bl	psxException
-
 /* note: psxException might do recursive recompiler call from it's HLE code,
  * so be ready for this */
-pcsx_return:
+FUNCTION(jump_to_new_pc):
 	ldr	r1, [fp, #LO_next_interupt]
 	ldr	r10, [fp, #LO_cycle]
 	ldr	r0, [fp, #LO_pcaddr]
@@ -540,27 +521,7 @@ pcsx_return:
 	str	r1, [fp, #LO_last_count]
 	bl	get_addr_ht
 	mov	pc, r0
-	.size	jump_syscall_hle, .-jump_syscall_hle
-
-	.align	2
-FUNCTION(jump_hlecall):
-	ldr	r2, [fp, #LO_last_count]
-	str	r0, [fp, #LO_pcaddr]
-	add	r2, r2, r10
-	adr	lr, pcsx_return
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
-	bx	r1
-	.size	jump_hlecall, .-jump_hlecall
-
-	.align	2
-FUNCTION(jump_intcall):
-	ldr	r2, [fp, #LO_last_count]
-	str	r0, [fp, #LO_pcaddr]
-	add	r2, r2, r10
-	adr	lr, pcsx_return
-	str	r2, [fp, #LO_cycle] /* PCSX cycle counter */
-	b	execI
-	.size	jump_hlecall, .-jump_hlecall
+	.size	jump_to_new_pc, .-jump_to_new_pc
 
 	.align	2
 FUNCTION(new_dyna_leave):
@@ -573,72 +534,72 @@ FUNCTION(new_dyna_leave):
 
 	.align	2
 FUNCTION(invalidate_addr_r0):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	b	invalidate_addr_call
 	.size	invalidate_addr_r0, .-invalidate_addr_r0
 	.align	2
 FUNCTION(invalidate_addr_r1):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r1
 	b	invalidate_addr_call
 	.size	invalidate_addr_r1, .-invalidate_addr_r1
 	.align	2
 FUNCTION(invalidate_addr_r2):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r2
 	b	invalidate_addr_call
 	.size	invalidate_addr_r2, .-invalidate_addr_r2
 	.align	2
 FUNCTION(invalidate_addr_r3):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r3
 	b	invalidate_addr_call
 	.size	invalidate_addr_r3, .-invalidate_addr_r3
 	.align	2
 FUNCTION(invalidate_addr_r4):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r4
 	b	invalidate_addr_call
 	.size	invalidate_addr_r4, .-invalidate_addr_r4
 	.align	2
 FUNCTION(invalidate_addr_r5):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r5
 	b	invalidate_addr_call
 	.size	invalidate_addr_r5, .-invalidate_addr_r5
 	.align	2
 FUNCTION(invalidate_addr_r6):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r6
 	b	invalidate_addr_call
 	.size	invalidate_addr_r6, .-invalidate_addr_r6
 	.align	2
 FUNCTION(invalidate_addr_r7):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r7
 	b	invalidate_addr_call
 	.size	invalidate_addr_r7, .-invalidate_addr_r7
 	.align	2
 FUNCTION(invalidate_addr_r8):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r8
 	b	invalidate_addr_call
 	.size	invalidate_addr_r8, .-invalidate_addr_r8
 	.align	2
 FUNCTION(invalidate_addr_r9):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r9
 	b	invalidate_addr_call
 	.size	invalidate_addr_r9, .-invalidate_addr_r9
 	.align	2
 FUNCTION(invalidate_addr_r10):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r10
 	b	invalidate_addr_call
 	.size	invalidate_addr_r10, .-invalidate_addr_r10
 	.align	2
 FUNCTION(invalidate_addr_r12):
-	stmia	fp, {r0, r1, r2, r3, r12, lr}
+	stmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
 	mov	r0, r12
 	.size	invalidate_addr_r12, .-invalidate_addr_r12
 	.align	2
@@ -648,14 +609,14 @@ invalidate_addr_call:
 	cmp	r0, r12
 	cmpcs	lr, r0
 	blcc	invalidate_addr
-	ldmia	fp, {r0, r1, r2, r3, r12, pc}
+	ldmia	fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
 	.size	invalidate_addr_call, .-invalidate_addr_call
 
 	.align	2
 FUNCTION(new_dyna_start):
 	/* ip is stored to conform EABI alignment */
 	stmfd	sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
-	load_varadr fp, dynarec_local
+	mov	fp, r0 /* dynarec_local */
 	ldr	r0, [fp, #LO_pcaddr]
 	bl	get_addr_ht
 	ldr	r1, [fp, #LO_next_interupt]
@@ -694,7 +655,7 @@ FUNCTION(jump_handler_read8):
 
 FUNCTION(jump_handler_read16):
 	add	r1, #0x1000/4*4		@ shift to r16 part
-	pcsx_read_mem ldrbcc, 1
+	pcsx_read_mem ldrhcc, 1
 
 FUNCTION(jump_handler_read32):
 	pcsx_read_mem ldrcc, 2
@@ -723,10 +684,10 @@ FUNCTION(jump_handler_read32):
 	blx	r3
 
 	ldr	r0, [fp, #LO_next_interupt]
-	pop	{r2, r3}
+	pop	{r2, lr}
 	str	r0, [fp, #LO_last_count]
 	sub	r0, r2, r0
-	bx	r3
+	bx	lr
 .endm
 
 FUNCTION(jump_handler_write8):
@@ -751,10 +712,10 @@ FUNCTION(jump_handler_write_h):
 	blx	r3
 
 	ldr	r0, [fp, #LO_next_interupt]
-	pop	{r2, r3}
+	pop	{r2, lr}
 	str	r0, [fp, #LO_last_count]
 	sub	r0, r2, r0
-	bx	r3
+	bx	lr
 
 FUNCTION(jump_handle_swl):
 	/* r0 = address, r1 = data, r2 = cycles */
@@ -860,4 +821,16 @@ FUNCTION(rcnt2_read_count_m1):
 	lsr	r0, #16		@ /= 8
 	bx	lr
 
+FUNCTION(call_gteStall):
+	/* r0 = op_cycles, r1 = cycles */
+	ldr	r2, [fp, #LO_last_count]
+	str	lr, [fp, #LO_saved_lr]
+	add	r1, r1, r2
+	str	r1, [fp, #LO_cycle]
+	add	r1, fp, #LO_psxRegs
+	bl	gteCheckStallRaw
+	ldr	lr, [fp, #LO_saved_lr]
+	add	r10, r10, r0
+	bx	lr
+
 @ vim:filetype=armasm
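
Note on the repeated #4/#8 offset swaps above (in jump_vaddr and in dyna_linker_main's hash_table lookup/insert): they amount to reordering each two-way hash-table bucket from {vaddr0, tcaddr0, vaddr1, tcaddr1} to {vaddr0, vaddr1, tcaddr0, tcaddr1}, presumably so both guest-address tags sit in adjacent words. The C sketch below, placed after the last hunk so the patch itself stays applicable, illustrates the bucket layout and the lookup/insert behaviour the assembly implements; the type and function names are hypothetical and are not taken from the pcsx_rearmed sources.

#include <stddef.h>
#include <stdint.h>

/* One two-way bucket of the jump hash table, matching the new offsets:
 * guest addresses at #0/#4, translated-code pointers at #8/#12.
 * (The old layout interleaved them: vaddr0 #0, tcaddr0 #4, vaddr1 #8,
 * tcaddr1 #12.)  Assumes 4-byte pointers, as on the 32-bit ARM host. */
struct ht_bucket {
	uint32_t vaddr[2];   /* guest PC tags, offsets #0 and #4 */
	void    *tcaddr[2];  /* host code pointers, #8 and #12   */
};

/* Lookup as in jump_vaddr: try way 0 (tag #0 -> code #8),
 * then way 1 (tag #4 -> code #12). */
void *ht_lookup(const struct ht_bucket *b, uint32_t vaddr)
{
	if (b->vaddr[0] == vaddr)
		return b->tcaddr[0];
	if (b->vaddr[1] == vaddr)
		return b->tcaddr[1];
	return NULL;
}

/* Insert as in dyna_linker_main's "hash_table insert": the new pair takes
 * way 0 and the previous way-0 pair is demoted to way 1, keeping the bucket
 * in most-recently-used order (the same shuffle the four str instructions
 * perform). */
void ht_insert(struct ht_bucket *b, uint32_t vaddr, void *tcaddr)
{
	b->vaddr[1]  = b->vaddr[0];
	b->tcaddr[1] = b->tcaddr[0];
	b->vaddr[0]  = vaddr;
	b->tcaddr[0] = tcaddr;
}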