0,
0};
+/* Assembly trampolines (defined in linkage_arm.s): each invalidates the
+ * translated-code page for the guest address currently held in the named
+ * host register.  Note there is no r11/r13/r14/r15 variant (fp, sp, lr,
+ * pc never hold a guest address here). */
+void invalidate_addr_r0();
+void invalidate_addr_r1();
+void invalidate_addr_r2();
+void invalidate_addr_r3();
+void invalidate_addr_r4();
+void invalidate_addr_r5();
+void invalidate_addr_r6();
+void invalidate_addr_r7();
+void invalidate_addr_r8();
+void invalidate_addr_r9();
+void invalidate_addr_r10();
+void invalidate_addr_r12();
+
+/* Dispatch table indexed by host register number: entry N is the stub
+ * to call when the address to invalidate lives in host register N.
+ * Slots 11 and 13-15 are 0 because no stub exists for those registers.
+ * NOTE(review): function pointers are stored via (int) casts into a
+ * u_int table -- assumes a 32-bit target (true for this ARM backend). */
+const u_int invalidate_addr_reg[16] = {
+ (int)invalidate_addr_r0,
+ (int)invalidate_addr_r1,
+ (int)invalidate_addr_r2,
+ (int)invalidate_addr_r3,
+ (int)invalidate_addr_r4,
+ (int)invalidate_addr_r5,
+ (int)invalidate_addr_r6,
+ (int)invalidate_addr_r7,
+ (int)invalidate_addr_r8,
+ (int)invalidate_addr_r9,
+ (int)invalidate_addr_r10,
+ 0,
+ (int)invalidate_addr_r12,
+ 0,
+ 0,
+ 0};
+
#include "fpu.h"
unsigned int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
output_w32(0xe0800620|rd_rn_rm(rt,rs1,rs2));
}
+/* Emit a conditional ARM call: BLNE (branch-with-link if Not Equal) to
+ * absolute address a.  0x1b000000 = condition NE (0001) in bits 31-28
+ * plus the BL opcode (1011) in bits 27-24; genjmp() computes the signed
+ * 24-bit word offset from the current output pointer to a, which is
+ * OR'd into the low bits of the instruction. */
+void emit_callne(int a)
+{
+ assem_debug("blne %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x1b000000|offset);
+}
+
// Used to preload hash table entries
void emit_prefetch(void *addr)
{
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* linkage_arm.s for PCSX *
- * Copyright (C) 2009-2010 Ari64 *
+ * Copyright (C) 2009-2011 Ari64 *
 * Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas *
* *
* This program is free software; you can redistribute it and/or modify *
.size indirect_jump, .-indirect_jump
.size indirect_jump_indexed, .-indirect_jump_indexed
+@ invalidate_addr_rN trampolines: called from generated code when a
+@ store may have hit a page containing translated code.  Each one saves
+@ the caller-saved registers (r0-r3, r12, lr) into the scratch area at
+@ fp, moves the faulting address from rN into r0 shifted right by 12
+@ (i.e. the page number that invalidate_block expects), then reaches
+@ the common tail invalidate_addr_call.
+ .align 2
+ .global invalidate_addr_r0
+ .type invalidate_addr_r0, %function
+invalidate_addr_r0:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r0, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r0, .-invalidate_addr_r0
+ .align 2
+ .global invalidate_addr_r1
+ .type invalidate_addr_r1, %function
+invalidate_addr_r1:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r1, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r1, .-invalidate_addr_r1
+ .align 2
+ .global invalidate_addr_r2
+ .type invalidate_addr_r2, %function
+invalidate_addr_r2:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r2, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r2, .-invalidate_addr_r2
+ .align 2
+ .global invalidate_addr_r3
+ .type invalidate_addr_r3, %function
+invalidate_addr_r3:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r3, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r3, .-invalidate_addr_r3
+ .align 2
+ .global invalidate_addr_r4
+ .type invalidate_addr_r4, %function
+invalidate_addr_r4:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r4, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r4, .-invalidate_addr_r4
+ .align 2
+ .global invalidate_addr_r5
+ .type invalidate_addr_r5, %function
+invalidate_addr_r5:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r5, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r5, .-invalidate_addr_r5
+ .align 2
+ .global invalidate_addr_r6
+ .type invalidate_addr_r6, %function
+invalidate_addr_r6:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r6, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r6, .-invalidate_addr_r6
+ .align 2
+ .global invalidate_addr_r7
+ .type invalidate_addr_r7, %function
+invalidate_addr_r7:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r7, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r7, .-invalidate_addr_r7
+ .align 2
+ .global invalidate_addr_r8
+ .type invalidate_addr_r8, %function
+invalidate_addr_r8:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r8, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r8, .-invalidate_addr_r8
+ .align 2
+ .global invalidate_addr_r9
+ .type invalidate_addr_r9, %function
+invalidate_addr_r9:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r9, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r9, .-invalidate_addr_r9
+ .align 2
+ .global invalidate_addr_r10
+ .type invalidate_addr_r10, %function
+invalidate_addr_r10:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r10, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r10, .-invalidate_addr_r10
+ .align 2
+ .global invalidate_addr_r12
+ .type invalidate_addr_r12, %function
+invalidate_addr_r12:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r12, #12
+ .size invalidate_addr_r12, .-invalidate_addr_r12
+@ NOTE: the r12 stub has no branch -- it deliberately falls through
+@ into invalidate_addr_call immediately below.
+ .align 2
+ .global invalidate_addr_call
+ .type invalidate_addr_call, %function
+invalidate_addr_call:
+@ Common tail: r0 already holds the page number.  Call the C routine,
+@ then restore the saved registers, loading the saved lr straight into
+@ pc to return to the generated code that triggered the invalidation.
+ bl invalidate_block
+ ldmia fp, {r0, r1, r2, r3, r12, pc}
+ .size invalidate_addr_call, .-invalidate_addr_call
+
.align 2
.global new_dyna_start
.type new_dyna_start, %function
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[addr]);
+ #else
jaddr2=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
+ #endif
}
}
if(jaddr) {
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[temp]);
+ #else
jaddr3=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
+ #endif
}
}
if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[ar]);
+ #else
jaddr3=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
+ #endif
}
if (opcode[i]==0x32) { // LWC2
cop2_put_dreg(copr,tl,HOST_TEMPREG);