void gteRTPS_nf_arm(void *cp2_regs, int opcode);
void gteRTPT_nf_arm(void *cp2_regs, int opcode);
void gteNCLIP_arm(void *cp2_regs, int opcode);
+
+// decomposed ops, nonstd calling convention
+void gteMVMVA_part_arm(void *cp2_regs, int is_shift12);
+void gteMVMVA_part_nf_arm(void *cp2_regs, int is_shift12);
+void gteMVMVA_part_cv3sh12_arm(void *cp2_regs);
+
+void gteMACtoIR_lm0(void *cp2_regs);
+void gteMACtoIR_lm1(void *cp2_regs);
+void gteMACtoIR_lm0_nf(void *cp2_regs);
+void gteMACtoIR_lm1_nf(void *cp2_regs);
.endif
.endm
+@ prepare work reg for ssatx0: load the positive clamp bound 2^(bit-1)
+@ (values are later saturated to the range 0 .. 2^(bit-1)-1)
+@ in: wr - work reg to set up, bit - total bit width to saturate to
+.macro ssatx0_prep wr bit
+ mov \wr, #(1<<(\bit-1))
+.endm
+
+@ saturate \rd to 0 .. 2^(\bit-1)-1 (lower clamp at 0, unlike plain ssatx)
+@ \wr must already hold 2^(\bit-1) (see ssatx0_prep); \bit itself is unused here
+.macro ssatx0 rd wr bit
+ cmp \rd, \wr @ rd >= upper bound?
+ subge \rd, \wr, #1 @ clamp to bound-1
+ cmn \rd, #0 @ rd < 0?
+ movlt \rd, #0 @ clamp to 0
+.endm
+
.macro usat16_ rd rs
.if HAVE_ARMV7
usat \rd, #16, \rs
mov r9, r0
pop {r0, r12, lr}
.endif
-1:
- cmp r9, #0x20000
+1: cmp r9, #0x20000
add r1, r0, #4*12
movhs r9, #0x20000
ldrd r6, [r0,#4*(32+24)] @ gteOFXY
.size gteRTPT_nf_arm, .-gteRTPT_nf_arm
+@ MVMVA worker: gteMAC1..3 = (CVx<<12 + MXxy*Vy), optionally >>12 (shift12),
+@ with optional gteFLAG MAC overflow bit computation (\do_flags)
+@ note: not std calling convention used
+@ r0 = CP2 (d,c) (must preserve)
+@ r1 = needs_shift12
+@ r4,r5 = VXYZ(v) packed
+@ r6 = &MX11(mx)
+@ r7 = &CV1(cv)
+.macro mvma_op do_flags
+ push {r8-r11}
+
+.if \do_flags
+ ands r3, r1, #1 @ gteFLAG, shift_need
+.else
+ tst r1, #1
+.endif
+ ldmia r7, {r7-r9} @ CV123
+ ldmia r6!,{r10-r12} @ MX1*,MX2*
+ asr r1, r7, #20
+ lsl r7, #12 @ expand to 64bit
+ smlalbb r7, r1, r10, r4 @ MX11 * vx
+ smlaltt r7, r1, r10, r4 @ MX12 * vy
+ smlalbb r7, r1, r11, r5 @ MX13 * vz
+ lsrne r7, #12 @ ne = shift12 set: >>12 across
+ orrne r7, r1, lsl #20 @ the 64bit pair -> gteMAC1
+.if \do_flags
+ asrne r1, #20 @ keep hi word in step with the shift
+ adds r2, r7, #0x80000000 @ does r1:r7 fit in signed 32 bits?
+ adcs r1, #0
+ orrgt r3, #(1<<30) @ MAC1 positive overflow
+ orrmi r3, #(1<<31)|(1<<27) @ error bit + MAC1 negative overflow
+ tst r3, #1 @ repeat shift test
+.endif
+ asr r1, r8, #20
+ lsl r8, #12 @ expand to 64bit
+ smlaltb r8, r1, r11, r4 @ MX21 * vx
+ smlalbt r8, r1, r12, r4 @ MX22 * vy
+ smlaltb r8, r1, r12, r5 @ MX23 * vz
+ lsrne r8, #12
+ orrne r8, r1, lsl #20 @ gteMAC2
+.if \do_flags
+ asrne r1, #20
+ adds r2, r8, #0x80000000
+ adcs r1, #0
+ orrgt r3, #(1<<29) @ MAC2 positive overflow
+ orrmi r3, #(1<<31)|(1<<26) @ error bit + MAC2 negative overflow
+ tst r3, #1 @ repeat shift test
+.endif
+ ldmia r6!,{r10-r11} @ MX3*
+ asr r1, r9, #20
+ lsl r9, #12 @ expand to 64bit
+ smlalbb r9, r1, r10, r4 @ MX31 * vx
+ smlaltt r9, r1, r10, r4 @ MX32 * vy
+ smlalbb r9, r1, r11, r5 @ MX33 * vz
+ lsrne r9, #12
+ orrne r9, r1, lsl #20 @ gteMAC3
+.if \do_flags
+ asrne r1, #20
+ adds r2, r9, #0x80000000
+ adcs r1, #0
+ orrgt r3, #(1<<28) @ MAC3 positive overflow
+ orrmi r3, #(1<<31)|(1<<25) @ error bit + MAC3 negative overflow
+ bic r3, #1 @ drop the borrowed shift12 bit
+.else
+ mov r3, #0
+.endif
+ str r3, [r0, #4*(32+31)] @ gteFLAG
+ add r1, r0, #4*25 @ &gteMAC1
+ stmia r1, {r7-r9} @ store gteMAC1..3
+
+ pop {r8-r11}
+ bx lr
+.endm
+
+.global gteMVMVA_part_arm
+gteMVMVA_part_arm:
+ mvma_op 1 @ with gteFLAG computation (see mvma_op for reg conventions)
+ .size gteMVMVA_part_arm, .-gteMVMVA_part_arm
+
+.global gteMVMVA_part_nf_arm
+gteMVMVA_part_nf_arm:
+ mvma_op 0 @ no-flags variant: gteFLAG is simply cleared
+ .size gteMVMVA_part_nf_arm, .-gteMVMVA_part_nf_arm
+
+@ common version of MVMVA with cv3 (== 0) and shift12,
+@ can't overflow so no gteMAC flags needed
+@ note: not std calling convention used
+@ r0 = CP2 (d,c) (must preserve)
+@ r4,r5 = VXYZ(v) packed
+@ r6 = &MX11(mx)
+.global gteMVMVA_part_cv3sh12_arm
+gteMVMVA_part_cv3sh12_arm:
+ push {r8-r9}
+ ldmia r6!,{r7-r9} @ MX1*,MX2*
+ smulbb r1, r7, r4 @ MX11 * vx
+ smultt r2, r7, r4 @ MX12 * vy
+ smulbb r3, r8, r5 @ MX13 * vz
+ qadd r1, r1, r2 @ saturating add of first two products
+ asr r3, #1 @ prevent oflow, lose a bit
+ add r1, r3, r1, asr #1 @ halved sum; asr #11 below restores >>12
+ asr r7, r1, #11 @ gteMAC1
+ smultb r1, r8, r4 @ MX21 * vx
+ smulbt r2, r9, r4 @ MX22 * vy
+ smultb r3, r9, r5 @ MX23 * vz
+ qadd r1, r1, r2
+ asr r3, #1
+ add r1, r3, r1, asr #1
+ asr r8, r1, #11 @ gteMAC2
+ ldmia r6, {r6,r9} @ MX3*
+ smulbb r1, r6, r4 @ MX31 * vx
+ smultt r2, r6, r4 @ MX32 * vy
+ smulbb r3, r9, r5 @ MX33 * vz
+ qadd r1, r1, r2
+ asr r3, #1
+ add r1, r3, r1, asr #1
+ asr r9, r1, #11 @ gteMAC3
+ add r1, r0, #4*25 @ &gteMAC1
+ mov r2, #0
+ stmia r1, {r7-r9} @ store gteMAC1..3
+ str r2, [r0, #4*(32+31)] @ gteFLAG = 0
+ pop {r8-r9}
+ bx lr
+ .size gteMVMVA_part_cv3sh12_arm, .-gteMVMVA_part_cv3sh12_arm
+
+
.global gteNCLIP_arm @ r0=CP2 (d,c),
gteNCLIP_arm:
push {r4-r6,lr}
.size gteNCLIP_arm, .-gteNCLIP_arm
+@ saturate gteMAC1..3 into gteIR1..3, updating gteFLAG saturation bits
+@ lm=1: clamp to 0..7fff; lm=0: clamp to -8000..7fff
+@ r0 = CP2 (d,c); clobbers r1-r3,r12
+.macro gteMACtoIR lm
+ ldr r2, [r0, #4*25] @ gteMAC1
+ mov r1, #1<<15 @ upper clamp bound (0x8000)
+ ldr r12,[r0, #4*(32+31)] @ gteFLAG
+ cmp r2, r1
+ subge r2, r1, #1 @ IR1 = 0x7fff
+ orrge r12, #(1<<31)|(1<<24) @ error bit + IR1 saturated
+.if \lm
+ cmp r2, #0
+ movlt r2, #0
+.else
+ cmn r2, r1
+ rsblt r2, r1, #0 @ IR1 = -0x8000
+.endif
+ str r2, [r0, #4*9] @ gteIR1
+ ldrd r2, [r0, #4*26] @ gteMAC23 (ldrd leaves flags intact)
+ orrlt r12, #(1<<31)|(1<<24) @ deferred IR1 lower-clamp flag
+ cmp r2, r1
+ subge r2, r1, #1
+ orrge r12, #1<<23 @ IR2 saturated
+ orrge r12, #1<<31
+.if \lm
+ cmp r2, #0
+ movlt r2, #0
+.else
+ cmn r2, r1
+ rsblt r2, r1, #0
+.endif
+ orrlt r12, #1<<23
+ orrlt r12, #1<<31
+ cmp r3, r1
+ subge r3, r1, #1
+ orrge r12, #1<<22 @ IR3 saturated (no error bit for IR3)
+.if \lm
+ cmp r3, #0
+ movlt r3, #0
+.else
+ cmn r3, r1
+ rsblt r3, r1, #0
+.endif
+ orrlt r12, #1<<22
+ strd r2, [r0, #4*10] @ gteIR23
+ str r12,[r0, #4*(32+31)] @ gteFLAG
+ bx lr
+.endm
+
+.global gteMACtoIR_lm0 @ r0=CP2 (d,c)
+gteMACtoIR_lm0:
+ gteMACtoIR 0 @ lm=0: clamp IRx to -8000..7fff, with flags
+ .size gteMACtoIR_lm0, .-gteMACtoIR_lm0
+
+.global gteMACtoIR_lm1 @ r0=CP2 (d,c)
+gteMACtoIR_lm1:
+ gteMACtoIR 1 @ lm=1: clamp IRx to 0..7fff, with flags
+ .size gteMACtoIR_lm1, .-gteMACtoIR_lm1
+
+
+.global gteMACtoIR_lm0_nf @ r0=CP2 (d,c)
+gteMACtoIR_lm0_nf:
+@ no-flags variant: gteIR1..3 = gteMAC1..3 clamped to signed 16bit
+@ (ssatx/ssatx_prep are macros defined elsewhere in this file - not shown here)
+ add r12, r0, #4*25 @ &gteMAC1
+ ldmia r12, {r1-r3} @ gteMAC1..3
+ ssatx_prep r12, 16
+ ssatx r1, r12, 16
+ ssatx r2, r12, 16
+ ssatx r3, r12, 16
+ add r12, r0, #4*9 @ &gteIR1
+ stmia r12, {r1-r3} @ gteIR1..3
+ bx lr
+ .size gteMACtoIR_lm0_nf, .-gteMACtoIR_lm0_nf
+
+
+.global gteMACtoIR_lm1_nf @ r0=CP2 (d,c)
+gteMACtoIR_lm1_nf:
+@ no-flags variant with lm=1: gteIR1..3 = gteMAC1..3 clamped to 0..7fff
+ add r12, r0, #4*25 @ &gteMAC1
+ ldmia r12, {r1-r3} @ gteMAC1..3
+ ssatx0_prep r12, 16
+ ssatx0 r1, r12, 16
+ ssatx0 r2, r12, 16
+ ssatx0 r3, r12, 16
+ add r12, r0, #4*9 @ &gteIR1
+ stmia r12, {r1-r3} @ gteIR1..3
+ bx lr
+ .size gteMACtoIR_lm1_nf, .-gteMACtoIR_lm1_nf
+
+
+.if 0 @ disabled: standalone driver for testing the decomposed MVMVA parts
+.global gteMVMVA_test
+gteMVMVA_test:
+ push {r4-r7,lr}
+ push {r1} @ keep opcode for the lm bit later
+ and r2, r1, #0x18000 @ v
+ cmp r2, #0x18000 @ v == 3?
+ addeq r4, r0, #4*9 @ v==3: take IR1..3
+ addne r3, r0, r2, lsr #12 @ else &VXYZ(v)
+ ldmeqia r4, {r3-r5}
+ ldmneia r3, {r4,r5}
+ lsleq r3, #16
+ lsreq r3, #16
+ orreq r4, r3, r4, lsl #16 @ r4,r5 = VXYZ(v)
+ @and r5, #0xffff
+ add r12, r0, #4*32 @ CP2 control regs base
+ and r3, r1, #0x60000 @ mx
+ lsr r3, #17
+ add r6, r12, r3, lsl #5 @ r6 = &MX11(mx)
+ cmp r3, #3
+ adreq r6, zeroes @ mx == 3: zero matrix
+ and r2, r1, #0x06000 @ cv
+ lsr r2, #13
+ add r7, r12, r2, lsl #5
+ add r7, #4*5 @ r7 = &CV1(cv)
+ cmp r2, #3
+ adreq r7, zeroes @ cv == 3: zero vector
+.if 1
+ adr lr, 1f
+ bne 0f
+ tst r1, #1<<19 @ cv == 3 && shift12: fast path
+ bne gteMVMVA_part_cv3sh12_arm
+0:
+ and r1, #1<<19
+ lsr r1, #19 @ r1 = needs_shift12
+ b gteMVMVA_part_arm
+1:
+ pop {r1}
+ tst r1, #1<<10 @ lm
+ adr lr, 0f
+ beq gteMACtoIR_lm0
+ bne gteMACtoIR_lm1
+0:
+.else
+ bl gteMVMVA_part_neon
+ pop {r1}
+ and r1, #1<<10
+ bl gteMACtoIR_flags_neon
+.endif
+ pop {r4-r7,pc}
+
+zeroes:
+ .word 0,0,0,0,0
+.endif
+
+
@ vim:filetype=armasm
void gteRTPS_neon(void *cp2_regs, int opcode);
void gteRTPT_neon(void *cp2_regs, int opcode);
-void gteMVMVA_neon(void *cp2_regs, int opcode);
+
+// decomposed ops, nonstd calling convention
+void gteMVMVA_part_neon(void *cp2_regs, int opcode);
+
+// after NEON call only, does not do gteIR
+void gteMACtoIR_flags_neon(void *cp2_regs, int lm);
-.global gteMVMVA_neon @ r0=CP2 (d,c), op
-gteMVMVA_neon:
- push {r4-r5,lr}
-
- add r12, r0, #4*32
-
- ubfx r2, r1, #15, #2 @ v
-
- vmov.i32 q0, #0 @ d0,d1
- vmov.i32 q1, #0 @ d2,d3
- vmov.i32 q2, #0 @ d4,d5
- cmp r2, #3
- addeq r4, r0, #4*9
- addne r3, r0, r2, lsl #3
- ldmeqia r4, {r3-r5}
- ldmneia r3, {r4,r5}
- pkhbteq r4, r3, r4, lsl #16
+@ note: non-std calling convention used
+@ r0 = CP2 (d,c) (must preserve)
+@ r1 = op
+@ r4,r5 = VXYZ(v) packed
+@ r6 = &MX11(mx)
+@ r7 = &CV1(cv)
+.global gteMVMVA_part_neon
+gteMVMVA_part_neon:
uxth r5, r5
vmov.32 d8[0], r4
vmov.32 d8[1], r5 @ VXYZ(v)
- ubfx r3, r1, #17, #2 @ mx
- ubfx r2, r1, #13, #2 @ cv
- cmp r3, #3
- beq 0f @ very rare case
- add r3, r12, r3, lsl #5
- vldmia r3, {d0-d2} @ MXxy/gteR* [16*9]
-0:
- cmp r2, #3
- add r3, r12, r2, lsl #5
- beq 0f
- add r3, #4*5
- vldmia r3, {d4-d5} @ CVx/gteTR*
+ vldmia r6, {d0-d2} @ MXxy/gteR* [16*9]
+ vldmia r7, {d4-d5} @ CVx/gteTR*
-0:
vmov.i32 q15, #0
vext.16 d2, d1, d2, #2 @ xxx3 -> x321
vext.16 d1, d0, d1, #3 @ xx32 -> x321
add r3, r0, #4*9
vst1.32 d18, [r3]!
vst1.32 d19[0], [r3]
+ bx lr
+ .size gteMVMVA_part_neon, .-gteMVMVA_part_neon
- tst r1, #1<<10 @ lm
- mov r2, #0
+
+@ get flags after gteMVMVA_part_neon operation
+.global gteMACtoIR_flags_neon @ r0=CP2 (d,c), r1=lm
+gteMACtoIR_flags_neon:
+ push {r4,r5,lr}
+ tst r1, r1 @ lm
mov lr, #0 @ gteFLAG
+ mov r2, #0
mov r12, #15
moveq r2, #0x8000 @ adj
moveq r12, #16 @ shift
orrne lr, #(1<<22) @ IR3/limB3
str lr, [r0, #4*(32+31)] @ gteFLAG
- pop {r4-r5,pc}
- .size gteMVMVA_neon, .-gteMVMVA_neon
+ pop {r4,r5,pc}
+ .size gteMACtoIR_flags_neon, .-gteMACtoIR_flags_neon
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#ifdef PCSX
+#include "../gte_arm.h"
+#include "../gte_neon.h"
+#include "pcnt.h"
+#endif
+
extern int cycle_count;
extern int last_count;
extern int pcaddr;
output_w32(0xe15000b0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
}
}
+// Emit an ARM LDRD (load doubleword): rt,rt+1 <- [rs, #+/-offset].
+// NOTE(review): LDRD pairs rt with rt+1, and the architecture expects an
+// even rt - callers must guarantee this; the encoding below does not check.
+static void emit_ldrd(int offset, int rs, int rt)
+{
+ assert(offset>-256&&offset<256); // imm8, split into two nibbles below
+ assem_debug("ldrd %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1c000d0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe14000d0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
void emit_readword(int addr, int rt)
{
u_int offset = addr-(u_int)&dynarec_local;
output_w32(0x72800000|rd_rn_rm(15,15,0));
}
-// Save registers before function call
-void save_regs(u_int reglist)
+// stmia-based bulk save of an arbitrary reglist into dynarec_local (via fp),
+// not restricted to the caller-saved subset like save_regs()
+static void save_regs_all(u_int reglist)
{
- reglist&=0x100f; // only save the caller-save registers, r0-r3, r12
+ int i;
 if(!reglist) return;
 assem_debug("stmia fp,{");
- if(reglist&1) assem_debug("r0, ");
- if(reglist&2) assem_debug("r1, ");
- if(reglist&4) assem_debug("r2, ");
- if(reglist&8) assem_debug("r3, ");
- if(reglist&0x1000) assem_debug("r12");
+ for(i=0;i<16;i++)
+ if(reglist&(1<<i))
+ assem_debug("r%d,",i);
 assem_debug("}\n");
 output_w32(0xe88b0000|reglist);
}
-// Restore registers after function call
-void restore_regs(u_int reglist)
+// ldmia-based bulk restore of an arbitrary reglist from dynarec_local (via fp)
+static void restore_regs_all(u_int reglist)
{
- reglist&=0x100f; // only restore the caller-save registers, r0-r3, r12
+ int i;
 if(!reglist) return;
 assem_debug("ldmia fp,{");
- if(reglist&1) assem_debug("r0, ");
- if(reglist&2) assem_debug("r1, ");
- if(reglist&4) assem_debug("r2, ");
- if(reglist&8) assem_debug("r3, ");
- if(reglist&0x1000) assem_debug("r12");
+ for(i=0;i<16;i++)
+ if(reglist&(1<<i))
+ assem_debug("r%d,",i);
 assem_debug("}\n");
 output_w32(0xe89b0000|reglist);
}
+// Save registers before function call
+// (r0-r3, r12 is the ARM caller-saved set; everything else survives the call)
+static void save_regs(u_int reglist)
+{
+ reglist&=0x100f; // only save the caller-save registers, r0-r3, r12
+ save_regs_all(reglist);
+}
+// Restore registers after function call
+// (counterpart of save_regs: same caller-saved subset)
+static void restore_regs(u_int reglist)
+{
+ reglist&=0x100f; // only restore the caller-save registers, r0-r3, r12
+ restore_regs_all(reglist);
+}
// Write back consts using r14 so we don't disturb the other registers
void wb_consts(signed char i_regmap[],uint64_t i_is32,u_int i_dirty,int i)
}
}
-void c2op_assemble(int i,struct regstat *i_regs)
+// common GTE call setup: spill reglist, then point r0 at the CP2 data regs
+// (the ABI all gte handlers expect); op is currently unused
+static void c2op_prologue(u_int op,u_int reglist)
+{
+ save_regs_all(reglist);
+ emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); // cop2 regs
+}
+
+// common GTE call teardown: reload the spilled reglist (op unused)
+static void c2op_epilogue(u_int op,u_int reglist)
+{
+ restore_regs_all(reglist);
+}
+
+static void c2op_assemble(int i,struct regstat *i_regs)
{
+ // Emits the call(s) implementing one COP2/GTE instruction; MVMVA gets a
+ // decomposed fast path, everything else calls the monolithic handler.
signed char temp=get_reg(i_regs->regmap,-1);
u_int c2op=source[i]&0x3f;
u_int hr,reglist=0;
- int need_flags;
+ int need_flags,need_ir;
for(hr=0;hr<HOST_REGS;hr++) {
if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
}
- if(i==0||itype[i-1]!=C2OP)
- save_regs(reglist);
if (gte_handlers[c2op]!=NULL) {
- int cc=get_reg(i_regs->regmap,CCREG);
- emit_movimm(source[i],1); // opcode
- if (cc>=0&>e_cycletab[c2op])
- emit_addimm(cc,gte_cycletab[c2op]/2,cc); // XXX: could just adjust ccadj?
- emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); // cop2 regs
- emit_writeword(1,(int)&psxRegs.code);
need_flags=!(gte_unneeded[i+1]>>63); // +1 because of how liveness detection works
+ need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00; // any of IR1-3 (regs 9-11) read later?
+ assem_debug("gte unneeded %016llx, need_flags %d, need_ir %d\n",
+ gte_unneeded[i+1],need_flags,need_ir);
#ifdef ARMv5_ONLY
// let's take more risk here
need_flags=need_flags&>e_reads_flags;
#endif
+ // NOTE(review): "&>e_" above/in removed lines looks like HTML-mangled
+ // "&&gte_" - verify against upstream before applying this hunk.
+ switch(c2op) {
+ case GTE_MVMVA: {
+ // opcode field decode: shift12(bit19), v(15-16), cv(13-14), mx(17-18), lm(10)
+ int shift = (source[i] >> 19) & 1;
+ int v = (source[i] >> 15) & 3;
+ int cv = (source[i] >> 13) & 3;
+ int mx = (source[i] >> 17) & 3;
+ int lm = (source[i] >> 10) & 1;
+ reglist&=0x10ff; // +{r4-r7}
+ c2op_prologue(c2op,reglist);
+ /* r4,r5 = VXYZ(v) packed; r6 = &MX11(mx); r7 = &CV1(cv) */
+ if(v<3)
+ emit_ldrd(v*8,0,4); // VXYZ(v) as one 64bit load
+ else {
+ // v==3: build vector from IR1..3: r4 = IR1|(IR2<<16), r5 = IR3
+ emit_movzwl_indexed(9*4,0,4); // gteIR
+ emit_movzwl_indexed(10*4,0,6);
+ emit_movzwl_indexed(11*4,0,5);
+ emit_orrshl_imm(6,16,4);
+ }
+ if(mx<3)
+ emit_addimm(0,32*4+mx*8*4,6);
+ else
+ emit_readword((int)&zeromem_ptr,6); // mx==3 ("reserved"): zero matrix
+ if(cv<3)
+ emit_addimm(0,32*4+(cv*8+5)*4,7);
+ else
+ emit_readword((int)&zeromem_ptr,7); // cv==3: zero vector
+#ifdef __ARM_NEON__
+ emit_movimm(source[i],1); // opcode
+ emit_call((int)gteMVMVA_part_neon);
+ if(need_flags) {
+ emit_movimm(lm,1);
+ emit_call((int)gteMACtoIR_flags_neon);
+ }
+#else
+ if(cv==3&&shift)
+ emit_call((int)gteMVMVA_part_cv3sh12_arm); // overflow-free fast path
+ else {
+ emit_movimm(shift,1);
+ emit_call((int)(need_flags?gteMVMVA_part_arm:gteMVMVA_part_nf_arm));
+ }
+ if(need_flags||need_ir) {
+ if(need_flags)
+ emit_call((int)(lm?gteMACtoIR_lm1:gteMACtoIR_lm0));
+ else
+ emit_call((int)(lm?gteMACtoIR_lm1_nf:gteMACtoIR_lm0_nf)); // lm0 borked
+ }
+#endif
+ break;
+ }
- if(i>=slen-1||itype[i+1]!=C2OP)
- restore_regs(reglist);
+ default:
+ reglist&=0x100f;
+ c2op_prologue(c2op,reglist);
+ emit_movimm(source[i],1); // opcode
+ emit_writeword(1,(int)&psxRegs.code);
+ emit_call((int)(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]));
+ break;
+ }
+ c2op_epilogue(c2op,reglist);
+ }
}
void cop1_unusable(int i,struct regstat *i_regs)
23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 39,
};
-enum gte_opcodes {
- GTE_RTPS = 0x01,
- GTE_NCLIP = 0x06,
- GTE_OP = 0x0c,
- GTE_DPCS = 0x10,
- GTE_INTPL = 0x11,
- GTE_MVMVA = 0x12,
- GTE_NCDS = 0x13,
- GTE_CDP = 0x14,
- GTE_NCDT = 0x16,
- GTE_NCCS = 0x1b,
- GTE_CC = 0x1c,
- GTE_NCS = 0x1e,
- GTE_NCT = 0x20,
- GTE_SQR = 0x28,
- GTE_DCPL = 0x29,
- GTE_DPCT = 0x2a,
- GTE_AVSZ3 = 0x2d,
- GTE_AVSZ4 = 0x2e,
- GTE_RTPT = 0x30,
- GTE_GPF = 0x3d,
- GTE_GPL = 0x3e,
- GTE_NCCT = 0x3f,
-};
-
#define GCBIT(x) \
(1ll << (32+x))
#define GDBIT(x) \
gte_handlers_nf[0x30] = gteRTPT_nf_arm;
#endif
#ifdef __ARM_NEON__
- // compiler's _nf version is still a lot slower then neon
+ // compiler's _nf version is still a lot slower than neon
// _nf_arm RTPS is roughly the same, RTPT slower
gte_handlers[0x01] = gte_handlers_nf[0x01] = gteRTPS_neon;
gte_handlers[0x30] = gte_handlers_nf[0x30] = gteRTPT_neon;
- gte_handlers[0x12] = gte_handlers_nf[0x12] = gteMVMVA_neon;
#endif
#endif
#ifdef DRC_DBG
memcpy(gte_handlers_nf, gte_handlers, sizeof(gte_handlers_nf));
#endif
psxH_ptr = psxH;
+ zeromem_ptr = zero_mem;
return 0;
}
#endif
#if defined(__x86_64__) || defined(__i386__)
-unsigned int address, readmem_word, word;
-unsigned short hword;
-unsigned char byte;
+unsigned int address;
int pending_exception, stop;
unsigned int next_interupt;
int new_dynarec_did_compile;
int cycle_multiplier;
void *psxH_ptr;
+void *zeromem_ptr;
+u8 zero_mem[0x1000];
void new_dynarec_init() {}
void new_dyna_start() {}
void new_dynarec_cleanup() {}
#define Count psxRegs.cycle // psxRegs.CP0.n.Count
/* COP2/GTE */
+/* GTE opcode = low 6 bits (function field) of the COP2 instruction word */
+enum gte_opcodes {
+ GTE_RTPS = 0x01,
+ GTE_NCLIP = 0x06,
+ GTE_OP = 0x0c,
+ GTE_DPCS = 0x10,
+ GTE_INTPL = 0x11,
+ GTE_MVMVA = 0x12,
+ GTE_NCDS = 0x13,
+ GTE_CDP = 0x14,
+ GTE_NCDT = 0x16,
+ GTE_NCCS = 0x1b,
+ GTE_CC = 0x1c,
+ GTE_NCS = 0x1e,
+ GTE_NCT = 0x20,
+ GTE_SQR = 0x28,
+ GTE_DCPL = 0x29,
+ GTE_DPCT = 0x2a,
+ GTE_AVSZ3 = 0x2d,
+ GTE_AVSZ4 = 0x2e,
+ GTE_RTPT = 0x30,
+ GTE_GPF = 0x3d,
+ GTE_GPL = 0x3e,
+ GTE_NCCT = 0x3f,
+};
+
extern int reg_cop2d[], reg_cop2c[];
extern void *gte_handlers[64];
extern void *gte_handlers_nf[64];
extern unsigned int address;
extern void *psxH_ptr;
+extern void *zeromem_ptr;
// same as invalid_code, just a region for ram write checks (inclusive)
extern u32 inv_code_start, inv_code_end;
.global mem_rtab
.global mem_wtab
.global psxH_ptr
+ .global zeromem_ptr
.global inv_code_start
.global inv_code_end
.global rcnts
psxH_ptr = mem_wtab + 4
.type psxH_ptr, %object
.size psxH_ptr, 4
-inv_code_start = psxH_ptr + 4
+zeromem_ptr = psxH_ptr + 4
+ .type zeromem_ptr, %object
+ .size zeromem_ptr, 4
+inv_code_start = zeromem_ptr + 4
.type inv_code_start, %object
.size inv_code_start, 4
inv_code_end = inv_code_start + 4
.size branch_target, 4
align0 = branch_target + 4 /* unused/alignment */
.type align0, %object
- .size align0, 4
-mini_ht = align0 + 4
+ .size align0, 16
+mini_ht = align0 + 16
.type mini_ht, %object
.size mini_ht, 256
restore_candidate = mini_ht + 256
cc=0;
}
#ifdef PCSX
+ else if(itype[i]==C2OP&>e_cycletab[source[i]&0x3f]>2)
+ {
+ // GTE runs in parallel until accessed, divide by 2 for a rough guess
+ cc+=gte_cycletab[source[i]&0x3f]/2;
+ }
else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
{
cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
#define IOMEM16(a) (0x1000/4 + (((a) & 0xfff) / 2))
#define IOMEM8(a) (0x1000/4 + 0x1000/2 + ((a) & 0xfff))
-static u8 unmapped_mem[0x1000];
+u8 zero_mem[0x1000];
u32 read_mem_dummy()
{
// default/unmapped memhandlers
for (i = 0; i < 0x100000; i++) {
//map_item(&mem_readtab[i], mem_unmrtab, 1);
- map_l1_mem(mem_readtab, i, 0, 0x1000, unmapped_mem);
+ map_l1_mem(mem_readtab, i, 0, 0x1000, zero_mem);
map_item(&mem_writetab[i], mem_unmwtab, 1);
}
+extern u8 zero_mem[0x1000];
+
void new_dyna_pcsx_mem_init(void);
void new_dyna_pcsx_mem_reset(void);
void new_dyna_pcsx_mem_load_state(void);