+.global gteMVMVA_neon @ r0=CP2 regs (data,ctl), r1=op
+gteMVMVA_neon:
+ push {r4-r5,lr}
+
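+ @ MVMVA: MAC1..3 = (CV<<12 + MX*V) >> (sf*12), IR1..3 = lim(MAC1..3)
+ @ mx/v/cv/sf/lm are decoded from the opcode in r1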
+ add r12, r0, #4*32 @ r12 = CP2 control regs
+
+ ubfx r2, r1, #15, #2 @ v
+
+ vmov.i32 q0, #0 @ d0,d1
+ vmov.i32 q1, #0 @ d2,d3
+ vmov.i32 q2, #0 @ d4,d5
+ cmp r2, #3
+ addeq r4, r0, #4*9 @ v==3: source is gteIR1..3
+ addne r3, r0, r2, lsl #3 @ else gteVXYn/VZn
+ ldmeqia r4, {r3-r5}
+ ldmneia r3, {r4,r5}
+ pkhbteq r4, r3, r4, lsl #16 @ pack IR1 | IR2<<16
+ uxth r5, r5
+ vmov.32 d8[0], r4
+ vmov.32 d8[1], r5 @ VXYZ(v)
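+ @ mx/cv select the multiply matrix and translation vector; value 3
+ @ keeps the zeroed q registers prepared above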
+ ubfx r3, r1, #17, #2 @ mx
+ ubfx r2, r1, #13, #2 @ cv
+ cmp r3, #3
+ beq 0f @ mx==3: very rare case, matrix stays zero
+ add r3, r12, r3, lsl #5
+ vldmia r3, {d0-d2} @ MXxy/gteR* [16*9]
+0:
+ cmp r2, #3
+ add r3, r12, r2, lsl #5
+ beq 0f
+ add r3, #4*5
+ vldmia r3, {d4-d5} @ CVx/gteTR*
+
+0:
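+ @ d0-d2: 9 packed matrix halfwords, d4-d5: CV words, d8: V as [x y z 0]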
+ vmov.i32 q15, #0
+ vext.16 d2, d1, d2, #2 @ xxx3 -> x321 (d2 = row 3: M31 M32 M33)
+ vext.16 d1, d0, d1, #3 @ xx32 -> x321 (d1 = row 2: M21 M22 M23)
+ vshll.s32 q3, d5, #12 @ gteTRZ/CV3
+ vshll.s32 q2, d4, #12 @ gteTR|XY/CV12
+
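+ @ per-row s16*s16 products, pairwise-summed to 32 bits, then
+ @ accumulated onto the 64-bit CV<<12 values in q2/q3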
+ vmull.s16 q8, d0, d8
+ vmull.s16 q9, d1, d8
+ vmull.s16 q10, d2, d8
+ vpadd.s32 d16, d16, d17
+ vpadd.s32 d17, d18, d19
+ vpadd.s32 d18, d20, d21
+ vpadal.s32 q2, q8
+ vpadal.s32 q3, q9
+ tst r1, #1<<19 @ sf
+ beq 0f
+ vshr.s64 q2, q2, #12
+ vshr.s64 q3, q3, #12
+0:
+ vqmovn.s64 d8, q2 @ gteMAC|12
+ vqmovn.s64 d9, q3 @ gteMAC3
+
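+ @ narrow MACs to IR1..3 with saturation and write back gteMAC1..3;
+ @ lm (op bit 10) additionally clamps IR1..3 to >= 0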
+ tst r1, #1<<10 @ lm
+ add r3, r0, #4*25
+ vqmovn.s32 d10, q4 @ gteIR|123
+ vst1.32 d8, [r3]!
+ vst1.32 d9[0], [r3] @ wb gteMAC|123
+
+ beq 0f
+ vmax.s16 d10, d31 @ lm: clamp IR1..3 to >= 0 (d31 = 0)
+0:
+ vmovl.s16 q9, d10 @ expand gteIR|123
+ add r3, r0, #4*9
+ vst1.32 d18, [r3]!
+ vst1.32 d19[0], [r3] @ wb gteIR|123
+
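+ @ rebuild gteFLAG on the ARM side: do_mac_flags sets the MAC1..3
+ @ overflow bits, the code below sets the IR1..3 saturation bits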
+ tst r1, #1<<10 @ lm
+ mov r2, #0
+ mov lr, #0 @ gteFLAG
+ mov r12, #15
+ moveq r2, #0x8000 @ adj
+ moveq r12, #16 @ shift
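+ @ lm=0: IR range is -0x8000..0x7fff, so bias by 0x8000 and test asr #16;
+ @ lm=1: range is 0..0x7fff, test asr #15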
+
+ add r3, r0, #4*25
+ ldmia r3, {r3-r5} @ gteMAC|123
+
+ do_mac_flags r3, r4, r5
+
+ add r3, r2
+ add r4, r2
+ add r5, r2
+ asrs r3, r12
+ orrne lr, #(1<<31)|(1<<24) @ IR1/limB1
+ asrs r4, r12
+ orrne lr, #(1<<31)
+ orrne lr, #(1<<23) @ IR2/limB2
+ asrs r5, r12
+ orrne lr, #(1<<22) @ IR3/limB3
+ str lr, [r0, #4*(32+31)] @ gteFLAG
+
+ pop {r4-r5,pc}
+ .size gteMVMVA_neon, .-gteMVMVA_neon
+
+
+