+ vqmovn.s32 d10, q4 @ gteIR|123; losing 2 cycles?
+.endm
+
+@-----------------------------------------------------------------------
+@ gteRTPS_neon - GTE RTPS (rotate/translate/perspective-transform a
+@ single vertex) implemented with NEON, flag bits computed on the ARM
+@ side in parallel with the NEON divide.
+@ In:    r0 = CP2 register file base; data regs at r0+4*n, control regs
+@        at r0+4*(32+n) (see the 4*(32+...) offsets below).
+@ Out:   writes gteMAC0..3, gteIR0, gteSXY/fS|XY, gteSZ fifo and
+@        gteFLAG (offset 4*(32+31)) back into the register file.
+@ Uses:  'scratch' (r1) as a NEON->ARM handoff buffer; lr accumulates
+@        the gteFLAG value; r12 = 0 lane-clear constant.
+@ NOTE(review): depends on rtpx_preload / do_mac_flags / do_irs_flags
+@ macros defined elsewhere in this file; presumably rtpx_preload leaves
+@ the rotation rows in d0-d2, TR accumulators in q2/q3, gteH in d12 and
+@ zero in d31 - confirm against the macro definitions.
+@ NOTE(review): d8-d15 are callee-saved under AAPCS VFP but are
+@ clobbered here without saving - callers must not keep live VFP state.
+@-----------------------------------------------------------------------
+.global gteRTPS_neon @ r0=CP2 (d,c),
+gteRTPS_neon:
+ push {r4-r6,lr}
+
+@ fmrx r4, fpscr @ vmrs? at least 40 cycle hit
+ movw r1, #:lower16:scratch
+ movt r1, #:upper16:scratch
+ mov r12, #0 @ zero, used to clear the unused 4th s16 lane
+
+ vldmia r0, {d8} @ VXYZ(0)
+ rtpx_preload
+
+@ rtpx_mac @ slower here, faster in RTPT?
+ vmov.16 d8[3], r12 @ kill unused upper vector
+ vmull.s16 q8, d0, d8 @ row1 * VXYZ (4 partial products)
+ vmull.s16 q9, d1, d8 @ row2 * VXYZ
+ vmull.s16 q10, d2, d8 @ row3 * VXYZ
+ vpadd.s32 d16, d16, d17 @ pairwise-sum partials -> dot products
+ vpadd.s32 d17, d18, d19
+ vpadd.s32 d18, d20, d21
+ vpadal.s32 q2, q8 @ accumulate into 64-bit TR sums
+ vpadal.s32 q3, q9 @ d6, d18 is slow?
+ vqshrn.s64 d8, q2, #12 @ gteMAC|12
+ vqshrn.s64 d9, q3, #12 @ gteMAC3
+
+ add r3, r0, #4*25 @ -> gteMAC1
+ vst1.32 d8, [r3]!
+ vst1.32 d9[0], [r3] @ wb gteMAC|123
+ vqmovn.s32 d10, q4 @ gteIR|123
+
+ add r3, r0, #4*17 @ gteSZ*
+ vldmia r3, {q7} @ d14,d15 gteSZ|123x
+ vmov.i32 d28, #0xffff @ 0xffff[32]
+ vmax.s32 d11, d9, d31 @ fSZ3 = max(gteMAC3, 0) (d31 zero from preload?)
+ vshr.s32 d16, d12, #1 @ | gteH/2 (adjust for cmp)
+ vmov.i32 d26, #1
+ vmin.u32 d11, d28 @ saturate to 0..0xffff limD/fSZ3
+ vmovl.s16 q9, d10 @ || expand gteIR|123
+ vshl.u32 d13, d12, #16 @ | preparing gteH
+ add r3, r0, #4*9 @ -> gteIR1
+ vst1.32 d18, [r3]! @ wb gteIR|12
+ vst1.32 d19[0], [r3] @ wb gteIR3
+
+ vsli.u64 d15, d11, #32 @ new gteSZ|0123 in q7
+ vclt.u32 d16, d16, d11 @ gteH/2 < fSZ3?
+
+ add r3, r0, #4*(32+24)
+ vld1.32 d4, [r3] @ || gteOF|XY
+ add r3, r0, #4*(32+27)
+ vld1.32 d6, [r3] @ || gteDQ|AB
+
+ vand d11, d16 @ keep fSZ3 only when gteH/2 < fSZ3
+ vmovl.s32 q2, d4 @ || gteOF|XY [64]
+ vmax.u32 d11, d26 @ make divisor 1 if not
+ vmovl.s32 q3, d6 @ || gteDQ|AB [64]
+ add r3, r0, #4*16 @ | gteSZ*
+ vstmia r3, {q7} @ | d14,d15 gteSZ|123x
+
+ vcvt.f32.u32 d13, d13 @ gteH (float for div)
+ vcvt.f32.u32 d11, d11 @ divisor
+
+ @ divide.. it's not worth messing with reciprocals here
+ @ just for 1 value, let's just use VFP divider here
+ vdiv.f32 s22, s26, s22 @ s22 = (gteH<<16) / fSZ3
+
+ vcvt.u32.f32 d11, d11 @ quotient
+
+ @ while NEON's busy we calculate some flags on ARM
+ add r3, r0, #4*25 @ -> gteMAC1
+ mov lr, #0 @ gteFLAG
+ ldmia r3, {r4-r6} @ gteMAC|123
+
+ vst1.32 d11, [r1, :64] @ wb quotient for flags (pre-limE)
+ vqshl.u32 d11, #15 @ saturating shift pair clamps quotient
+
+ do_mac_flags r4, r5, r6
+
+ vshr.u32 d11, #15 @ quotient (limE)
+
+ do_irs_flags r4, r5, r6
+
+ vmlal.s32 q2, d18, d11[0]@ gteOF|XY + gteIR|12 * quotient
+ add r3, r0, #4*13
+ vld1.32 d16, [r3] @ || load fS|XY12, new 01
+ vqmovn.s64 d18, q2 @ saturate to 32
+ vmull.s32 q10, d6, d11[0]@ | d20 = gteDQA * quotient
+ vqshl.s32 d19, d18, #5 @ 11bit precision
+
+ ldr r4, [r1] @ quotient
+ movs r3, r6, lsr #16 @ gteMAC3 outside 0..0xffff?
+ orrne lr, #(1<<31)
+ orrne lr, #(1<<18) @ fSZ (limD)
+
+ vst1.32 d18, [r1, :64] @ writeback fS|XY2 before limG
+
+ vshr.s32 d18, d19, #16+5@ can't vqshrn because of insn
+ vadd.s64 d20, d7 @ | gteDQB + gteDQA * quotient
+ vmovn.s32 d18, q9 @ fS|XY2 [s16]
+
+ vqmovn.s64 d20, q10 @ | gteMAC0
+ add r3, r0, #4*12
+ vst1.32 d16, [r3]! @ writeback fS|XY01
+ vst1.32 d18[0], [r3] @ ...2
+ add r3, r0, #4*24
+ vshr.s32 d21, d20, #12 @ gteIR0 = gteMAC0 >> 12, pre-clamp
+ vst1.32 d20[0], [r3] @ gteMAC0
+
+ movs r4, r4, lsr #17 @ raw quotient >= 0x20000 -> divide overflow
+ orrne lr, #(1<<31)
+ orrne lr, #(1<<17) @ limE
+
+ vmax.s32 d21, d31 @ clamp gteIR0 to 0..0x1000
+ vmov.i32 d22, #0x1000
+ vmin.s32 d21, d22
+ add r3, r0, #4*8
+ vst1.16 d21[0], [r3] @ gteIR0
+
+ ldmia r1, {r4,r5} @ fS|XY2 before limG, after 11bit sat
+ @ limG: biased value fits in 11 bits above the 16 fractional bits
+ @ only when the screen coord lies within -0x400..0x3ff
+ add r2, r4, #0x400<<16
+ add r3, r5, #0x400<<16
+ lsrs r2, #16+11
+ orrne lr, #(1<<14) @ limG1
+ orrne lr, #(1<<31)
+ lsrs r3, #16+11
+ orrne lr, #(1<<13) @ limG2
+ orrne lr, #(1<<31)
+ @ +1/-1 overflow iff the value sits exactly at an s32 saturation
+ @ bound, i.e. the earlier vqmovn.s64 clipped it
+ adds r2, r4, #1 @ fSX2 == 0x7fffffff?
+ addvcs r3, r5, #1 @ fSY2 too (only checked if X didn't overflow)
+ orrvs lr, #(1<<16) @ F
+ orrvs lr, #(1<<31)
+ subs r2, r4, #1 @ fSX2 == 0x80000000?
+ subvcs r3, r5, #1
+ orrvs lr, #(1<<31)
+
+ ldr r4, [r0, #4*24] @ gteMAC0 (ldr preserves flags)
+ orrvs lr, #(1<<15) @ V still from the subs pair above
+
+ adds r3, r4, #1 @ gteMAC0 at positive saturation bound?
+ orrvs lr, #(1<<16) @ F
+ orrvs lr, #(1<<31)
+ subs r2, r4, #1 @ ...at negative saturation bound?
+ orrvs lr, #(1<<15) @ F
+ orrvs lr, #(1<<31)
+ cmp r4, #0x1000
+ orrhi lr, #(1<<12) @ limH
+
+ str lr, [r0, #4*(32+31)] @ gteFLAG
+
+ pop {r4-r6,pc}
+ .size gteRTPS_neon, .-gteRTPS_neon
+
+
+
+.global gteRTPT_neon @ r0=CP2 (d,c),
+gteRTPT_neon:
+ push {r4-r11,lr}
+
+ movw r1, #:lower16:scratch
+ movt r1, #:upper16:scratch
+ mov r12, #0
+
+ rtpx_preload
+
+ vmov.i32 d22, #0x7fffffff
+ vmov.i32 d23, #0x80000000
+ mov r3, #3 @ counter
+ mov r2, r0 @ VXYZ(0)
+0:
+ vldmia r2!, {d8} @ VXYZ(v)
+ vmov.16 d8[3], r12 @ kill unused upper vector
+
+ rtpx_mac