 * (C) Gražvydas "notaz" Ignotas, 2011
 * This work is licensed under the terms of GNU GPL version 2 or later.
 * See the COPYING file in the top-level directory.

#include "arm_features.h"
@ prepare work reg for ssatx
@ in: wr reg, bit to saturate to
.macro ssatx_prep wr bit
    mov     \wr, #(1<<(\bit-1))

.macro ssatx rd wr bit
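@ Intended behavior as a hedged C sketch (on ARMv7 this would be a
@ single ssat; lim comes from ssatx_prep's \wr):
@   int32_t lim = 1 << (bit - 1);
@   if (v >= lim)  v = lim - 1;     /* clamp to  2^(bit-1)-1 */
@   if (v < -lim)  v = -lim;        /* clamp to -2^(bit-1)   */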
@ prepare work reg for ssatx0 (sat to 0..2^(bit-1))
@ in: wr reg, bit to saturate to
.macro ssatx0_prep wr bit
    mov     \wr, #(1<<(\bit-1))

.macro ssatx0 rd wr bit
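@ Sketch: same as ssatx but with the lower clamp at 0, roughly
@   v < 0 ? 0 : (v >= lim ? lim - 1 : v)   with lim = 1<<(\bit-1)
@ (exact upper-edge handling assumed from \wr above).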
    lsl     \rs, \rs, \rd            @ shift up divisor
.macro newton_step rcp den zero t1 t2
    umull   \t2, \t1, \den, \rcp     @ \t2 is dummy
    sub     \t1, \zero, \t1, lsl #2
    smlal   \t2, \rcp, \t1, \rcp
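@ One Newton-Raphson step for the reciprocal: with d = \den read as a
@ 0.32 fixed-point value in [0.5,1) and x = \rcp as 2.30 (scaling
@ assumed), \t1 ends up holding the error e = 1 - d*x, so the smlal
@ computes x' = x + x*e = x*(2 - d*x), converging quadratically to 1/d.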
.macro udiv_newton rd rm rs t1 t2 t3 t4
    lsl     \rs, \t1                 @ normalize for the algo
    mov     \rm, #0x4d000000         @ initial estimate ~1.2

    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4

    umull   \t4, \rd, \rm, \rd
    rsb     \t2, \t1, #30            @ here t1 is 1..15
    mov     \rd, \rd, lsr \t2
@ unsigned divide rd = rm / rs; 16.16 result
.macro udiv rd rm rs t1 t2 t3 t4
    udiv_newton \rd, \rm, \rs, \t1, \t2, \t3, \t4
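@ Rough C sketch of the scheme (register roles and the exact shift
@ bookkeeping are assumptions; only the structure is meant to match):
@   d = divisor << norm;                 /* MSB-aligned, read as 0.32 */
@   x = 0x4d000000;                      /* ~1.2 in 2.30 fixed point  */
@   for (i = 0; i < 4; i++)
@       x = x * (2 - d*x);               /* the newton_step calls     */
@   q = hi32(x * dividend) >> (30 - norm);   /* 16.16 result          */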
@ calculate RTPS/RTPT MAC values
@ in: r0 context, r8,r9 VXYZ
@ out: r10-r12 MAC123
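@ Per the GTE transform this computes, roughly,
@   MACn = gteTRn + (gteRn1*VX + gteRn2*VY + gteRn3*VZ) >> 12.
@ For MAC1/MAC2 the partial products are halved and the final shift is
@ 11 instead of 12, losing one low bit but staying within 32 bits;
@ MAC3 is accumulated exactly in 64 bits (see below) since it feeds
@ the perspective divide.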
    add     r2, r0, #4*(32+5)        @ gteTRX
    ldmia   r1!, {r5-r7}             @ gteR1*,gteR2*
    smulbb  r2, r5, r8               @ gteR11 * gteVX0
    smultt  r3, r5, r8               @ gteR12 * gteVY0
    smulbb  r4, r6, r9               @ gteR13 * gteVZ0
    asr     r4, r4, #1               @ prevent oflow, lose a bit
    add     r3, r4, r2, asr #1
    add     r10, r10, r3, asr #11    @ gteMAC1
    smultb  r2, r6, r8               @ gteR21 * gteVX0
    smulbt  r3, r7, r8               @ gteR22 * gteVY0
    smultb  r4, r7, r9               @ gteR23 * gteVZ0
    ldmia   r1!, {r5-r6}             @ gteR3*
    add     r3, r4, r2, asr #1
    add     r11, r11, r3, asr #11    @ gteMAC2
    @ be more accurate for gteMAC3, since it is also used as the divisor
    smulbb  r2, r5, r8               @ gteR31 * gteVX0
    smultt  r3, r5, r8               @ gteR32 * gteVY0
    smulbb  r4, r6, r9               @ gteR33 * gteVZ0
    asr     r3, r4, #31              @ expand to 64bit
    adc     r3, r2, asr #31          @ 64bit sum in r3,r1
    add     r12, r12, r3, lsl #20
    add     r12, r12, r1, lsr #12    @ gteMAC3
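@ The two adds above apply the >> 12 to the 64-bit sum r3:r1 in pieces:
@ r3 << 20 contributes the high part and r1 >> 12 the low part, so
@ gteMAC3 gets the exactly-shifted sum with no bits lost.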
.global gteRTPS_nf_arm               @ r0=CP2 (d,c),
    ldmia   r0, {r8,r9}              @ VXYZ(0)
    add     r1, r0, #4*25            @ gteMAC1
    add     r2, r0, #4*17            @ gteSZ1
    stmia   r1, {r10-r12}            @ gteMAC123 save
    add     r1, r0, #4*16            @ gteSZ0
    add     r2, r0, #4*9             @ gteIR1
    usat16_ lr, r12                  @ limD
    stmia   r1, {r3-r5,lr}           @ gteSZ*
    ldr     r3, [r0, #4*(32+26)]     @ gteH
    stmia   r2, {r10,r11,r12}        @ gteIR123 save
    cmp     r3, lr, lsl #1           @ gteH < gteSZ3*2 ?
    udiv    r9, r3, lr, r1, r2, r6, r7
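@ r9 = q = gteH * 0x10000 / gteSZ3 as a 16.16 value; when the cmp above
@ found gteH >= gteSZ3*2, the hs path is assumed to saturate q to
@ 0x1ffff instead, as the GTE division rules require.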
    ldrd    r6, [r0, #4*(32+24)]     @ gteOFXY
    add     r1, r0, #4*12            @ gteSXY0
    /* quotient */ subhs r9, #1
    smlal   r6, r2, r10, r9
    stmia   r1!, {r3,r4}             @ shift gteSXY
    smlal   r7, r3, r11, r9
    /* gteDQA, gteDQB */ ldrd r10, [r0, #4*(32+27)]
    orr     r6, r2, lsl #16          @ (gteOFX + gteIR1 * q) >> 16
    /* gteDQB + gteDQA * q */ mla r4, r10, r9, r11
    orr     r7, r3, lsl #16          @ (gteOFY + gteIR2 * q) >> 16
    ssatx   r6, r2, 11               @ gteSX2
    ssatx   r7, r2, 11               @ gteSY2
    str     r4, [r0, #4*24]          @ gteMAC0
    cmp     r4, #0x1000              @ limH
    str     r4, [r0, #4*8]           @ gteIR0
.size gteRTPS_nf_arm, .-gteRTPS_nf_arm
.global gteRTPT_nf_arm               @ r0=CP2 (d,c),
    ldr     r1, [r0, #4*19]          @ gteSZ3
    str     r1, [r0, #4*16]          @ gteSZ0
    add     r1, r0, lr, lsl #1
    ldrd    r8, [r1]                 @ VXYZ(v)
    usat16_ r2, r12                  @ limD
    add     r1, r0, #4*25            @ gteMAC1
    ldr     r3, [r0, #4*(32+26)]     @ gteH
    stmia   r1, {r10-r12}            @ gteMAC123 save
    str     r2, [r1, lr]             @ fSZ(v)
    cmp     r3, r2, lsl #1           @ gteH < gteSZ3*2 ?
    udiv    r9, r3, r2, r1, r4, r6, r7
    ldrd    r6, [r0, #4*(32+24)]     @ gteOFXY
    /* quotient */ subhs r9, #1
    smlal   r6, r2, r10, r9
    smlal   r7, r3, r11, r9
    orr     r6, r2, lsl #16          @ (gteOFX + gteIR1 * q) >> 16
    orr     r7, r3, lsl #16          @ (gteOFY + gteIR2 * q) >> 16
    ssatx   r6, r2, 11               @ gteSX(v)
    ssatx   r7, r2, 11               @ gteSY(v)
    ldrd    r4, [r0, #4*(32+27)]     @ gteDQA, gteDQB
    add     r1, r0, #4*9             @ gteIR1
    mla     r3, r4, r9, r5           @ gteDQB + gteDQA * q
    stmia   r1, {r10,r11,r12}        @ gteIR123 save
    str     r3, [r0, #4*24]          @ gteMAC0
    cmp     r3, #0x1000              @ limH
    str     r3, [r0, #4*8]           @ gteIR0
.size gteRTPT_nf_arm, .-gteRTPT_nf_arm
@ note: a non-standard calling convention is used here
@ r0 = CP2 (d,c)  (must preserve)
@ r4,r5 = VXYZ(v) packed
.macro mvma_op do_flags
    ands    r3, r1, #1               @ gteFLAG, shift_need
    ldmia   r7, {r7-r9}              @ CV123
    ldmia   r6!, {r10-r12}           @ MX1*,MX2*
    lsl     r7, #12                  @ expand to 64bit
    smlalbb r7, r1, r10, r4          @ MX11 * vx
    smlaltt r7, r1, r10, r4          @ MX12 * vy
    smlalbb r7, r1, r11, r5          @ MX13 * vz
    orrne   r7, r1, lsl #20          @ gteMAC1
    adds    r2, r7, #0x80000000
    orrmi   r3, #(1<<31)|(1<<27)
    tst     r3, #1                   @ repeat shift test
    lsl     r8, #12                  @ expand to 64bit
    smlaltb r8, r1, r11, r4          @ MX21 * vx
    smlalbt r8, r1, r12, r4          @ MX22 * vy
    smlaltb r8, r1, r12, r5          @ MX23 * vz
    orrne   r8, r1, lsl #20          @ gteMAC2
    adds    r2, r8, #0x80000000
    orrmi   r3, #(1<<31)|(1<<26)
    tst     r3, #1                   @ repeat shift test
    ldmia   r6!, {r10-r11}           @ MX3*
    lsl     r9, #12                  @ expand to 64bit
    smlalbb r9, r1, r10, r4          @ MX31 * vx
    smlaltt r9, r1, r10, r4          @ MX32 * vy
    smlalbb r9, r1, r11, r5          @ MX33 * vz
    orrne   r9, r1, lsl #20          @ gteMAC3
    adds    r2, r9, #0x80000000
    orrmi   r3, #(1<<31)|(1<<25)
    str     r3, [r0, #4*(32+31)]     @ gteFLAG
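@ FLAG bit layout used above (per the usual GTE documentation): bit 31
@ is the error summary, and bits 27/26/25 flag negative 44-bit overflow
@ of MAC1/MAC2/MAC3; the positive-overflow bits (30/29/28) are assumed
@ to be set on the carry paths not shown here.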
.global gteMVMVA_part_arm
.size gteMVMVA_part_arm, .-gteMVMVA_part_arm

.global gteMVMVA_part_nf_arm
gteMVMVA_part_nf_arm:
.size gteMVMVA_part_nf_arm, .-gteMVMVA_part_nf_arm
@ common version of MVMVA, for cv3 (== 0) and shift12;
@ it can't overflow, so no gteMAC flags are needed
@ note: a non-standard calling convention is used here
@ r0 = CP2 (d,c)  (must preserve)
@ r4,r5 = VXYZ(v) packed
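@ In rough C terms (a sketch, scaling as in the rtpx code above):
@   MACn = (MXn1*vx + MXn2*vy + MXn3*vz) >> 12
@ computed below with halved partial products and a final >> 11,
@ trading one low bit for freedom from 32-bit overflow.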
.global gteMVMVA_part_cv3sh12_arm
gteMVMVA_part_cv3sh12_arm:
    ldmia   r6!, {r7-r9}             @ MX1*,MX2*
    smulbb  r1, r7, r4               @ MX11 * vx
    smultt  r2, r7, r4               @ MX12 * vy
    smulbb  r3, r8, r5               @ MX13 * vz
    asr     r3, #1                   @ prevent oflow, lose a bit
    add     r1, r3, r1, asr #1
    smultb  r1, r8, r4               @ MX21 * vx
    smulbt  r2, r9, r4               @ MX22 * vy
    smultb  r3, r9, r5               @ MX23 * vz
    add     r1, r3, r1, asr #1
    ldmia   r6, {r6,r9}              @ MX3*
    smulbb  r1, r6, r4               @ MX31 * vx
    smultt  r2, r6, r4               @ MX32 * vy
    smulbb  r3, r9, r5               @ MX33 * vz
    add     r1, r3, r1, asr #1
    str     r2, [r0, #4*(32+31)]     @ gteFLAG
.size gteMVMVA_part_cv3sh12_arm, .-gteMVMVA_part_cv3sh12_arm
#endif /* HAVE_ARMV5 */
.global gteNCLIP_arm                 @ r0=CP2 (d,c),
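@ Computes (per GTE docs) the winding/clip test value
@   MAC0 = SX0*(SY1-SY2) + SX1*(SY2-SY0) + SX2*(SY0-SY1);
@ below, r4-r6 hold SY0-SY2 and lr,r2,r3 hold SX0-SX2.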
    ldrsh   r4, [r0, #4*12+2]
    ldrsh   r5, [r0, #4*13+2]
    ldrsh   r6, [r0, #4*14+2]
    ldrsh   lr, [r0, #4*12]
    ldrsh   r2, [r0, #4*13]
    sub     r12, r4, r5              @ 3: gteSY0 - gteSY1
    sub     r5, r5, r6               @ 1: gteSY1 - gteSY2
    smull   r1, r5, lr, r5           @ RdLo, RdHi
    sub     r6, r4                   @ 2: gteSY2 - gteSY0
    ldrsh   r3, [r0, #4*14]
    smlal   r1, r5, r3, r12
    movtgt  lr, #((1<<31)|(1<<16))>>16
    str     lr, [r0, #4*(32+31)]     @ gteFLAG
.size gteNCLIP_arm, .-gteNCLIP_arm
    ldr     r2, [r0, #4*25]          @ gteMAC1
    ldr     r12, [r0, #4*(32+31)]    @ gteFLAG
    orrge   r12, #(1<<31)|(1<<24)
    ldrd    r2, [r0, #4*26]          @ gteMAC23
    orrlt   r12, #(1<<31)|(1<<23)
    strd    r2, [r0, #4*10]          @ gteIR23
    str     r12, [r0, #4*(32+31)]    @ gteFLAG
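@ The lm0/lm1 entry points below select the IR clamp range: lm=0 clamps
@ MACn to -0x8000..0x7fff, lm=1 to 0..0x7fff, with FLAG bits 24..22
@ marking IR1..IR3 saturation (bits 24 and 23 appear above; bit 22 for
@ IR3 is assumed from the GTE docs).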
.global gteMACtoIR_lm0               @ r0=CP2 (d,c)
.size gteMACtoIR_lm0, .-gteMACtoIR_lm0

.global gteMACtoIR_lm1               @ r0=CP2 (d,c)
.size gteMACtoIR_lm1, .-gteMACtoIR_lm1

.global gteMACtoIR_lm0_nf            @ r0=CP2 (d,c)
.size gteMACtoIR_lm0_nf, .-gteMACtoIR_lm0_nf

.global gteMACtoIR_lm1_nf            @ r0=CP2 (d,c)
.size gteMACtoIR_lm1_nf, .-gteMACtoIR_lm1_nf
.global gteMVMVA_test
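@ MVMVA opcode fields decoded below (per the usual GTE docs):
@ bits 17-18 select the matrix (mx), bits 15-16 the source vector (v),
@ and bits 13-14 the translation vector (cv); v == 3 selects the
@ IR123 vector instead of V0-V2.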
    and     r2, r1, #0x18000         @ v
    cmp     r2, #0x18000             @ v == 3?
    addne   r3, r0, r2, lsr #12
    orreq   r4, r3, r4, lsl #16      @ r4,r5 = VXYZ(v)
    and     r3, r1, #0x60000         @ mx
    add     r6, r12, r3, lsl #5
    and     r2, r1, #0x06000         @ cv
    add     r7, r12, r2, lsl #5
    bne     gteMVMVA_part_cv3sh12_arm
    bl      gteMVMVA_part_neon
    bl      gteMACtoIR_flags_neon
@ vim:filetype=armasm