 * (C) Gražvydas "notaz" Ignotas, 2011
 * This work is licensed under the terms of GNU GPL version 2 or later.
 * See the COPYING file in the top-level directory.
/* .equiv HAVE_ARMV7, 1 */
@ prepare work reg for ssatx
@ in: wr reg, bit to saturate to
.macro ssatx_prep wr bit
    mov     \wr, #(1<<(\bit-1))

.macro ssatx rd wr bit

@ prepare work reg for ssatx0 (sat to 0..2^(bit-1))
@ in: wr reg, bit to saturate to
.macro ssatx0_prep wr bit
    mov     \wr, #(1<<(\bit-1))

.macro ssatx0 rd wr bit
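
@ For reference, a rough C equivalent of the two saturation helpers above
@ (illustrative sketch only, not part of the build; the function names are
@ made up for this comment):
/*
    #include <stdint.h>

    // ssatx: clamp to -2^(bit-1) .. 2^(bit-1)-1, like the ARMv7 ssat insn
    static int32_t clamp_s(int32_t v, int bit)
    {
        int32_t lim = 1 << (bit - 1);   // the value ssatx_prep preloads
        if (v >=  lim) v =  lim - 1;
        if (v <  -lim) v = -lim;
        return v;
    }

    // ssatx0: clamp to 0 .. 2^(bit-1)-1
    static int32_t clamp_s0(int32_t v, int bit)
    {
        int32_t lim = 1 << (bit - 1);
        if (v >= lim) v = lim - 1;
        if (v <  0)   v = 0;
        return v;
    }
*/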
    lsl     \rs, \rs, \rd       @ shift up divisor
.macro newton_step rcp den zero t1 t2
    umull   \t2, \t1, \den, \rcp    @ \t2 is dummy
    sub     \t1, \zero, \t1, lsl #2
    smlal   \t2, \rcp, \t1, \rcp
.macro udiv_newton rd rm rs t1 t2 t3 t4
    lsl     \rs, \t1            @ normalize for the algo
    mov     \rm, #0x4d000000    @ initial estimate ~1.2
    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4
    newton_step \rm, \rs, \t2, \t3, \t4
    umull   \t4, \rd, \rm, \rd
    rsb     \t2, \t1, #30       @ here t1 is 1..15
    mov     \rd, \rd, lsr \t2
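
@ What the macro above does, roughly: \rs is normalized by the clz shift so
@ that d = rs/2^32 lies in [0.5, 1.0); \rm then holds r ~ 1/d in 2.30 fixed
@ point (0x4d000000/2^30 ~ 1.2), refined by four Newton-Raphson steps
@ r' = r * (2 - d*r); the final umull/lsr rescales to the 16.16 quotient.
@ A C sketch of one step (illustrative only, not part of the build):
/*
    #include <stdint.h>

    static uint32_t newton_step_c(uint32_t rcp, uint32_t den)
    {
        // hi = d*r in 0.30 fixed point (high word of the 32x32 product)
        uint32_t hi  = (uint32_t)(((uint64_t)den * rcp) >> 32);
        // err = (1 - d*r) in 0.32 fixed point, via 32-bit wraparound
        int32_t  err = -(int32_t)(hi << 2);
        // r' = r + r*err = r * (2 - d*r); the low product word is dropped
        // here, the asm keeps it in the dummy register instead
        return rcp + (uint32_t)(((int64_t)err * (int32_t)rcp) >> 32);
    }
*/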
@ unsigned divide rd = rm / rs; 16.16 result
.macro udiv rd rm rs t1 t2 t3 t4
    udiv_newton \rd, \rm, \rs, \t1, \t2, \t3, \t4
@ calculate RTPS/RTPT MAC values
@ in: r0 context, r8,r9 VXYZ
@ out: r10-r12 MAC123
    add     r2, r0, #4*(32+5)   @ gteTRX
    ldmia   r1!,{r5-r7}         @ gteR1*,gteR2*
    smulbb  r2, r5, r8          @ gteR11 * gteVX0
    smultt  r3, r5, r8          @ gteR12 * gteVY0
    smulbb  r4, r6, r9          @ gteR13 * gteVZ0
    asr     r4, r4, #1          @ prevent oflow, lose a bit
    add     r3, r4, r2, asr #1
    add     r10,r10,r3, asr #11 @ gteMAC1
    smultb  r2, r6, r8          @ gteR21 * gteVX0
    smulbt  r3, r7, r8          @ gteR22 * gteVY0
    smultb  r4, r7, r9          @ gteR23 * gteVZ0
    ldmia   r1!,{r5-r6}         @ gteR3*
    add     r3, r4, r2, asr #1
    add     r11,r11,r3, asr #11 @ gteMAC2
    @ be more accurate for gteMAC3, since it's also used as a divisor
    smulbb  r2, r5, r8          @ gteR31 * gteVX0
    smultt  r3, r5, r8          @ gteR32 * gteVY0
    smulbb  r4, r6, r9          @ gteR33 * gteVZ0
    asr     r3, r4, #31         @ expand to 64bit
    adc     r3, r2, asr #31     @ 64bit sum in r3,r1
    add     r12,r12,r3, lsl #20
    add     r12,r12,r1, lsr #12 @ gteMAC3
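
@ Net effect of the macro above, as a rough C sketch (illustrative only,
@ not part of the build; tr/r/v stand for the gteTR*, gteR* and VXYZ
@ values the macro loads by offset):
/*
    #include <stdint.h>

    static void rtpx_mac_c(int32_t mac[3], const int32_t tr[3],
                           const int16_t r[3][3], const int16_t v[3])
    {
        for (int i = 0; i < 3; i++) {
            int64_t s = (int64_t)r[i][0]*v[0] + (int64_t)r[i][1]*v[1]
                      + (int64_t)r[i][2]*v[2];
            mac[i] = tr[i] + (int32_t)(s >> 12);
        }
    }
*/
@ Rows 1 and 2 are summed in the asm as (a>>1 + b>>1) >> 11 to stay within
@ 32 bits, losing one LSB; row 3 uses a full 64-bit sum since gteMAC3 later
@ becomes the divisor.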
.global gteRTPS_nf_arm @ r0=CP2 (d,c),
    ldmia   r0, {r8,r9}         @ VXYZ(0)
    add     r1, r0, #4*25       @ gteMAC1
    add     r2, r0, #4*17       @ gteSZ1
    stmia   r1, {r10-r12}       @ gteMAC123 save
    add     r1, r0, #4*16       @ gteSZ0
    add     r2, r0, #4*9        @ gteIR1
    usat16_ lr, r12             @ limD
    stmia   r1, {r3-r5,lr}      @ gteSZ*
    ldr     r3, [r0,#4*(32+26)] @ gteH
    stmia   r2, {r10,r11,r12}   @ gteIR123 save
    cmp     r3, lr, lsl #1      @ gteH < gteSZ3*2 ?
    udiv    r9, r3, lr, r1, r2, r6, r7
    ldrd    r6, [r0,#4*(32+24)] @ gteOFXY
    add     r1, r0, #4*12       @ gteSXY0
/* quotient */ subhs r9, #1
    smlal   r6, r2, r10, r9
    stmia   r1!,{r3,r4}         @ shift gteSXY
    smlal   r7, r3, r11, r9
/* gteDQA, gteDQB */ ldrd r10,[r0, #4*(32+27)]
    orr     r6, r2, lsl #16     @ (gteOFX + gteIR1 * q) >> 16
/* gteDQB + gteDQA * q */ mla r4, r10, r9, r11
    orr     r7, r3, lsl #16     @ (gteOFY + gteIR2 * q) >> 16
    ssatx   r6, r2, 11          @ gteSX2
    ssatx   r7, r2, 11          @ gteSY2
    str     r4, [r0,#4*24]      @ gteMAC0
    cmp     r4, #0x1000         @ limH
    str     r4, [r0,#4*8]       @ gteIR0
.size gteRTPS_nf_arm, .-gteRTPS_nf_arm
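
@ Both RTPS and RTPT end with the same projection step; roughly, in C
@ (illustrative sketch, not part of the build; sat() stands for the
@ ssatx-style clamping above):
/*
    q    = (h >= sz3 * 2) ? 0x1ffff                     // saturated quotient
                          : ((uint32_t)h << 16) / sz3;  // 16.16 udiv result
    sx   = sat((ofx + (int64_t)ir1 * q) >> 16, -0x400, 0x3ff);  // gteSX
    sy   = sat((ofy + (int64_t)ir2 * q) >> 16, -0x400, 0x3ff);  // gteSY
    mac0 = dqb + dqa * q;                               // depth cueing
    ir0  = sat(mac0 >> 12, 0, 0x1000);                  // limH
*/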
.global gteRTPT_nf_arm @ r0=CP2 (d,c),
    ldr     r1, [r0, #4*19]     @ gteSZ3
    str     r1, [r0, #4*16]     @ gteSZ0
    add     r1, r0, lr, lsl #1
    ldrd    r8, [r1]            @ VXYZ(v)
    usat16_ r2, r12             @ limD
    add     r1, r0, #4*25       @ gteMAC1
    ldr     r3, [r0,#4*(32+26)] @ gteH
    stmia   r1, {r10-r12}       @ gteMAC123 save
    str     r2, [r1, lr]        @ fSZ(v)
    cmp     r3, r2, lsl #1      @ gteH < gteSZ3*2 ?
    udiv    r9, r3, r2, r1, r4, r6, r7
    ldrd    r6, [r0,#4*(32+24)] @ gteOFXY
/* quotient */ subhs r9, #1
    smlal   r6, r2, r10, r9
    smlal   r7, r3, r11, r9
    orr     r6, r2, lsl #16     @ (gteOFX + gteIR1 * q) >> 16
    orr     r7, r3, lsl #16     @ (gteOFY + gteIR2 * q) >> 16
    ssatx   r6, r2, 11          @ gteSX(v)
    ssatx   r7, r2, 11          @ gteSY(v)
    ldrd    r4, [r0, #4*(32+27)] @ gteDQA, gteDQB
    add     r1, r0, #4*9        @ gteIR1
    mla     r3, r4, r9, r5      @ gteDQB + gteDQA * q
    stmia   r1, {r10,r11,r12}   @ gteIR123 save
    str     r3, [r0,#4*24]      @ gteMAC0
    cmp     r3, #0x1000         @ limH
    str     r3, [r0,#4*8]       @ gteIR0
.size gteRTPT_nf_arm, .-gteRTPT_nf_arm
@ note: non-standard calling convention is used
@ r0 = CP2 (d,c) (must preserve)
@ r4,r5 = VXYZ(v) packed
.macro mvma_op do_flags
    ands    r3, r1, #1          @ gteFLAG, shift_need
    ldmia   r7, {r7-r9}         @ CV123
    ldmia   r6!,{r10-r12}       @ MX1*,MX2*
    lsl     r7, #12             @ expand to 64bit
    smlalbb r7, r1, r10, r4     @ MX11 * vx
    smlaltt r7, r1, r10, r4     @ MX12 * vy
    smlalbb r7, r1, r11, r5     @ MX13 * vz
    orrne   r7, r1, lsl #20     @ gteMAC1
    adds    r2, r7, #0x80000000
    orrmi   r3, #(1<<31)|(1<<27)
    tst     r3, #1              @ repeat shift test
    lsl     r8, #12             @ expand to 64bit
    smlaltb r8, r1, r11, r4     @ MX21 * vx
    smlalbt r8, r1, r12, r4     @ MX22 * vy
    smlaltb r8, r1, r12, r5     @ MX23 * vz
    orrne   r8, r1, lsl #20     @ gteMAC2
    adds    r2, r8, #0x80000000
    orrmi   r3, #(1<<31)|(1<<26)
    tst     r3, #1              @ repeat shift test
    ldmia   r6!,{r10-r11}       @ MX3*
    lsl     r9, #12             @ expand to 64bit
    smlalbb r9, r1, r10, r4     @ MX31 * vx
    smlaltt r9, r1, r10, r4     @ MX32 * vy
    smlalbb r9, r1, r11, r5     @ MX33 * vz
    orrne   r9, r1, lsl #20     @ gteMAC3
    adds    r2, r9, #0x80000000
    orrmi   r3, #(1<<31)|(1<<25)
    str     r3, [r0, #4*(32+31)] @ gteFLAG
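
@ The flag bits set above follow the GTE layout: bit 31 is the error
@ summary, bits 30-28 flag MAC1-3 positive overflow, bits 27-25 the
@ negative case. A rough C sketch of the documented check against the
@ GTE's 44-bit MAC range (illustrative only, not part of the build):
/*
    #include <stdint.h>

    static uint32_t mac_flag(int64_t mac, int n, uint32_t flag) // n = 1..3
    {
        if (mac >=  (1LL << 43)) flag |= (1u << 31) | (1u << (31 - n));
        if (mac <  -(1LL << 43)) flag |= (1u << 31) | (1u << (28 - n));
        return flag;
    }
*/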
.global gteMVMVA_part_arm
.size gteMVMVA_part_arm, .-gteMVMVA_part_arm

.global gteMVMVA_part_nf_arm
gteMVMVA_part_nf_arm:
.size gteMVMVA_part_nf_arm, .-gteMVMVA_part_nf_arm
@ common version of MVMVA with cv3 (== 0) and shift12;
@ can't overflow, so no gteMAC flags are needed
@ note: non-standard calling convention is used
@ r0 = CP2 (d,c) (must preserve)
@ r4,r5 = VXYZ(v) packed
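@ Net effect, as a rough C sketch (illustrative only, not part of the
@ build): with no translation vector and a fixed >>12 shift,
@     mac[i] = (mx[i][0]*vx + mx[i][1]*vy + mx[i][2]*vz) >> 12;
@ summed as (a>>1 + b>>1) >> 11 so the 32-bit partial sums cannot
@ overflow, and no MAC overflow flags are raised.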
.global gteMVMVA_part_cv3sh12_arm
gteMVMVA_part_cv3sh12_arm:
    ldmia   r6!,{r7-r9}         @ MX1*,MX2*
    smulbb  r1, r7, r4          @ MX11 * vx
    smultt  r2, r7, r4          @ MX12 * vy
    smulbb  r3, r8, r5          @ MX13 * vz
    asr     r3, #1              @ prevent oflow, lose a bit
    add     r1, r3, r1, asr #1
    smultb  r1, r8, r4          @ MX21 * vx
    smulbt  r2, r9, r4          @ MX22 * vy
    smultb  r3, r9, r5          @ MX23 * vz
    add     r1, r3, r1, asr #1
    ldmia   r6, {r6,r9}         @ MX3*
    smulbb  r1, r6, r4          @ MX31 * vx
    smultt  r2, r6, r4          @ MX32 * vy
    smulbb  r3, r9, r5          @ MX33 * vz
    add     r1, r3, r1, asr #1
    str     r2, [r0, #4*(32+31)] @ gteFLAG
.size gteMVMVA_part_cv3sh12_arm, .-gteMVMVA_part_cv3sh12_arm
.global gteNCLIP_arm @ r0=CP2 (d,c),
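@ NCLIP computes the doubled signed area (winding order) of the screen
@ triangle; the numbered comments below mark which multiply each SY
@ difference feeds. A rough C sketch (illustrative, not part of the build):
/*
    int64_t mac0 = (int64_t)sx0 * (sy1 - sy2)    // term 1
                 + (int64_t)sx1 * (sy2 - sy0)    // term 2
                 + (int64_t)sx2 * (sy0 - sy1);   // term 3
    // gteFLAG bit 16 (with the bit 31 summary) marks positive 32-bit
    // overflow of MAC0, bit 15 the negative case
*/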
    ldrsh   r4, [r0, #4*12+2]
    ldrsh   r5, [r0, #4*13+2]
    ldrsh   r6, [r0, #4*14+2]
    ldrsh   lr, [r0, #4*12]
    ldrsh   r2, [r0, #4*13]
    sub     r12, r4, r5         @ 3: gteSY0 - gteSY1
    sub     r5, r5, r6          @ 1: gteSY1 - gteSY2
    smull   r1, r5, lr, r5      @ RdLo, RdHi
    sub     r6, r4              @ 2: gteSY2 - gteSY0
    ldrsh   r3, [r0, #4*14]
    smlal   r1, r5, r3, r12
    movtgt  lr, #((1<<31)|(1<<16))>>16
    str     lr, [r0, #4*(32+31)] @ gteFLAG
.size gteNCLIP_arm, .-gteNCLIP_arm
    ldr     r2, [r0, #4*25]     @ gteMAC1
    ldr     r12,[r0, #4*(32+31)] @ gteFLAG
    orrge   r12, #(1<<31)|(1<<24)
    ldrd    r2, [r0, #4*26]     @ gteMAC23
    orrlt   r12, #(1<<31)|(1<<24)
    strd    r2, [r0, #4*10]     @ gteIR23
    str     r12,[r0, #4*(32+31)] @ gteFLAG
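
@ The lm0/lm1 entry points below differ only in the lower clamp bound used
@ when saturating gteMAC1-3 into gteIR1-3. A rough C sketch for IR1
@ (illustrative only, not part of the build; IR2/IR3 use flag bits 23/22):
/*
    #include <stdint.h>

    static int32_t mac_to_ir1(int32_t mac, int lm, uint32_t *flag)
    {
        int32_t min = lm ? 0 : -0x8000;       // lm: clamp negatives to 0
        if (mac < min || mac > 0x7fff) {
            *flag |= (1u << 31) | (1u << 24); // IR1 saturated + summary
            return mac < min ? min : 0x7fff;
        }
        return mac;
    }
*/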
.global gteMACtoIR_lm0 @ r0=CP2 (d,c)
.size gteMACtoIR_lm0, .-gteMACtoIR_lm0

.global gteMACtoIR_lm1 @ r0=CP2 (d,c)
.size gteMACtoIR_lm1, .-gteMACtoIR_lm1

.global gteMACtoIR_lm0_nf @ r0=CP2 (d,c)
.size gteMACtoIR_lm0_nf, .-gteMACtoIR_lm0_nf

.global gteMACtoIR_lm1_nf @ r0=CP2 (d,c)
.size gteMACtoIR_lm1_nf, .-gteMACtoIR_lm1_nf
.global gteMVMVA_test
    and     r2, r1, #0x18000    @ v
    cmp     r2, #0x18000        @ v == 3?
    addne   r3, r0, r2, lsr #12
    orreq   r4, r3, r4, lsl #16 @ r4,r5 = VXYZ(v)
    and     r3, r1, #0x60000    @ mx
    add     r6, r12, r3, lsl #5
    and     r2, r1, #0x06000    @ cv
    add     r7, r12, r2, lsl #5
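    @ the masks above decode the standard MVMVA opcode fields, roughly:
    @   v  = (op >> 15) & 3;  @ source vector: 0-2 = V0-V2, 3 = IR123
    @   mx = (op >> 17) & 3;  @ matrix: rotation/light/color
    @   cv = (op >> 13) & 3;  @ translation: TR/BK/FC/none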
    bne     gteMVMVA_part_cv3sh12_arm
    bl      gteMVMVA_part_neon
    bl      gteMACtoIR_flags_neon

@ vim:filetype=armasm