/*
 * (C) Gražvydas "notaz" Ignotas, 2011
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 * - GNU GPL, version 2 or later.
 * - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */
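
@ Mixing helpers for the SPU: scale one channel's decoded samples by the
@ left/right volume and accumulate them into the interleaved stereo sum
@ buffer (SSumLR); the _rvb variants also feed the reverb buffer.
@ A NEON version and a DSP-extension (smlawx) fallback are provided below.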


.text
.align 2

@ XXX: should be HAVE_NEON
.if HAVE_ARMV7
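@ mix_chan: for each of the `count` samples starting at ChanBuf[start],
@ accumulate (sample * lv) >> 14 into the left slot and (sample * rv) >> 14
@ into the right slot of the interleaved SSumLR buffer. d14 holds the two
@ volumes; the loop works on 4 samples per iteration, with a tail for the
@ last 1-3. Callers appear to guarantee count > 0.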
.global mix_chan @ (int start, int count, int lv, int rv)
mix_chan:
    vmov.32  d14[0], r2
    vmov.32  d14[1], r3            @ d14 = (lv, rv) multipliers
    mov      r12, r0
    movw     r0, #:lower16:ChanBuf
    movw     r2, #:lower16:SSumLR
    movt     r0, #:upper16:ChanBuf
    movt     r2, #:upper16:SSumLR
    add      r0, r12, lsl #2       @ r0 = &ChanBuf[start]
    add      r2, r12, lsl #3       @ r2 = &SSumLR[start * 2]
0:
    vldmia   r0!, {d0-d1}          @ load 4 samples
    vldmia   r2, {d2-d5}           @ load 4 L/R sum pairs
    vmul.s32 d10, d14, d0[0]       @ sample 0 * (lv, rv)
    vmul.s32 d11, d14, d0[1]       @ sample 1 * (lv, rv)
    vmul.s32 d12, d14, d1[0]       @ sample 2 * (lv, rv)
    vmul.s32 d13, d14, d1[1]       @ sample 3 * (lv, rv)
    vsra.s32 q1, q5, #14           @ sums += products >> 14
    vsra.s32 q2, q6, #14
    subs     r1, #4
    blt      mc_finish             @ only 1-3 samples were left
    vstmia   r2!, {d2-d5}
    bgt      0b
    nop
    bxeq     lr                    @ done, count was a multiple of 4

mc_finish:
    vstmia   r2!, {d2}             @ at least 1 pair remains
    cmp      r1, #-2
    vstmiage r2!, {d3}             @ 2nd pair, if 2+ samples remained
    cmp      r1, #-1
    vstmiage r2!, {d4}             @ 3rd pair, if 3 samples remained
    bx       lr


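@ mix_chan_rvb: same as mix_chan, but every scaled sample is also
@ accumulated into the reverb buffer (loaded from the sRVBStart pointer)
@ in addition to SSumLR.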
.global mix_chan_rvb @ (int start, int count, int lv, int rv)
mix_chan_rvb:
    vmov.32  d14[0], r2
    vmov.32  d14[1], r3            @ d14 = (lv, rv) multipliers
    mov      r12, r0
    movw     r0, #:lower16:ChanBuf
    movw     r3, #:lower16:sRVBStart
    movw     r2, #:lower16:SSumLR
    movt     r0, #:upper16:ChanBuf
    movt     r3, #:upper16:sRVBStart
    movt     r2, #:upper16:SSumLR
    ldr      r3, [r3]              @ sRVBStart holds the buffer pointer
    add      r0, r12, lsl #2       @ r0 = &ChanBuf[start]
    add      r2, r12, lsl #3       @ r2 = &SSumLR[start * 2]
    add      r3, r12, lsl #3       @ r3 = &reverb[start * 2]
0:
    vldmia   r0!, {d0-d1}          @ load 4 samples
    vldmia   r2, {d2-d5}           @ load 4 L/R sum pairs
    vldmia   r3, {d6-d9}           @ load 4 L/R reverb pairs
    vmul.s32 d10, d14, d0[0]       @ sample 0 * (lv, rv)
    vmul.s32 d11, d14, d0[1]
    vmul.s32 d12, d14, d1[0]
    vmul.s32 d13, d14, d1[1]
    vsra.s32 q1, q5, #14           @ accumulate products >> 14 into sums...
    vsra.s32 q2, q6, #14
    vsra.s32 q3, q5, #14           @ ...and into the reverb pairs
    vsra.s32 q4, q6, #14
    subs     r1, #4
    blt      mcr_finish            @ only 1-3 samples were left
    vstmia   r2!, {d2-d5}
    vstmia   r3!, {d6-d9}
    bgt      0b
    nop
    bxeq     lr                    @ done, count was a multiple of 4

mcr_finish:
    vstmia   r2!, {d2}             @ at least 1 pair remains
    vstmia   r3!, {d6}
    cmp      r1, #-2
    vstmiage r2!, {d3}             @ 2nd pair, if 2+ samples remained
    vstmiage r3!, {d7}
    cmp      r1, #-1
    vstmiage r2!, {d4}             @ 3rd pair, if 3 samples remained
    vstmiage r3!, {d8}
    bx       lr

.else

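@ Fallback using the smlawb/smlawt DSP multiplies (ARMv5TE and up):
@ smlawx acc, a, b, acc computes acc += (a * b16) >> 16, where b16 is the
@ bottom (b) or top (t) signed 16-bit half of the second operand. Both
@ volumes are packed into one register and, like the samples, pre-shifted
@ left by 1, so the effective scaling is (sample * volume) >> 14, matching
@ the NEON version above.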
.global mix_chan @ (int start, int count, int lv, int rv)
mix_chan:
    stmfd    sp!, {r4-r8,lr}
    orr      r3, r2, r3, lsl #16
    lsl      r3, #1                @ packed multipliers << 1
    mov      r12, r0
    ldr      r0, =ChanBuf
    ldr      r2, =SSumLR
    add      r0, r12, lsl #2       @ r0 = &ChanBuf[start]
    add      r2, r12, lsl #3       @ r2 = &SSumLR[start * 2]
0:
    ldmia    r0!, {r4,r5}          @ load 2 samples
    ldmia    r2, {r6-r8,lr}        @ load 2 L/R sum pairs
    lsl      r4, #1                @ adjust for mul
    lsl      r5, #1
    smlawb   r6, r4, r3, r6        @ L += (sample * lv) >> 14
    smlawt   r7, r4, r3, r7        @ R += (sample * rv) >> 14
    smlawb   r8, r5, r3, r8
    smlawt   lr, r5, r3, lr
    subs     r1, #2
    blt      mc_finish             @ a single sample was left
    stmia    r2!, {r6-r8,lr}
    bgt      0b
    ldmeqfd  sp!, {r4-r8,pc}       @ done, count was even

mc_finish:
    stmia    r2!, {r6,r7}          @ store the one remaining pair
    ldmfd    sp!, {r4-r8,pc}


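@ Reverb variant: processes one sample per iteration, so no tail case is
@ needed; accumulates into both SSumLR and the reverb buffer.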
.global mix_chan_rvb @ (int start, int count, int lv, int rv)
mix_chan_rvb:
    stmfd    sp!, {r4-r8,lr}
    orr      lr, r2, r3, lsl #16
    lsl      lr, #1                @ packed multipliers << 1
    ldr      r3, =sRVBStart
    ldr      r2, =SSumLR
    ldr      r4, =ChanBuf
    ldr      r3, [r3]              @ sRVBStart holds the buffer pointer
    add      r2, r2, r0, lsl #3    @ r2 = &SSumLR[start * 2]
    add      r3, r3, r0, lsl #3    @ r3 = &reverb[start * 2]
    add      r0, r4, r0, lsl #2    @ r0 = &ChanBuf[start]
0:
    ldr      r4, [r0], #4          @ one sample per iteration
    ldmia    r2, {r6,r7}
    ldmia    r3, {r8,r12}
    lsl      r4, #1                @ adjust for mul
    smlawb   r6, r4, lr, r6        @ supposedly takes single cycle?
    smlawt   r7, r4, lr, r7
    smlawb   r8, r4, lr, r8
    smlawt   r12,r4, lr, r12
    subs     r1, #1
    stmia    r2!, {r6,r7}
    stmia    r3!, {r8,r12}
    bgt      0b
    ldmfd    sp!, {r4-r8,pc}

.endif

@ vim:filetype=armasm