/*
 * (C) Gražvydas "notaz" Ignotas, 2010
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include "arm_features.h"

/* sanity check */
#ifndef __ARM_NEON__
#error Compiling NEON code, but appropriate preprocessor flag is missing
#error This usually means -mfpu=neon or -mfloat-abi= is not correctly specified
#endif

.text
.align 2

FUNCTION(bgr555_to_rgb565): @ dst, src, bytes
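    @ Converts PSX 15-bit BGR (red in bits 4..0, green in 9..5, blue in 14..10,
    @ mask in bit 15) to RGB565.  q15 holds 0x07c0, the position of the green
    @ field after the <<1 shift below, and is used as the vbit mask.  dst is
    @ expected to be at least 8-byte aligned (only bit 3 is tested), and the
    @ byte count is consumed in multiples of 8; any sub-8-byte remainder is
    @ not converted.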
    pld      [r1]
    mov      r3, #0x07c0
    vdup.16  q15, r3
    tst      r0, #8
    beq      0f
    @ align the dst
    vld1.16  {d0}, [r1]!
    sub      r2, r2, #8
    vshl.u16 d0, d0, #1
    vshl.u16 d1, d0, #10
    vsri.u16 d1, d0, #11
    vbit     d1, d0, d30
    vst1.16  {d1}, [r0]!
0:
    subs     r2, r2, #64
    blt      btr16_end64
0:
    pld      [r1, #64*2]
    @ Pulls 15-bit BGR color values (stored as 16 bits each) into q0-q3.
    @ example: q0 = 0111 1110 0101 0011
    vldmia   r1!, {q0-q3}
    @ Shifts the BGR color one bit to the left, discarding the MSB and
    @ preparing for vbit.  The MSB is the transparency bit (not needed here,
    @ and it can interfere with green).
    @ example: q0 = 1111 1100 1010 0110
    vshl.u16 q0, q0, #1
    vshl.u16 q1, q1, #1
    vshl.u16 q2, q2, #1
    vshl.u16 q3, q3, #1
    @ Places the red value in the leftmost bits, clearing the bits to the right.
    @ example: q8 = 1001 1000 0000 0000
    vshl.u16 q8, q0, #10
    vshl.u16 q9, q1, #10
    vshl.u16 q10, q2, #10
    vshl.u16 q11, q3, #10
    @ Places the blue value in the rightmost bits, leaving the bits to the left
    @ unchanged.
    @ example: q8 = 1001 1000 0001 1111
    vsri.u16 q8, q0, #11
    vsri.u16 q9, q1, #11
    vsri.u16 q10, q2, #11
    vsri.u16 q11, q3, #11
    @ Sets the green value from the shifted BGR color by applying a mask.
    @ example: q15 = 0000 0111 1100 0000
    @          q8  = 1001 1100 1001 1111
    vbit     q8, q0, q15
    vbit     q9, q1, q15
    vbit     q10, q2, q15
    vbit     q11, q3, q15
    vstmia   r0!, {q8-q11}
    subs     r2, r2, #64
    bge      0b

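    @ For reference, each 16-bit pixel in the loop above is transformed as in
    @ this scalar C sketch (not part of the build; p is one BGR555 value):
    @   t   = (uint16_t)(p << 1);          /* drop the mask bit       */
    @   out = ((t << 10) & 0xf800)         /* red   -> bits 15..11    */
    @       | ( t        & 0x07c0)         /* green -> bits 10..6     */
    @       | ((t >> 11) & 0x001f);        /* blue  -> bits 4..0      */
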
btr16_end64:
    adds     r2, r2, #64
    bxeq     lr
    subs     r2, r2, #16
    blt      btr16_end16

    @ handle the remainder (reasonably rare)
0:
    vld1.16  {q0}, [r1]!
    vshl.u16 q0, q0, #1
    vshl.u16 q1, q0, #10
    vsri.u16 q1, q0, #11
    vbit     q1, q0, q15
    subs     r2, r2, #16
    vst1.16  {q1}, [r0]!
    bge      0b

btr16_end16:
    adds     r2, r2, #16
    bxeq     lr
    subs     r2, r2, #8
    bxlt     lr

    @ very rare
    vld1.16  {d0}, [r1]!
    vshl.u16 d0, d0, #1
    vshl.u16 d1, d0, #10
    vsri.u16 d1, d0, #11
    vbit     d1, d0, d30
    vst1.16  {d1}, [r0]!
    bx       lr


@ note: may overflow source (it is read in 64-byte blocks, so reads can go past the end)
FUNCTION(bgr555_to_rgb565_b): @ dst, src, bytes, int brightness2k // 0-0x0800
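    @ brightness2k is a Q11 scale factor (0x0800 == 1.0).  Each 5-bit channel
    @ is masked out (q14 = 0x001f), multiplied by the factor (q15), and the top
    @ bits of the 16-bit product are merged into the RGB565 result with vsri:
    @ red lands in bits 15..11 directly, green is inserted at >>5, blue at >>11.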
    pld      [r1]
    vdup.16  q15, r3
    vpush    {q4-q7}
    mov      r3, #0x1f
    vdup.16  q14, r3
0:
    pld      [r1, #64*2]
    vldmia   r1!, {q0-q3}
    vand.u16 q8, q0, q14        @ red
    vand.u16 q9, q1, q14
    vand.u16 q10, q2, q14
    vand.u16 q11, q3, q14
    vmul.u16 q4, q8, q15
    vmul.u16 q5, q9, q15
    vmul.u16 q6, q10, q15
    vmul.u16 q7, q11, q15

    vshr.u16 q8, q0, #5         @ green
    vshr.u16 q9, q1, #5
    vshr.u16 q10, q2, #5
    vshr.u16 q11, q3, #5
    vand.u16 q8, q14
    vand.u16 q9, q14
    vand.u16 q10, q14
    vand.u16 q11, q14
    vmul.u16 q8, q15
    vmul.u16 q9, q15
    vmul.u16 q10, q15
    vmul.u16 q11, q15
    vsri.u16 q4, q8, #5
    vsri.u16 q5, q9, #5
    vsri.u16 q6, q10, #5
    vsri.u16 q7, q11, #5

    vshr.u16 q8, q0, #10        @ blue
    vshr.u16 q9, q1, #10
    vshr.u16 q10, q2, #10
    vshr.u16 q11, q3, #10
    vand.u16 q8, q14
    vand.u16 q9, q14
    vand.u16 q10, q14
    vand.u16 q11, q14
    vmul.u16 q8, q15
    vmul.u16 q9, q15
    vmul.u16 q10, q15
    vmul.u16 q11, q15
    vsri.u16 q4, q8, #11
    vsri.u16 q5, q9, #11
    vsri.u16 q6, q10, #11
    vsri.u16 q7, q11, #11

    subs     r2, r2, #64
    ble      1f
    vstmia   r0!, {q4-q7}
    b        0b

1:
    blt      0f
    vstmia   r0!, {q4-q7}
    b        btr16b_end
0:
    subs     r2, r2, #8
    blt      btr16b_end
    vst1.16  {q4}, [r0]!
    subs     r2, r2, #8
    blt      btr16b_end
    vst1.16  {q5}, [r0]!
    subs     r2, r2, #8
    blt      btr16b_end
    vst1.16  {q6}, [r0]!
    subs     r2, r2, #8
    blt      btr16b_end
    vst1.16  {q7}, [r0]!

btr16b_end:
    vpop     {q4-q7}
    bx       lr


FUNCTION(bgr888_to_rgb888): @ dst, src, bytes
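    @ vld3.8 de-interleaves each group of 24 source bytes into its three color
    @ planes, vswp exchanges the first and third plane (swapping red and blue),
    @ and vst3.8 re-interleaves them, so each loop iteration converts 48 bytes
    @ (16 pixels).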
    pld      [r1]
    @ r2 /= 48
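    @ (the lsr #4 divides by 16; umull keeps the high word of r2 * 0x55555556,
    @  where 0x55555556 is 2^32/3 rounded up, i.e. an unsigned divide by 3)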
    mov      r2, r2, lsr #4
    movw     r3, #0x5556
    movt     r3, #0x5555
    umull    r12, r2, r3, r2
0:
    pld      [r1, #48*3]
    vld3.8   {d0-d2}, [r1]!
    vld3.8   {d3-d5}, [r1]!
    vswp     d0, d2
    vswp     d3, d5
    vst3.8   {d0-d2}, [r0, :64]!
    vst3.8   {d3-d5}, [r0, :64]!
    subs     r2, r2, #1
    bne      0b

    bx       lr


FUNCTION(bgr888_to_rgb565): @ dst, src, bytes
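    @ The green byte is widened with vshll #3 so its top 6 bits land in the 565
    @ green field; the blue byte is cut down to 5 bits and vzip'd with the red
    @ byte so red ends up in bits 15..11 and blue in bits 4..0; vbit then merges
    @ green under the 0x07e0 mask (q15).  Each iteration converts 16 pixels
    @ (48 bytes in, 32 bytes out).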
    pld      [r1]
    @ r2 /= 48
    mov      r2, r2, lsr #4
    movw     r3, #0x5556
    movt     r3, #0x5555
    umull    r12, r2, r3, r2

    mov      r3, #0x07e0
    vdup.16  q15, r3
0:
    pld      [r1, #48*3]
    vld3.8   {d1-d3}, [r1]!
    vld3.8   {d5-d7}, [r1]!

    vshll.u8 q8, d2, #3        @ g
    vshll.u8 q9, d6, #3
    vshr.u8  d0, d3, #3        @ b
    vshr.u8  d4, d7, #3
    vzip.8   d0, d1            @ rb
    vzip.8   d4, d5
    vbit     q0, q8, q15
    vbit     q2, q9, q15

    vstmia   r0!, {d0,d1}
    vstmia   r0!, {d4,d5}
    subs     r2, r2, #1
    bne      0b

    bx       lr


FUNCTION(rgb888_to_rgb565): @ dst, src, bytes
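    @ Same packing scheme as bgr888_to_rgb565 above, but for source pixels with
    @ the opposite byte order, and with 64-bit alignment hints on the loads.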
    pld      [r1]
    @ r2 /= 48
    mov      r2, r2, lsr #4
    movw     r3, #0x5556
    movt     r3, #0x5555
    umull    r12, r2, r3, r2

    mov      r3, #0x07e0
    vdup.16  q15, r3
0:
    pld      [r1, #48*3]
    vld3.8   {d1-d3}, [r1, :64]!
    vld3.8   {d5-d7}, [r1, :64]!

    vshll.u8 q8, d2, #3        @ g
    vshll.u8 q9, d6, #3
    vshr.u8  d2, d1, #3        @ b
    vshr.u8  d6, d5, #3
    vzip.8   d2, d3            @ rb
    vzip.8   d6, d7
    vbit     q1, q8, q15
    vbit     q3, q9, q15

    vstmia   r0!, {d2,d3}
    vstmia   r0!, {d6,d7}
    subs     r2, r2, #1
    bne      0b

    bx       lr


@ vim:filetype=armasm