/*
 * (C) Gražvydas "notaz" Ignotas, 2010
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include "arm_features.h"

/* sanity check */
#ifndef __ARM_NEON__
#error Compiling NEON code, but appropriate preprocessor flag is missing
#error This usually means -mfpu=neon or -mfloat-abi= is not correctly specified
#endif

.text
.align 2

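@ Convert 15-bit BGR pixels (bit 15 = transparency bit, dropped here) to RGB565.
@ r0 = dst, r1 = src, r2 = size in bytes; any tail smaller than 8 bytes is
@ left unconverted.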
FUNCTION(bgr555_to_rgb565): @ dst, src, bytes
    pld         [r1]
    mov         r3, #0x07c0
    vdup.16     q15, r3
    subs        r2, r2, #64
    blt         btr16_end64
0:
    pld         [r1, #64*2]
    @ Pulls 15-bit BGR color values (stored as 16-bit halfwords) into q0-q3.
    @ example: q0 = 0111 1110 0101 0011
    vldmia      r1!, {q0-q3}
    @ Shift BGR color 1 bit to the left, discarding MSB and preparing for vbit.
    @ MSB is used for transparency (not needed here, and can mess with green).
    @ example: q0 = 1111 1100 1010 0110
    vshl.u16    q0, q0, #1
    vshl.u16    q1, q1, #1
    vshl.u16    q2, q2, #1
    vshl.u16    q3, q3, #1
    @ Places the red value in the leftmost bits, clearing the bits to the right.
    @ example: q8 = 1001 1000 0000 0000
    vshl.u16    q8, q0, #10
    vshl.u16    q9, q1, #10
    vshl.u16    q10, q2, #10
    vshl.u16    q11, q3, #10
    @ Places the blue value in the rightmost bits, leaving the bits to the left unchanged.
    @ example: q8 = 1001 1000 0001 1111
    vsri.u16    q8, q0, #11
    vsri.u16    q9, q1, #11
    vsri.u16    q10, q2, #11
    vsri.u16    q11, q3, #11
    @ Sets the green value from the shifted BGR color by applying a mask.
    @ example: q15 = 0000 0111 1100 0000
    @          q8  = 1001 1100 1001 1111
    vbit        q8, q0, q15
    vbit        q9, q1, q15
    vbit        q10, q2, q15
    vbit        q11, q3, q15
    vstmia      r0!, {q8-q11}
    subs        r2, r2, #64
    bge         0b

btr16_end64:
    adds        r2, r2, #64
    bxeq        lr
    subs        r2, r2, #16
    blt         btr16_end16

    @ handle the remainder (reasonably rare)
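    @ Same shift/insert/bit-select sequence as the main loop, one q register
    @ (16 bytes) per iteration.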
0:
    vld1.16     {q0}, [r1]!
    vshl.u16    q0, q0, #1
    vshl.u16    q1, q0, #10
    vsri.u16    q1, q0, #11
    vbit        q1, q0, q15
    subs        r2, r2, #16
    vst1.16     {q1}, [r0]!
    bge         0b

btr16_end16:
    adds        r2, r2, #16
    bxeq        lr
    subs        r2, r2, #8
    bxlt        lr

    @ very rare
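    @ Final 8-byte (4-pixel) chunk, converted in d registers.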
    vld1.16     {d0}, [r1]!
    vshl.u16    d0, d0, #1
    vshl.u16    d1, d0, #10
    vsri.u16    d1, d0, #11
    vbit        d1, d0, d30
    vst1.16     {d1}, [r0]!
    bx          lr


@ note: may read past the end of the source buffer
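@ Same BGR555 -> RGB565 conversion, with each channel also scaled by the
@ brightness factor in r3 (0..0x0800, where 0x0800 leaves the color unchanged):
@ each 5-bit channel is masked out, multiplied by the factor, and the high bits
@ of the 16-bit product are merged into the RGB565 result with vsri.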
FUNCTION(bgr555_to_rgb565_b): @ dst, src, bytes, int brightness2k // 0-0x0800
    pld         [r1]
    vdup.16     q15, r3
    vpush       {q4-q7}
    mov         r3, #0x1f
    vdup.16     q14, r3
0:
    pld         [r1, #64*2]
    vldmia      r1!, {q0-q3}
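    @ red (bits 4..0): mask with q14 and scale by the brightness factor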
    vand.u16    q8, q0, q14
    vand.u16    q9, q1, q14
    vand.u16    q10, q2, q14
    vand.u16    q11, q3, q14
    vmul.u16    q4, q8, q15
    vmul.u16    q5, q9, q15
    vmul.u16    q6, q10, q15
    vmul.u16    q7, q11, q15

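    @ green (bits 9..5): mask, scale, and insert below the red bits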
    vshr.u16    q8, q0, #5
    vshr.u16    q9, q1, #5
    vshr.u16    q10, q2, #5
    vshr.u16    q11, q3, #5
    vand.u16    q8, q14
    vand.u16    q9, q14
    vand.u16    q10, q14
    vand.u16    q11, q14
    vmul.u16    q8, q15
    vmul.u16    q9, q15
    vmul.u16    q10, q15
    vmul.u16    q11, q15
    vsri.u16    q4, q8, #5
    vsri.u16    q5, q9, #5
    vsri.u16    q6, q10, #5
    vsri.u16    q7, q11, #5

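    @ blue (bits 14..10): mask, scale, and insert into the low 5 bits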
    vshr.u16    q8, q0, #10
    vshr.u16    q9, q1, #10
    vshr.u16    q10, q2, #10
    vshr.u16    q11, q3, #10
    vand.u16    q8, q14
    vand.u16    q9, q14
    vand.u16    q10, q14
    vand.u16    q11, q14
    vmul.u16    q8, q15
    vmul.u16    q9, q15
    vmul.u16    q10, q15
    vmul.u16    q11, q15
    vsri.u16    q4, q8, #11
    vsri.u16    q5, q9, #11
    vsri.u16    q6, q10, #11
    vsri.u16    q7, q11, #11

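    @ store full 64-byte chunks; the final, possibly partial, chunk is stored piecewise below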
    subs        r2, r2, #64
    ble         1f
    vstmia      r0!, {q4-q7}
    b           0b

1:
    blt         0f
    vstmia      r0!, {q4-q7}
    b           btr16b_end
0:
    subs        r2, r2, #8
    blt         btr16b_end
    vst1.16     {q4}, [r0]!
    subs        r2, r2, #8
    blt         btr16b_end
    vst1.16     {q5}, [r0]!
    subs        r2, r2, #8
    blt         btr16b_end
    vst1.16     {q6}, [r0]!
    subs        r2, r2, #8
    blt         btr16b_end
    vst1.16     {q7}, [r0]!

btr16b_end:
    vpop        {q4-q7}
    bx          lr


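@ Swap the red and blue bytes of 24-bit pixels. Processes 48 bytes (16 pixels)
@ per iteration, so the byte count is expected to be a multiple of 48; dst must
@ be 8-byte aligned (the stores use the :64 alignment hint).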
FUNCTION(bgr888_to_rgb888): @ dst, src, bytes
    pld         [r1]
    @ r2 /= 48 (divide by 16, then by 3 via the reciprocal 0x55555556)
    mov         r2, r2, lsr #4
    movw        r3, #0x5556
    movt        r3, #0x5555
    umull       r12, r2, r3, r2
0:
    pld         [r1, #48*3]
    vld3.8      {d0-d2}, [r1]!
    vld3.8      {d3-d5}, [r1]!
    vswp        d0, d2
    vswp        d3, d5
    vst3.8      {d0-d2}, [r0, :64]!
    vst3.8      {d3-d5}, [r0, :64]!
    subs        r2, r2, #1
    bne         0b

    bx          lr


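@ Convert 24-bit pixels to RGB565. Processes 48 bytes (16 pixels) per
@ iteration, so the byte count is expected to be a multiple of 48.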
FUNCTION(bgr888_to_rgb565): @ dst, src, bytes
    pld         [r1]
    @ r2 /= 48
    mov         r2, r2, lsr #4
    movw        r3, #0x5556
    movt        r3, #0x5555
    umull       r12, r2, r3, r2

    mov         r3, #0x07e0
    vdup.16     q15, r3
0:
    pld         [r1, #48*3]
    vld3.8      {d1-d3}, [r1]!
    vld3.8      {d5-d7}, [r1]!

    vshll.u8    q8, d2, #3      @ g
    vshll.u8    q9, d6, #3
    vshr.u8     d0, d3, #3      @ b
    vshr.u8     d4, d7, #3
    vzip.8      d0, d1          @ rb
    vzip.8      d4, d5
    vbit        q0, q8, q15
    vbit        q2, q9, q15

    vstmia      r0!, {d0,d1}
    vstmia      r0!, {d4,d5}
    subs        r2, r2, #1
    bne         0b

    bx          lr


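@ Same as bgr888_to_rgb565, but for the opposite byte order; the loads use the
@ :64 alignment hint, so src must be 8-byte aligned.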
FUNCTION(rgb888_to_rgb565): @ dst, src, bytes
    pld         [r1]
    @ r2 /= 48
    mov         r2, r2, lsr #4
    movw        r3, #0x5556
    movt        r3, #0x5555
    umull       r12, r2, r3, r2

    mov         r3, #0x07e0
    vdup.16     q15, r3
0:
    pld         [r1, #48*3]
    vld3.8      {d1-d3}, [r1, :64]!
    vld3.8      {d5-d7}, [r1, :64]!

    vshll.u8    q8, d2, #3      @ g
    vshll.u8    q9, d6, #3
    vshr.u8     d2, d1, #3      @ b
    vshr.u8     d6, d5, #3
    vzip.8      d2, d3          @ rb
    vzip.8      d6, d7
    vbit        q1, q8, q15
    vbit        q3, q9, q15

    vstmia      r0!, {d2,d3}
    vstmia      r0!, {d6,d7}
    subs        r2, r2, #1
    bne         0b

    bx          lr


@ vim:filetype=armasm