+ vbit q4, q0, q15
+ vbit q5, q1, q15
+ vbit q6, q2, q15
+ vbit q7, q3, q15
+ vstmia r0!, {q4-q7}
+ subs r2, r2, #64
+ bge 0b
+
+btr16_end64:
+ adds r2, r2, #64
+ bxeq lr
+ subs r2, r2, #16
+ blt btr16_end16
+
+ @ handle the remainder in 16-byte (8 pixel) chunks (reasonably rare)
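+ @ in:  0b?bbbbbgggggrrrrr (bgr555; '?' is the mask bit)
+ @ out: 0brrrrrggggggbbbbb (rgb565)
+ @ scalar equivalent, assuming the 0x07c0 green-field mask was
+ @ loaded into q15/d30 by the preamble (not shown here):
+ @   out = (p << 11) | ((p >> 10) & 0x3f) | ((p << 1) & 0x07c0)
+ @ (note the rgb565 green LSB ends up as the source mask bit)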
+0:
+ vld1.16 {q0}, [r1]!
+ vshl.u16 q1, q0, #11
+ vshl.u16 q2, q0, #1
+ vsri.u16 q1, q0, #10
+ vbit q1, q2, q15
+ subs r2, r2, #16
+ vst1.16 {q1}, [r0]!
+ bge 0b
+
+btr16_end16:
+ adds r2, r2, #16
+ bxeq lr
+ subs r2, r2, #8
+ bxlt lr
+
+ @ very rare: final 8-byte (4 pixel) tail
+ vld1.16 {d0}, [r1]!
+ vshl.u16 d1, d0, #11
+ vshl.u16 d2, d0, #1
+ vsri.u16 d1, d0, #10
+ vbit d1, d2, d30
+ vst1.16 {d1}, [r0]!
+ bx lr
+
+
+.global bgr888_to_rgb888
+bgr888_to_rgb888:
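+ @ args: r0 = dst, r1 = src, r2 = size in bytes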
+ @ r2 /= 48
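+ @ (each loop iteration converts 48 bytes = 16 pixels: lsr #4
+ @ divides by 16, then the high word of the umull with
+ @ 0x55555556 ~= 2^32/3 divides by 3)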
+ mov r2, r2, lsr #4
+ movw r3, #0x5556
+ movt r3, #0x5555
+ umull r12, r2, r3, r2
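+ @ vld3.8 de-interleaves 8 pixels at a time: d0/d3 = B bytes,
+ @ d1/d4 = G bytes, d2/d5 = R bytes; swapping the B and R planes
+ @ and re-interleaving with vst3.8 turns bgr888 into rgb888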
+0:
+ vld3.8 {d0-d2}, [r1, :64]!
+ vld3.8 {d3-d5}, [r1, :64]!
+ vswp d0, d2
+ vswp d3, d5
+ vst3.8 {d0-d2}, [r0, :64]!
+ vst3.8 {d3-d5}, [r0, :64]!