.global vidCpy8to16_40 @ void *dest, void *src, short *pal, int lines
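
@ Expands 8bpp source pixels to 16bpp through a 256-entry halfword
@ palette. In rough C (a sketch only; names are illustrative):
@
@   unsigned short *d = dest; unsigned char *s = src;
@   for (int i = 0; i < 320; i++)
@       d[i] = pal[s[i]];
@
@ The loops below do 8 pixels per iteration, pairing the looked-up
@ halfwords into words so each block is written with a single stmia.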

vidCpy8to16_40:
    stmfd   sp!, {r4-r9,lr}

    orr     r3, r3, r3, lsl #8     @ keep a copy of the line count in byte 1
    orr     r3, r3, #(320/8-1)<<24 @ 8-pixel blocks per line, in the top byte

    mov     lr, #0xff
    mov     lr, lr, lsl #1         @ lr = 0x1fe: byte mask pre-scaled to halfword offsets

vcloop_40:
    ldr     r12, [r1], #4          @ pixels 0-3
    ldr     r7, [r1], #4           @ pixels 4-7
    and     r4, lr, r12, lsl #1    @ pixel 0 -> palette byte offset
    ldrh    r4, [r2, r4]           @ r4 = 16bpp color of pixel 0
    and     r5, lr, r12, lsr #7    @ pixel 1
    ldrh    r5, [r2, r5]
    and     r6, lr, r12, lsr #15   @ pixel 2
    ldrh    r6, [r2, r6]
    orr     r4, r4, r5, lsl #16    @ pair pixels 0+1 into one word
    and     r5, lr, r12, lsr #23   @ pixel 3
    ldrh    r5, [r2, r5]
    and     r8, lr, r7, lsl #1     @ pixel 4
    ldrh    r8, [r2, r8]
    orr     r5, r6, r5, lsl #16    @ pixels 2+3
    and     r6, lr, r7, lsr #7     @ pixel 5
    ldrh    r6, [r2, r6]
    and     r12, lr, r7, lsr #15   @ pixel 6
    ldrh    r12, [r2, r12]
    and     r9, lr, r7, lsr #23    @ pixel 7
    ldrh    r9, [r2, r9]
    orr     r8, r8, r6, lsl #16    @ pixels 4+5
    orr     r12, r12, r9, lsl #16  @ pixels 6+7
    subs    r3, r3, #1<<24         @ one 8-pixel block done
    stmia   r0!, {r4, r5, r8, r12} @ write 8 converted pixels
    bpl     vcloop_40

    add     r1, r1, #336           @ skip a line and 1 col
    add     r0, r0, #320*2+2*2
    add     r3, r3, #(320/8)<<24   @ reload the block counter
    sub     r3, r3, #1             @ one output line done
    tst     r3, #0xff
    bne     vcloop_40

    orr     r3, r3, r4, lsr #8
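
@ Variant for a destination offset by one pixel (2 bytes): each 8-pixel
@ block straddles word boundaries, so the line's first pixel goes out
@ with a halfword store and each iteration carries its last pixel in r4
@ into the first word of the next iteration.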
vcloop_40_unaligned_outer:
    ldr     r12, [r1], #4          @ pixels 0-3 of a fresh line
    ldr     r7, [r1], #4           @ pixels 4-7
    and     r4, lr, r12, lsl #1    @ pixel 0
    ldrh    r4, [r2, r4]
    and     r5, lr, r12, lsr #7    @ pixel 1
    ldrh    r5, [r2, r5]
    strh    r4, [r0], #2           @ flush pixel 0 alone to align the dest
    b       vcloop_40_unaligned_enter

vcloop_40_unaligned:
    ldr     r12, [r1], #4          @ pixels 0-3 of the next block
    ldr     r7, [r1], #4           @ pixels 4-7
    and     r6, lr, r12, lsl #1    @ pixel 0
    ldrh    r6, [r2, r6]
    and     r5, lr, r12, lsr #7    @ pixel 1
    ldrh    r5, [r2, r5]
    orr     r4, r4, r6, lsl #16    @ carried pixel 7 + new pixel 0
    str     r4, [r0], #4

vcloop_40_unaligned_enter:
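    @ pixels 1..6 pair into three aligned words; pixel 7 waits in r4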
    and     r6, lr, r12, lsr #15   @ pixel 2
    ldrh    r6, [r2, r6]
    and     r4, lr, r12, lsr #23   @ pixel 3
    ldrh    r4, [r2, r4]
    orr     r5, r5, r6, lsl #16    @ pixels 1+2
    and     r8, lr, r7, lsl #1     @ pixel 4
    ldrh    r8, [r2, r8]
    and     r6, lr, r7, lsr #7     @ pixel 5
    ldrh    r6, [r2, r6]
    orr     r8, r4, r8, lsl #16    @ pixels 3+4
    and     r12, lr, r7, lsr #15   @ pixel 6
    ldrh    r12, [r2, r12]
    and     r4, lr, r7, lsr #23    @ pixel 7, carried onward
    ldrh    r4, [r2, r4]
    orr     r12, r6, r12, lsl #16  @ pixels 5+6
    subs    r3, r3, #1<<24         @ one block done
    stmia   r0!, {r5, r8, r12}     @ pixels 1..6; pixel 7 rides in r4
    bpl     vcloop_40_unaligned

    strh    r4, [r0], #2           @ flush the carried final pixel of the line

    add     r1, r1, #336           @ skip a line and 1 col
    add     r0, r0, #320*2+2*2
    add     r3, r3, #(320/8)<<24   @ reload the block counter
    sub     r3, r3, #1             @ one output line done
    tst     r3, #0xff
    bne     vcloop_40_unaligned_outer

    ldmfd   sp!, {r4-r9,lr}
    bx      lr