ea8c405f |
1 | @ vim:filetype=armasm |
2 | |
3 | |
4 | .global vidCpy8to16_40 @ void *dest, void *src, short *pal, int lines |
5 | |
@-----------------------------------------------------------------------
@ void vidCpy8to16_40(void *dest, void *src, unsigned short *pal, int lines)
@ In:   r0 = dest (16bpp), r1 = src (8bpp), r2 = palette (halfwords),
@       r3 = line count (processed in even/odd pairs)
@ Converts 8bpp pixels to 16bpp via palette lookup, 8 pixels per
@ iteration. Even lines are written word-aligned first; odd lines are
@ then written starting 2 bytes into the word (hence the halfword/
@ word/halfword store pattern in the second loop).
@ Source pitch appears to be 328 bytes with an 8-pixel left border
@ (r1+=8 at entry, +336 = 328+8 per line skip) -- NOTE(review):
@ confirm against the caller's frame layout.
@ Clobbers: r4-r9, r12, lr (r4-r9, lr saved/restored), flags.
@-----------------------------------------------------------------------
vidCpy8to16_40:
    stmfd   sp!, {r4-r9,lr}

    @ r3 layout from here on:
    @   bits  0-7  = remaining line pairs (lines/2)
    @   bits  8-15 = saved copy of that count (restored for odd pass)
    @   bits 24-31 = remaining 8-pixel blocks in the current line
    mov     r3, r3, lsr #1
    orr     r3, r3, r3, lsl #8
    @ lr = 0x1fe: mask for an 8-bit palette index pre-scaled by 2
    @ (palette entries are 16-bit). 0x1fe is not an encodable ARM
    @ immediate, hence the two-instruction construction. This was
    @ missing: lr still held the return address, so every ldrh below
    @ indexed the palette with garbage.
    mov     lr, #0xff
    mov     lr, lr, lsl #1
    add     r1, r1, #8                  @ skip 8-pixel left border column
    orr     r3, r3, #(320/8-1)<<24      @ 40 blocks of 8 pixels per line

@ even lines: dest word-aligned, 4 words out per 8 pixels in
vcloop_40_aligned:
    ldr     r12, [r1], #4               @ pixels 0-3
    ldr     r7,  [r1], #4               @ pixels 4-7

    and     r4, lr, r12, lsl #1         @ (pix0 & 0xff) * 2
    ldrh    r4, [r2, r4]                @ pal[pix0]
    and     r5, lr, r12, lsr #7
    ldrh    r5, [r2, r5]                @ pal[pix1]
    and     r6, lr, r12, lsr #15
    ldrh    r6, [r2, r6]                @ pal[pix2]
    orr     r4, r4, r5, lsl #16         @ pix0 | pix1<<16

    and     r5, lr, r12, lsr #23
    ldrh    r5, [r2, r5]                @ pal[pix3]
    and     r8, lr, r7, lsl #1
    ldrh    r8, [r2, r8]                @ pal[pix4]
    orr     r5, r6, r5, lsl #16         @ pix2 | pix3<<16

    and     r6, lr, r7, lsr #7
    ldrh    r6, [r2, r6]                @ pal[pix5]
    and     r12,lr, r7, lsr #15
    ldrh    r12,[r2, r12]               @ pal[pix6]
    and     r9, lr, r7, lsr #23
    ldrh    r9, [r2, r9]                @ pal[pix7]
    orr     r8, r8, r6, lsl #16         @ pix4 | pix5<<16

    subs    r3, r3, #1<<24              @ one 8-pixel block done
    orr     r12,r12, r9, lsl #16        @ pix6 | pix7<<16

    stmia   r0!, {r4,r5,r8,r12}         @ 16 bytes = 8 pixels
    bpl     vcloop_40_aligned

    add     r1, r1, #336                @ skip a line and 1 col (328+8)
    add     r0, r0, #320*2+2*2          @ skip the interleaved odd line
    add     r3, r3, #(320/8)<<24        @ reload block counter (was -1)
    sub     r3, r3, #1                  @ one even line done
    tst     r3, #0xff
    bne     vcloop_40_aligned

    @ Rewind both pointers to the start of the second (odd) line.
    @ First restore the line counter in bits 0-7 from its copy.
    and     r4, r3, #0xff00
    orr     r3, r3, r4, lsr #8
    mov     r4, r4, lsr #7              @ r4 = lines (even-rounded)
    sub     r4, r4, #1                  @ r4 = lines-1
    mov     r5, #320*2
    add     r5, r5, #2                  @ dest stride incl. 2-byte shift
    @ Keep the products in r6: the original reused r4 for the second
    @ mul, multiplying the already-scaled dest offset by 328 and
    @ rewinding the source pointer by a huge bogus amount.
    mul     r6, r5, r4                  @ (320*2+2)*(lines-1)
    sub     r0, r0, r6                  @ r0 = dest + one line + 2
    mov     r5, #328
    mul     r6, r5, r4                  @ 328*(lines-1)
    sub     r1, r1, r6                  @ r1 = src + one line + border

@ odd lines: dest starts 2 bytes off word alignment -> halfword,
@ three words, halfword per 8 pixels
vcloop_40_unaligned:
    ldr     r12, [r1], #4               @ pixels 0-3
    ldr     r7,  [r1], #4               @ pixels 4-7

    and     r4, lr, r12, lsl #1
    ldrh    r4, [r2, r4]                @ pal[pix0]
    and     r5, lr, r12, lsr #7
    ldrh    r5, [r2, r5]                @ pal[pix1]
    strh    r4, [r0], #2                @ leading halfword; must advance r0
    and     r6, lr, r12, lsr #15
    ldrh    r6, [r2, r6]                @ pal[pix2]

    and     r4, lr, r12, lsr #23
    ldrh    r4, [r2, r4]                @ pal[pix3]
    orr     r5, r5, r6, lsl #16         @ pix1 | pix2<<16

    and     r8, lr, r7, lsl #1
    ldrh    r8, [r2, r8]                @ pal[pix4]

    and     r6, lr, r7, lsr #7
    ldrh    r6, [r2, r6]                @ pal[pix5]
    orr     r8, r4, r8, lsl #16         @ pix3 | pix4<<16

    and     r12,lr, r7, lsr #15
    ldrh    r12,[r2, r12]               @ pal[pix6]

    and     r4, lr, r7, lsr #23
    ldrh    r4, [r2, r4]                @ pal[pix7] (was [r2,r6]: indexed
                                        @ by a looked-up color, not the
                                        @ masked index in r4)
    orr     r12,r6, r12,lsl #16         @ pix5 | pix6<<16
    subs    r3, r3, #1<<24              @ one 8-pixel block done
                                        @ (was "subs r3,r3,#1", which
                                        @ trashed the line counter)

    stmia   r0!, {r5,r8,r12}            @ 12 bytes
    strh    r4, [r0], #2                @ trailing halfword; advance r0
    bpl     vcloop_40_unaligned         @ count blocks, like even loop

    add     r1, r1, #336                @ skip a line and 1 col
    add     r0, r0, #320*2+2*2
    add     r3, r3, #(320/8)<<24        @ reload block counter
    sub     r3, r3, #1                  @ one odd line done
    tst     r3, #0xff
    bne     vcloop_40_unaligned

    ldmfd   sp!, {r4-r9,lr}
    bx      lr                          @ bx, not pop-to-pc: interworking-safe
110 | |
111 | |
112 | |