@ vim:filetype=armasm


.global vidCpy8to16 @ void *dest, void *src, short *pal, int lines|(flags<<16),
                    @ flags=is32col[0], no_even_lines[1], no_odd_lines[2]

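@ Converts one 8bpp frame to 16bpp by looking every pixel up in the 16-bit
@ palette at r2.  Source lines are 328 bytes apart and start with an 8-pixel
@ border that is skipped; the pointer arithmetic below implies a (320+1)*2-byte
@ destination line stride, with 32-column pictures centred in the 320-pixel
@ area.  Even source lines are converted first, then the odd ones in a second
@ pass (which also drops one zero halfword into the pad column before each odd
@ line it writes).
@
@ Roughly equivalent C, as a sketch only - names are illustrative, the line
@ count is assumed to be even, and the pad-column writes are ignored:
@
@   void vidCpy8to16_c(unsigned short *dest, unsigned char *src,
@                      unsigned short *pal, int arg)
@   {
@       int lines = arg & 0xff, flags = arg >> 16;
@       int width = (flags & 1) ? 256 : 320;            /* is32col */
@       for (int y = 0; y < lines; y++) {
@           if ((y & 1) ? (flags & 4) : (flags & 2))    /* no_odd / no_even */
@               continue;
@           unsigned char  *s = src  + y*328 + 8;
@           unsigned short *d = dest + y*321 + ((flags & 1) ? 32 : 0);
@           for (int x = 0; x < width; x++)
@               d[x] = pal[s[x]];
@       }
@   }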
vidCpy8to16:
    stmfd sp!, {r4-r8,lr}

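@ Pack all loop state into r3 and lr so r4-r8 and r12 stay free for pixel data:
@   r3 bits 24-31: 8-pixel block counter for the current line
@   r3 bits 16-23: flags (is32col / no_even_lines / no_odd_lines)
@   r3 bits  8-15: saved copy of the halved line count, for the odd-line pass
@   r3 bits  0-7:  halved line count (each pass converts every other line)
@   lr = 0x1fe:    mask for (pixel<<1), the halfword offset into the palette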
    and r4, r3, #0xff0000
    and r3, r3, #0xff
    tst r4, #0x10000
    mov r3, r3, lsr #1
    orr r3, r3, r3, lsl #8
    orreq r3, r3, #(320/8-1)<<24  @ 40 col mode
    orrne r3, r3, #(256/8-1)<<24  @ 32 col mode
    addne r0, r0, #32*2           @ 32 col: centre the picture
    orr r3, r3, r4
    add r1, r1, #8                @ skip the 8-pixel border column
    mov lr, #0xff
    mov lr, lr, lsl #1

@ no even lines?
    tst r3, #0x20000
    addne r0, r0, #320*2
    addne r1, r1, #328
    bne vcloop_odd

@ even lines first
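@ Aligned loop: converts 8 pixels per pass.  Two source words are loaded, each
@ byte is turned into a palette byte offset (index*2, masked with lr), looked
@ up with ldrh, and the results are packed in pairs into r4,r5,r8,r12 for one
@ 16-byte store.  The block counter in the top byte of r3 goes negative when
@ the line is done.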
vcloop_aligned:
    ldr r12, [r1], #4             @ first 4 source pixels
    ldr r7, [r1], #4              @ next 4

    and r4, lr, r12, lsl #1
    ldrh r4, [r2, r4]
    and r5, lr, r12, lsr #7
    ldrh r5, [r2, r5]
    and r6, lr, r12, lsr #15
    ldrh r6, [r2, r6]
    orr r4, r4, r5, lsl #16

    and r5, lr, r12, lsr #23
    ldrh r5, [r2, r5]
    and r8, lr, r7, lsl #1
    ldrh r8, [r2, r8]
    orr r5, r6, r5, lsl #16

    and r6, lr, r7, lsr #7
    ldrh r6, [r2, r6]
    and r12, lr, r7, lsr #15
    ldrh r12, [r2, r12]
    and r7, lr, r7, lsr #23
    ldrh r7, [r2, r7]
    orr r8, r8, r6, lsl #16

    subs r3, r3, #1<<24           @ one 8-pixel block done
    orr r12, r12, r7, lsl #16

    stmia r0!, {r4,r5,r8,r12}
    bpl vcloop_aligned

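@ End of an even line: step r1 over the rest of this source line, the
@ interleaved odd line and the next line's 8-pixel border; step r0 over the
@ odd output line; reload the block counter and decrement the line count held
@ in the low byte of r3.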
    tst r3, #0x10000
    add r1, r1, #336              @ skip a line and 1 col
    addne r1, r1, #64             @ skip more for 32col mode
    add r0, r0, #(320+2)*2
    addne r0, r0, #64*2
    addeq r3, r3, #(320/8)<<24    @ reload block counter, 40 col
    addne r3, r3, #(256/8)<<24    @ reload block counter, 32 col
    sub r3, r3, #1
    tst r3, #0xff                 @ any even lines left?
    bne vcloop_aligned

@ no odd lines?
    tst r3, #0x40000
    ldmnefd sp!, {r4-r8,pc}

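@ Prepare the odd-line pass: restore the line count from its saved copy in
@ bits 8-15 of r3, then rewind r0 to the second output line and r1 to the
@ second source line (past its border).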
    and r4, r3, #0xff00
    orr r3, r3, r4, lsr #8
    mov r4, r4, lsr #7            @ saved count * 2 = number of lines
    sub r6, r4, #1
    mov r5, #320*2
    add r5, r5, #2                @ 642-byte output line stride
    mul r4, r5, r6
    sub r0, r0, r4
    mov r5, #328                  @ 328-byte source line stride
    mul r4, r5, r6
    sub r1, r1, r4

    sub r0, r0, #2                @ unaligned stores start one pixel early
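@ Odd lines go through the same kind of loop but shifted: r8 carries a pixel
@ from block to block and starts out as zero, and r0 is backed up one pixel so
@ that the first store's low halfword (that zero) lands on the pad column just
@ before the line.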
vcloop_odd:
    mov r8, #0

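@ Unaligned loop: same lookups as the aligned one, but each 16-byte store holds
@ the carried pixel plus the first seven of this block; the eighth stays in r8
@ for the next store (or for the strh that closes the line).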
vcloop_unaligned:
    ldr r12, [r1], #4             @ first 4 source pixels
    ldr r7, [r1], #4              @ next 4

    and r6, lr, r12, lsl #1
    ldrh r6, [r2, r6]
    and r5, lr, r12, lsr #7
    ldrh r5, [r2, r5]
    orr r4, r8, r6, lsl #16       @ carried pixel goes in the low halfword

    and r6, lr, r12, lsr #15
    ldrh r6, [r2, r6]
    and r8, lr, r12, lsr #23
    ldrh r8, [r2, r8]
    orr r5, r5, r6, lsl #16

    and r6, lr, r7, lsl #1
    ldrh r6, [r2, r6]
    and r12, lr, r7, lsr #7
    ldrh r12, [r2, r12]
    orr r6, r8, r6, lsl #16

    and r8, lr, r7, lsr #15
    ldrh r8, [r2, r8]

    subs r3, r3, #1<<24           @ one 8-pixel block done
    and r7, lr, r7, lsr #23
    orr r12, r12, r8, lsl #16

    ldrh r8, [r2, r7]             @ last pixel is carried to the next block

    stmia r0!, {r4,r5,r6,r12}
    bpl vcloop_unaligned

    strh r8, [r0]                 @ flush the last pixel of the line
    mov r8, #0                    @ reset the carry for the next line

    tst r3, #0x10000
    add r1, r1, #336              @ skip a line and 1 col
    addne r1, r1, #64             @ skip more for 32col mode
    add r0, r0, #(320+2)*2
    addne r0, r0, #64*2
    addeq r3, r3, #(320/8)<<24    @ reload block counter, 40 col
    addne r3, r3, #(256/8)<<24    @ reload block counter, 32 col
    sub r3, r3, #1
    tst r3, #0xff                 @ any odd lines left?
    bne vcloop_unaligned

    ldmfd sp!, {r4-r8,lr}
    bx lr
