@ vim:filetype=armasm

@ test
.global flushcache @ beginning_addr, end_addr, flags

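@ flushcache(start, end, flags): flush the CPU caches for the given range.
@ Arguments arrive in r0-r2 per the ARM calling convention; 0x9f0002 is the
@ ARM Linux cacheflush syscall number (OABI-style swi encoding assumed here).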
flushcache:
    swi     #0x9f0002           @ ARM Linux sys_cacheflush
    mov     pc, lr


.global block_or @ void *src, size_t n, int pat

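@ block_or(src, n, pat): OR the byte pattern 'pat' into every byte of the
@ n-byte block at 'src'. The pattern byte is replicated into all four byte
@ lanes of a word and the block is processed 16 bytes per iteration, so
@ 'src' is assumed to be word-aligned and 'n' a non-zero multiple of 16.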
block_or:
    stmfd   sp!, {r4-r5}
    orr     r2, r2, r2, lsl #8  @ replicate the pattern byte into
    orr     r2, r2, r2, lsl #16 @ all four byte lanes of the word
    mov     r1, r1, lsr #4      @ iterations = n / 16 bytes
block_loop_or:
    ldmia   r0, {r3-r5,r12}
    subs    r1, r1, #1
    orr     r3, r3, r2
    orr     r4, r4, r2
    orr     r5, r5, r2
    orr     r12,r12,r2
    stmia   r0!, {r3-r5,r12}    @ store 16 bytes back, advance r0
    bne     block_loop_or
    ldmfd   sp!, {r4-r5}
    bx      lr


.global block_andor @ void *src, size_t n, int andpat, int orpat

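@ block_andor(src, n, andpat, orpat): for every byte of the n-byte block at
@ 'src', compute (byte AND andpat) OR orpat. Both pattern bytes are widened
@ to full words; as with block_or, 'src' is assumed word-aligned and 'n' a
@ non-zero multiple of 16.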
block_andor:
    stmfd   sp!, {r4-r6}
    orr     r2, r2, r2, lsl #8  @ replicate the AND pattern byte
    orr     r2, r2, r2, lsl #16
    orr     r3, r3, r3, lsl #8  @ replicate the OR pattern byte
    orr     r3, r3, r3, lsl #16
    mov     r1, r1, lsr #4      @ iterations = n / 16 bytes
block_loop_andor:
    ldmia   r0, {r4-r6,r12}
    subs    r1, r1, #1
    and     r4, r4, r2
    orr     r4, r4, r3
    and     r5, r5, r2
    orr     r5, r5, r3
    and     r6, r6, r2
    orr     r6, r6, r3
    and     r12,r12,r2
    orr     r12,r12,r3
    stmia   r0!, {r4-r6,r12}    @ store 16 bytes back, advance r0
    bne     block_loop_andor
    ldmfd   sp!, {r4-r6}
    bx      lr


.global spend_cycles @ c

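@ spend_cycles(c): busy-wait for roughly 'c' CPU cycles (the loop is counted
@ as 4 cycles per iteration, so the delay is approximate).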
spend_cycles:
    mov     r0, r0, lsr #2      @ 4 cycles/iteration
    sub     r0, r0, #2          @ entry/exit/init
.sc_loop:
    subs    r0, r0, #1
    bpl     .sc_loop

    bx      lr



/* buggy and slow, probably because of function call overhead
@ renderer helper, based on bitbank's method
.global draw8pix @ uint8 *P, uint8 *C, uint8 *PALRAM @ dest, src, pal

draw8pix:
    stmfd   sp!, {r4,r5}

    ldrb    r3, [r1]            @ get bit 0 pixels
    mov     r12,#1
    orr     r12,r12,r12,lsl #8
    orr     r12,r12,r12,lsl #16
    ldrb    r1, [r1, #8]        @ get bit 1 pixels
    orr     r3, r3, r3, lsl #9  @ shift them over 1 byte + 1 bit
    orr     r3, r3, r3, lsl #18 @ now 4 pixels take up 4 bytes
    and     r4, r12,r3, lsr #7  @ mask off the upper nibble pixels we want
    and     r5, r12,r3, lsr #3  @ mask off the lower nibble pixels we want
    ldr     r2, [r2]

    orr     r1, r1, r1, lsl #9  @ process the bit 1 pixels
    orr     r1, r1, r1, lsl #18
    and     r3, r12,r1, lsr #7  @ mask off the upper nibble pixels we want
    and     r1, r12,r1, lsr #3  @ mask off the lower nibble
    orr     r4, r4, r3, lsl #1
    orr     r5, r5, r1, lsl #5

    @ can this be avoided?
    mov     r4, r4, lsl #3      @ *8
    mov     r3, r2, ror r4
    strb    r3, [r0], #1
    mov     r4, r4, lsr #8
    mov     r3, r2, ror r4
    strb    r3, [r0], #1
    mov     r4, r4, lsr #8
    mov     r3, r2, ror r4
    strb    r3, [r0], #1
    mov     r4, r4, lsr #8
    mov     r3, r2, ror r4
    strb    r3, [r0], #1

    mov     r5, r5, lsl #3      @ *8
    mov     r3, r2, ror r5
    strb    r3, [r0], #1
    mov     r5, r5, lsr #8
    mov     r3, r2, ror r5
    strb    r3, [r0], #1
    mov     r5, r5, lsr #8
    mov     r3, r2, ror r5
    strb    r3, [r0], #1
    mov     r5, r5, lsr #8
    mov     r3, r2, ror r5
    strb    r3, [r0], #1

    ldmfd   sp!, {r4,r5}
    bx      lr
*/