| 1 | /* |
 * (C) Gražvydas "notaz" Ignotas, 2011
| 3 | * |
| 4 | * This work is licensed under the terms of any of these licenses |
| 5 | * (at your option): |
| 6 | * - GNU GPL, version 2 or later. |
| 7 | * - GNU LGPL, version 2.1 or later. |
| 8 | * See the COPYING file in the top-level directory. |
| 9 | */ |
| 10 | |
| 11 | #include "arm_features.h" |
| 12 | |
| 13 | .text |
| 14 | .align 2 |
| 15 | |
@ load_varadr reg, var
@ Load the absolute address of symbol \var into register \reg.
@ On ARMv7-A non-PIC builds a movw/movt immediate pair is used
@ (avoids a literal-pool load); otherwise fall back to the
@ assembler's literal-pool ldr pseudo-instruction.
.macro load_varadr reg var
#if defined(__ARM_ARCH_7A__) && !defined(__PIC__)
    movw \reg, #:lower16:\var
    movt \reg, #:upper16:\var
#else
    ldr  \reg, =\var
#endif
.endm
| 24 | |
| 25 | #ifdef __ARM_NEON__ |
| 26 | |
@ void mix_chan(int start, int count, int lv, int rv)  -- NEON version
@ Mixes 'count' mono samples from ChanBuf[start] into the interleaved
@ stereo accumulator SSumLR (two 32-bit words per sample), adding
@ (sample * lv) >> 14 to the left word and (sample * rv) >> 14 to the
@ right word of each pair.
@ In:    r0 = start, r1 = count, r2 = lv, r3 = rv
@ NOTE(review): assumes count >= 1; appears tuned for count being a
@ multiple of 4 with a 1-3 sample tail handled in mc_finish -- confirm
@ against callers.
.global mix_chan @ (int start, int count, int lv, int rv)
mix_chan:
    vmov.32     d14[0], r2
    vmov.32     d14[1], r3         @ d14 = {lv, rv} multipliers
    mov         r12, r0            @ keep start; r0/r2 become pointers
    load_varadr r0, ChanBuf
    load_varadr r2, SSumLR
    add         r0, r12, lsl #2    @ r0 = &ChanBuf[start]   (4 bytes/sample)
    add         r2, r12, lsl #3    @ r2 = &SSumLR[start*2]  (8 bytes/sample)
0:
    vldmia      r0!, {d0-d1}       @ load 4 input samples
    vldmia      r2, {d2-d5}        @ load 4 stereo accumulator pairs
    vmul.s32    d10, d14, d0[0]    @ d10 = {s0*lv, s0*rv}
    vmul.s32    d11, d14, d0[1]    @ d11 = {s1*lv, s1*rv}
    vmul.s32    d12, d14, d1[0]    @ d12 = {s2*lv, s2*rv}
    vmul.s32    d13, d14, d1[1]    @ d13 = {s3*lv, s3*rv}
    vsra.s32    q1, q5, #14        @ accumulate products >> 14 into pairs 0-1
    vsra.s32    q2, q6, #14        @ accumulate products >> 14 into pairs 2-3
    subs        r1, #4
    blt         mc_finish          @ <4 samples were left: store partial tail
    vstmia      r2!, {d2-d5}       @ store all 4 updated pairs
    bgt         0b                 @ more samples remain
    nop
    bxeq        lr                 @ count hit exactly 0: done

mc_finish:
    @ Here r1 is -3..-1, i.e. 3+r1+4 = 1..3 pairs are still valid.
    vstmia      r2!, {d2}          @ always at least 1 pair left
    cmp         r1, #-2
    vstmiage    r2!, {d3}          @ r1 >= -2: at least 2 pairs left
    cmp         r1, #-1
    vstmiage    r2!, {d4}          @ r1 == -1: 3 pairs left
    bx          lr
| 59 | |
| 60 | |
@ void mix_chan_rvb(int start, int count, int lv, int rv)  -- NEON version
@ Same mixing as mix_chan, but the scaled samples are accumulated into
@ TWO interleaved stereo buffers: SSumLR[start*2] (dry) and the reverb
@ buffer at *sRVBStart + start*2 (wet).  The identical products q5/q6
@ are shift-accumulated into both destinations.
@ In:    r0 = start, r1 = count, r2 = lv, r3 = rv
@ NOTE(review): assumes count >= 1 with a 1-3 sample tail handled in
@ mcr_finish -- confirm against callers.
.global mix_chan_rvb @ (int start, int count, int lv, int rv)
mix_chan_rvb:
    vmov.32     d14[0], r2
    vmov.32     d14[1], r3         @ d14 = {lv, rv} multipliers
    mov         r12, r0            @ keep start across address loads
    load_varadr r0, ChanBuf
    load_varadr r3, sRVBStart
    load_varadr r2, SSumLR
    ldr         r3, [r3]           @ r3 = *sRVBStart (reverb buffer base)
    add         r0, r12, lsl #2    @ r0 = &ChanBuf[start]
    add         r2, r12, lsl #3    @ r2 = &SSumLR[start*2]
    add         r3, r12, lsl #3    @ r3 = &rvb[start*2]
0:
    vldmia      r0!, {d0-d1}       @ 4 input samples
    vldmia      r2, {d2-d5}        @ 4 dry accumulator pairs
    vldmia      r3, {d6-d9}        @ 4 reverb accumulator pairs
    vmul.s32    d10, d14, d0[0]    @ d10 = {s0*lv, s0*rv}
    vmul.s32    d11, d14, d0[1]    @ d11 = {s1*lv, s1*rv}
    vmul.s32    d12, d14, d1[0]    @ d12 = {s2*lv, s2*rv}
    vmul.s32    d13, d14, d1[1]    @ d13 = {s3*lv, s3*rv}
    vsra.s32    q1, q5, #14        @ dry    += products >> 14 (pairs 0-1)
    vsra.s32    q2, q6, #14        @ dry    += products >> 14 (pairs 2-3)
    vsra.s32    q3, q5, #14        @ reverb += products >> 14 (pairs 0-1)
    vsra.s32    q4, q6, #14        @ reverb += products >> 14 (pairs 2-3)
    subs        r1, #4
    blt         mcr_finish         @ <4 samples were left: partial tail
    vstmia      r2!, {d2-d5}
    vstmia      r3!, {d6-d9}
    bgt         0b
    nop
    bxeq        lr                 @ count hit exactly 0: done

mcr_finish:
    @ Here r1 is -3..-1, i.e. 1..3 pairs are still valid in each buffer.
    vstmia      r2!, {d2}
    vstmia      r3!, {d6}          @ always at least 1 pair
    cmp         r1, #-2
    vstmiage    r2!, {d3}
    vstmiage    r3!, {d7}          @ r1 >= -2: at least 2 pairs
    cmp         r1, #-1
    vstmiage    r2!, {d4}
    vstmiage    r3!, {d8}          @ r1 == -1: 3 pairs
    bx          lr
| 103 | |
| 104 | #elif defined(HAVE_ARMV5) |
| 105 | |
@ void mix_chan(int start, int count, int lv, int rv)  -- ARMv5E version
@ Same contract as the NEON mix_chan: adds (sample * lv) >> 14 /
@ (sample * rv) >> 14 to each stereo pair of SSumLR.
@ Trick: smlawb/smlawt compute acc + ((a * b16) >> 16).  Pre-shifting
@ BOTH the packed volumes and each sample left by 1 turns the implicit
@ >>16 into the desired >>14.
@ In:    r0 = start, r1 = count, r2 = lv, r3 = rv
@ NOTE(review): processes 2 samples/iteration with a 1-sample tail in
@ mc_finish; assumes count >= 1 -- confirm against callers.
.global mix_chan @ (int start, int count, int lv, int rv)
mix_chan:
    stmfd       sp!, {r4-r8,lr}
    orr         r3, r2, r3, lsl #16 @ r3 = lv | (rv << 16)
    lsl         r3, #1             @ packed multipliers << 1
    mov         r12, r0            @ keep start across address loads
    load_varadr r0, ChanBuf
    load_varadr r2, SSumLR
    add         r0, r12, lsl #2    @ r0 = &ChanBuf[start]
    add         r2, r12, lsl #3    @ r2 = &SSumLR[start*2]
0:
    ldmia       r0!, {r4,r5}       @ 2 input samples
    ldmia       r2, {r6-r8,lr}     @ 2 stereo accumulator pairs
    lsl         r4, #1             @ adjust for mul (net effect: >> 14)
    lsl         r5, #1
    smlawb      r6, r4, r3, r6     @ L0 += (s0 * lv) >> 14
    smlawt      r7, r4, r3, r7     @ R0 += (s0 * rv) >> 14
    smlawb      r8, r5, r3, r8     @ L1 += (s1 * lv) >> 14
    smlawt      lr, r5, r3, lr     @ R1 += (s1 * rv) >> 14
    subs        r1, #2
    blt         mc_finish          @ only 1 sample was left: partial store
    stmia       r2!, {r6-r8,lr}    @ store both pairs (stm keeps flags)
    bgt         0b
    ldmeqfd     sp!, {r4-r8,pc}    @ count hit exactly 0: done

mc_finish:
    stmia       r2!, {r6,r7}       @ store the single remaining pair
    ldmfd       sp!, {r4-r8,pc}
| 134 | |
| 135 | |
@ void mix_chan_rvb(int start, int count, int lv, int rv)  -- ARMv5E version
@ As the ARMv5 mix_chan, but each scaled sample is accumulated into both
@ SSumLR (dry) and the reverb buffer at *sRVBStart (wet).
@ Processes 1 sample per iteration; uses the same smlaw <<1/<<1 trick
@ to get (sample * vol) >> 14 out of the instruction's implicit >> 16.
@ In:    r0 = start, r1 = count, r2 = lv, r3 = rv
@ NOTE(review): assumes count >= 1 (loop body runs before the test).
.global mix_chan_rvb @ (int start, int count, int lv, int rv)
mix_chan_rvb:
    stmfd       sp!, {r4-r8,lr}
    orr         lr, r2, r3, lsl #16 @ lr = lv | (rv << 16)
    lsl         lr, #1             @ packed multipliers << 1
    load_varadr r3, sRVBStart
    load_varadr r2, SSumLR
    load_varadr r4, ChanBuf
    ldr         r3, [r3]           @ r3 = *sRVBStart (reverb buffer base)
    add         r2, r2, r0, lsl #3 @ r2 = &SSumLR[start*2]
    add         r3, r3, r0, lsl #3 @ r3 = &rvb[start*2]
    add         r0, r4, r0, lsl #2 @ r0 = &ChanBuf[start]
0:
    ldr         r4, [r0], #4       @ next input sample
    ldmia       r2, {r6,r7}        @ dry pair
    ldmia       r3, {r8,r12}       @ reverb pair
    lsl         r4, #1             @ adjust for mul (net effect: >> 14)
    smlawb      r6, r4, lr, r6     @ dryL += (s * lv) >> 14  @ supposedly takes single cycle?
    smlawt      r7, r4, lr, r7     @ dryR += (s * rv) >> 14
    smlawb      r8, r4, lr, r8     @ wetL += (s * lv) >> 14
    smlawt      r12,r4, lr, r12    @ wetR += (s * rv) >> 14
    subs        r1, #1
    stmia       r2!, {r6,r7}       @ stm does not touch flags
    stmia       r3!, {r8,r12}
    bgt         0b
    ldmfd       sp!, {r4-r8,pc}
| 162 | |
| 163 | #endif |
| 164 | |
| 165 | @ vim:filetype=armasm |