--- /dev/null
+@@\r
+@@ Copyright (C) 2012 Roman Pauer\r
+@@\r
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of\r
+@@ this software and associated documentation files (the "Software"), to deal in\r
+@@ the Software without restriction, including without limitation the rights to\r
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
+@@ of the Software, and to permit persons to whom the Software is furnished to do\r
+@@ so, subject to the following conditions:\r
+@@\r
+@@ The above copyright notice and this permission notice shall be included in all\r
+@@ copies or substantial portions of the Software.\r
+@@\r
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+@@ SOFTWARE.\r
+@@\r
+\r
+.arm\r
+\r
+.include "neon_eagle2x.Sinc"\r
+.include "neon_normalxx.Sinc"\r
+\r
+.global neon_eagle2x_8_8\r
+.global neon_eagle2x_16_16\r
+.global neon_eagle2x_8_16\r
+\r
+.align 4\r
+neon_eagle2x_8_8:\r
+\r
+@ r0 = const uint8_t *src\r
+@ r1 = uint8_t *dst\r
+@ r2 = unsigned int width (pixels)\r
+@ r3 = unsigned int srcstride (bytes)\r
+@ [sp] = unsigned int dststride (bytes)\r
+@ [sp+4] = unsigned int height\r
+@ lr = return address\r
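+@\r
+@ Usage sketch (the frame geometry is an assumption for illustration):\r
+@ scaling a tightly packed 320x240 8bpp frame:\r
+@\r
+@   neon_eagle2x_8_8(src, dst, 320, 320, 640, 240);\r
+@\r
+@ width and height are in pixels, both strides in bytes; the routine writes\r
+@ a (2*width) x (2*height) pixel image starting at dst.\r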
+\r
+ ldr ip, [sp] @ ip = dststride\r
+ push {r4-r10}\r
+ ldr r9, [sp, #(8*4)] @ r9 = height\r
+ sub r4, r0, r3 @ r4 = src - srcstride\r
+ mov r10, sp @ oldsp = sp\r
+ add r5, r0, r3 @ r5 = src + srcstride\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ add r6, r1, ip @ r6 = dst + dststride\r
+ sub sp, sp, #64 @ sp -= 64\r
+ sub r3, r3, r2 @ r3 = srcstride - width\r
+ vst1.64 {d8-d11}, [sp:256] @ save q4,q5\r
+ add r7, sp, #32 @ r7 = sp + 32\r
+ sub ip, ip, r2 @ ip = dststride - width\r
+ vst1.64 {d12-d15}, [r7:256] @ save q6,q7\r
+ lsl ip, #1 @ ip = 2 * dststride - 2 * width\r
+ mov r7, r2 @ r7 = width\r
+ sub r9, r9, #2 @ r9 = height - 2\r
+\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = width\r
+@ r3 = srcdiff (srcstride - width)\r
+@ r4 = src - srcstride\r
+@ r5 = src + srcstride\r
+@ r6 = dst + dststride\r
+@ r7 = counter\r
+@ r8 = tmpreg\r
+@ r9 = height\r
+@ r10 = oldsp\r
+@ ip = dstdiff (2 * dststride - 2 * width)\r
+\r
+ @ first line\r
+ neon_eagle2x_8_8_line first, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+\r
+ @ middle lines\r
+ 101:\r
+ mov r7, r2\r
+\r
+ neon_eagle2x_8_8_line middle, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ subS r9, r9, #1\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+ bne 101b\r
+\r
+ @ last line\r
+ mov r7, r2\r
+\r
+ neon_eagle2x_8_8_line last, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add ip, sp, #32 @ ip = sp + 32\r
+ vld1.64 {d8-d11}, [sp:256] @ restore q4,q5\r
+ mov sp, r10 @ sp = oldsp\r
+ vld1.64 {d12-d15}, [ip:256] @ restore q6,q7\r
+ pop {r4-r10}\r
+ bx lr\r
+\r
+@ end procedure neon_eagle2x_8_8\r
+\r
+\r
+neon_eagle2x_16_16:\r
+\r
+@ r0 = const uint16_t *src\r
+@ r1 = uint16_t *dst\r
+@ r2 = unsigned int width (pixels)\r
+@ r3 = unsigned int srcstride (bytes)\r
+@ [sp] = unsigned int dststride (bytes)\r
+@ [sp+4] = unsigned int height\r
+@ lr = return address\r
+\r
+ ldr ip, [sp] @ ip = dststride\r
+ push {r4-r10}\r
+ ldr r9, [sp, #(8*4)] @ r9 = height\r
+ sub r4, r0, r3 @ r4 = src - srcstride\r
+ mov r10, sp @ oldsp = sp\r
+ add r5, r0, r3 @ r5 = src + srcstride\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ add r6, r1, ip @ r6 = dst + dststride\r
+ sub sp, sp, #64 @ sp -= 64\r
+ sub r3, r3, r2, lsl #1 @ r3 = srcstride - 2 * width\r
+ vst1.64 {d8-d11}, [sp:256] @ save q4,q5\r
+ add r7, sp, #32 @ r7 = sp + 32\r
+ sub ip, ip, r2, lsl #1 @ ip = dststride - 2 * width\r
+ vst1.64 {d12-d15}, [r7:256] @ save q6,q7\r
+ lsl ip, #1 @ ip = 2 * dststride - 4 * width\r
+ mov r7, r2 @ r7 = width\r
+ sub r9, r9, #2 @ r9 = height - 2\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = width\r
+@ r3 = srcdiff (srcstride - 2 * width)\r
+@ r4 = src - srcstride\r
+@ r5 = src + srcstride\r
+@ r6 = dst + dststride\r
+@ r7 = counter\r
+@ r8 = tmpreg\r
+@ r9 = height\r
+@ r10 = oldsp\r
+@ ip = dstdiff (2 * dststride - 4 * width)\r
+\r
+ @ first line\r
+ neon_eagle2x_16_16_line first, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+\r
+ @ middle lines\r
+ 101:\r
+ mov r7, r2\r
+\r
+ neon_eagle2x_16_16_line middle, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ subS r9, r9, #1\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+ bne 101b\r
+\r
+ @ last line\r
+ mov r7, r2\r
+\r
+ neon_eagle2x_16_16_line last, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add ip, sp, #32 @ ip = sp + 32\r
+ vld1.64 {d8-d11}, [sp:256] @ restore q4,q5\r
+ mov sp, r10 @ sp = oldsp\r
+ vld1.64 {d12-d15}, [ip:256] @ restore q6,q7\r
+ pop {r4-r10}\r
+ bx lr\r
+\r
+@ end procedure neon_eagle2x_16_16\r
+\r
+\r
+neon_eagle2x_8_16:\r
+\r
+@ r0 = const uint8_t *src\r
+@ r1 = uint8_t *dst\r
+@ r2 = const uint32_t *palette\r
+@ r3 = unsigned int width (pixels)\r
+@ [sp] = unsigned int srcstride (bytes)\r
+@ [sp+4] = unsigned int dststride (bytes)\r
+@ [sp+8] = unsigned int height\r
+@ lr = return address\r
+\r
+@ three temporary lines\r
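+@ (each source line is first expanded through the palette into one of three\r
+@ 16bpp temporary lines on the stack - see neon_normalxx.Sinc - and the\r
+@ eagle2x_16_16 line macros are then run over those temporary lines,\r
+@ rotating the three pointers while walking down the image)\r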
+\r
+ ldr ip, [sp] @ ip = srcstride\r
+ push {r4-r11,lr}\r
+ ldr r4, [sp, #(4*10)] @ r4 = dststride\r
+ ldr r5, [sp, #(4*11)] @ r5 = height\r
+ mov r6, sp @ r6 = sp\r
+ sub ip, ip, r3 @ ip = srcstride - width\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub r7, r4, r3, lsl #1 @ r7 = dststride - 2 * width\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ sub r5, r5, #2 @ height -= 2\r
+ mov r10, sp @ tmpline3 = sp\r
+ lsl r7, #1 @ r7 = 2 * dststride - 4 * width\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ mov r11, sp @ tmpline2 = sp\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ mov lr, sp @ tmpline1 = sp\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub r8, sp, #64 @ r8 = sp - 64\r
+ vst1.64 {d8-d11}, [r8:256] @ save q4,q5\r
+ sub r9, sp, #32 @ r9 = sp - 32\r
+ vst1.64 {d12-d15}, [r9:256] @ save q6,q7\r
+ sub sp, sp, #(36 + 64) @ sp -= (36 + 64)\r
+ str r6, [sp] @ oldsp = r6\r
+ str r5, [sp, #4] @ height = r5\r
+ str ip, [sp, #8] @ srcdiff = ip\r
+ str r7, [sp, #12] @ dstdiff = r7\r
+ str r4, [sp, #16] @ dststride = r4\r
+ str lr, [sp, #20] @ tmpline1 = lr\r
+ str r11, [sp, #24] @ tmpline2 = r11\r
+ str r10, [sp, #28] @ tmpline3 = r10\r
+ str r3, [sp, #32] @ width = r3\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = palette\r
+@ r3 = counter\r
+@ r4 = dst2\r
+\r
+@ r11 = bufptr1\r
+@ ip = bufptr2\r
+@ lr = bufptr3\r
+\r
+@ [sp] = oldsp\r
+@ [sp, #4] = height\r
+@ [sp, #8] = srcdiff (srcstride - width)\r
+@ [sp, #12] = dstdiff (2 * dststride - 4 * width)\r
+@ [sp, #16] = dststride\r
+@ [sp, #20] = tmpline1\r
+@ [sp, #24] = tmpline2\r
+@ [sp, #28] = tmpline3\r
+@ [sp, #32] = width\r
+\r
+ @ lr = tmpline1\r
+ @ r3 = counter\r
+\r
+ @ first line\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr lr, [sp, #24] @ bufptr3 = tmpline2\r
+ add r0, r0, r7 @ src += srcdiff\r
+\r
+ @ second line\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr ip, [sp, #20] @ bufptr2 = tmpline1\r
+ ldr lr, [sp, #24] @ bufptr3 = tmpline2\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+\r
+ @ first temporary line\r
+ neon_eagle2x_16_16_line first, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r8, [sp, #12] @ r8 = dstdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r0, r0, r7 @ src += srcdiff\r
+ add r1, r1, r8 @ dst += dstdiff\r
+\r
+ 100:\r
+\r
+ @ line n+1\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r11, [sp, #20] @ bufptr1 = tmpline1\r
+ ldr ip, [sp, #24] @ bufptr2 = tmpline2\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+ ldr r3, [sp, #32] @ counter = width\r
+ str r11, [sp, #28] @ tmpline3 = bufptr1\r
+ str ip, [sp, #20] @ tmpline1 = bufptr2\r
+ str lr, [sp, #24] @ tmpline2 = bufptr3\r
+\r
+ @ temporary line n\r
+ neon_eagle2x_16_16_line middle, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+ ldr r6, [sp, #4] @ r6 = height\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r8, [sp, #12] @ r8 = dstdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ subS r6, r6, #1 @ height--\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r0, r0, r7 @ src += srcdiff\r
+ add r1, r1, r8 @ dst += dstdiff\r
+ str r6, [sp, #4] @ height = r6\r
+ bne 100b\r
+\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r11, [sp, #20] @ bufptr1 = tmpline1\r
+ ldr ip, [sp, #24] @ bufptr2 = tmpline2\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+\r
+ @ last temporary line\r
+ neon_eagle2x_16_16_line last, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+\r
+ add r6, sp, #36 @ r6 = sp + 36\r
+ ldr sp, [sp] @ sp = oldsp\r
+ vld1.64 {d8-d11}, [r6:256] @ restore q4,q5\r
+ add ip, r6, #32 @ ip = r6 + 32\r
+ vld1.64 {d12-d15}, [ip:256] @ restore q6,q7\r
+ pop {r4-r11,lr}\r
+ bx lr\r
+\r
+@ end procedure neon_eagle2x_8_16\r
+\r
--- /dev/null
+@@\r
+@@ Copyright (C) 2012 Roman Pauer\r
+@@\r
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of\r
+@@ this software and associated documentation files (the "Software"), to deal in\r
+@@ the Software without restriction, including without limitation the rights to\r
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
+@@ of the Software, and to permit persons to whom the Software is furnished to do\r
+@@ so, subject to the following conditions:\r
+@@\r
+@@ The above copyright notice and this permission notice shall be included in all\r
+@@ copies or substantial portions of the Software.\r
+@@\r
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+@@ SOFTWARE.\r
+@@\r
+\r
+\r
+@ S T U --\ E1 E2\r
+@ V C W --/ E3 E4\r
+@ X Y Z\r
+\r
+@ q0 = S1sl < S >\r
+@ q1 = S2sl < V >\r
+@ q2 = S3sl < X >\r
+@ q3 = S1sr < U >\r
+@ q4 = S2sr < W >\r
+@ q5 = S3sr < Z >\r
+@ q6 = E3\r
+@ q7 = E4\r
+@ q8 = S1\r
+@ q9 = S2\r
+@ q10 = S3\r
+@ q11 = S1prev < T >\r
+@ q12 = S2prev < C >\r
+@ q13 = S3prev < Y >\r
+@ q14 = E1\r
+@ q15 = E2\r
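+\r
+@ Scalar reference for the Eagle2x rule implemented by the macros below\r
+@ (a sketch in C-like pseudocode, restating the vceq/vand/vbsl comments):\r
+@\r
+@   E1 = (S == T && S == V) ? T : C;\r
+@   E2 = (U == T && U == W) ? T : C;\r
+@   E3 = (X == Y && X == V) ? Y : C;\r
+@   E4 = (Z == Y && Z == W) ? Y : C;\r
+@\r
+@ i.e. an output corner takes the shared value when its diagonal neighbour\r
+@ and the two adjacent edge pixels are all equal, otherwise the centre C.\r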
+\r
+\r
+.macro __neon_eagle2x_8_8_line src1, src2, src3, counter, dst1, dst2, reg1, qT, qY, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.8 {d23[7]}, [\src1] @ S1prev[15] = src[-srcstride]\r
+ .endif\r
+ vld1.8 {d25[7]}, [\src2] @ S2prev[15] = src[0]\r
+ .ifeqs "\qY", "q13"\r
+ vld1.8 {d27[7]}, [\src3] @ S3prev[15] = src[srcstride]\r
+ .endif\r
+ andS \reg1, \counter, #15 @ reg1 = counter & 15\r
+\r
+ .ifnes "\qT", "q11"\r
+ add \src1, \src1, \counter @ src1 += counter\r
+ .endif\r
+ .ifnes "\qY", "q13"\r
+ add \src3, \src3, \counter @ src3 += counter\r
+ .endif\r
+ beq 1f\r
+\r
+ @ first 1-15 pixels - align counter to 16 bytes\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.8 {q8}, [\src1], \reg1 @ S1 = [src - srcstride]; src1 += counter & 15\r
+ .endif\r
+\r
+ vld1.8 {q9}, [\src2], \reg1 @ S2 = [src ]; src2 += counter & 15\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vld1.8 {q10}, [\src3], \reg1 @ S3 = [src + srcstride]; src3 += counter & 15\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < S >\r
+\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < V >\r
+\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < X >\r
+\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q3, \qT, q8, #1 @ S1sr = (S1prev >> 8) | ... < U >\r
+ .endif\r
+\r
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | ... < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q5, \qY, q10, #1 @ S3sr = (S3prev >> 8) | ... < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ vceq.i8 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i8 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i8 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i8 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i8 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i8 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i8 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i8 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ .ifeqs "\qT", "q11"\r
+ sub \reg1, \src1, #1\r
+ .else\r
+ sub \reg1, \src2, #1\r
+ .endif\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ .ifeqs "\qT", "q11"\r
+ vld1.8 {d23[7]}, [\reg1] @ S1prev[15] = src[counter & 15 - 1 - srcstride]\r
+\r
+ sub \reg1, \src2, #1\r
+ .endif\r
+\r
+ vld1.8 {d25[7]}, [\reg1] @ S2prev[15] = src[counter & 15 - 1]\r
+\r
+ .ifeqs "\qY", "q13"\r
+ sub \reg1, \src3, #1\r
+\r
+ vld1.8 {d27[7]}, [\reg1] @ S3prev[15] = src[counter & 15 - 1 + srcstride]\r
+ .endif\r
+\r
+ ubfx \reg1, \counter, #0, #4 @ reg1 = counter & 15\r
+\r
+ lsl \reg1, #1\r
+\r
+ vst2.8 {q14-q15}, [\dst1],\reg1 @ [dst] = E1,E2; dst1 += reg1\r
+\r
+ bic \counter, \counter, #15\r
+\r
+ vst2.8 {q6-q7}, [\dst2], \reg1 @ [dst + dststride] = E3,E4; dst2 += reg1\r
+\r
+ @ counter is aligned to 16 bytes\r
+\r
+ 1:\r
+ .ifeqs "\qT", "q11"\r
+ vld1.8 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 16\r
+ .endif\r
+ vld1.8 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 16\r
+ .ifeqs "\qY", "q13"\r
+ vld1.8 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 16\r
+ .endif\r
+\r
+ @ inner loop (16 pixels per iteration)\r
+ 2:\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < S >\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+\r
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < V >\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < X >\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.8 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 16\r
+ vext.8 q3, \qT, q8, #1 @ S1sr = (S1prev >> 8) | S1[0] < U >\r
+ .endif\r
+\r
+ vld1.8 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 16\r
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | S2[0] < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vld1.8 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 16\r
+ vext.8 q5, \qY, q10, #1 @ S3sr = (S3prev >> 8) | S3[0] < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ sub \counter, \counter, #16 @ counter -= 16\r
+ vceq.i8 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i8 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i8 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i8 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i8 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i8 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i8 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i8 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ vst2.8 {q14-q15}, [\aldst1]! @ [dst] = E1,E2; dst1 += 2*16\r
+\r
+ cmp \counter, #16\r
+\r
+ vst2.8 {q6-q7}, [\aldst2]! @ [dst + dststride] = E3,E4; dst2 += 2*16\r
+ bhi 2b\r
+\r
+ @ last 16 pixels\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < S >\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+\r
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < V >\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < X >\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vshr.u64 d16, d17, #(64-8) @ S1[0] = S1[15] | ...\r
+ .endif\r
+\r
+ vshr.u64 d18, d19, #(64-8) @ S2[0] = S2[15] | ...\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vshr.u64 d20, d21, #(64-8) @ S3[0] = S3[15] | ...\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q3, \qT, q8, #1 @ S1sr = (S1prev >> 8) | S1[0] < U >\r
+ .endif\r
+\r
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | S2[0] < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q5, \qY, q10, #1 @ S3sr = (S3prev >> 8) | S3[0] < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ vceq.i8 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i8 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i8 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i8 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i8 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i8 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i8 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i8 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ vst2.8 {q14-q15}, [\aldst1]! @ [dst] = E1,E2; dst1 += 2*16\r
+\r
+ vst2.8 {q6-q7}, [\aldst2]! @ [dst + dststride] = E3,E4; dst2 += 2*16\r
+\r
+.endm\r
+\r
+.macro _neon_eagle2x_8_8_line_first src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q12, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_eagle2x_8_8_line_middle src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_eagle2x_8_8_line_last src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q12, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro neon_eagle2x_8_8_line part, src1, src2, src3, counter, dst1, dst2, reg1, srcalign16, dstalign32\r
+ .ifeq \srcalign16\r
+\r
+ .ifeq \dstalign32\r
+ _neon_eagle2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1, \dst2\r
+ .else\r
+ _neon_eagle2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .else\r
+\r
+ .ifeq \dstalign32\r
+ _neon_eagle2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2\r
+ .else\r
+ _neon_eagle2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .endif\r
+.endm\r
+\r
+\r
+.macro __neon_eagle2x_16_16_line src1, src2, src3, counter, dst1, dst2, reg1, qT, qY, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.16 {d23[3]}, [\src1] @ S1prev[7] = src[-srcstride]\r
+ .endif\r
+ vld1.16 {d25[3]}, [\src2] @ S2prev[7] = src[0]\r
+ .ifeqs "\qY", "q13"\r
+ vld1.16 {d27[3]}, [\src3] @ S3prev[7] = src[srcstride]\r
+ .endif\r
+ andS \reg1, \counter, #7 @ reg1 = counter & 7\r
+\r
+ .ifnes "\qT", "q11"\r
+ add \src1, \src1, \counter, lsl #1 @ src1 += 2 * counter\r
+ .endif\r
+ .ifnes "\qY", "q13"\r
+ add \src3, \src3, \counter, lsl #1 @ src3 += 2 * counter\r
+ .endif\r
+ beq 1f\r
+\r
+ @ first 1-7 pixels - align counter to 16 bytes\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.16 {q8}, [\src1] @ S1 = [src - srcstride]\r
+ add \src1, \src1, \reg1, lsl #1 @ src1 += 2 * (counter & 7)\r
+ .endif\r
+\r
+ vld1.16 {q9}, [\src2] @ S2 = [src ]\r
+ add \src2, \src2, \reg1, lsl #1 @ src2 += 2 * (counter & 7)\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vld1.16 {q10}, [\src3] @ S3 = [src + srcstride]\r
+ add \src3, \src3, \reg1, lsl #1 @ src3 += 2 * (counter & 7)\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < S >\r
+\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < V >\r
+\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < X >\r
+\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q3, \qT, q8, #2 @ S1sr = (S1prev >> 16) | ... < U >\r
+ .endif\r
+\r
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | ... < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q5, \qY, q10, #2 @ S3sr = (S3prev >> 16) | ... < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ vceq.i16 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i16 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i16 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i16 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i16 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i16 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i16 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i16 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ .ifeqs "\qT", "q11"\r
+ sub \reg1, \src1, #2\r
+ .else\r
+ sub \reg1, \src2, #2\r
+ .endif\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ .ifeqs "\qT", "q11"\r
+ vld1.16 {d23[3]}, [\reg1] @ S1prev[7] = src[2 * (counter & 7) - 2 - srcstride]\r
+\r
+ sub \reg1, \src2, #2\r
+ .endif\r
+\r
+ vld1.16 {d25[3]}, [\reg1] @ S2prev[7] = src[2 * (counter & 7) - 2]\r
+\r
+ .ifeqs "\qY", "q13"\r
+ sub \reg1, \src3, #2\r
+\r
+ vld1.16 {d27[3]}, [\reg1] @ S3prev[7] = src[2 * (counter & 7) - 2 + srcstride]\r
+ .endif\r
+\r
+ ubfx \reg1, \counter, #0, #3 @ reg1 = counter & 7\r
+\r
+ lsl \reg1, #2\r
+\r
+ vst2.16 {q14-q15}, [\dst1], \reg1 @ [dst] = E1,E2; dst1 += reg1\r
+\r
+ bic \counter, \counter, #7\r
+\r
+ vst2.16 {q6-q7}, [\dst2], \reg1 @ [dst + dststride] = E3,E4; dst2 += reg1\r
+\r
+ @ counter is aligned to 16 bytes\r
+\r
+ 1:\r
+ .ifeqs "\qT", "q11"\r
+ vld1.16 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 2*8\r
+ .endif\r
+ vld1.16 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 2*8\r
+ .ifeqs "\qY", "q13"\r
+ vld1.16 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 2*8\r
+ .endif\r
+\r
+ @ inner loop (8 pixels per iteration)\r
+ 2:\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < S >\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+\r
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < V >\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < X >\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vld1.16 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 2*8\r
+ vext.8 q3, \qT, q8, #2 @ S1sr = (S1prev >> 16) | S1[0] < U >\r
+ .endif\r
+\r
+ vld1.16 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 2*8\r
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | S2[0] < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vld1.16 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 2*8\r
+ vext.8 q5, \qY, q10, #2 @ S3sr = (S3prev >> 16) | S3[0] < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ sub \counter, \counter, #8 @ counter -= 8\r
+ vceq.i16 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i16 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i16 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i16 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i16 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i16 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i16 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i16 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ vst2.16 {q14-q15}, [\aldst1]! @ [dst] = E1,E2; dst1 += 2*2*8\r
+\r
+ cmp \counter, #8\r
+\r
+ vst2.16 {q6-q7}, [\aldst2]! @ [dst + dststride] = E3,E4; dst2 += 2*2*8\r
+ bhi 2b\r
+\r
+ @ last 8 pixels\r
+\r
+@ q0 = S1sl < S >\r
+@ q2 = S3sl < X >\r
+@ q7 = tmp2\r
+@ q15 = tmp1\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q0, \qT, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < S >\r
+ vmov \qT, q8 @ S1prev = S1 < T >\r
+ .endif\r
+\r
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < V >\r
+ vmov q12, q9 @ S2prev = S2 < C >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q2, \qY, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < X >\r
+ vmov \qY, q10 @ S3prev = S3 < Y >\r
+ .endif\r
+\r
+ .ifeqs "\qT", "q11"\r
+ vshr.u64 d16, d17, #(64-16) @ S1[0] = S1[7] | ...\r
+ .endif\r
+\r
+ vshr.u64 d18, d19, #(64-16) @ S2[0] = S2[7] | ...\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vshr.u64 d20, d21, #(64-16) @ S3[0] = S3[7] | ...\r
+ .endif\r
+ .ifeqs "\qT", "q11"\r
+ vext.8 q3, \qT, q8, #2 @ S1sr = (S1prev >> 16) | S1[0] < U >\r
+ .endif\r
+\r
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | S2[0] < W >\r
+\r
+ .ifeqs "\qY", "q13"\r
+ vext.8 q5, \qY, q10, #2 @ S3sr = (S3prev >> 16) | S3[0] < Z >\r
+ .else\r
+ vmov q2, q1 @ S3sl = S2sl < X >\r
+\r
+ vmov q5, q4 @ S3sr = S2sr < Z >\r
+ .endif\r
+\r
+ .ifnes "\qT", "q11"\r
+ vmov q0, q1 @ S1sl = S2sl < S >\r
+\r
+ vmov q3, q4 @ S1sr = S2sr < U >\r
+ .endif\r
+\r
+ vceq.i16 q14, q0, \qT @ E1 = < S == T >\r
+\r
+ vceq.i16 q15, q0, q1 @ tmp1 = < S == V >\r
+\r
+ vceq.i16 q6, q2, \qY @ E3 = < X == Y >\r
+\r
+ vceq.i16 q7, q2, q1 @ tmp2 = < X == V >\r
+\r
+ vand q14, q14, q15 @ E1 = < S == T && S == V >\r
+\r
+@ q0 = tmp3\r
+@ q15 = E2\r
+\r
+ vceq.i16 q15, q3, \qT @ E2 = < U == T >\r
+\r
+ vceq.i16 q0, q3, q4 @ tmp3 = < U == W >\r
+\r
+ vand q6, q6, q7 @ E3 = < X == Y && X == V >\r
+\r
+@ q2 = tmp4\r
+@ q7 = E4\r
+ vceq.i16 q7, q5, \qY @ E4 = < Z == Y >\r
+\r
+ vceq.i16 q2, q5, q4 @ tmp4 = < Z == W >\r
+\r
+ vand q15, q15, q0 @ E2 = < U == T && U == W >\r
+\r
+ vbsl q14, \qT, q12 @ E1 = < (S == T && S == V) ? T : C >\r
+\r
+ vbsl q15, \qT, q12 @ E2 = < (U == T && U == W) ? T : C >\r
+\r
+ vand q7, q7, q2 @ E4 = < Z == Y && Z == W >\r
+\r
+ vbsl q6, \qY, q12 @ E3 = < (X == Y && X == V) ? Y : C >\r
+\r
+ vbsl q7, \qY, q12 @ E4 = < (Z == Y && Z == W) ? Y : C >\r
+ vst2.16 {q14-q15}, [\aldst1]! @ [dst] = E1,E2; dst1 += 2*2*8\r
+\r
+ vst2.16 {q6-q7}, [\aldst2]! @ [dst + dststride] = E3,E4; dst2 += 2*2*8\r
+\r
+.endm\r
+\r
+.macro _neon_eagle2x_16_16_line_first src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q12, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_eagle2x_16_16_line_middle src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_eagle2x_16_16_line_last src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_eagle2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q12, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro neon_eagle2x_16_16_line part, src1, src2, src3, counter, dst1, dst2, reg1, srcalign16, dstalign32\r
+ .ifeq \srcalign16\r
+\r
+ .ifeq \dstalign32\r
+ _neon_eagle2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1, \dst2\r
+ .else\r
+ _neon_eagle2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .else\r
+\r
+ .ifeq \dstalign32\r
+ _neon_eagle2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2\r
+ .else\r
+ _neon_eagle2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .endif\r
+.endm\r
+\r
--- /dev/null
+/**
+ *
+ * Copyright (C) 2012 Roman Pauer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#if !defined(_NEON_EAGLE2X_H_INCLUDED_)
+#define _NEON_EAGLE2X_H_INCLUDED_
+
+#include <inttypes.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void neon_eagle2x_8_8(const uint8_t *src, uint8_t *dst, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
+extern void neon_eagle2x_16_16(const uint16_t *src, uint16_t *dst, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
+
+extern void neon_eagle2x_8_16(const uint8_t *src, uint16_t *dst, const uint32_t *palette, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
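+
+/*
+ * Usage sketch (illustrative only: the frame geometry and palette layout are
+ * assumptions for this example). Width and height are in pixels and both
+ * strides in bytes, matching the assembly sources; for the 8->16 bit variant
+ * only the low 16 bits of each 32-bit palette entry are used.
+ *
+ *   static uint8_t  src[240][320];
+ *   static uint16_t dst[480][640];
+ *   static uint32_t pal[256];
+ *
+ *   neon_eagle2x_8_16(&src[0][0], &dst[0][0], pal,
+ *                     320, 320, 640 * 2, 240);
+ */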
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _NEON_EAGLE2X_H_INCLUDED_ */
--- /dev/null
+@@\r
+@@ Copyright (C) 2012 Roman Pauer\r
+@@\r
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of\r
+@@ this software and associated documentation files (the "Software"), to deal in\r
+@@ the Software without restriction, including without limitation the rights to\r
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
+@@ of the Software, and to permit persons to whom the Software is furnished to do\r
+@@ so, subject to the following conditions:\r
+@@\r
+@@ The above copyright notice and this permission notice shall be included in all\r
+@@ copies or substantial portions of the Software.\r
+@@\r
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+@@ SOFTWARE.\r
+@@\r
+\r
+\r
+\r
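+@ These macros expand 8bpp palette-indexed pixels to 16bpp, replicating each\r
+@ source pixel 1x1 (normal1x), 2x2 (normal2x), 3x3 (normal3x) or 4x4\r
+@ (normal4x) times in the destination. A scalar sketch of the 1x case\r
+@ (C-like pseudocode; palette entries are 32-bit words whose low 16 bits\r
+@ hold the colour):\r
+@\r
+@   for (i = 0; i < width; i++)\r
+@       dst[i] = (uint16_t)pal[src[i]];\r
+\r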
+.macro _neon_normalxx_8_16_line_middle src, dst, pal, counter, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9, dststride, dA, dB\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+\r
+ ldr \reg2, [\src, #4] @ reg2 = src[4-7]\r
+\r
+ ldr \reg3, [\src, #8] @ reg3 = src[8-11]\r
+\r
+ ldr \reg4, [\src, #12] @ reg4 = src[12-15]\r
+ ubfx \reg5, \reg1, #0, #8 @ reg5 = src[0]\r
+\r
+ ldr \reg5, [\pal, \reg5, lsl #2] @ reg5 = pal[src[0]]\r
+ ubfx \reg6, \reg1, #8, #8 @ reg6 = src[1]\r
+\r
+ ldr \reg6, [\pal, \reg6, lsl #2] @ reg6 = pal[src[1]]\r
+ ubfx \reg7, \reg1, #16, #8 @ reg7 = src[2]\r
+\r
+ ldr \reg7, [\pal, \reg7, lsl #2] @ reg7 = pal[src[2]]\r
+ lsr \reg1, \reg1, #24 @ reg1 = src[3]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[3]]\r
+ ubfx \reg8, \reg2, #0, #8 @ reg8 = src[4]\r
+\r
+ ldr \reg8, [\pal, \reg8, lsl #2] @ reg8 = pal[src[4]]\r
+ ubfx \reg9, \reg2, #8, #8 @ reg9 = src[5]\r
+\r
+ ldr \reg9, [\pal, \reg9, lsl #2] @ reg9 = pal[src[5]]\r
+ bfi \reg5, \reg6, #16, #16 @ reg5 = pal[src[0]] | pal[src[1]] << 16\r
+\r
+ bfi \reg7, \reg1, #16, #16 @ reg7 = pal[src[2]] | pal[src[3]] << 16\r
+ ubfx \reg6, \reg2, #16, #8 @ reg6 = src[6]\r
+\r
+ vmov d16, \reg5, \reg7 @ d16 = pal[src[0-3]]\r
+ lsr \reg2, \reg2, #24 @ reg2 = src[7]\r
+\r
+ ldr \reg6, [\pal, \reg6, lsl #2] @ reg6 = pal[src[6]]\r
+ bfi \reg8, \reg9, #16, #16 @ reg8 = pal[src[4]] | pal[src[5]] << 16\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[7]]\r
+ ubfx \reg1, \reg3, #0, #8 @ reg1 = src[8]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[8]]\r
+ ubfx \reg5, \reg3, #8, #8 @ reg5 = src[9]\r
+\r
+ ldr \reg5, [\pal, \reg5, lsl #2] @ reg5 = pal[src[9]]\r
+ ubfx \reg7, \reg3, #16, #8 @ reg7 = src[10]\r
+\r
+ ldr \reg7, [\pal, \reg7, lsl #2] @ reg7 = pal[src[10]]\r
+ bfi \reg6, \reg2, #16, #16 @ reg6 = pal[src[6]] | pal[src[7]] << 16\r
+\r
+ vmov d17, \reg8, \reg6 @ d17 = pal[src[4-7]]\r
+ lsr \reg3, \reg3, #24 @ reg3 = src[11]\r
+\r
+ ldr \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[11]]\r
+ ubfx \reg2, \reg4, #0, #8 @ reg2 = src[12]\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[12]]\r
+ ubfx \reg6, \reg4, #8, #8 @ reg6 = src[13]\r
+\r
+ ldr \reg6, [\pal, \reg6, lsl #2] @ reg6 = pal[src[13]]\r
+ ubfx \reg8, \reg4, #16, #8 @ reg8 = src[14]\r
+\r
+ ldr \reg8, [\pal, \reg8, lsl #2] @ reg8 = pal[src[14]]\r
+ lsr \reg4, \reg4, #24 @ reg4 = src[15]\r
+\r
+ ldr \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[src[15]]\r
+ bfi \reg1, \reg5, #16, #16 @ reg1 = pal[src[8]] | pal[src[9]] << 16\r
+\r
+ bfi \reg7, \reg3, #16, #16 @ reg7 = pal[src[10]] | pal[src[11]] << 16\r
+ bfi \reg2, \reg6, #16, #16 @ reg2 = pal[src[12]] | pal[src[13]] << 16\r
+\r
+ vmov \dA, \reg1, \reg7 @ dA = pal[src[8-11]]\r
+ sub \counter, \counter, #16 @ counter -= 16\r
+\r
+ bfi \reg8, \reg4, #16, #16 @ reg8 = pal[src[14]] | pal[src[15]] << 16\r
+ add \src, \src, #16 @ src += 16\r
+\r
+ vmov \dB, \reg2, \reg8 @ dB = pal[src[12-15]]\r
+ cmp \counter, #16\r
+.endm\r
+\r
+.macro neon_normal1x_8_16_line src, dst, pal, counter, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9\r
+ @ align src to 4 bytes\r
+ andS \reg5, \src, #3 @ reg5 = src & 3\r
+ beq 10f\r
+\r
+ @ first 1-3 pixels\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+ rsb \reg5, \reg5, #4 @ reg5 = 4 - (src & 3)\r
+\r
+ add \src, \src, \reg5 @ src += reg5\r
+ sub \counter, \counter, \reg5 @ counter -= reg5\r
+\r
+ subS \reg5, \reg5, #1 @ reg5--\r
+\r
+ ubfx \reg2, \reg1, #0, #8 @ reg2 = src[0]\r
+ ubfxne \reg3, \reg1, #8, #8 @ reg3 = src[1]\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[reg2]\r
+\r
+ ldrne \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[reg3]\r
+\r
+ strh \reg2, [\dst] @ dst[0] = reg2\r
+\r
+ strneh \reg3, [\dst, #2]! @ dst[1] = reg3; dst++\r
+ subneS \reg5, \reg5, #1 @ reg5--\r
+\r
+ ubfxne \reg4, \reg1, #16, #8 @ reg4 = src[2]\r
+ add \dst, \dst, #2 @ dst++\r
+\r
+ ldrne \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[reg4]\r
+\r
+ strneh \reg4, [\dst], #2 @ dst[2] = reg4; dst++\r
+\r
+ @ middle pixels (16 per iteration)\r
+ 10:\r
+ _neon_normalxx_8_16_line_middle \src, \dst, \pal, \counter, \reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8, \reg9, , d18, d19\r
+\r
+ vst1.16 {d16-d19}, [\dst]! @ dst[0-15] = d16-d19; dst += 2*16\r
+ bhs 10b\r
+\r
+ @ last 0-15 bytes\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ cmp \counter, #4\r
+ blo 30f\r
+\r
+    @ 4-12 pixels (4 per iteration)\r
+ 20:\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+ sub \counter, \counter, #4 @ counter -= 4\r
+\r
+ add \src, \src, #4 @ src += 4\r
+ add \dst, \dst, #(2*4) @ dst += 4\r
+\r
+ ubfx \reg2, \reg1, #0, #8 @ reg2 = src[0]\r
+ cmp \counter, #4\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[0]]\r
+ ubfx \reg3, \reg1, #8, #8 @ reg3 = src[1]\r
+\r
+ ldr \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[1]]\r
+ ubfx \reg4, \reg1, #16, #8 @ reg4 = src[2]\r
+\r
+ ldr \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[src[2]]\r
+ lsr \reg1, \reg1, #24 @ reg1 = src[3]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[3]]\r
+\r
+ strh \reg2, [\dst, #-8] @ dst[0] = reg2\r
+\r
+ strh \reg3, [\dst, #-6] @ dst[1] = reg3\r
+\r
+ strh \reg4, [\dst, #-4] @ dst[2] = reg4\r
+\r
+ strh \reg1, [\dst, #-2] @ dst[3] = reg1\r
+ bhs 20b\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ @ last 1-3 pixels\r
+ 30:\r
+ ldrb \reg1, [\src] @ reg1 = src[0]\r
+ subS \counter, \counter, #1 @ counter--\r
+\r
+ ldrneb \reg2, [\src, #1]! @ reg2 = src[1]; src++\r
+\r
+ add \src, \src, #1 @ src++\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+\r
+ ldrne \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[1]]\r
+\r
+ strh \reg1, [\dst] @ dst[0] = reg1\r
+\r
+ strneh \reg2, [\dst, #2]! @ dst[1] = reg2; dst++\r
+ subneS \counter, \counter, #1 @ counter--\r
+\r
+ ldrneb \reg3, [\src], #1 @ reg3 = src[2]; src++\r
+ add \dst, \dst, #2 @ dst++\r
+\r
+ ldrne \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[2]]\r
+\r
+ strneh \reg3, [\dst], #2 @ dst[2] = reg3; dst++\r
+\r
+ 40:\r
+.endm\r
+\r
+.macro neon_normal2x_8_16_line src, dst, pal, counter, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9, dststride\r
+ @ align src to 4 bytes\r
+ andS \reg5, \src, #3 @ reg5 = src & 3\r
+ beq 10f\r
+\r
+ @ first 1-3 pixels\r
+ rsb \reg5, \reg5, #4 @ reg5 = 4 - (src & 3)\r
+ 1:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \dst, \dst, #4 @ dst += 2*2\r
+ sub \counter, \counter, #1 @ counter--\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ subS \reg5, \reg5, #1 @ reg5--\r
+\r
+ strh \reg1, [\dst, #-4] @ dst[0] = reg1\r
+\r
+ strh \reg1, [\dst, #-2] @ dst[1] = reg1\r
+\r
+ strh \reg1, [\reg2] @ dst1[0] = reg1\r
+\r
+ strh \reg1, [\reg2, #2] @ dst1[1] = reg1\r
+ bne 1b\r
+\r
+ @ middle pixels (16 per iteration)\r
+ 10:\r
+ _neon_normalxx_8_16_line_middle \src, \dst, \pal, \counter, \reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8, \reg9, \dststride, d20, d21\r
+\r
+ vmov q9, q8\r
+ add \reg1, \dst, \dststride @ reg1 = dst + dststride\r
+\r
+ vmov q11, q10\r
+ vst2.16 {q8,q9}, [\dst]! @ dst[0-7] = q8-q9; dst += 2*2*8\r
+\r
+ vst2.16 {q10,q11}, [\dst]! @ dst[8-15] = q10-q11; dst += 2*2*8\r
+\r
+ vst2.16 {q8,q9}, [\reg1]! @ dst1[0-7] = q8-q9; dst1 += 2*2*8\r
+\r
+ vst2.16 {q10,q11}, [\reg1]! @ dst1[8-15] = q10-q11; dst1 += 2*2*8\r
+ bhs 10b\r
+\r
+ @ last 0-15 bytes\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ cmp \counter, #4\r
+ blo 30f\r
+\r
+    @ 4-12 pixels (4 per iteration)\r
+ 20:\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+ sub \counter, \counter, #4 @ counter -= 4\r
+\r
+ add \src, \src, #4 @ src += 4\r
+\r
+ ubfx \reg2, \reg1, #0, #8 @ reg2 = src[0]\r
+ cmp \counter, #4\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[0]]\r
+ ubfx \reg3, \reg1, #8, #8 @ reg3 = src[1]\r
+\r
+ ldr \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[1]]\r
+ ubfx \reg4, \reg1, #16, #8 @ reg4 = src[2]\r
+\r
+ ldr \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[src[2]]\r
+ lsr \reg1, \reg1, #24 @ reg1 = src[3]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[3]]\r
+\r
+ add \reg5, \dst, \dststride\r
+ bfi \reg2, \reg3, #16, #16 @ reg2 = reg2 | reg3 << 16\r
+\r
+ vmov.32 d16[0], \reg2\r
+\r
+ bfi \reg4, \reg1, #16, #16 @ reg4 = reg4 | reg1 << 16\r
+\r
+ vmov.32 d16[1], \reg4\r
+\r
+ vmov d17, d16\r
+\r
+ vst2.16 {d16,d17}, [\dst]! @ dst[0-7] = d16-d17; dst += 2*2*4\r
+\r
+ vst2.16 {d16,d17}, [\reg5] @ dst1[0-7] = d16-d17\r
+ bhs 20b\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ @ last 1-3 pixels\r
+ 30:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \dst, \dst, #4 @ dst += 2*2\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ subS \counter, \counter, #1 @ counter--\r
+\r
+ strh \reg1, [\dst, #-4] @ dst[0] = reg1\r
+\r
+ strh \reg1, [\dst, #-2] @ dst[1] = reg1\r
+\r
+ strh \reg1, [\reg2] @ dst1[0] = reg1\r
+\r
+ strh \reg1, [\reg2, #2] @ dst1[1] = reg1\r
+ bne 30b\r
+\r
+ 40:\r
+.endm\r
+\r
+.macro neon_normal3x_8_16_line src, dst, pal, counter, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9, dststride\r
+ @ align src to 4 bytes\r
+ andS \reg5, \src, #3 @ reg5 = src & 3\r
+ beq 10f\r
+\r
+ @ first 1-3 pixels\r
+ rsb \reg5, \reg5, #4 @ reg5 = 4 - (src & 3)\r
+ 1:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \reg3, \reg2, \dststride\r
+ add \dst, \dst, #6 @ dst += 3*2\r
+\r
+ sub \counter, \counter, #1 @ counter--\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ subS \reg5, \reg5, #1 @ reg5--\r
+\r
+ strh \reg1, [\dst, #-6] @ dst[0] = reg1\r
+\r
+ strh \reg1, [\dst, #-4] @ dst[1] = reg1\r
+\r
+ strh \reg1, [\dst, #-2] @ dst[2] = reg1\r
+ bfi \reg1, \reg1, #16, #16 @ reg1 = reg1 | reg1 << 16\r
+\r
+ strh \reg1, [\reg2] @ dst1[0] = reg1\r
+\r
+ str \reg1, [\reg2, #2] @ dst1[1-2] = reg1\r
+\r
+ strh \reg1, [\reg3] @ dst2[0] = reg1\r
+\r
+ str \reg1, [\reg3, #2] @ dst2[1-2] = reg1\r
+ bne 1b\r
+\r
+ @ middle pixels (16 per iteration)\r
+ 10:\r
+ _neon_normalxx_8_16_line_middle \src, \dst, \pal, \counter, \reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8, \reg9, \dststride, d22, d23\r
+\r
+ vmov q9, q8\r
+ add \reg1, \dst, \dststride @ reg1 = dst + dststride\r
+\r
+ vmov q10, q8\r
+    add \reg2, \dst, \dststride, lsl #1    @ reg2 = dst + 2 * dststride\r
+\r
+ vmov q12, q11\r
+ vst3.16 {d16,d18,d20}, [\dst]! @ dst[0-3] = q8-q10[0]; dst += 3*2*4\r
+\r
+ vmov q13, q11\r
+ vst3.16 {d17,d19,d21}, [\dst]! @ dst[4-7] = q8-q10[1]; dst += 3*2*4\r
+\r
+ vst3.16 {d22,d24,d26}, [\dst]! @ dst[8-11] = q11-q13[0]; dst += 3*2*4\r
+\r
+ vst3.16 {d23,d25,d27}, [\dst]! @ dst[12-15] = q11-q13[1]; dst += 3*2*4\r
+\r
+ vst3.16 {d16,d18,d20}, [\reg1]! @ dst1[0-3] = q8-q10[0]; dst1 += 3*2*4\r
+\r
+ vst3.16 {d17,d19,d21}, [\reg1]! @ dst1[4-7] = q8-q10[1]; dst1 += 3*2*4\r
+\r
+ vst3.16 {d22,d24,d26}, [\reg1]! @ dst1[8-11] = q11-q13[0]; dst1 += 3*2*4\r
+\r
+ vst3.16 {d23,d25,d27}, [\reg1]! @ dst1[12-15] = q11-q13[1]; dst1 += 3*2*4\r
+\r
+ vst3.16 {d16,d18,d20}, [\reg2]! @ dst2[0-3] = q8-q10[0]; dst2 += 3*2*4\r
+\r
+ vst3.16 {d17,d19,d21}, [\reg2]! @ dst2[4-7] = q8-q10[1]; dst2 += 3*2*4\r
+\r
+ vst3.16 {d22,d24,d26}, [\reg2]! @ dst2[8-11] = q11-q13[0]; dst2 += 3*2*4\r
+\r
+ vst3.16 {d23,d25,d27}, [\reg2]! @ dst2[12-15] = q11-q13[1]; dst2 += 3*2*4\r
+ bhs 10b\r
+\r
+ @ last 0-15 bytes\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ cmp \counter, #4\r
+ blo 30f\r
+\r
+    @ 4-12 pixels (4 per iteration)\r
+ 20:\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+ sub \counter, \counter, #4 @ counter -= 4\r
+\r
+ add \src, \src, #4 @ src += 4\r
+\r
+ ubfx \reg2, \reg1, #0, #8 @ reg2 = src[0]\r
+ cmp \counter, #4\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[0]]\r
+ ubfx \reg3, \reg1, #8, #8 @ reg3 = src[1]\r
+\r
+ ldr \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[1]]\r
+ ubfx \reg4, \reg1, #16, #8 @ reg4 = src[2]\r
+\r
+ ldr \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[src[2]]\r
+ lsr \reg1, \reg1, #24 @ reg1 = src[3]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[3]]\r
+\r
+ add \reg5, \dst, \dststride\r
+ bfi \reg2, \reg3, #16, #16 @ reg2 = reg2 | reg3 << 16\r
+\r
+ vmov.32 d16[0], \reg2\r
+ add \reg6, \reg5, \dststride\r
+\r
+ bfi \reg4, \reg1, #16, #16 @ reg4 = reg4 | reg1 << 16\r
+\r
+ vmov.32 d16[1], \reg4\r
+\r
+ vmov d17, d16\r
+\r
+ vmov d18, d16\r
+\r
+ vst3.16 {d16,d17,d18}, [\dst]! @ dst[0-11] = d16-d18; dst += 3*2*4\r
+\r
+ vst3.16 {d16,d17,d18}, [\reg5] @ dst1[0-11] = d16-d18\r
+\r
+ vst3.16 {d16,d17,d18}, [\reg6] @ dst2[0-11] = d16-d18\r
+ bhs 20b\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ @ last 1-3 pixels\r
+ 30:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \reg3, \reg2, \dststride\r
+ add \dst, \dst, #6 @ dst += 3*2\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ subS \counter, \counter, #1 @ counter--\r
+\r
+ strh \reg1, [\dst, #-6] @ dst[0] = reg1\r
+\r
+ strh \reg1, [\dst, #-4] @ dst[1] = reg1\r
+\r
+ strh \reg1, [\dst, #-2] @ dst[2] = reg1\r
+ bfi \reg1, \reg1, #16, #16 @ reg1 = reg1 | reg1 << 16\r
+\r
+ strh \reg1, [\reg2] @ dst1[0] = reg1\r
+\r
+ str \reg1, [\reg2, #2] @ dst1[1-2] = reg1\r
+\r
+ strh \reg1, [\reg3] @ dst2[0] = reg1\r
+\r
+ str \reg1, [\reg3, #2] @ dst2[1-2] = reg1\r
+ bne 30b\r
+\r
+ 40:\r
+.endm\r
+\r
+.macro neon_normal4x_8_16_line src, dst, pal, counter, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9, dststride\r
+ @ align src to 4 bytes\r
+ andS \reg5, \src, #3 @ reg5 = src & 3\r
+ beq 10f\r
+\r
+ @ first 1-3 pixels\r
+ rsb \reg5, \reg5, #4 @ reg5 = 4 - (src & 3)\r
+ 1:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \reg3, \reg2, \dststride\r
+ add \dst, \dst, #8 @ dst += 4*2\r
+\r
+ sub \counter, \counter, #1 @ counter--\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ add \reg4, \reg3, \dststride\r
+\r
+ strh \reg1, [\dst, #-8] @ dst[0] = reg1\r
+ subS \reg5, \reg5, #1 @ reg5--\r
+\r
+ strh \reg1, [\dst, #-6] @ dst[1] = reg1\r
+\r
+ bfi \reg1, \reg1, #16, #16 @ reg1 = reg1 | reg1 << 16\r
+ str \reg1, [\dst, #-4] @ dst[2-3] = reg1\r
+\r
+ str \reg1, [\reg2] @ dst1[0-1] = reg1\r
+\r
+ str \reg1, [\reg2, #4] @ dst1[2-3] = reg1\r
+\r
+ str \reg1, [\reg3] @ dst2[0-1] = reg1\r
+\r
+ str \reg1, [\reg3, #4] @ dst2[2-3] = reg1\r
+\r
+ str \reg1, [\reg4] @ dst3[0-1] = reg1\r
+\r
+ str \reg1, [\reg4, #4] @ dst3[2-3] = reg1\r
+ bne 1b\r
+\r
+ @ middle pixels (16 per iteration)\r
+ 10:\r
+ _neon_normalxx_8_16_line_middle \src, \dst, \pal, \counter, \reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8, \reg9, \dststride, d24, d25\r
+\r
+ vmov q9, q8\r
+ add \reg1, \dst, \dststride @ reg1 = dst + dststride\r
+\r
+ vmov q10, q8\r
+ add \reg2, \dst, \dststride, lsl #1 @ reg2 = dst + 2 * dststride\r
+\r
+ vmov q11, q8\r
+ add \reg3, \reg1, \dststride,lsl #1 @ reg3 = dst + 3 * dststride\r
+\r
+ vmov q13, q12\r
+ vst4.16 {d16,d18,d20,d22}, [\dst]! @ dst[0-3] = q8-q11[0]; dst += 4*2*4\r
+\r
+ vmov q14, q12\r
+\r
+ vmov q15, q12\r
+ vst4.16 {d17,d19,d21,d23}, [\dst]! @ dst[4-7] = q8-q11[1]; dst += 4*2*4\r
+\r
+ vst4.16 {d24,d26,d28,d30}, [\dst]! @ dst[8-11] = q12-q15[0]; dst += 4*2*4\r
+\r
+ vst4.16 {d25,d27,d29,d31}, [\dst]! @ dst[12-15] = q12-q15[1]; dst += 4*2*4\r
+\r
+ vst4.16 {d16,d18,d20,d22}, [\reg1]! @ dst1[0-3] = q8-q11[0]; dst1 += 4*2*4\r
+\r
+ vst4.16 {d17,d19,d21,d23}, [\reg1]! @ dst1[4-7] = q8-q11[1]; dst1 += 4*2*4\r
+\r
+ vst4.16 {d24,d26,d28,d30}, [\reg1]! @ dst1[8-11] = q12-q15[0]; dst1 += 4*2*4\r
+\r
+ vst4.16 {d25,d27,d29,d31}, [\reg1]! @ dst1[12-15] = q12-q15[1]; dst1 += 4*2*4\r
+\r
+ vst4.16 {d16,d18,d20,d22}, [\reg2]! @ dst2[0-3] = q8-q11[0]; dst2 += 4*2*4\r
+\r
+ vst4.16 {d17,d19,d21,d23}, [\reg2]! @ dst2[4-7] = q8-q11[1]; dst2 += 4*2*4\r
+\r
+ vst4.16 {d24,d26,d28,d30}, [\reg2]! @ dst2[8-11] = q12-q15[0]; dst2 += 4*2*4\r
+\r
+ vst4.16 {d25,d27,d29,d31}, [\reg2]! @ dst2[12-15] = q12-q15[1]; dst2 += 4*2*4\r
+\r
+ vst4.16 {d16,d18,d20,d22}, [\reg3]! @ dst3[0-3] = q8-q11[0]; dst3 += 4*2*4\r
+\r
+ vst4.16 {d17,d19,d21,d23}, [\reg3]! @ dst3[4-7] = q8-q11[1]; dst3 += 4*2*4\r
+\r
+ vst4.16 {d24,d26,d28,d30}, [\reg3]! @ dst3[8-11] = q12-q15[0]; dst3 += 4*2*4\r
+\r
+ vst4.16 {d25,d27,d29,d31}, [\reg3]! @ dst3[12-15] = q12-q15[1]; dst3 += 4*2*4\r
+ bhs 10b\r
+\r
+ @ last 0-15 bytes\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ cmp \counter, #4\r
+ blo 30f\r
+\r
+    @ 4-12 pixels (4 per iteration)\r
+ 20:\r
+ ldr \reg1, [\src] @ reg1 = src[0-3]\r
+ sub \counter, \counter, #4 @ counter -= 4\r
+\r
+ add \src, \src, #4 @ src += 4\r
+\r
+ ubfx \reg2, \reg1, #0, #8 @ reg2 = src[0]\r
+ cmp \counter, #4\r
+\r
+ ldr \reg2, [\pal, \reg2, lsl #2] @ reg2 = pal[src[0]]\r
+ ubfx \reg3, \reg1, #8, #8 @ reg3 = src[1]\r
+\r
+ ldr \reg3, [\pal, \reg3, lsl #2] @ reg3 = pal[src[1]]\r
+ ubfx \reg4, \reg1, #16, #8 @ reg4 = src[2]\r
+\r
+ ldr \reg4, [\pal, \reg4, lsl #2] @ reg4 = pal[src[2]]\r
+ lsr \reg1, \reg1, #24 @ reg1 = src[3]\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[3]]\r
+\r
+ add \reg5, \dst, \dststride\r
+ bfi \reg2, \reg3, #16, #16 @ reg2 = reg2 | reg3 << 16\r
+\r
+ vmov.32 d16[0], \reg2\r
+ add \reg6, \reg5, \dststride\r
+\r
+ bfi \reg4, \reg1, #16, #16 @ reg4 = reg4 | reg1 << 16\r
+ add \reg7, \reg6, \dststride\r
+\r
+ vmov.32 d16[1], \reg4\r
+\r
+ vmov d17, d16\r
+\r
+ vmov d18, d16\r
+\r
+ vmov d19, d16\r
+\r
+ vst4.16 {d16,d17,d18,d19}, [\dst]! @ dst[0-15] = d16-d19; dst += 4*2*4\r
+\r
+ vst4.16 {d16,d17,d18,d19}, [\reg5] @ dst1[0-15] = d16-d19\r
+\r
+ vst4.16 {d16,d17,d18,d19}, [\reg6] @ dst2[0-15] = d16-d19\r
+\r
+ vst4.16 {d16,d17,d18,d19}, [\reg7] @ dst3[0-15] = d16-d19\r
+ bhs 20b\r
+\r
+ cmp \counter, #0\r
+ beq 40f\r
+\r
+ @ last 1-3 pixels\r
+ 30:\r
+ ldrb \reg1, [\src], #1 @ reg1 = src[0]; src++\r
+ add \reg2, \dst, \dststride\r
+\r
+ add \reg3, \reg2, \dststride\r
+ add \dst, \dst, #8 @ dst += 4*2\r
+\r
+ ldr \reg1, [\pal, \reg1, lsl #2] @ reg1 = pal[src[0]]\r
+ add \reg4, \reg3, \dststride\r
+\r
+ strh \reg1, [\dst, #-8] @ dst[0] = reg1\r
+ subS \counter, \counter, #1 @ counter--\r
+\r
+ strh \reg1, [\dst, #-6] @ dst[1] = reg1\r
+\r
+ bfi \reg1, \reg1, #16, #16 @ reg1 = reg1 | reg1 << 16\r
+ str \reg1, [\dst, #-4] @ dst[2-3] = reg1\r
+\r
+ str \reg1, [\reg2] @ dst1[0-1] = reg1\r
+\r
+ str \reg1, [\reg2, #4] @ dst1[2-3] = reg1\r
+\r
+ str \reg1, [\reg3] @ dst2[0-1] = reg1\r
+\r
+ str \reg1, [\reg3, #4] @ dst2[2-3] = reg1\r
+\r
+ str \reg1, [\reg4] @ dst3[0-1] = reg1\r
+\r
+ str \reg1, [\reg4, #4] @ dst3[2-3] = reg1\r
+ bne 30b\r
+\r
+ 40:\r
+.endm\r
+\r
--- /dev/null
+@@\r
+@@ Copyright (C) 2012 Roman Pauer\r
+@@\r
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of\r
+@@ this software and associated documentation files (the "Software"), to deal in\r
+@@ the Software without restriction, including without limitation the rights to\r
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
+@@ of the Software, and to permit persons to whom the Software is furnished to do\r
+@@ so, subject to the following conditions:\r
+@@\r
+@@ The above copyright notice and this permission notice shall be included in all\r
+@@ copies or substantial portions of the Software.\r
+@@\r
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+@@ SOFTWARE.\r
+@@\r
+\r
+.arm\r
+\r
+.include "neon_scale2x.Sinc"\r
+.include "neon_normalxx.Sinc"\r
+\r
+.global neon_scale2x_8_8\r
+.global neon_scale2x_16_16\r
+.global neon_scale2x_8_16\r
+\r
+.align 4\r
+neon_scale2x_8_8:\r
+\r
+@ r0 = const uint8_t *src\r
+@ r1 = uint8_t *dst\r
+@ r2 = unsigned int width (pixels)\r
+@ r3 = unsigned int srcstride (bytes)\r
+@ [sp] = unsigned int dststride (bytes)\r
+@ [sp+4] = unsigned int height\r
+@ lr = return address\r
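+@\r
+@ The per-pixel rule lives in neon_scale2x.Sinc (included above, not shown\r
+@ here). For orientation, the canonical Scale2x/AdvMAME2x rule those macros\r
+@ are expected to implement is, with B above, D left, F right and H below\r
+@ the centre pixel E:\r
+@\r
+@   E1 = (D == B && B != F && D != H) ? D : E;\r
+@   E2 = (B == F && B != D && F != H) ? F : E;\r
+@   E3 = (D == H && D != B && H != F) ? D : E;\r
+@   E4 = (H == F && D != H && B != F) ? F : E;\r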
+\r
+ ldr ip, [sp] @ ip = dststride\r
+ push {r4-r9}\r
+ ldr r9, [sp, #(7*4)] @ r9 = height\r
+ sub r4, r0, r3 @ r4 = src - srcstride\r
+ add r5, r0, r3 @ r5 = src + srcstride\r
+ add r6, r1, ip @ r6 = dst + dststride\r
+ sub r3, r3, r2 @ r3 = srcstride - width\r
+ sub ip, ip, r2 @ ip = dststride - width\r
+ lsl ip, #1 @ ip = 2 * dststride - 2 * width\r
+ mov r7, r2 @ r7 = width\r
+ sub r9, r9, #2 @ r9 = height - 2\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = width\r
+@ r3 = srcdiff (srcstride - width)\r
+@ r4 = src - srcstride\r
+@ r5 = src + srcstride\r
+@ r6 = dst + dststride\r
+@ r7 = counter\r
+@ r8 = tmpreg\r
+@ r9 = height\r
+@ ip = dstdiff (2 * dststride - 2 * width)\r
+\r
+ @ first line\r
+ neon_scale2x_8_8_line first, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+\r
+ @ middle lines\r
+ 101:\r
+ mov r7, r2\r
+\r
+ neon_scale2x_8_8_line middle, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ subS r9, r9, #1\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+ bne 101b\r
+\r
+ @ last line\r
+ mov r7, r2\r
+\r
+ neon_scale2x_8_8_line last, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ pop {r4-r9}\r
+ bx lr\r
+\r
+@ end procedure neon_scale2x_8_8\r
+\r
+\r
+neon_scale2x_16_16:\r
+\r
+@ r0 = const uint16_t *src\r
+@ r1 = uint16_t *dst\r
+@ r2 = unsigned int width (pixels)\r
+@ r3 = unsigned int srcstride (bytes)\r
+@ [sp] = unsigned int dststride (bytes)\r
+@ [sp+4] = unsigned int height\r
+@ lr = return address\r
+\r
+ ldr ip, [sp] @ ip = dststride\r
+ push {r4-r9}\r
+ ldr r9, [sp, #(7*4)] @ r9 = height\r
+ sub r4, r0, r3 @ r4 = src - srcstride\r
+ add r5, r0, r3 @ r5 = src + srcstride\r
+ add r6, r1, ip @ r6 = dst + dststride\r
+ sub r3, r3, r2, lsl #1 @ r3 = srcstride - 2 * width\r
+ sub ip, ip, r2, lsl #1 @ ip = dststride - 2 * width\r
+ lsl ip, #1 @ ip = 2 * dststride - 4 * width\r
+ mov r7, r2 @ r7 = width\r
+ sub r9, r9, #2 @ r9 = height - 2\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = width\r
+@ r3 = srcdiff (srcstride - 2 * width)\r
+@ r4 = src - srcstride\r
+@ r5 = src + srcstride\r
+@ r6 = dst + dststride\r
+@ r7 = counter\r
+@ r8 = tmpreg\r
+@ r9 = height\r
+@ ip = dstdiff (2 * dststride - 4 * width)\r
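+\r
+@ Same row structure as neon_scale2x_8_8 above; only the per-line kernel\r
+@ changes, since source and destination pixels are now 2 bytes wide.\r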
+\r
+ @ first line\r
+ neon_scale2x_16_16_line first, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+\r
+ @ middle lines\r
+ 101:\r
+ mov r7, r2\r
+\r
+ neon_scale2x_16_16_line middle, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ subS r9, r9, #1\r
+ add r0, r0, r3\r
+ add r4, r4, r3\r
+ add r5, r5, r3\r
+ add r1, r1, ip\r
+ add r6, r6, ip\r
+ bne 101b\r
+\r
+ @ last line\r
+ mov r7, r2\r
+\r
+ neon_scale2x_16_16_line last, r4, r0, r5, r7, r1, r6, r8, 0, 0\r
+\r
+ pop {r4-r9}\r
+ bx lr\r
+\r
+@ end procedure neon_scale2x_16_16\r
+\r
+\r
+neon_scale2x_8_16:\r
+\r
+@ r0 = const uint8_t *src\r
+@ r1 = uint8_t *dst\r
+@ r2 = const uint32_t *palette\r
+@ r3 = unsigned int width (pixels)\r
+@ [sp] = unsigned int srcstride (bytes)\r
+@ [sp+4] = unsigned int dststride (bytes)\r
+@ [sp+8] = unsigned int height\r
+@ lr = return address\r
+\r
+@ The 8 bpp to 16 bpp variant expands each source line through the palette\r
+@ into one of three temporary 16 bpp lines on the (32-byte aligned) stack,\r
+@ then scales those temporary lines with the 16-bit line kernel.\r
+\r
+ ldr ip, [sp] @ ip = srcstride\r
+ push {r4-r11,lr}\r
+ ldr r4, [sp, #(4*10)] @ r4 = dststride\r
+ ldr r5, [sp, #(4*11)] @ r5 = height\r
+ mov r6, sp @ r6 = sp\r
+ sub ip, ip, r3 @ ip = srcstride - width\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub r7, r4, r3, lsl #1 @ r7 = dststride - 2 * width\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ sub r5, r5, #2 @ height -= 2\r
+ mov r10, sp @ tmpline3 = sp\r
+ lsl r7, #1 @ r7 = 2 * dststride - 4 * width\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ mov r11, sp @ tmpline2 = sp\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub sp, sp, r3, lsl #1 @ sp -= 2 * width\r
+ mov lr, sp @ tmpline1 = sp\r
+ bic sp, sp, #31 @ align sp to 32 bytes\r
+ sub sp, sp, #36\r
+ str r6, [sp] @ oldsp = r6\r
+ str r5, [sp, #4] @ height = r5\r
+ str ip, [sp, #8] @ srcdiff = ip\r
+ str r7, [sp, #12] @ dstdiff = r7\r
+ str r4, [sp, #16] @ dststride = r4\r
+ str lr, [sp, #20] @ tmpline1 = lr\r
+ str r11, [sp, #24] @ tmpline2 = r11\r
+ str r10, [sp, #28] @ tmpline3 = r10\r
+ str r3, [sp, #32] @ width = r3\r
+\r
+@ r0 = src\r
+@ r1 = dst\r
+@ r2 = palette\r
+@ r3 = counter\r
+@ r4 = dst2\r
+\r
+@ r11 = bufptr1\r
+@ ip = bufptr2\r
+@ lr = bufptr3\r
+\r
+@ [sp] = oldsp\r
+@ [sp, #4] = height\r
+@ [sp, #8] = srcdiff (srcstride - width)\r
+@ [sp, #12] = dstdiff (2 * dststride - 4 * width)\r
+@ [sp, #16] = dststride\r
+@ [sp, #20] = tmpline1\r
+@ [sp, #24] = tmpline2\r
+@ [sp, #28] = tmpline3\r
+@ [sp, #32] = width\r
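+\r
+@ tmpline1/2/3 hold the three most recently converted 16 bpp source lines;\r
+@ their pointers are rotated each loop iteration so that the newest\r
+@ converted line replaces the oldest one.\r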
+\r
+ @ lr = tmpline1\r
+ @ r3 = counter\r
+\r
+ @ first line\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr lr, [sp, #24] @ bufptr3 = tmpline2\r
+ add r0, r0, r7 @ src += srcdiff\r
+\r
+ @ second line\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr ip, [sp, #20] @ bufptr2 = tmpline1\r
+ ldr lr, [sp, #24] @ bufptr3 = tmpline2\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+\r
+ @ first temporary line\r
+ neon_scale2x_16_16_line first, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r8, [sp, #12] @ r8 = dstdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r0, r0, r7 @ src += srcdiff\r
+ add r1, r1, r8 @ dst += dstdiff\r
+\r
+ 100:\r
+\r
+ @ line n+1\r
+ neon_normal1x_8_16_line r0, lr, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, ip\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r11, [sp, #20] @ bufptr1 = tmpline1\r
+ ldr ip, [sp, #24] @ bufptr2 = tmpline2\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+ ldr r3, [sp, #32] @ counter = width\r
+ str r11, [sp, #28] @ tmpline3 = bufptr1\r
+ str ip, [sp, #20] @ tmpline1 = bufptr2\r
+ str lr, [sp, #24] @ tmpline2 = bufptr3\r
+\r
+ @ temporary line n\r
+ neon_scale2x_16_16_line middle, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+ ldr r6, [sp, #4] @ r6 = height\r
+ ldr r7, [sp, #8] @ r7 = srcdiff\r
+ ldr r8, [sp, #12] @ r8 = dstdiff\r
+ ldr r3, [sp, #32] @ counter = width\r
+ subS r6, r6, #1 @ height--\r
+ ldr lr, [sp, #28] @ bufptr3 = tmpline3\r
+ add r0, r0, r7 @ src += srcdiff\r
+ add r1, r1, r8 @ dst += dstdiff\r
+ str r6, [sp, #4] @ height = r6\r
+ bne 100b\r
+\r
+\r
+ ldr r9, [sp, #16] @ r9 = dststride\r
+ ldr r11, [sp, #20] @ bufptr1 = tmpline1\r
+ ldr ip, [sp, #24] @ bufptr2 = tmpline2\r
+ add r4, r1, r9 @ dst2 = dst + dststride\r
+\r
+ @ last temporary line\r
+ neon_scale2x_16_16_line last, r11, ip, lr, r3, r1, r4, r5, 1, 0\r
+\r
+\r
+ ldr sp, [sp] @ sp = oldsp\r
+ pop {r4-r11,lr}\r
+ bx lr\r
+\r
+@ end procedure neon_scale2x_8_16\r
+\r
--- /dev/null
+@@\r
+@@ Copyright (C) 2012 Roman Pauer\r
+@@\r
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of\r
+@@ this software and associated documentation files (the "Software"), to deal in\r
+@@ the Software without restriction, including without limitation the rights to\r
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\r
+@@ of the Software, and to permit persons to whom the Software is furnished to do\r
+@@ so, subject to the following conditions:\r
+@@\r
+@@ The above copyright notice and this permission notice shall be included in all\r
+@@ copies or substantial portions of the Software.\r
+@@\r
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+@@ SOFTWARE.\r
+@@\r
+\r
+\r
+@ A B C --\ E0 E1\r
+@ D E F --/ E2 E3\r
+@ G H I\r
+\r
+@ q0 = E0 (tmp0)\r
+@ q1 = E1 (tmp1)\r
+@ q2 = E2 (tmp2)\r
+@ q3 = E3 (tmp3)\r
+@ q8 = S2prev\r
+@ q9 = S2next\r
+@ q10 = C0 < B == H || D == F >\r
+@ q11 = S1 < B >\r
+@ q12 = S2 < E >\r
+@ q13 = S3 < H >\r
+@ q14 = S2sl < D >\r
+@ q15 = S2sr < F >\r
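+\r
+@ Per-pixel Scale2x rule implemented by the macros below, in equivalent\r
+@ scalar form (E is the centre pixel, E0..E3 are its four output pixels):\r
+@\r
+@   C0 = (B == H) || (D == F)\r
+@   E0 = (!C0 && D == B) ? D : E\r
+@   E1 = (!C0 && B == F) ? F : E\r
+@   E2 = (!C0 && D == H) ? D : E\r
+@   E3 = (!C0 && H == F) ? F : E\r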
+\r
+\r
+.macro __neon_scale2x_8_8_line src1, src2, src3, counter, dst1, dst2, reg1, qB, qH, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+\r
+ vld1.8 {d17[7]}, [\src2] @ S2prev[15] = src[0]\r
+ andS \reg1, \counter, #15 @ reg1 = counter & 15\r
+\r
+ .ifnes "\qB", "q11"\r
+ add \src1, \src1, \counter @ src1 += counter\r
+ .endif\r
+ .ifnes "\qH", "q13"\r
+ add \src3, \src3, \counter @ src3 += counter\r
+ .endif\r
+ beq 1f\r
+\r
+ @ first 1-15 pixels - align counter to 16 bytes\r
+ vld1.8 {q12}, [\src2], \reg1 @ S2 = [src] < E >; src2 += counter & 15\r
+\r
+ .ifeqs "\qB", "q11"\r
+ vld1.8 {\qB}, [\src1], \reg1 @ S1 = [src - srcstride] < B >; src1 += counter & 15\r
+ .endif\r
+\r
+ .ifeqs "\qH", "q13"\r
+ vld1.8 {\qH}, [\src3], \reg1 @ S3 = [src + srcstride] < H >; src3 += counter & 15\r
+ .endif\r
+ vext.8 q14, q8, q12, #15 @ S2sl = S2prev[15] | (S2 << 8) < D >\r
+\r
+ vceq.i8 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vmov.8 d17[7], \reg1 @ S2prev[15] = reg1\r
+ vext.8 q15, q12, q9, #1 @ S2sr = (S2 >> 8) | ... < F >\r
+\r
+ vceq.i8 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i8 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i8 q1, \qB, q15 @ tmp1 = < B == F >\r
+ vtbl.8 d17, {d28, d29}, d17 @ S2prev[15] = src[reg1 - 1]\r
+\r
+ lsl \reg1, #1\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+\r
+ vceq.i8 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i8 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.8 {q0-q1}, [\dst1], \reg1 @ [dst] = E0,E1; dst1 += reg1\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+ bic \counter, \counter, #15\r
+\r
+ vst2.8 {q2-q3}, [\dst2], \reg1 @ [dst + dststride] = E2,E3; dst2 += reg1\r
+\r
+ @ counter is aligned to 16 bytes\r
+\r
+ 1:\r
+ vld1.8 {q9}, [\alsrc2]! @ S2next = [src]; src2 += 16\r
+\r
+ @ inner loop (16 pixels per iteration)\r
+ 2:\r
+\r
+ vmov q12, q9 @ S2 = S2next < E >\r
+ .ifeqs "\qB", "q11"\r
+ vld1.8 {\qB}, [\alsrc1]! @ S1 = [src - srcstride] < B >; src1 += 16\r
+ .endif\r
+\r
+ .ifeqs "\qH", "q13"\r
+ vld1.8 {\qH}, [\alsrc3]! @ S3 = [src + srcstride] < H >; src3 += 16\r
+ .endif\r
+\r
+ vext.8 q14, q8, q12, #15 @ S2sl = S2prev[15] | (S2 << 8) < D >\r
+ vld1.8 {q9}, [\alsrc2]! @ S2next = [src]; src2 += 16\r
+\r
+ vceq.i8 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vmov q8, q12 @ S2prev = S2\r
+ vext.8 q15, q12, q9, #1 @ S2sr = (S2 >> 8) | S2next[0] < F >\r
+\r
+ vceq.i8 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i8 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i8 q1, \qB, q15 @ tmp1 = < B == F >\r
+\r
+ sub \counter, \counter, #16 @ counter -= 16\r
+\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+\r
+ vceq.i8 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i8 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.8 {q0-q1}, [\aldst1]! @ [dst] = E0,E1; dst1 += 2*16\r
+\r
+ cmp \counter, #16\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+\r
+ vst2.8 {q2-q3}, [\aldst2]! @ [dst + dststride] = E2,E3; dst2 += 2*16\r
+\r
+ bhi 2b\r
+\r
+ @ last 16 pixels\r
+\r
+ vmov q12, q9 @ S2 = S2next < E >\r
+\r
+ vshr.u64 d18, d19, #(64-8) @ S2next[0] = S2[15] | ...\r
+ .ifeqs "\qB", "q11"\r
+ vld1.8 {\qB}, [\alsrc1]! @ S1 = [src - srcstride] < B >; src1 += 16\r
+ .endif\r
+\r
+ vext.8 q14, q8, q12, #15 @ S2sl = S2prev[15] | (S2 << 8) < D >\r
+\r
+ vext.8 q15, q12, q9, #1 @ S2sr = (S2 >> 8) | S2next[0] < F >\r
+ .ifeqs "\qH", "q13"\r
+ vld1.8 {\qH}, [\alsrc3]! @ S3 = [src + srcstride] < H >; src3 += 16\r
+ .endif\r
+\r
+ vceq.i8 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i8 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vceq.i8 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i8 q1, \qB, q15 @ tmp1 = < B == F >\r
+\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+\r
+ vceq.i8 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i8 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.8 {q0-q1}, [\aldst1]! @ [dst] = E0,E1; dst1 += 2*16\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+\r
+ vst2.8 {q2-q3}, [\aldst2]! @ [dst + dststride] = E2,E3; dst2 += 2*16\r
+\r
+.endm\r
+\r
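+@ Wrappers selecting which neighbour rows really exist: "first" substitutes\r
+@ q12 (E) for the missing row above, "last" substitutes q12 (E) for the\r
+@ missing row below, "middle" loads both neighbour rows (q11 and q13).\r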
+.macro _neon_scale2x_8_8_line_first src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q12, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_scale2x_8_8_line_middle src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_scale2x_8_8_line_last src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q12, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
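+@ Dispatcher: the srcalign16/dstalign32 flags choose whether the NEON loads\r
+@ and stores are issued with :128 / :256 address-alignment hints (sources\r
+@ 16-byte aligned, destinations 32-byte aligned).\r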
+.macro neon_scale2x_8_8_line part, src1, src2, src3, counter, dst1, dst2, reg1, srcalign16, dstalign32\r
+ .ifeq \srcalign16\r
+\r
+ .ifeq \dstalign32\r
+ _neon_scale2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1, \dst2\r
+ .else\r
+ _neon_scale2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .else\r
+\r
+ .ifeq \dstalign32\r
+ _neon_scale2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2\r
+ .else\r
+ _neon_scale2x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .endif\r
+.endm\r
+\r
+\r
+.macro __neon_scale2x_16_16_line src1, src2, src3, counter, dst1, dst2, reg1, qB, qH, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+\r
+ vld1.16 {d17[3]}, [\src2] @ S2prev[7] = src[0]\r
+ andS \reg1, \counter, #7 @ reg1 = counter & 7\r
+\r
+ .ifnes "\qB", "q11"\r
+ add \src1, \src1, \counter, lsl #1 @ src1 += 2 * counter\r
+ .endif\r
+ .ifnes "\qH", "q13"\r
+ add \src3, \src3, \counter, lsl #1 @ src3 += 2 * counter\r
+ .endif\r
+ beq 1f\r
+\r
+ @ first 1-7 pixels - align counter to 16 bytes\r
+ vld1.16 {q12}, [\src2] @ S2 = [src] < E >\r
+ lsl \reg1, #1\r
+\r
+ .ifeqs "\qB", "q11"\r
+ vld1.16 {\qB}, [\src1] @ S1 = [src - srcstride] < B >\r
+ .endif\r
+ bfi \reg1, \reg1, #8, #8\r
+\r
+ .ifeqs "\qH", "q13"\r
+ vld1.16 {\qH}, [\src3] @ S3 = [src + srcstride] < H >\r
+ .endif\r
+ vext.8 q14, q8, q12, #14 @ S2sl = S2prev[7] | (S2 << 16) < D >\r
+\r
+ add \reg1, \reg1, #256\r
+ vceq.i16 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vmov.16 d17[3], \reg1 @ S2prev[7] = reg1\r
+ vext.8 q15, q12, q9, #2 @ S2sr = (S2 >> 16) | ... < F >\r
+\r
+ vceq.i16 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i16 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i16 q1, \qB, q15 @ tmp1 = < B == F >\r
+ vtbl.8 d17, {d28, d29}, d17 @ S2prev[7] = src[reg1 - 1]\r
+\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+ and \reg1, \counter, #7\r
+\r
+ vceq.i16 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i16 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.16 {q0-q1}, [\dst1] @ [dst] = E0,E1\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+\r
+ bic \counter, \counter, #7\r
+ .ifeqs "\qB", "q11"\r
+ add \src1, \src1, \reg1, lsl #1\r
+ .endif\r
+ add \src2, \src2, \reg1, lsl #1\r
+ .ifeqs "\qH", "q13"\r
+ add \src3, \src3, \reg1, lsl #1\r
+ .endif\r
+\r
+ vst2.16 {q2-q3}, [\dst2] @ [dst + dststride] = E2,E3\r
+\r
+ add \dst1, \dst1, \reg1, lsl #2\r
+ add \dst2, \dst2, \reg1, lsl #2\r
+\r
+ @ counter is aligned to 16 bytes\r
+\r
+ 1:\r
+ vld1.16 {q9}, [\alsrc2]! @ S2next = [src]; src2 += 2*8\r
+\r
+ @ inner loop (8 pixels per iteration)\r
+ 2:\r
+\r
+ vmov q12, q9 @ S2 = S2next < E >\r
+ .ifeqs "\qB", "q11"\r
+ vld1.16 {\qB}, [\alsrc1]! @ S1 = [src - srcstride] < B >; src1 += 2*8\r
+ .endif\r
+\r
+ .ifeqs "\qH", "q13"\r
+ vld1.16 {\qH}, [\alsrc3]! @ S3 = [src + srcstride] < H >; src3 += 2*8\r
+ .endif\r
+\r
+ vext.8 q14, q8, q12, #14 @ S2sl = S2prev[7] | (S2 << 16) < D >\r
+ vld1.16 {q9}, [\alsrc2]! @ S2next = [src]; src2 += 2*8\r
+\r
+ vceq.i16 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vmov q8, q12 @ S2prev = S2\r
+ vext.8 q15, q12, q9, #2 @ S2sr = (S2 >> 16) | S2next[0] < F >\r
+\r
+ vceq.i16 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i16 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i16 q1, \qB, q15 @ tmp1 = < B == F >\r
+\r
+ sub \counter, \counter, #8 @ counter -= 8\r
+\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+\r
+ vceq.i16 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i16 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.16 {q0-q1}, [\aldst1]! @ [dst] = E0,E1; dst1 += 2*2*8\r
+\r
+ cmp \counter, #8\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+\r
+ vst2.16 {q2-q3}, [\aldst2]! @ [dst + dststride] = E2,E3; dst2 += 2*2*8\r
+\r
+ bhi 2b\r
+\r
+ @ last 8 pixels\r
+\r
+ vmov q12, q9 @ S2 = S2next < E >\r
+\r
+ vshr.u64 d18, d19, #(64-16) @ S2next[0] = S2[7] | ...\r
+ .ifeqs "\qB", "q11"\r
+ vld1.16 {\qB}, [\alsrc1]! @ S1 = [src - srcstride] < B >; src1 += 2*8\r
+ .endif\r
+\r
+ vext.8 q14, q8, q12, #14 @ S2sl = S2prev[7] | (S2 << 16) < D >\r
+\r
+ vext.8 q15, q12, q9, #2 @ S2sr = (S2 >> 16) | S2next[0] < F >\r
+ .ifeqs "\qH", "q13"\r
+ vld1.16 {\qH}, [\alsrc3]! @ S3 = [src + srcstride] < H >; src3 += 2*8\r
+ .endif\r
+\r
+ vceq.i16 q0, q14, \qB @ tmp0 = < D == B >\r
+\r
+ vceq.i16 q2, \qB, \qH @ tmp2 = < B == H >\r
+\r
+ vceq.i16 q3, q14, q15 @ tmp3 = < D == F >\r
+\r
+ vceq.i16 q1, \qB, q15 @ tmp1 = < B == F >\r
+\r
+ vorr q10, q2, q3 @ C0 = < B == H || D == F >\r
+\r
+ vceq.i16 q2, q14, \qH @ tmp2 = < D == H >\r
+\r
+ vceq.i16 q3, \qH, q15 @ tmp3 = < H == F >\r
+\r
+ vorn q0, q10, q0 @ tmp0 = < C0 || !(D == B) >\r
+\r
+ vorn q1, q10, q1 @ tmp1 = < C0 || !(B == F) >\r
+\r
+ vbsl q0, q12, q14 @ E0 = < (C0 || !(D == B)) ? E : D >\r
+\r
+ vbsl q1, q12, q15 @ E1 = < (C0 || !(B == F)) ? E : F >\r
+\r
+ vorn q2, q10, q2 @ tmp2 = < C0 || !(D == H) >\r
+\r
+ vorn q3, q10, q3 @ tmp3 = < C0 || !(H == F) >\r
+\r
+ vbsl q2, q12, q14 @ E2 = < (C0 || !(D == H)) ? E : D >\r
+ vst2.16 {q0-q1}, [\aldst1]! @ [dst] = E0,E1; dst1 += 2*2*8\r
+\r
+ vbsl q3, q12, q15 @ E3 = < (C0 || !(H == F)) ? E : F >\r
+\r
+ vst2.16 {q2-q3}, [\aldst2]! @ [dst + dststride] = E2,E3; dst2 += 2*2*8\r
+\r
+.endm\r
+\r
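+@ 16 bpp counterparts of the 8 bpp wrappers and dispatcher above; qB/qH and\r
+@ the alignment hints are selected in exactly the same way.\r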
+.macro _neon_scale2x_16_16_line_first src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q12, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_scale2x_16_16_line_middle src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q13, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro _neon_scale2x_16_16_line_last src1, src2, src3, counter, dst1, dst2, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2\r
+ __neon_scale2x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, q11, q12, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2\r
+.endm\r
+\r
+.macro neon_scale2x_16_16_line part, src1, src2, src3, counter, dst1, dst2, reg1, srcalign16, dstalign32\r
+ .ifeq \srcalign16\r
+\r
+ .ifeq \dstalign32\r
+ _neon_scale2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1, \dst2\r
+ .else\r
+ _neon_scale2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1, \src2, \src3, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .else\r
+\r
+ .ifeq \dstalign32\r
+ _neon_scale2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2\r
+ .else\r
+ _neon_scale2x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \reg1, \src1:128, \src2:128, \src3:128, \dst1:256, \dst2:256\r
+ .endif\r
+\r
+ .endif\r
+.endm\r
+\r
--- /dev/null
+/**
+ *
+ * Copyright (C) 2012 Roman Pauer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#if !defined(_NEON_SCALE2X_H_INCLUDED_)
+#define _NEON_SCALE2X_H_INCLUDED_
+
+#include <inttypes.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void neon_scale2x_8_8(const uint8_t *src, uint8_t *dst, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
+extern void neon_scale2x_16_16(const uint16_t *src, uint16_t *dst, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
+
+extern void neon_scale2x_8_16(const uint8_t *src, uint16_t *dst, const uint32_t *palette, unsigned int width, unsigned int srcstride, unsigned int dststride, unsigned int height);
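+
+/*
+ * Usage sketch (buffer names and sizes below are only an illustration;
+ * strides are given in bytes, as documented in the assembly sources):
+ *
+ *     // scale a width x height 16 bpp image into a 2*width x 2*height buffer
+ *     neon_scale2x_16_16(src, dst,
+ *                        width,
+ *                        width * sizeof(uint16_t),      // srcstride
+ *                        2 * width * sizeof(uint16_t),  // dststride
+ *                        height);
+ */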
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _NEON_SCALE2X_H_INCLUDED_ */
--- /dev/null
+Copyright (C) 2012 Roman Pauer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.