shithub: openh264

ref: ff94e7053df980111eb7f22a04e4c0cfd6b84c97
parent: 5bc4a398201b07cba6cafbd0d3561b0d52608fa9
parent: 03e0dcd814645cad167227bcd74d7966eaf526d2
author: Ethan Hugg <ethanhugg@gmail.com>
date: Mon Mar 3 17:55:09 EST 2014

Merge pull request #400 from mstorsjo/cleanup-arm-assembly

Convert the ARM assembly sources to Unix newlines

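The change below is purely mechanical: each .S file is rewritten with LF line endings, so the patch shows every line removed and re-added even though the visible text is identical. As a rough illustration of the kind of conversion involved (not necessarily the tool used for this pull request, which is not stated here; the codec/ path and the *.S filter are assumptions), a CRLF-to-LF pass could be scripted along these lines:

    #!/usr/bin/env python3
    # Hypothetical sketch: rewrite CRLF line endings as LF in assembly sources.
    # The "codec" directory and the *.S filter are assumptions for illustration.
    import pathlib

    def to_unix_newlines(path: pathlib.Path) -> bool:
        data = path.read_bytes()
        converted = data.replace(b"\r\n", b"\n")
        if converted != data:
            path.write_bytes(converted)  # rewrite only when something actually changed
            return True
        return False

    if __name__ == "__main__":
        for src in pathlib.Path("codec").rglob("*.S"):
            if to_unix_newlines(src):
                print("converted", src)

In practice a one-off cleanup like this is often done with an existing tool such as dos2unix, which produces the same byte-level result.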
--- a/codec/common/arm_arch_common_macro.S
+++ b/codec/common/arm_arch_common_macro.S
@@ -1,55 +1,55 @@
-/*!
- * \copy
- *     Copyright (c)  2013, Cisco Systems
- *     All rights reserved.
- *
- *     Redistribution and use in source and binary forms, with or without
- *     modification, are permitted provided that the following conditions
- *     are met:
- *
- *        * Redistributions of source code must retain the above copyright
- *          notice, this list of conditions and the following disclaimer.
- *
- *        * Redistributions in binary form must reproduce the above copyright
- *          notice, this list of conditions and the following disclaimer in
- *          the documentation and/or other materials provided with the
- *          distribution.
- *
- *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- *     POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifdef APPLE_IOS
-
-.macro WELS_ASM_FUNC_BEGIN
-.align 2
-.arm
-.globl _$0
-_$0:
-.endm
-
-#else
-
-.macro WELS_ASM_FUNC_BEGIN funcName
-.align 2
-.arm
-.global \funcName
-\funcName:
-.endm
-
-#endif
-
-.macro WELS_ASM_FUNC_END
-mov pc, lr
-.endm
+/*!
+ * \copy
+ *     Copyright (c)  2013, Cisco Systems
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef APPLE_IOS
+
+.macro WELS_ASM_FUNC_BEGIN
+.align 2
+.arm
+.globl _$0
+_$0:
+.endm
+
+#else
+
+.macro WELS_ASM_FUNC_BEGIN funcName
+.align 2
+.arm
+.global \funcName
+\funcName:
+.endm
+
+#endif
+
+.macro WELS_ASM_FUNC_END
+mov pc, lr
+.endm
--- a/codec/common/deblocking_neon.S
+++ b/codec/common/deblocking_neon.S
@@ -1,812 +1,812 @@
-/*!
-* \copy
-*     Copyright (c)  2013, Cisco Systems
-*     All rights reserved.
-
-*     Redistribution and use in source and binary forms, with or without
-*     modification, are permitted provided that the following conditions
-*     are met:
-
-*        * Redistributions of source code must retain the above copyright
-*          notice, this list of conditions and the following disclaimer.
-
-*        * Redistributions in binary form must reproduce the above copyright
-*          notice, this list of conditions and the following disclaimer in
-*          the documentation and/or other materials provided with the
-*          distribution.
-
-*     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-*     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-*     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-*     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-*     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-*     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-*     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-*     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-*     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-*     POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifdef HAVE_NEON
-.text
-
-#include "arm_arch_common_macro.S"
-
-#ifdef APPLE_IOS
-.macro	JMP_IF_128BITS_IS_ZERO
-    vorr.s16	$2, $0, $1
-    vmov		r3, r2, $2
-    orr			r3, r3, r2
-    cmp			r3, #0
-.endm
-
-.macro	MASK_MATRIX
-    vabd.u8	$6, $1, $2
-    vcgt.u8	$6, $4, $6
-
-    vabd.u8	$4, $0, $1
-    vclt.u8	$4, $4, $5
-    vand.u8	$6, $6, $4
-
-    vabd.u8	$4, $3, $2
-    vclt.u8	$4, $4, $5
-    vand.u8	$6, $6, $4
-.endm
-
-
-.macro	DIFF_LUMA_LT4_P1_Q1
-    vabd.u8	$9, $0, $2
-    vclt.u8	$9, $9, $4
-    vrhadd.u8	$8, $2, $3
-    vhadd.u8	$8, $0, $8
-    vsub.s8	$8, $8, $1
-    vmax.s8	$8, $8, $5
-    vmin.s8	$8, $8, $6
-    vand.s8	$8, $8, $9
-    vand.s8	$8, $8, $7
-    vadd.u8	$8, $1, $8
-    vabs.s8	$9, $9
-.endm
-
-.macro	DIFF_LUMA_LT4_P0_Q0
-    vsubl.u8	$5, $0, $3
-    vsubl.u8	$6, $2, $1
-    vshl.s16	$6, $6, #2
-    vadd.s16	$5, $5, $6
-    vrshrn.s16		$4, $5, #3
-.endm
-
-.macro	DIFF_LUMA_EQ4_P2P1P0
-    vaddl.u8	q4, $1, $2
-    vaddl.u8	q5, $3, $4
-    vadd.u16	q5, q4, q5
-
-    vaddl.u8	q4, $0, $1
-    vshl.u16	q4, q4, #1
-    vadd.u16	q4, q5, q4
-
-    vrshrn.u16		$0, q5, #2
-    vrshrn.u16		$7, q4, #3
-
-    vshl.u16	q5, q5, #1
-    vsubl.u8	q4, $5, $1
-    vadd.u16	q5, q4,q5
-
-    vaddl.u8	q4, $2, $5
-    vaddw.u8	q4, q4, $2
-    vaddw.u8	q4, q4, $3
-
-    vrshrn.u16		d10,q5, #3
-    vrshrn.u16		d8, q4, #2
-    vbsl.u8		$6, d10, d8
-.endm
-
-.macro	DIFF_LUMA_EQ4_MASK
-    vmov	$3, $2
-    vbsl.u8	$3, $0, $1
-.endm
-
-.macro	DIFF_CHROMA_EQ4_P0Q0
-    vaddl.u8	$4, $0, $3
-    vaddw.u8	$5, $4, $1
-    vaddw.u8	$6, $4, $2
-    vaddw.u8	$5, $5, $0
-
-    vaddw.u8	$6, $6, $3
-    vrshrn.u16		$7, $5, #2
-    vrshrn.u16		$8, $6, #2
-.endm
-
-.macro	LORD_CHROMA_DATA_4
-    vld4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r0], r2
-    vld4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r1], r2
-.endm
-
-.macro	STORE_CHROMA_DATA_4
-    vst4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r0], r2
-    vst4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r1], r2
-.endm
-
-.macro	LORD_LUMA_DATA_3
-    vld3.u8	{$0[$6],$1[$6],$2[$6]}, [r2], r1
-    vld3.u8	{$3[$6],$4[$6],$5[$6]}, [r0], r1
-.endm
-
-.macro	STORE_LUMA_DATA_4
-    vst4.u8	{$0[$4],$1[$4],$2[$4],$3[$4]}, [r0], r1
-    vst4.u8	{$0[$5],$1[$5],$2[$5],$3[$5]}, [r2], r1
-.endm
-
-.macro	LORD_LUMA_DATA_4
-    vld4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r3], r1
-    vld4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r0], r1
-.endm
-
-.macro	STORE_LUMA_DATA_3
-    vst3.u8	{$0[$6],$1[$6],$2[$6]}, [r3], r1
-    vst3.u8	{$3[$6],$4[$6],$5[$6]}, [r0], r1
-.endm
-
-.macro	EXTRACT_DELTA_INTO_TWO_PART
-    vcge.s8	$1, $0, #0
-    vand	$1, $0, $1
-    vsub.s8	$0, $1, $0
-.endm
-#else
-.macro	JMP_IF_128BITS_IS_ZERO arg0, arg1, arg2
-    vorr.s16	\arg2, \arg0, \arg1
-    vmov		r3, r2, \arg2
-    orr			r3, r3, r2
-    cmp			r3, #0
-.endm
-
-.macro	MASK_MATRIX arg0, arg1, arg2, arg3, arg4, arg5, arg6
-    vabd.u8	\arg6, \arg1, \arg2
-    vcgt.u8	\arg6, \arg4, \arg6
-
-    vabd.u8	\arg4, \arg0, \arg1
-    vclt.u8	\arg4, \arg4, \arg5
-    vand.u8	\arg6, \arg6, \arg4
-
-    vabd.u8	\arg4, \arg3, \arg2
-    vclt.u8	\arg4, \arg4, \arg5
-    vand.u8	\arg6, \arg6, \arg4
-.endm
-
-.macro	DIFF_LUMA_LT4_P1_Q1 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
-    vabd.u8	\arg9, \arg0, \arg2
-    vclt.u8	\arg9, \arg9, \arg4
-    vrhadd.u8	\arg8, \arg2, \arg3
-    vhadd.u8	\arg8, \arg0, \arg8
-    vsub.s8	\arg8, \arg8, \arg1
-    vmax.s8	\arg8, \arg8, \arg5
-    vmin.s8	\arg8, \arg8, \arg6
-    vand.s8	\arg8, \arg8, \arg9
-    vand.s8	\arg8, \arg8, \arg7
-    vadd.u8	\arg8, \arg1, \arg8
-    vabs.s8	\arg9, \arg9
-.endm
-
-.macro	DIFF_LUMA_LT4_P0_Q0 arg0, arg1, arg2, arg3, arg4, arg5, arg6
-    vsubl.u8	\arg5, \arg0, \arg3
-    vsubl.u8	\arg6, \arg2, \arg1
-    vshl.s16	\arg6, \arg6, #2
-    vadd.s16	\arg5, \arg5, \arg6
-    vrshrn.s16		\arg4, \arg5, #3
-.endm
-
-
-.macro	DIFF_LUMA_EQ4_P2P1P0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
-    vaddl.u8	q4, \arg1, \arg2
-    vaddl.u8	q5, \arg3, \arg4
-    vadd.u16	q5, q4, q5
-
-    vaddl.u8	q4, \arg0, \arg1
-    vshl.u16	q4, q4, #1
-    vadd.u16	q4, q5, q4
-
-    vrshrn.u16		\arg0, q5, #2
-    vrshrn.u16		\arg7, q4, #3
-
-    vshl.u16	q5, q5, #1
-    vsubl.u8	q4, \arg5, \arg1
-    vadd.u16	q5, q4,q5
-
-    vaddl.u8	q4, \arg2, \arg5
-    vaddw.u8	q4, q4, \arg2
-    vaddw.u8	q4, q4, \arg3
-
-    vrshrn.u16		d10,q5, #3
-    vrshrn.u16		d8, q4, #2
-    vbsl.u8		\arg6, d10, d8
-.endm
-
-.macro	DIFF_LUMA_EQ4_MASK arg0, arg1, arg2, arg3
-    vmov	\arg3, \arg2
-    vbsl.u8	\arg3, \arg0, \arg1
-.endm
-
-.macro	DIFF_CHROMA_EQ4_P0Q0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-    vaddl.u8	\arg4, \arg0, \arg3
-    vaddw.u8	\arg5, \arg4, \arg1
-    vaddw.u8	\arg6, \arg4, \arg2
-    vaddw.u8	\arg5, \arg5, \arg0
-    vaddw.u8	\arg6, \arg6, \arg3
-    vrshrn.u16		\arg7, \arg5, #2
-    vrshrn.u16		\arg8, \arg6, #2
-.endm
-
-.macro	LORD_CHROMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-    vld4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r0], r2
-    vld4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r1], r2
-.endm
-
-.macro	STORE_CHROMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-    vst4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r0], r2
-    vst4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r1], r2
-.endm
-
-.macro	LORD_LUMA_DATA_3 arg0, arg1, arg2, arg3, arg4, arg5, arg6
-    vld3.u8	{\arg0[\arg6],\arg1[\arg6],\arg2[\arg6]}, [r2], r1
-    vld3.u8	{\arg3[\arg6],\arg4[\arg6],\arg5[\arg6]}, [r0], r1
-.endm
-
-.macro	STORE_LUMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5
-    vst4.u8	{\arg0[\arg4],\arg1[\arg4],\arg2[\arg4],\arg3[\arg4]}, [r0], r1
-    vst4.u8	{\arg0[\arg5],\arg1[\arg5],\arg2[\arg5],\arg3[\arg5]}, [r2], r1
-.endm
-
-.macro	LORD_LUMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-    vld4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r3], r1
-    vld4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r0], r1
-.endm
-
-.macro	STORE_LUMA_DATA_3 arg0, arg1, arg2, arg3, arg4, arg5, arg6
-    vst3.u8	{\arg0[\arg6],\arg1[\arg6],\arg2[\arg6]}, [r3], r1
-    vst3.u8	{\arg3[\arg6],\arg4[\arg6],\arg5[\arg6]}, [r0], r1
-.endm
-
-.macro	EXTRACT_DELTA_INTO_TWO_PART arg0, arg1
-    vcge.s8	\arg1, \arg0, #0
-    vand	\arg1, \arg0, \arg1
-    vsub.s8	\arg0, \arg1, \arg0
-.endm
-#endif
-
-WELS_ASM_FUNC_BEGIN DeblockLumaLt4V_neon
-    vdup.u8	q11, r2
-    vdup.u8	q9, r3
-
-    add			r2, r1, r1, lsl #1
-    sub			r2, r0, r2
-    vld1.u8	{q0}, [r2], r1
-    vld1.u8	{q3}, [r0], r1
-    vld1.u8	{q1}, [r2], r1
-    vld1.u8	{q4}, [r0], r1
-    vld1.u8	{q2}, [r2]
-    vld1.u8	{q5}, [r0]
-    sub			r2, r2, r1
-
-    ldr			r3, [sp, #0]
-    vld1.s8	{d31}, [r3]
-    vdup.s8	d28, d31[0]
-    vdup.s8	d30, d31[1]
-    vdup.s8	d29, d31[2]
-    vdup.s8	d31, d31[3]
-    vtrn.32	d28, d30
-    vtrn.32	d29, d31
-    vcge.s8	q10, q14, #0
-
-    MASK_MATRIX	q1, q2, q3, q4, q11, q9, q15
-    vand.u8	q10, q10, q15
-
-    veor		q15, q15
-    vsub.i8	q15,q15,q14
-
-    DIFF_LUMA_LT4_P1_Q1	q0, q1, q2, q3, q9, q15, q14, q10, q6, q12
-    vst1.u8	{q6}, [r2], r1
-
-    DIFF_LUMA_LT4_P1_Q1	q5, q4, q3, q2, q9, q15, q14, q10, q7, q13
-
-    vabs.s8	q12, q12
-    vabs.s8	q13, q13
-    vadd.u8	q14,q14,q12
-    vadd.u8	q14,q14,q13
-    veor		q15, q15
-    vsub.i8	q15,q15,q14
-
-    DIFF_LUMA_LT4_P0_Q0	d2, d4, d6, d8, d16, q12, q13
-    DIFF_LUMA_LT4_P0_Q0	d3, d5, d7, d9, d17, q12, q13
-    vmax.s8	q8, q8, q15
-    vmin.s8	q8, q8, q14
-    vand.s8	q8, q8, q10
-    EXTRACT_DELTA_INTO_TWO_PART	q8, q9
-    vqadd.u8	q2, q2, q9
-    vqsub.u8	q2, q2, q8
-    vst1.u8	{q2}, [r2], r1
-    vqsub.u8	q3, q3, q9
-    vqadd.u8	q3, q3, q8
-    vst1.u8	{q3}, [r2]	, r1
-    vst1.u8	{q7}, [r2]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN DeblockLumaEq4V_neon
-
-    vdup.u8	q5, r2
-    vdup.u8	q4, r3
-
-    sub			r3, r0, r1, lsl #2
-    vld1.u8	{q8},  [r3], r1
-    vld1.u8	{q12}, [r0], r1
-    vld1.u8	{q9},  [r3], r1
-    vld1.u8	{q13}, [r0], r1
-    vld1.u8	{q10}, [r3], r1
-    vld1.u8	{q14}, [r0], r1
-    vld1.u8	{q11}, [r3]
-    vld1.u8	{q15}, [r0]
-    sub			r3, r3, r1	, lsl #1
-
-    MASK_MATRIX	q10, q11, q12, q13, q5, q4, q6
-
-    mov			r2, r2, lsr #2
-    add			r2, r2, #2
-    vdup.u8	q5, r2
-    vabd.u8	q0, q11, q12
-    vclt.u8	q7, q0, q5
-
-    vabd.u8	q1, q9, q11
-    vclt.u8	q1, q1, q4
-    vand.s8	q1, q1, q7
-
-    vabd.u8	q2, q14,q12
-    vclt.u8	q2, q2, q4
-    vand.s8	q2, q2, q7
-    vand.u8	q7, q7, q6
-
-    vmov		q3, q1
-
-    DIFF_LUMA_EQ4_P2P1P0		d16, d18, d20, d22, d24, d26, d2, d0
-    DIFF_LUMA_EQ4_P2P1P0		d17, d19, d21, d23, d25, d27, d3, d1
-
-    vand.u8	q3, q7, q3
-    DIFF_LUMA_EQ4_MASK	q0, q9, q3, q4
-    vst1.u8	{q4}, [r3], r1
-    DIFF_LUMA_EQ4_MASK	q8,q10, q3, q4
-    vst1.u8	{q4}, [r3], r1
-    DIFF_LUMA_EQ4_MASK	q1,q11, q6, q4
-    vst1.u8	{q4}, [r3], r1
-
-    vmov		q0, q2
-    DIFF_LUMA_EQ4_P2P1P0		d30, d28, d26, d24, d22, d20, d4, d6
-    DIFF_LUMA_EQ4_P2P1P0		d31, d29, d27, d25, d23, d21, d5, d7
-
-    vand.u8	q0, q7, q0
-    DIFF_LUMA_EQ4_MASK	q2,  q12, q6, q4
-    vst1.u8	{q4}, [r3], r1
-    DIFF_LUMA_EQ4_MASK	q15, q13, q0, q4
-    vst1.u8	{q4}, [r3], r1
-    DIFF_LUMA_EQ4_MASK	q3,  q14, q0, q4
-    vst1.u8	{q4}, [r3], r1
-
-WELS_ASM_FUNC_END
-
-
-    WELS_ASM_FUNC_BEGIN DeblockLumaLt4H_neon
-
-    vdup.u8	q11, r2
-    vdup.u8	q9, r3
-
-    sub			r2, r0, #3
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 0
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 1
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 2
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 3
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 4
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 5
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 6
-    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 7
-
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 0
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 1
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 2
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 3
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 4
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 5
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 6
-    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 7
-
-    vswp		d1, d2
-    vswp		d3, d4
-    vswp		d1, d4
-    vswp		d7, d8
-    vswp		d9, d10
-    vswp		d7, d10
-
-    sub			r0, r0, r1, lsl #4
-
-    ldr			r3, [sp, #0]
-    vld1.s8	{d31}, [r3]
-    vdup.s8	d28, d31[0]
-    vdup.s8	d30, d31[1]
-    vdup.s8	d29, d31[2]
-    vdup.s8	d31, d31[3]
-    vtrn.32	d28, d30
-    vtrn.32	d29, d31
-    vcge.s8	q10, q14, #0
-
-    MASK_MATRIX	q1, q2, q3, q4, q11, q9, q15
-    vand.u8	q10, q10, q15
-
-    veor		q15, q15
-    vsub.i8	q15,q15,q14
-
-    DIFF_LUMA_LT4_P1_Q1	q0, q1, q2, q3, q9, q15, q14, q10, q6, q12
-    DIFF_LUMA_LT4_P1_Q1	q5, q4, q3, q2, q9, q15, q14, q10, q7, q13
-
-    vabs.s8	q12, q12
-    vabs.s8	q13, q13
-    vadd.u8	q14,q14,q12
-    vadd.u8	q14,q14,q13
-    veor		q15, q15
-    vsub.i8	q15,q15,q14
-
-    DIFF_LUMA_LT4_P0_Q0	d2, d4, d6, d8, d16, q12, q13
-    DIFF_LUMA_LT4_P0_Q0	d3, d5, d7, d9, d17, q12, q13
-    vmax.s8	q8, q8, q15
-    vmin.s8	q8, q8, q14
-    vand.s8	q8, q8, q10
-    EXTRACT_DELTA_INTO_TWO_PART	q8, q9
-    vqadd.u8	q2, q2, q9
-    vqsub.u8	q2, q2, q8
-
-    vqsub.u8	q3, q3, q9
-    vqadd.u8	q3, q3, q8
-
-    sub		r0, #2
-    add		r2, r0, r1
-    lsl		r1, #1
-
-    vmov		q1, q6
-    vmov		q4, q7
-
-    vswp		q2, q3
-    vswp		d3, d6
-    vswp		d5, d8
-
-    STORE_LUMA_DATA_4		d2, d3, d4, d5, 0, 1
-    STORE_LUMA_DATA_4		d2, d3, d4, d5, 2, 3
-    STORE_LUMA_DATA_4		d2, d3, d4, d5, 4, 5
-    STORE_LUMA_DATA_4		d2, d3, d4, d5, 6, 7
-
-    STORE_LUMA_DATA_4		d6, d7, d8, d9, 0, 1
-    STORE_LUMA_DATA_4		d6, d7, d8, d9, 2, 3
-    STORE_LUMA_DATA_4		d6, d7, d8, d9, 4, 5
-    STORE_LUMA_DATA_4		d6, d7, d8, d9, 6, 7
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN DeblockLumaEq4H_neon
-    vdup.u8	q5, r2
-    vdup.u8	q4, r3
-
-    sub			r3, r0, #4				//	pix -= 4
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,0
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,1
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,2
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,3
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,4
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,5
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,6
-    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,7
-
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,0
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,1
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,2
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,3
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,4
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,5
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,6
-    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,7
-
-    vswp		q9, q10
-    vswp		d17,d18
-    vswp		d21,d22
-    vswp		q13,q14
-    vswp		d25,d26
-    vswp		d29,d30
-    sub			r0, r0, r1	, lsl #4
-
-    MASK_MATRIX	q10, q11, q12, q13, q5, q4, q6
-
-    mov			r2, r2, lsr #2
-    add			r2, r2, #2
-    vdup.u8	q5, r2
-    vabd.u8	q0, q11, q12
-    vclt.u8	q7, q0, q5
-
-    vabd.u8	q1, q9, q11
-    vclt.u8	q1, q1, q4
-    vand.s8	q1, q1, q7
-
-    vabd.u8	q2, q14,q12
-    vclt.u8	q2, q2, q4
-    vand.s8	q2, q2, q7
-    vand.u8	q7, q7, q6
-
-    vmov		q3, q1
-
-    DIFF_LUMA_EQ4_P2P1P0		d16, d18, d20, d22, d24, d26, d2, d0
-    DIFF_LUMA_EQ4_P2P1P0		d17, d19, d21, d23, d25, d27, d3, d1
-
-    vand.u8	q3, q7, q3
-    DIFF_LUMA_EQ4_MASK	q0, q9, q3, q4
-    vmov		q9, q4
-    vbsl.u8	q3, q8, q10
-    DIFF_LUMA_EQ4_MASK	q1,q11, q6, q8
-
-    vand.u8	q7, q7, q2
-
-    DIFF_LUMA_EQ4_P2P1P0		d30, d28, d26, d24, d22, d20, d4, d0
-    DIFF_LUMA_EQ4_P2P1P0		d31, d29, d27, d25, d23, d21, d5, d1
-
-    vbsl.u8	q6, q2, q12
-    DIFF_LUMA_EQ4_MASK	q15, q13, q7, q4
-
-    vbsl.u8	q7, q0, q14
-
-    vmov		q5, q6
-    vmov		q2, q9
-    vmov		q6, q4
-    vmov		q4, q8
-
-    vswp	d8, d6
-    vswp	d5, d7
-    vswp	d5, d8
-    vswp	d14, d12
-    vswp	d11, d13
-    vswp	d11, d14
-
-    sub		r3, r0, #3
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,0
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,1
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,2
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,3
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,4
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,5
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,6
-    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,7
-
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,0
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,1
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,2
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,3
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,4
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,5
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,6
-    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,7
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN DeblockChromaLt4V_neon
-    vdup.u8	q11, r3
-    ldr			r3, [sp, #0]
-
-    sub			r0, r0, r2	, lsl #1
-    sub			r1, r1, r2, lsl #1
-    vdup.u8	    q9, r3
-    ldr			r3, [sp, #4]
-
-    vld1.u8	{d0}, [r0], r2
-    vld1.u8	{d1}, [r1], r2
-    vld1.u8	{d2}, [r0], r2
-    vld1.u8	{d3}, [r1], r2
-    vld1.u8	{d4}, [r0], r2
-    vld1.u8	{d5}, [r1], r2
-    vld1.u8	{d6}, [r0]
-    vld1.u8	{d7}, [r1]
-
-    sub			r0, r0, r2, lsl #1
-    sub			r1, r1, r2, lsl #1
-
-    vld1.s8	{d15}, [r3]
-    vmovl.u8	q6, d15
-    vshl.u64	d13,d12,#8
-    vorr		d12,d13
-    vmov		d13, d12
-    veor		q7, q7
-    vsub.i8	q7,q7,q6
-
-    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q5
-
-    DIFF_LUMA_LT4_P0_Q0	d0, d2, d4, d6, d8, q12, q13
-    DIFF_LUMA_LT4_P0_Q0	d1, d3, d5, d7, d9, q12, q13
-    vmax.s8	q4, q4, q7
-    vmin.s8	q4, q4, q6
-
-    vand.s8	q4, q4, q5
-    vcge.s8	q6, q6, #0
-    vand.s8	q4, q4, q6
-    EXTRACT_DELTA_INTO_TWO_PART	q4, q5
-    vqadd.u8	q1, q1, q5
-    vqsub.u8	q1, q1, q4
-    vst1.u8	{d2}, [r0], r2
-    vst1.u8	{d3}, [r1], r2
-    vqsub.u8	q2, q2, q5
-    vqadd.u8	q2, q2, q4
-    vst1.u8	{d4}, [r0]
-    vst1.u8	{d5}, [r1]
-
-WELS_ASM_FUNC_END
-
-
-    WELS_ASM_FUNC_BEGIN DeblockChromaEq4V_neon
-
-    vdup.u8	q11, r3
-    ldr			r3, [sp, #0]
-
-    sub			r0, r0, r2	, lsl #1
-    sub			r1, r1, r2, lsl #1
-    vdup.u8	q9, r3
-    vld1.u8	{d0}, [r0], r2		//	q0::p1
-    vld1.u8	{d1}, [r1], r2
-    vld1.u8	{d2}, [r0], r2		//	q1::p0
-    vld1.u8	{d3}, [r1], r2
-    vld1.u8	{d4}, [r0], r2		//	q2::q0
-    vld1.u8	{d5}, [r1], r2
-    vld1.u8	{d6}, [r0]				//	q3::q1
-    vld1.u8	{d7}, [r1]
-
-    sub			r0, r0, r2, lsl #1	//	pix = [-1*src_stride]
-    sub			r1, r1, r2, lsl #1
-
-    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q10
-
-    vmov			q11, q10
-
-    DIFF_CHROMA_EQ4_P0Q0		d0, d2, d4, d6, q4, q5, q6, d14, d0		// Cb::p0' q0'
-    DIFF_CHROMA_EQ4_P0Q0		d1, d3, d5, d7, q12, q13, q14, d15, d1	// Cr::p0' q0'
-
-    vbsl.u8	q10, q7, q1
-    vst1.u8	{d20}, [r0], r2
-    vst1.u8	{d21}, [r1], r2
-
-    vbsl.u8	q11, q0, q2
-    vst1.u8	{d22}, [r0]
-    vst1.u8	{d23}, [r1]
-
-WELS_ASM_FUNC_END
-
-WELS_ASM_FUNC_BEGIN DeblockChromaLt4H_neon
-
-    vdup.u8	q11, r3
-    ldr			r3, [sp, #0]
-
-    sub			r0, r0, #2
-    vdup.u8	q9, r3
-    ldr			r3, [sp, #4]
-    sub			r1, r1, #2
-    vld1.s8	{d15}, [r3]
-
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
-    vswp		q1, q2
-    vswp		d1, d2
-    vswp		d6, d5
-
-    vmovl.u8	q6, d15
-    vshl.u64	d13,d12,#8
-    vorr		d12,d13
-    vmov		d13, d12
-    veor		q7, q7
-    vsub.i8	q7,q7,q6
-
-    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q5
-
-    DIFF_LUMA_LT4_P0_Q0	d0, d2, d4, d6, d8, q12, q13
-    DIFF_LUMA_LT4_P0_Q0	d1, d3, d5, d7, d9, q12, q13
-    vmax.s8	q4, q4, q7
-    vmin.s8	q4, q4, q6
-
-    vand.s8	q4, q4, q5
-    vcge.s8	q6, q6, #0
-    vand.s8	q4, q4, q6
-    EXTRACT_DELTA_INTO_TWO_PART	q4, q5
-    vqadd.u8	q1, q1, q5
-    vqsub.u8	q1, q1, q4
-    vqsub.u8	q2, q2, q5
-    vqadd.u8	q2, q2, q4
-
-    sub			r0, r0, r2, lsl #3
-    sub			r1, r1, r2, lsl #3
-    vswp		d1, d2
-    vswp		d6, d5
-    vswp		q1, q2
-
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
-
-WELS_ASM_FUNC_END
-
-WELS_ASM_FUNC_BEGIN DeblockChromaEq4H_neon
-    vdup.u8	q11, r3
-    ldr			r3, [sp, #0]
-
-    sub			r0, r0, #2
-    sub			r1, r1, #2
-
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
-    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
-    vswp		q1, q2
-    vswp		d1, d2
-    vswp		d6, d5
-
-    vdup.u8	q9, r3
-    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q10
-    vmov			q11, q10
-
-    DIFF_CHROMA_EQ4_P0Q0		d0, d2, d4, d6, q8, q9, q12, d8, d10
-    DIFF_CHROMA_EQ4_P0Q0		d1, d3, d5, d7, q13, q14, q15, d9, d11
-
-    vbsl.u8	q10, q4, q1
-    vbsl.u8	q11, q5, q2
-    sub			r0, r0, r2, lsl #3	//	pix: 0th row	[-2]
-    sub			r1, r1, r2, lsl #3
-
-    vmov		q1, q10
-    vmov		q2, q11
-    vswp		d1, d2
-    vswp		d6, d5
-    vswp		q1, q2
-    //	Cb:d0d1d2d3, Cr:d4d5d6d7
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
-    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN enc_avc_non_zero_count_neon
-
-    vld1.64	{d0-d2}, [r0]
-
-    vceq.s8	q0, q0, #0
-    vceq.s8	d2, d2, #0
-    vmvn	q0, q0
-    vmvn	d2, d2
-    vabs.s8	q0, q0
-    vabs.s8	d2, d2
-
-    vst1.64	{d0-d2}, [r0]
-WELS_ASM_FUNC_END
-
-#endif
+/*!
+* \copy
+*     Copyright (c)  2013, Cisco Systems
+*     All rights reserved.
+
+*     Redistribution and use in source and binary forms, with or without
+*     modification, are permitted provided that the following conditions
+*     are met:
+
+*        * Redistributions of source code must retain the above copyright
+*          notice, this list of conditions and the following disclaimer.
+
+*        * Redistributions in binary form must reproduce the above copyright
+*          notice, this list of conditions and the following disclaimer in
+*          the documentation and/or other materials provided with the
+*          distribution.
+
+*     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+*     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+*     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+*     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+*     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+*     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+*     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+*     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+*     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+*     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+*     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+*     POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifdef HAVE_NEON
+.text
+
+#include "arm_arch_common_macro.S"
+
+#ifdef APPLE_IOS
+.macro	JMP_IF_128BITS_IS_ZERO
+    vorr.s16	$2, $0, $1
+    vmov		r3, r2, $2
+    orr			r3, r3, r2
+    cmp			r3, #0
+.endm
+
+.macro	MASK_MATRIX
+    vabd.u8	$6, $1, $2
+    vcgt.u8	$6, $4, $6
+
+    vabd.u8	$4, $0, $1
+    vclt.u8	$4, $4, $5
+    vand.u8	$6, $6, $4
+
+    vabd.u8	$4, $3, $2
+    vclt.u8	$4, $4, $5
+    vand.u8	$6, $6, $4
+.endm
+
+
+.macro	DIFF_LUMA_LT4_P1_Q1
+    vabd.u8	$9, $0, $2
+    vclt.u8	$9, $9, $4
+    vrhadd.u8	$8, $2, $3
+    vhadd.u8	$8, $0, $8
+    vsub.s8	$8, $8, $1
+    vmax.s8	$8, $8, $5
+    vmin.s8	$8, $8, $6
+    vand.s8	$8, $8, $9
+    vand.s8	$8, $8, $7
+    vadd.u8	$8, $1, $8
+    vabs.s8	$9, $9
+.endm
+
+.macro	DIFF_LUMA_LT4_P0_Q0
+    vsubl.u8	$5, $0, $3
+    vsubl.u8	$6, $2, $1
+    vshl.s16	$6, $6, #2
+    vadd.s16	$5, $5, $6
+    vrshrn.s16		$4, $5, #3
+.endm
+
+.macro	DIFF_LUMA_EQ4_P2P1P0
+    vaddl.u8	q4, $1, $2
+    vaddl.u8	q5, $3, $4
+    vadd.u16	q5, q4, q5
+
+    vaddl.u8	q4, $0, $1
+    vshl.u16	q4, q4, #1
+    vadd.u16	q4, q5, q4
+
+    vrshrn.u16		$0, q5, #2
+    vrshrn.u16		$7, q4, #3
+
+    vshl.u16	q5, q5, #1
+    vsubl.u8	q4, $5, $1
+    vadd.u16	q5, q4,q5
+
+    vaddl.u8	q4, $2, $5
+    vaddw.u8	q4, q4, $2
+    vaddw.u8	q4, q4, $3
+
+    vrshrn.u16		d10,q5, #3
+    vrshrn.u16		d8, q4, #2
+    vbsl.u8		$6, d10, d8
+.endm
+
+.macro	DIFF_LUMA_EQ4_MASK
+    vmov	$3, $2
+    vbsl.u8	$3, $0, $1
+.endm
+
+.macro	DIFF_CHROMA_EQ4_P0Q0
+    vaddl.u8	$4, $0, $3
+    vaddw.u8	$5, $4, $1
+    vaddw.u8	$6, $4, $2
+    vaddw.u8	$5, $5, $0
+
+    vaddw.u8	$6, $6, $3
+    vrshrn.u16		$7, $5, #2
+    vrshrn.u16		$8, $6, #2
+.endm
+
+.macro	LORD_CHROMA_DATA_4
+    vld4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r0], r2
+    vld4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r1], r2
+.endm
+
+.macro	STORE_CHROMA_DATA_4
+    vst4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r0], r2
+    vst4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r1], r2
+.endm
+
+.macro	LORD_LUMA_DATA_3
+    vld3.u8	{$0[$6],$1[$6],$2[$6]}, [r2], r1
+    vld3.u8	{$3[$6],$4[$6],$5[$6]}, [r0], r1
+.endm
+
+.macro	STORE_LUMA_DATA_4
+    vst4.u8	{$0[$4],$1[$4],$2[$4],$3[$4]}, [r0], r1
+    vst4.u8	{$0[$5],$1[$5],$2[$5],$3[$5]}, [r2], r1
+.endm
+
+.macro	LORD_LUMA_DATA_4
+    vld4.u8	{$0[$8],$1[$8],$2[$8],$3[$8]}, [r3], r1
+    vld4.u8	{$4[$8],$5[$8],$6[$8],$7[$8]}, [r0], r1
+.endm
+
+.macro	STORE_LUMA_DATA_3
+    vst3.u8	{$0[$6],$1[$6],$2[$6]}, [r3], r1
+    vst3.u8	{$3[$6],$4[$6],$5[$6]}, [r0], r1
+.endm
+
+.macro	EXTRACT_DELTA_INTO_TWO_PART
+    vcge.s8	$1, $0, #0
+    vand	$1, $0, $1
+    vsub.s8	$0, $1, $0
+.endm
+#else
+.macro	JMP_IF_128BITS_IS_ZERO arg0, arg1, arg2
+    vorr.s16	\arg2, \arg0, \arg1
+    vmov		r3, r2, \arg2
+    orr			r3, r3, r2
+    cmp			r3, #0
+.endm
+
+.macro	MASK_MATRIX arg0, arg1, arg2, arg3, arg4, arg5, arg6
+    vabd.u8	\arg6, \arg1, \arg2
+    vcgt.u8	\arg6, \arg4, \arg6
+
+    vabd.u8	\arg4, \arg0, \arg1
+    vclt.u8	\arg4, \arg4, \arg5
+    vand.u8	\arg6, \arg6, \arg4
+
+    vabd.u8	\arg4, \arg3, \arg2
+    vclt.u8	\arg4, \arg4, \arg5
+    vand.u8	\arg6, \arg6, \arg4
+.endm
+
+.macro	DIFF_LUMA_LT4_P1_Q1 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
+    vabd.u8	\arg9, \arg0, \arg2
+    vclt.u8	\arg9, \arg9, \arg4
+    vrhadd.u8	\arg8, \arg2, \arg3
+    vhadd.u8	\arg8, \arg0, \arg8
+    vsub.s8	\arg8, \arg8, \arg1
+    vmax.s8	\arg8, \arg8, \arg5
+    vmin.s8	\arg8, \arg8, \arg6
+    vand.s8	\arg8, \arg8, \arg9
+    vand.s8	\arg8, \arg8, \arg7
+    vadd.u8	\arg8, \arg1, \arg8
+    vabs.s8	\arg9, \arg9
+.endm
+
+.macro	DIFF_LUMA_LT4_P0_Q0 arg0, arg1, arg2, arg3, arg4, arg5, arg6
+    vsubl.u8	\arg5, \arg0, \arg3
+    vsubl.u8	\arg6, \arg2, \arg1
+    vshl.s16	\arg6, \arg6, #2
+    vadd.s16	\arg5, \arg5, \arg6
+    vrshrn.s16		\arg4, \arg5, #3
+.endm
+
+
+.macro	DIFF_LUMA_EQ4_P2P1P0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
+    vaddl.u8	q4, \arg1, \arg2
+    vaddl.u8	q5, \arg3, \arg4
+    vadd.u16	q5, q4, q5
+
+    vaddl.u8	q4, \arg0, \arg1
+    vshl.u16	q4, q4, #1
+    vadd.u16	q4, q5, q4
+
+    vrshrn.u16		\arg0, q5, #2
+    vrshrn.u16		\arg7, q4, #3
+
+    vshl.u16	q5, q5, #1
+    vsubl.u8	q4, \arg5, \arg1
+    vadd.u16	q5, q4,q5
+
+    vaddl.u8	q4, \arg2, \arg5
+    vaddw.u8	q4, q4, \arg2
+    vaddw.u8	q4, q4, \arg3
+
+    vrshrn.u16		d10,q5, #3
+    vrshrn.u16		d8, q4, #2
+    vbsl.u8		\arg6, d10, d8
+.endm
+
+.macro	DIFF_LUMA_EQ4_MASK arg0, arg1, arg2, arg3
+    vmov	\arg3, \arg2
+    vbsl.u8	\arg3, \arg0, \arg1
+.endm
+
+.macro	DIFF_CHROMA_EQ4_P0Q0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+    vaddl.u8	\arg4, \arg0, \arg3
+    vaddw.u8	\arg5, \arg4, \arg1
+    vaddw.u8	\arg6, \arg4, \arg2
+    vaddw.u8	\arg5, \arg5, \arg0
+    vaddw.u8	\arg6, \arg6, \arg3
+    vrshrn.u16		\arg7, \arg5, #2
+    vrshrn.u16		\arg8, \arg6, #2
+.endm
+
+.macro	LORD_CHROMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+    vld4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r0], r2
+    vld4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r1], r2
+.endm
+
+.macro	STORE_CHROMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+    vst4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r0], r2
+    vst4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r1], r2
+.endm
+
+.macro	LORD_LUMA_DATA_3 arg0, arg1, arg2, arg3, arg4, arg5, arg6
+    vld3.u8	{\arg0[\arg6],\arg1[\arg6],\arg2[\arg6]}, [r2], r1
+    vld3.u8	{\arg3[\arg6],\arg4[\arg6],\arg5[\arg6]}, [r0], r1
+.endm
+
+.macro	STORE_LUMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5
+    vst4.u8	{\arg0[\arg4],\arg1[\arg4],\arg2[\arg4],\arg3[\arg4]}, [r0], r1
+    vst4.u8	{\arg0[\arg5],\arg1[\arg5],\arg2[\arg5],\arg3[\arg5]}, [r2], r1
+.endm
+
+.macro	LORD_LUMA_DATA_4 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+    vld4.u8	{\arg0[\arg8],\arg1[\arg8],\arg2[\arg8],\arg3[\arg8]}, [r3], r1
+    vld4.u8	{\arg4[\arg8],\arg5[\arg8],\arg6[\arg8],\arg7[\arg8]}, [r0], r1
+.endm
+
+.macro	STORE_LUMA_DATA_3 arg0, arg1, arg2, arg3, arg4, arg5, arg6
+    vst3.u8	{\arg0[\arg6],\arg1[\arg6],\arg2[\arg6]}, [r3], r1
+    vst3.u8	{\arg3[\arg6],\arg4[\arg6],\arg5[\arg6]}, [r0], r1
+.endm
+
+.macro	EXTRACT_DELTA_INTO_TWO_PART arg0, arg1
+    vcge.s8	\arg1, \arg0, #0
+    vand	\arg1, \arg0, \arg1
+    vsub.s8	\arg0, \arg1, \arg0
+.endm
+#endif
+
+WELS_ASM_FUNC_BEGIN DeblockLumaLt4V_neon
+    vdup.u8	q11, r2
+    vdup.u8	q9, r3
+
+    add			r2, r1, r1, lsl #1
+    sub			r2, r0, r2
+    vld1.u8	{q0}, [r2], r1
+    vld1.u8	{q3}, [r0], r1
+    vld1.u8	{q1}, [r2], r1
+    vld1.u8	{q4}, [r0], r1
+    vld1.u8	{q2}, [r2]
+    vld1.u8	{q5}, [r0]
+    sub			r2, r2, r1
+
+    ldr			r3, [sp, #0]
+    vld1.s8	{d31}, [r3]
+    vdup.s8	d28, d31[0]
+    vdup.s8	d30, d31[1]
+    vdup.s8	d29, d31[2]
+    vdup.s8	d31, d31[3]
+    vtrn.32	d28, d30
+    vtrn.32	d29, d31
+    vcge.s8	q10, q14, #0
+
+    MASK_MATRIX	q1, q2, q3, q4, q11, q9, q15
+    vand.u8	q10, q10, q15
+
+    veor		q15, q15
+    vsub.i8	q15,q15,q14
+
+    DIFF_LUMA_LT4_P1_Q1	q0, q1, q2, q3, q9, q15, q14, q10, q6, q12
+    vst1.u8	{q6}, [r2], r1
+
+    DIFF_LUMA_LT4_P1_Q1	q5, q4, q3, q2, q9, q15, q14, q10, q7, q13
+
+    vabs.s8	q12, q12
+    vabs.s8	q13, q13
+    vadd.u8	q14,q14,q12
+    vadd.u8	q14,q14,q13
+    veor		q15, q15
+    vsub.i8	q15,q15,q14
+
+    DIFF_LUMA_LT4_P0_Q0	d2, d4, d6, d8, d16, q12, q13
+    DIFF_LUMA_LT4_P0_Q0	d3, d5, d7, d9, d17, q12, q13
+    vmax.s8	q8, q8, q15
+    vmin.s8	q8, q8, q14
+    vand.s8	q8, q8, q10
+    EXTRACT_DELTA_INTO_TWO_PART	q8, q9
+    vqadd.u8	q2, q2, q9
+    vqsub.u8	q2, q2, q8
+    vst1.u8	{q2}, [r2], r1
+    vqsub.u8	q3, q3, q9
+    vqadd.u8	q3, q3, q8
+    vst1.u8	{q3}, [r2]	, r1
+    vst1.u8	{q7}, [r2]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN DeblockLumaEq4V_neon
+
+    vdup.u8	q5, r2
+    vdup.u8	q4, r3
+
+    sub			r3, r0, r1, lsl #2
+    vld1.u8	{q8},  [r3], r1
+    vld1.u8	{q12}, [r0], r1
+    vld1.u8	{q9},  [r3], r1
+    vld1.u8	{q13}, [r0], r1
+    vld1.u8	{q10}, [r3], r1
+    vld1.u8	{q14}, [r0], r1
+    vld1.u8	{q11}, [r3]
+    vld1.u8	{q15}, [r0]
+    sub			r3, r3, r1	, lsl #1
+
+    MASK_MATRIX	q10, q11, q12, q13, q5, q4, q6
+
+    mov			r2, r2, lsr #2
+    add			r2, r2, #2
+    vdup.u8	q5, r2
+    vabd.u8	q0, q11, q12
+    vclt.u8	q7, q0, q5
+
+    vabd.u8	q1, q9, q11
+    vclt.u8	q1, q1, q4
+    vand.s8	q1, q1, q7
+
+    vabd.u8	q2, q14,q12
+    vclt.u8	q2, q2, q4
+    vand.s8	q2, q2, q7
+    vand.u8	q7, q7, q6
+
+    vmov		q3, q1
+
+    DIFF_LUMA_EQ4_P2P1P0		d16, d18, d20, d22, d24, d26, d2, d0
+    DIFF_LUMA_EQ4_P2P1P0		d17, d19, d21, d23, d25, d27, d3, d1
+
+    vand.u8	q3, q7, q3
+    DIFF_LUMA_EQ4_MASK	q0, q9, q3, q4
+    vst1.u8	{q4}, [r3], r1
+    DIFF_LUMA_EQ4_MASK	q8,q10, q3, q4
+    vst1.u8	{q4}, [r3], r1
+    DIFF_LUMA_EQ4_MASK	q1,q11, q6, q4
+    vst1.u8	{q4}, [r3], r1
+
+    vmov		q0, q2
+    DIFF_LUMA_EQ4_P2P1P0		d30, d28, d26, d24, d22, d20, d4, d6
+    DIFF_LUMA_EQ4_P2P1P0		d31, d29, d27, d25, d23, d21, d5, d7
+
+    vand.u8	q0, q7, q0
+    DIFF_LUMA_EQ4_MASK	q2,  q12, q6, q4
+    vst1.u8	{q4}, [r3], r1
+    DIFF_LUMA_EQ4_MASK	q15, q13, q0, q4
+    vst1.u8	{q4}, [r3], r1
+    DIFF_LUMA_EQ4_MASK	q3,  q14, q0, q4
+    vst1.u8	{q4}, [r3], r1
+
+WELS_ASM_FUNC_END
+
+
+    WELS_ASM_FUNC_BEGIN DeblockLumaLt4H_neon
+
+    vdup.u8	q11, r2
+    vdup.u8	q9, r3
+
+    sub			r2, r0, #3
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 0
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 1
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 2
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 3
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 4
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 5
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 6
+    LORD_LUMA_DATA_3		d0, d1, d2, d6, d7, d8, 7
+
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 0
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 1
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 2
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 3
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 4
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 5
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 6
+    LORD_LUMA_DATA_3		d3, d4, d5, d9, d10, d11, 7
+
+    vswp		d1, d2
+    vswp		d3, d4
+    vswp		d1, d4
+    vswp		d7, d8
+    vswp		d9, d10
+    vswp		d7, d10
+
+    sub			r0, r0, r1, lsl #4
+
+    ldr			r3, [sp, #0]
+    vld1.s8	{d31}, [r3]
+    vdup.s8	d28, d31[0]
+    vdup.s8	d30, d31[1]
+    vdup.s8	d29, d31[2]
+    vdup.s8	d31, d31[3]
+    vtrn.32	d28, d30
+    vtrn.32	d29, d31
+    vcge.s8	q10, q14, #0
+
+    MASK_MATRIX	q1, q2, q3, q4, q11, q9, q15
+    vand.u8	q10, q10, q15
+
+    veor		q15, q15
+    vsub.i8	q15,q15,q14
+
+    DIFF_LUMA_LT4_P1_Q1	q0, q1, q2, q3, q9, q15, q14, q10, q6, q12
+    DIFF_LUMA_LT4_P1_Q1	q5, q4, q3, q2, q9, q15, q14, q10, q7, q13
+
+    vabs.s8	q12, q12
+    vabs.s8	q13, q13
+    vadd.u8	q14,q14,q12
+    vadd.u8	q14,q14,q13
+    veor		q15, q15
+    vsub.i8	q15,q15,q14
+
+    DIFF_LUMA_LT4_P0_Q0	d2, d4, d6, d8, d16, q12, q13
+    DIFF_LUMA_LT4_P0_Q0	d3, d5, d7, d9, d17, q12, q13
+    vmax.s8	q8, q8, q15
+    vmin.s8	q8, q8, q14
+    vand.s8	q8, q8, q10
+    EXTRACT_DELTA_INTO_TWO_PART	q8, q9
+    vqadd.u8	q2, q2, q9
+    vqsub.u8	q2, q2, q8
+
+    vqsub.u8	q3, q3, q9
+    vqadd.u8	q3, q3, q8
+
+    sub		r0, #2
+    add		r2, r0, r1
+    lsl		r1, #1
+
+    vmov		q1, q6
+    vmov		q4, q7
+
+    vswp		q2, q3
+    vswp		d3, d6
+    vswp		d5, d8
+
+    STORE_LUMA_DATA_4		d2, d3, d4, d5, 0, 1
+    STORE_LUMA_DATA_4		d2, d3, d4, d5, 2, 3
+    STORE_LUMA_DATA_4		d2, d3, d4, d5, 4, 5
+    STORE_LUMA_DATA_4		d2, d3, d4, d5, 6, 7
+
+    STORE_LUMA_DATA_4		d6, d7, d8, d9, 0, 1
+    STORE_LUMA_DATA_4		d6, d7, d8, d9, 2, 3
+    STORE_LUMA_DATA_4		d6, d7, d8, d9, 4, 5
+    STORE_LUMA_DATA_4		d6, d7, d8, d9, 6, 7
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN DeblockLumaEq4H_neon
+    vdup.u8	q5, r2
+    vdup.u8	q4, r3
+
+    sub			r3, r0, #4				//	pix -= 4
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,0
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,1
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,2
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,3
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,4
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,5
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,6
+    LORD_LUMA_DATA_4		d16,d17,d18,d19,d24,d25,d26,d27,7
+
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,0
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,1
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,2
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,3
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,4
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,5
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,6
+    LORD_LUMA_DATA_4		d20,d21,d22,d23,d28,d29,d30,d31,7
+
+    vswp		q9, q10
+    vswp		d17,d18
+    vswp		d21,d22
+    vswp		q13,q14
+    vswp		d25,d26
+    vswp		d29,d30
+    sub			r0, r0, r1	, lsl #4
+
+    MASK_MATRIX	q10, q11, q12, q13, q5, q4, q6
+
+    mov			r2, r2, lsr #2
+    add			r2, r2, #2
+    vdup.u8	q5, r2
+    vabd.u8	q0, q11, q12
+    vclt.u8	q7, q0, q5
+
+    vabd.u8	q1, q9, q11
+    vclt.u8	q1, q1, q4
+    vand.s8	q1, q1, q7
+
+    vabd.u8	q2, q14,q12
+    vclt.u8	q2, q2, q4
+    vand.s8	q2, q2, q7
+    vand.u8	q7, q7, q6
+
+    vmov		q3, q1
+
+    DIFF_LUMA_EQ4_P2P1P0		d16, d18, d20, d22, d24, d26, d2, d0
+    DIFF_LUMA_EQ4_P2P1P0		d17, d19, d21, d23, d25, d27, d3, d1
+
+    vand.u8	q3, q7, q3
+    DIFF_LUMA_EQ4_MASK	q0, q9, q3, q4
+    vmov		q9, q4
+    vbsl.u8	q3, q8, q10
+    DIFF_LUMA_EQ4_MASK	q1,q11, q6, q8
+
+    vand.u8	q7, q7, q2
+
+    DIFF_LUMA_EQ4_P2P1P0		d30, d28, d26, d24, d22, d20, d4, d0
+    DIFF_LUMA_EQ4_P2P1P0		d31, d29, d27, d25, d23, d21, d5, d1
+
+    vbsl.u8	q6, q2, q12
+    DIFF_LUMA_EQ4_MASK	q15, q13, q7, q4
+
+    vbsl.u8	q7, q0, q14
+
+    vmov		q5, q6
+    vmov		q2, q9
+    vmov		q6, q4
+    vmov		q4, q8
+
+    vswp	d8, d6
+    vswp	d5, d7
+    vswp	d5, d8
+    vswp	d14, d12
+    vswp	d11, d13
+    vswp	d11, d14
+
+    sub		r3, r0, #3
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,0
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,1
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,2
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,3
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,4
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,5
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,6
+    STORE_LUMA_DATA_3		d4,d5,d6,d10,d11,d12,7
+
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,0
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,1
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,2
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,3
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,4
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,5
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,6
+    STORE_LUMA_DATA_3		d7,d8,d9,d13,d14,d15,7
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN DeblockChromaLt4V_neon
+    vdup.u8	q11, r3
+    ldr			r3, [sp, #0]
+
+    sub			r0, r0, r2	, lsl #1
+    sub			r1, r1, r2, lsl #1
+    vdup.u8	    q9, r3
+    ldr			r3, [sp, #4]
+
+    vld1.u8	{d0}, [r0], r2
+    vld1.u8	{d1}, [r1], r2
+    vld1.u8	{d2}, [r0], r2
+    vld1.u8	{d3}, [r1], r2
+    vld1.u8	{d4}, [r0], r2
+    vld1.u8	{d5}, [r1], r2
+    vld1.u8	{d6}, [r0]
+    vld1.u8	{d7}, [r1]
+
+    sub			r0, r0, r2, lsl #1
+    sub			r1, r1, r2, lsl #1
+
+    vld1.s8	{d15}, [r3]
+    vmovl.u8	q6, d15
+    vshl.u64	d13,d12,#8
+    vorr		d12,d13
+    vmov		d13, d12
+    veor		q7, q7
+    vsub.i8	q7,q7,q6
+
+    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q5
+
+    DIFF_LUMA_LT4_P0_Q0	d0, d2, d4, d6, d8, q12, q13
+    DIFF_LUMA_LT4_P0_Q0	d1, d3, d5, d7, d9, q12, q13
+    vmax.s8	q4, q4, q7
+    vmin.s8	q4, q4, q6
+
+    vand.s8	q4, q4, q5
+    vcge.s8	q6, q6, #0
+    vand.s8	q4, q4, q6
+    EXTRACT_DELTA_INTO_TWO_PART	q4, q5
+    vqadd.u8	q1, q1, q5
+    vqsub.u8	q1, q1, q4
+    vst1.u8	{d2}, [r0], r2
+    vst1.u8	{d3}, [r1], r2
+    vqsub.u8	q2, q2, q5
+    vqadd.u8	q2, q2, q4
+    vst1.u8	{d4}, [r0]
+    vst1.u8	{d5}, [r1]
+
+WELS_ASM_FUNC_END
+
+
+    WELS_ASM_FUNC_BEGIN DeblockChromaEq4V_neon
+
+    vdup.u8	q11, r3
+    ldr			r3, [sp, #0]
+
+    sub			r0, r0, r2	, lsl #1
+    sub			r1, r1, r2, lsl #1
+    vdup.u8	q9, r3
+    vld1.u8	{d0}, [r0], r2		//	q0::p1
+    vld1.u8	{d1}, [r1], r2
+    vld1.u8	{d2}, [r0], r2		//	q1::p0
+    vld1.u8	{d3}, [r1], r2
+    vld1.u8	{d4}, [r0], r2		//	q2::q0
+    vld1.u8	{d5}, [r1], r2
+    vld1.u8	{d6}, [r0]				//	q3::q1
+    vld1.u8	{d7}, [r1]
+
+    sub			r0, r0, r2, lsl #1	//	pix = [-1*src_stride]
+    sub			r1, r1, r2, lsl #1
+
+    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q10
+
+    vmov			q11, q10
+
+    DIFF_CHROMA_EQ4_P0Q0		d0, d2, d4, d6, q4, q5, q6, d14, d0		// Cb::p0' q0'
+    DIFF_CHROMA_EQ4_P0Q0		d1, d3, d5, d7, q12, q13, q14, d15, d1	// Cr::p0' q0'
+
+    vbsl.u8	q10, q7, q1
+    vst1.u8	{d20}, [r0], r2
+    vst1.u8	{d21}, [r1], r2
+
+    vbsl.u8	q11, q0, q2
+    vst1.u8	{d22}, [r0]
+    vst1.u8	{d23}, [r1]
+
+WELS_ASM_FUNC_END
+
+WELS_ASM_FUNC_BEGIN DeblockChromaLt4H_neon
+
+    vdup.u8	q11, r3
+    ldr			r3, [sp, #0]
+
+    sub			r0, r0, #2
+    vdup.u8	q9, r3
+    ldr			r3, [sp, #4]
+    sub			r1, r1, #2
+    vld1.s8	{d15}, [r3]
+
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
+    vswp		q1, q2
+    vswp		d1, d2
+    vswp		d6, d5
+
+    vmovl.u8	q6, d15
+    vshl.u64	d13,d12,#8
+    vorr		d12,d13
+    vmov		d13, d12
+    veor		q7, q7
+    vsub.i8	q7,q7,q6
+
+    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q5
+
+    DIFF_LUMA_LT4_P0_Q0	d0, d2, d4, d6, d8, q12, q13
+    DIFF_LUMA_LT4_P0_Q0	d1, d3, d5, d7, d9, q12, q13
+    vmax.s8	q4, q4, q7
+    vmin.s8	q4, q4, q6
+
+    vand.s8	q4, q4, q5
+    vcge.s8	q6, q6, #0
+    vand.s8	q4, q4, q6
+    EXTRACT_DELTA_INTO_TWO_PART	q4, q5
+    vqadd.u8	q1, q1, q5
+    vqsub.u8	q1, q1, q4
+    vqsub.u8	q2, q2, q5
+    vqadd.u8	q2, q2, q4
+
+    sub			r0, r0, r2, lsl #3
+    sub			r1, r1, r2, lsl #3
+    vswp		d1, d2
+    vswp		d6, d5
+    vswp		q1, q2
+
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
+
+WELS_ASM_FUNC_END
+
+WELS_ASM_FUNC_BEGIN DeblockChromaEq4H_neon
+    vdup.u8	q11, r3
+    ldr			r3, [sp, #0]
+
+    sub			r0, r0, #2
+    sub			r1, r1, #2
+
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
+    LORD_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
+    vswp		q1, q2
+    vswp		d1, d2
+    vswp		d6, d5
+
+    vdup.u8	q9, r3
+    MASK_MATRIX	q0, q1, q2, q3, q11, q9, q10
+    vmov			q11, q10
+
+    DIFF_CHROMA_EQ4_P0Q0		d0, d2, d4, d6, q8, q9, q12, d8, d10
+    DIFF_CHROMA_EQ4_P0Q0		d1, d3, d5, d7, q13, q14, q15, d9, d11
+
+    vbsl.u8	q10, q4, q1
+    vbsl.u8	q11, q5, q2
+    sub			r0, r0, r2, lsl #3	//	pix: 0th row	[-2]
+    sub			r1, r1, r2, lsl #3
+
+    vmov		q1, q10
+    vmov		q2, q11
+    vswp		d1, d2
+    vswp		d6, d5
+    vswp		q1, q2
+    //	Cb:d0d1d2d3, Cr:d4d5d6d7
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 0
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 1
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 2
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 3
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 4
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 5
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 6
+    STORE_CHROMA_DATA_4	d0, d1, d2, d3, d4, d5, d6, d7, 7
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN enc_avc_non_zero_count_neon
+
+    vld1.64	{d0-d2}, [r0]
+
+    vceq.s8	q0, q0, #0
+    vceq.s8	d2, d2, #0
+    vmvn	q0, q0
+    vmvn	d2, d2
+    vabs.s8	q0, q0
+    vabs.s8	d2, d2
+
+    vst1.64	{d0-d2}, [r0]
+WELS_ASM_FUNC_END
+
+#endif
--- a/codec/decoder/core/arm/block_add_neon.S
+++ b/codec/decoder/core/arm/block_add_neon.S
@@ -1,203 +1,203 @@
-/*!
- * \copy
- *     Copyright (c)  2013, Cisco Systems
- *     All rights reserved.
- *
- *     Redistribution and use in source and binary forms, with or without
- *     modification, are permitted provided that the following conditions
- *     are met:
- *
- *        * Redistributions of source code must retain the above copyright
- *          notice, this list of conditions and the following disclaimer.
- *
- *        * Redistributions in binary form must reproduce the above copyright
- *          notice, this list of conditions and the following disclaimer in
- *          the documentation and/or other materials provided with the
- *          distribution.
- *
- *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- *     POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifdef HAVE_NEON
-.text
-#include "arm_arch_common_macro.S"
-#ifdef APPLE_IOS
-
-.macro	ROW_TRANSFORM_1_STEP
-//	{	//	input: src_d[0]~[3], output: e_q[0]~[3]; working: $8 $9
-    vaddl.s16		$4, $0, $2			//int32 e[i][0] = src[0] + src[2];
-    vsubl.s16		$5, $0, $2			//int32 e[i][1] = src[0] - src[2];
-    vshr.s16		$8, $1, #1
-    vshr.s16		$9, $3, #1
-    vsubl.s16		$6, $8, $3			//int32 e[i][2] = (src[1]>>1)-src[3];
-    vaddl.s16		$7, $1, $9			//int32 e[i][3] = src[1] + (src[3]>>1);
-//	}
-.endm
-
-.macro	TRANSFORM_4BYTES	// both row & col transform used
-//	{	//	output: f_q[0]~[3], input: e_q[0]~[3];
-    vadd.s32		$0, $4, $7			//int16 f[i][0] = e[i][0] + e[i][3];
-    vadd.s32		$1, $5, $6			//int16 f[i][1] = e[i][1] + e[i][2];
-    vsub.s32		$2, $5, $6			//int16 f[i][2] = e[i][1] - e[i][2];
-    vsub.s32		$3, $4, $7			//int16 f[i][3] = e[i][0] - e[i][3];
-//	}
-.endm
-
-.macro	COL_TRANSFORM_1_STEP
-//	{	//	input: src_q[0]~[3], output: e_q[0]~[3];
-    vadd.s32		$4, $0, $2			//int32 e[0][j] = f[0][j] + f[2][j];
-    vsub.s32		$5, $0, $2			//int32 e[1][j] = f[0][j] - f[2][j];
-    vshr.s32		$6, $1, #1
-    vshr.s32		$7, $3, #1
-    vsub.s32		$6, $6, $3			//int32 e[2][j] = (f[1][j]>>1) - f[3][j];
-    vadd.s32		$7, $1, $7			//int32 e[3][j] = f[1][j] + (f[3][j]>>1);
-//	}
-.endm
-
-#else
-
-.macro	ROW_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
-//	{	//	input: src_d[0]~[3], output: e_q[0]~[3]; working: $8 $9
-    vaddl.s16		\arg4, \arg0, \arg2			//int32 e[i][0] = src[0] + src[2];
-    vsubl.s16		\arg5, \arg0, \arg2			//int32 e[i][1] = src[0] - src[2];
-    vshr.s16		\arg8, \arg1, #1
-    vshr.s16		\arg9, \arg3, #1
-    vsubl.s16		\arg6, \arg8, \arg3			//int32 e[i][2] = (src[1]>>1)-src[3];
-    vaddl.s16		\arg7, \arg1, \arg9			//int32 e[i][3] = src[1] + (src[3]>>1);
-//	}
-.endm
-
-.macro	TRANSFORM_4BYTES  arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 // both row & col transform used
-//	{	//	output: f_q[0]~[3], input: e_q[0]~[3];
-    vadd.s32		\arg0, \arg4, \arg7			//int16 f[i][0] = e[i][0] + e[i][3];
-    vadd.s32		\arg1, \arg5, \arg6			//int16 f[i][1] = e[i][1] + e[i][2];
-    vsub.s32		\arg2, \arg5, \arg6			//int16 f[i][2] = e[i][1] - e[i][2];
-    vsub.s32		\arg3, \arg4, \arg7			//int16 f[i][3] = e[i][0] - e[i][3];
-//	}
-.endm
-
-.macro	COL_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
-//	{	//	input: src_q[0]~[3], output: e_q[0]~[3];
-    vadd.s32		\arg4, \arg0, \arg2			//int32 e[0][j] = f[0][j] + f[2][j];
-    vsub.s32		\arg5, \arg0, \arg2			//int32 e[1][j] = f[0][j] - f[2][j];
-    vshr.s32		\arg6, \arg1, #1
-    vshr.s32		\arg7, \arg3, #1
-    vsub.s32		\arg6, \arg6, \arg3			//int32 e[2][j] = (f[1][j]>>1) - f[3][j];
-    vadd.s32		\arg7, \arg1, \arg7			//int32 e[3][j] = f[1][j] + (f[3][j]>>1);
-//	}
-.endm
-#endif
-// r0    int16_t* block,
-// r1    int8_t* non_zero_count,
-WELS_ASM_FUNC_BEGIN SetNonZeroCount_neon
-
-	vld1.64	{d0-d2}, [r1]
-
-	vceq.s8	q0, q0, #0
-	vceq.s8	d2, d2, #0
-	vmvn	q0, q0
-	vmvn	d2, d2
-	vabs.s8	q0, q0
-	vabs.s8	d2, d2
-
-	vst1.64	{d0-d2}, [r1]
-WELS_ASM_FUNC_END
-
-
-//	r0 int16_t * block,
-//	r1	int32_t stride
-WELS_ASM_FUNC_BEGIN WelsResBlockZero16x16_neon// can use for 256*sizeof(int16_t)
-	push		{r2}
-	mov			r2, #16
-// each row 16 elements, 16*sizeof(int16_t)
-//	memset(ptr_dest, 0, 16*sizeof(int16_t));
-//	ptr_dest += stride;
-	lsl			r1, r1, #1	// r1 = 2*r1
-	veor.i16	q0, q0, q0
-	veor.i16	q1, q1, q1
-
-block_zero_16x16_luma_loop:
-	vst1.i16	{q0, q1}, [r0], r1
-	subs		r2,	r2, #2
-	vst1.i16	{q0, q1}, [r0], r1
-	bne			block_zero_16x16_luma_loop
-
-	pop		{r2}
-WELS_ASM_FUNC_END
-
-WELS_ASM_FUNC_BEGIN WelsResBlockZero8x8_neon// can use for 64*sizeof(int16_t)
-	push		{r2}
-	mov			r2, #8
-// each row 8 elements, 8*sizeof(int16_t)
-//	memset(ptr_dest, 0, 8*sizeof(int16_t));
-//	ptr_dest += stride;
-	lsl			r1, r1, #1
-	veor.i16	q0, q0, q0
-
-block_zero_8x8_chma_loop:
-	vst1.i16	{q0}, [r0], r1
-	subs		r2,	r2, #2
-	vst1.i16	{q0}, [r0], r1
-	bne			block_zero_8x8_chma_loop
-
-	pop		{r2}
-WELS_ASM_FUNC_END
-
-
-//	uint8_t *pred, const int32_t stride, int16_t *rs
-WELS_ASM_FUNC_BEGIN IdctResAddPred_neon
-
-	vld4.s16		{d0, d1, d2, d3}, [r2]		// cost 3 cycles!
-
-	ROW_TRANSFORM_1_STEP		d0, d1, d2, d3, q4, q5, q6, q7, d4, d5
-
-	TRANSFORM_4BYTES		q0, q1, q2, q3, q4, q5, q6, q7
-
-	// transform element 32bits
-	vtrn.s32		q0, q1				//[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
-	vtrn.s32		q2, q3				//[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
-	vswp			d1, d4				//[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
-	vswp			d3, d6				//[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
-
-	COL_TRANSFORM_1_STEP		q0, q1, q2, q3, q4, q5, q6, q7
-
-	TRANSFORM_4BYTES		q0, q1, q2, q3, q4, q5, q6, q7
-
-	//after clip_table[MAX_NEG_CROP] into [0, 255]
-	mov			r2, r0
-	vld1.32		{d12[0]},[r0],r1
-	vld1.32		{d12[1]},[r0],r1
-	vld1.32		{d14[0]},[r0],r1
-	vld1.32		{d14[1]},[r0]
-
-	vrshrn.s32		d8, q0, #6
-	vrshrn.s32		d9, q1, #6
-	vrshrn.s32		d10, q2, #6
-	vrshrn.s32		d11, q3, #6
-
-	vmovl.u8		q0,d12
-	vmovl.u8		q1,d14
-	vadd.s16		q0,q4
-	vadd.s16		q1,q5
-
-	vqmovun.s16		d12,q0
-	vqmovun.s16		d14,q1
-
-	vst1.32		{d12[0]},[r2],r1
-	vst1.32		{d12[1]},[r2],r1
-	vst1.32		{d14[0]},[r2],r1
-	vst1.32		{d14[1]},[r2]
-WELS_ASM_FUNC_END
-#endif
+/*!
+ * \copy
+ *     Copyright (c)  2013, Cisco Systems
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef HAVE_NEON
+.text
+#include "arm_arch_common_macro.S"
+#ifdef APPLE_IOS
+
+.macro	ROW_TRANSFORM_1_STEP
+//	{	//	input: src_d[0]~[3], output: e_q[0]~[3]; working: $8 $9
+    vaddl.s16		$4, $0, $2			//int32 e[i][0] = src[0] + src[2];
+    vsubl.s16		$5, $0, $2			//int32 e[i][1] = src[0] - src[2];
+    vshr.s16		$8, $1, #1
+    vshr.s16		$9, $3, #1
+    vsubl.s16		$6, $8, $3			//int32 e[i][2] = (src[1]>>1)-src[3];
+    vaddl.s16		$7, $1, $9			//int32 e[i][3] = src[1] + (src[3]>>1);
+//	}
+.endm
+
+.macro	TRANSFORM_4BYTES	// both row & col transform used
+//	{	//	output: f_q[0]~[3], input: e_q[0]~[3];
+    vadd.s32		$0, $4, $7			//int16 f[i][0] = e[i][0] + e[i][3];
+    vadd.s32		$1, $5, $6			//int16 f[i][1] = e[i][1] + e[i][2];
+    vsub.s32		$2, $5, $6			//int16 f[i][2] = e[i][1] - e[i][2];
+    vsub.s32		$3, $4, $7			//int16 f[i][3] = e[i][0] - e[i][3];
+//	}
+.endm
+
+.macro	COL_TRANSFORM_1_STEP
+//	{	//	input: src_q[0]~[3], output: e_q[0]~[3];
+    vadd.s32		$4, $0, $2			//int32 e[0][j] = f[0][j] + f[2][j];
+    vsub.s32		$5, $0, $2			//int32 e[1][j] = f[0][j] - f[2][j];
+    vshr.s32		$6, $1, #1
+    vshr.s32		$7, $3, #1
+    vsub.s32		$6, $6, $3			//int32 e[2][j] = (f[1][j]>>1) - f[3][j];
+    vadd.s32		$7, $1, $7			//int32 e[3][j] = f[1][j] + (f[3][j]>>1);
+//	}
+.endm
+
+#else
+
+.macro	ROW_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
+//	{	//	input: src_d[0]~[3], output: e_q[0]~[3]; working: $8 $9
+    vaddl.s16		\arg4, \arg0, \arg2			//int32 e[i][0] = src[0] + src[2];
+    vsubl.s16		\arg5, \arg0, \arg2			//int32 e[i][1] = src[0] - src[2];
+    vshr.s16		\arg8, \arg1, #1
+    vshr.s16		\arg9, \arg3, #1
+    vsubl.s16		\arg6, \arg8, \arg3			//int32 e[i][2] = (src[1]>>1)-src[3];
+    vaddl.s16		\arg7, \arg1, \arg9			//int32 e[i][3] = src[1] + (src[3]>>1);
+//	}
+.endm
+
+.macro	TRANSFORM_4BYTES  arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 // both row & col transform used
+//	{	//	output: f_q[0]~[3], input: e_q[0]~[3];
+    vadd.s32		\arg0, \arg4, \arg7			//int16 f[i][0] = e[i][0] + e[i][3];
+    vadd.s32		\arg1, \arg5, \arg6			//int16 f[i][1] = e[i][1] + e[i][2];
+    vsub.s32		\arg2, \arg5, \arg6			//int16 f[i][2] = e[i][1] - e[i][2];
+    vsub.s32		\arg3, \arg4, \arg7			//int16 f[i][3] = e[i][0] - e[i][3];
+//	}
+.endm
+
+.macro	COL_TRANSFORM_1_STEP arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7
+//	{	//	input: src_q[0]~[3], output: e_q[0]~[3];
+    vadd.s32		\arg4, \arg0, \arg2			//int32 e[0][j] = f[0][j] + f[2][j];
+    vsub.s32		\arg5, \arg0, \arg2			//int32 e[1][j] = f[0][j] - f[2][j];
+    vshr.s32		\arg6, \arg1, #1
+    vshr.s32		\arg7, \arg3, #1
+    vsub.s32		\arg6, \arg6, \arg3			//int32 e[2][j] = (f[1][j]>>1) - f[3][j];
+    vadd.s32		\arg7, \arg1, \arg7			//int32 e[3][j] = f[1][j] + (f[3][j]>>1);
+//	}
+.endm
+#endif
+// r0    int16_t* block,
+// r1    int8_t* non_zero_count,
+WELS_ASM_FUNC_BEGIN SetNonZeroCount_neon
+
+	vld1.64	{d0-d2}, [r1]
+
+	vceq.s8	q0, q0, #0
+	vceq.s8	d2, d2, #0
+	vmvn	q0, q0
+	vmvn	d2, d2
+	vabs.s8	q0, q0
+	vabs.s8	d2, d2
+
+	vst1.64	{d0-d2}, [r1]
+WELS_ASM_FUNC_END
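
The vceq/vmvn/vabs sequence above collapses every count to a 0/1 flag: comparing with zero yields 0xFF for zero entries, the bitwise NOT turns that into 0x00 (and non-zero entries into 0xFF, i.e. -1 as signed), and the absolute value maps -1 to 1. A minimal C sketch of the same effect, assuming the 24 entries covered by the three D registers loaded here (the block pointer in r0 is not used by this kernel):

#include <stdint.h>

void SetNonZeroCount_c(int8_t* non_zero_count) {
    // Reduce each count to 1 if non-zero, 0 if zero (24 entries = d0-d2).
    for (int i = 0; i < 24; i++)
        non_zero_count[i] = (non_zero_count[i] != 0) ? 1 : 0;
}
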
+
+
+//	r0 int16_t * block,
+//	r1	int32_t stride
+WELS_ASM_FUNC_BEGIN WelsResBlockZero16x16_neon// can use for 256*sizeof(int16_t)
+	push		{r2}
+	mov			r2, #16
+// each row 16 elements, 16*sizeof(int16_t)
+//	memset(ptr_dest, 0, 16*sizeof(int16_t));
+//	ptr_dest += stride;
+	lsl			r1, r1, #1	// r1 = 2*r1
+	veor.i16	q0, q0, q0
+	veor.i16	q1, q1, q1
+
+block_zero_16x16_luma_loop:
+	vst1.i16	{q0, q1}, [r0], r1
+	subs		r2,	r2, #2
+	vst1.i16	{q0, q1}, [r0], r1
+	bne			block_zero_16x16_luma_loop
+
+	pop		{r2}
+WELS_ASM_FUNC_END
+
+WELS_ASM_FUNC_BEGIN WelsResBlockZero8x8_neon// can use for 64*sizeof(int16_t)
+	push		{r2}
+	mov			r2, #8
+// each row 8 elements, 8*sizeof(int16_t)
+//	memset(ptr_dest, 0, 8*sizeof(int16_t));
+//	ptr_dest += stride;
+	lsl			r1, r1, #1
+	veor.i16	q0, q0, q0
+
+block_zero_8x8_chma_loop:
+	vst1.i16	{q0}, [r0], r1
+	subs		r2,	r2, #2
+	vst1.i16	{q0}, [r0], r1
+	bne			block_zero_8x8_chma_loop
+
+	pop		{r2}
+WELS_ASM_FUNC_END
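
Both zeroing kernels are strided row clears over int16_t residuals; the lsl r1, r1, #1 turns the element stride into a byte stride before the vector stores. A minimal C sketch covering both sizes (hypothetical helper name):

#include <stdint.h>
#include <string.h>

// n == 16 corresponds to WelsResBlockZero16x16_neon, n == 8 to the 8x8 kernel.
static void ResBlockZeroNxN_c(int16_t* block, int32_t stride, int n) {
    for (int y = 0; y < n; y++) {
        memset(block, 0, n * sizeof(int16_t)); // clear one row of n coefficients
        block += stride;                       // stride is given in elements
    }
}
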
+
+
+//	uint8_t *pred, const int32_t stride, int16_t *rs
+WELS_ASM_FUNC_BEGIN IdctResAddPred_neon
+
+	vld4.s16		{d0, d1, d2, d3}, [r2]		// cost 3 cycles!
+
+	ROW_TRANSFORM_1_STEP		d0, d1, d2, d3, q4, q5, q6, q7, d4, d5
+
+	TRANSFORM_4BYTES		q0, q1, q2, q3, q4, q5, q6, q7
+
+	// transform element 32bits
+	vtrn.s32		q0, q1				//[0 1 2 3]+[4 5 6 7]-->[0 4 2 6]+[1 5 3 7]
+	vtrn.s32		q2, q3				//[8 9 10 11]+[12 13 14 15]-->[8 12 10 14]+[9 13 11 15]
+	vswp			d1, d4				//[0 4 2 6]+[8 12 10 14]-->[0 4 8 12]+[2 6 10 14]
+	vswp			d3, d6				//[1 5 3 7]+[9 13 11 15]-->[1 5 9 13]+[3 7 11 15]
+
+	COL_TRANSFORM_1_STEP		q0, q1, q2, q3, q4, q5, q6, q7
+
+	TRANSFORM_4BYTES		q0, q1, q2, q3, q4, q5, q6, q7
+
+	//after clip_table[MAX_NEG_CROP] into [0, 255]
+	mov			r2, r0
+	vld1.32		{d12[0]},[r0],r1
+	vld1.32		{d12[1]},[r0],r1
+	vld1.32		{d14[0]},[r0],r1
+	vld1.32		{d14[1]},[r0]
+
+	vrshrn.s32		d8, q0, #6
+	vrshrn.s32		d9, q1, #6
+	vrshrn.s32		d10, q2, #6
+	vrshrn.s32		d11, q3, #6
+
+	vmovl.u8		q0,d12
+	vmovl.u8		q1,d14
+	vadd.s16		q0,q4
+	vadd.s16		q1,q5
+
+	vqmovun.s16		d12,q0
+	vqmovun.s16		d14,q1
+
+	vst1.32		{d12[0]},[r2],r1
+	vst1.32		{d12[1]},[r2],r1
+	vst1.32		{d14[0]},[r2],r1
+	vst1.32		{d14[1]},[r2]
+WELS_ASM_FUNC_END
+#endif
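
For reference, a minimal scalar sketch of what the macros plus IdctResAddPred_neon compute: a row pass and a column pass of the 4x4 inverse-transform butterfly (ROW/COL_TRANSFORM_1_STEP followed by TRANSFORM_4BYTES), rounding by (x + 32) >> 6 (the vrshrn.s32 #6), then a saturating add onto the prediction block (the vqmovun.s16). Transform1D and Clip255 are hypothetical helper names, not part of the source.

#include <stdint.h>

static void Transform1D(const int32_t src[4], int32_t dst[4]) {
    int32_t e0 = src[0] + src[2];            // ROW/COL_TRANSFORM_1_STEP
    int32_t e1 = src[0] - src[2];
    int32_t e2 = (src[1] >> 1) - src[3];
    int32_t e3 = src[1] + (src[3] >> 1);
    dst[0] = e0 + e3;                        // TRANSFORM_4BYTES
    dst[1] = e1 + e2;
    dst[2] = e1 - e2;
    dst[3] = e0 - e3;
}

static uint8_t Clip255(int32_t v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

void IdctResAddPred_c(uint8_t* pred, int32_t stride, const int16_t* rs) {
    int32_t tmp[16], out[4];
    for (int i = 0; i < 4; i++) {            // horizontal (row) transform
        int32_t row[4] = { rs[4*i], rs[4*i+1], rs[4*i+2], rs[4*i+3] };
        Transform1D(row, &tmp[4*i]);
    }
    for (int j = 0; j < 4; j++) {            // vertical (column) transform
        int32_t col[4] = { tmp[j], tmp[4+j], tmp[8+j], tmp[12+j] };
        Transform1D(col, out);
        for (int i = 0; i < 4; i++)          // round, add to prediction, clamp
            pred[i*stride + j] = Clip255(pred[i*stride + j] + ((out[i] + 32) >> 6));
    }
}
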
--- a/codec/decoder/core/arm/intra_pred_neon.S
+++ b/codec/decoder/core/arm/intra_pred_neon.S
@@ -1,649 +1,649 @@
-/*!
- * \copy
- *     Copyright (c)  2013, Cisco Systems
- *     All rights reserved.
- *
- *     Redistribution and use in source and binary forms, with or without
- *     modification, are permitted provided that the following conditions
- *     are met:
- *
- *        * Redistributions of source code must retain the above copyright
- *          notice, this list of conditions and the following disclaimer.
- *
- *        * Redistributions in binary form must reproduce the above copyright
- *          notice, this list of conditions and the following disclaimer in
- *          the documentation and/or other materials provided with the
- *          distribution.
- *
- *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- *     POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifdef  HAVE_NEON
-//Global macro
-.text
-#include "arm_arch_common_macro.S"
-
-#ifdef APPLE_IOS
-//Global macro
-.macro GET_8BYTE_DATA
-	vld1.8 {$0[0]}, [$1], $2
-	vld1.8 {$0[1]}, [$1], $2
-	vld1.8 {$0[2]}, [$1], $2
-	vld1.8 {$0[3]}, [$1], $2
-	vld1.8 {$0[4]}, [$1], $2
-	vld1.8 {$0[5]}, [$1], $2
-	vld1.8 {$0[6]}, [$1], $2
-	vld1.8 {$0[7]}, [$1], $2
-.endmacro
-#else
-//Global macro
-.macro GET_8BYTE_DATA arg0, arg1, arg2
-	vld1.8 {\arg0[0]}, [\arg1], \arg2
-	vld1.8 {\arg0[1]}, [\arg1], \arg2
-	vld1.8 {\arg0[2]}, [\arg1], \arg2
-	vld1.8 {\arg0[3]}, [\arg1], \arg2
-	vld1.8 {\arg0[4]}, [\arg1], \arg2
-	vld1.8 {\arg0[5]}, [\arg1], \arg2
-	vld1.8 {\arg0[6]}, [\arg1], \arg2
-	vld1.8 {\arg0[7]}, [\arg1], \arg2
-.endm
-#endif
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredV_neon
-	//Get the top line data to 'q0'
-	sub  r2, r0, r1
-	vldm r2, {d0, d1}
-
-	mov  r2, r0
-	mov  r3, #4
-	//Set the top line to the each line of MB(16*16)
-loop_0_get_i16x16_luma_pred_v:
-	vst1.8 {d0,d1}, [r2], r1
-	vst1.8 {d0,d1}, [r2], r1
-	vst1.8 {d0,d1}, [r2], r1
-	vst1.8 {d0,d1}, [r2], r1
-	subs  r3, #1
-	bne  loop_0_get_i16x16_luma_pred_v
-
-WELS_ASM_FUNC_END
-
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredH_neon
-	sub  r2, r0, #1
-	mov  r3, #4
-loop_0_get_i16x16_luma_pred_h:
-	//Get one byte data from left side
-	vld1.8 {d0[],d1[]}, [r2], r1
-	vld1.8 {d2[],d3[]}, [r2], r1
-	vld1.8 {d4[],d5[]}, [r2], r1
-	vld1.8 {d6[],d7[]}, [r2], r1
-
-	//Set the line of MB using the left side byte data
-	vst1.8 {d0,d1}, [r0], r1
-	vst1.8 {d2,d3}, [r0], r1
-	vst1.8 {d4,d5}, [r0], r1
-	vst1.8 {d6,d7}, [r0], r1
-
-	subs  r3, #1
-	bne  loop_0_get_i16x16_luma_pred_h
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredDc_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Get the left vertical line data
-	sub r2, r0, #1
-	GET_8BYTE_DATA d0, r2, r1
-	GET_8BYTE_DATA d1, r2, r1
-
-	//Get the top horizontal line data
-	sub  r2, r0, r1
-	vldm r2, {d2, d3}
-
-	//Calculate the sum of top horizontal line data and vertical line data
-	vpaddl.u8 q0, q0
-	vpaddl.u8 q1, q1
-	vadd.u16  q0, q0, q1
-	vadd.u16  d0, d0, d1
-	vpaddl.u16 d0, d0
-	vpaddl.u32 d0, d0
-
-	//Calculate the mean value
-	vrshr.u16  d0, d0, #5
-	vdup.8     q0, d0[0]
-
-	//Set the mean value to the all of member of MB
-	mov  r2, #4
-loop_0_get_i16x16_luma_pred_dc_both:
-	vst1.8 {d0,d1}, [r0], r1
-	vst1.8 {d0,d1}, [r0], r1
-	vst1.8 {d0,d1}, [r0], r1
-	vst1.8 {d0,d1}, [r0], r1
-	subs  r2, #1
-	bne  loop_0_get_i16x16_luma_pred_dc_both
-
-WELS_ASM_FUNC_END
-
-
-
-//The table for SIMD instruction {(8,7,6,5,4,3,2,1) * 5}
-CONST0_GET_I16X16_LUMA_PRED_PLANE: .long 0x191e2328, 0x050a0f14
-
-//The table for SIMD instruction {-7,-6,-5,-4,-3,-2,-1,0}
-CONST1_GET_I16X16_LUMA_PRED_PLANE: .long 0xfcfbfaf9, 0x00fffefd
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredPlane_neon
-	//stmdb sp!, { r2-r5, lr}
-
-	//Load the table {(8,7,6,5,4,3,2,1) * 5}
-	adr r2, CONST0_GET_I16X16_LUMA_PRED_PLANE
-	vldr    d0, [r2]
-
-	//Pack the top[-1] ~ top[6] to d1
-	sub       r2,  r0, r1
-	sub       r3,  r2, #1
-	vld1.8    d1, [r3]
-
-	//Pack the top[8] ~ top[15] to d2
-	add       r3, #9
-	vld1.8    d2, [r3]
-
-	//Save the top[15] to d6 for next step
-	vdup.u8   d6,   d2[7]
-
-	//Get and pack left[-1] ~ left[6] to d4
-	sub       r3,  r2, #1
-	GET_8BYTE_DATA d4, r3, r1
-
-	//Get and pack left[8] ~ left[15] to d3
-	add       r3,  r1
-	GET_8BYTE_DATA d3, r3, r1
-
-	//Save the left[15] to d7 for next step
-	vdup.u8   d7,   d3[7]
-
-	//revert the sequence of d2,d3
-	vrev64.8   q1, q1
-
-	vsubl.u8   q2, d3, d4 //q2={left[8]-left[6],left[9]-left[5],left[10]-left[4], ...}
-	vsubl.u8   q1, d2, d1 //q1={top[8]-top[6],top[9]-top[5],top[10]-top[4], ...}
-
-
-	vmovl.u8   q0, d0
-	vmul.s16   q1, q0, q1 //q1 = q1*{(8,7,6,5,4,3,2,1) * 5}
-	vmul.s16   q2, q0, q2 //q2 = q2*{(8,7,6,5,4,3,2,1) * 5}
-
-	//Calculate the sum of items of q1, q2
-	vpadd.s16  d0, d2, d3
-	vpadd.s16  d1, d4, d5
-	vpaddl.s16 q0, q0
-	vpaddl.s32 q0, q0
-
-	//Get the value of 'b', 'c' and extend to q1, q2.
-	vrshr.s64  q0, #6
-	vdup.s16   q1, d0[0]
-	vdup.s16   q2, d1[0]
-
-	//Load the table {-7,-6,-5,-4,-3,-2,-1,0} to d0
-	adr r2, CONST1_GET_I16X16_LUMA_PRED_PLANE
-	vld1.32   {d0}, [r2]
-
-	//Get the value of 'a' and save to q3
-	vaddl.u8  q3, d6, d7
-	vshl.u16  q3, #4
-
-	//calculate a+'b'*{-7,-6,-5,-4,-3,-2,-1,0} + c*{-7}
-	vmovl.s8  q0, d0
-	vmla.s16  q3, q0, q1
-	vmla.s16  q3, q2, d0[0]
-
-	//Calculate a+'b'*{1,2,3,4,5,6,7,8} + c*{-7}
-	vshl.s16  q5, q1, #3
-	vadd.s16  q5, q3
-
-	//right shift 5 bits and rounding
-	vqrshrun.s16 d0, q3, #5
-	vqrshrun.s16 d1, q5, #5
-
-	//Set the line of MB
-	vst1.u32  {d0,d1}, [r0], r1
-
-
-	//Do the same processing for setting other lines
-	mov  r2, #15
-loop_0_get_i16x16_luma_pred_plane:
-	vadd.s16  q3, q2
-	vadd.s16  q5, q2
-	vqrshrun.s16 d0, q3, #5
-	vqrshrun.s16 d1, q5, #5
-	vst1.u32  {d0,d1}, [r0], r1
-	subs  r2, #1
-	bne  loop_0_get_i16x16_luma_pred_plane
-
-WELS_ASM_FUNC_END
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredV_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row (4 bytes)
-	sub  r2, r0, r1
-	ldr  r2, [r2]
-
-	//Set the luma MB using top line
-	str  r2, [r0], r1
-	str  r2, [r0], r1
-	str  r2, [r0], r1
-	str  r2, [r0]
-
-WELS_ASM_FUNC_END
-
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredH_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the left column (4 bytes)
-	sub  r2, r0, #1
-	vld1.8 {d0[]}, [r2], r1
-	vld1.8 {d1[]}, [r2], r1
-	vld1.8 {d2[]}, [r2], r1
-	vld1.8 {d3[]}, [r2]
-
-	//Set the luma MB using the left side byte
-	vst1.32 {d0[0]}, [r0], r1
-	vst1.32 {d1[0]}, [r0], r1
-	vst1.32 {d2[0]}, [r0], r1
-	vst1.32 {d3[0]}, [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredDDL_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row data(8 bytes)
-	sub    r2,  r0, r1
-	vld1.32  {d0}, [r2]
-
-	//For "t7 + (t7<<1)"
-	vdup.8   d1,  d0[7]
-
-	//calculate "t0+t1,t1+t2,t2+t3...t6+t7,t7+t7"
-	vext.8   d1,  d0, d1, #1
-	vaddl.u8 q1,  d1, d0
-
-	//calculate "x,t0+t1+t1+t2,t1+t2+t2+t3,...t5+t6+t6+t7,t6+t7+t7+t7"
-	vext.8   q2,  q1, q1, #14
-	vadd.u16 q0,  q1, q2
-
-	//right shift 2 bits and rounding
-	vqrshrn.u16  d0,  q0, #2
-
-	//Save "ddl0, ddl1, ddl2, ddl3"
-	vext.8   d1, d0, d0, #1
-	vst1.32  d1[0], [r0], r1
-
-	//Save "ddl1, ddl2, ddl3, ddl4"
-	vext.8   d1, d0, d0, #2
-	vst1.32  d1[0], [r0], r1
-
-	//Save "ddl2, ddl3, ddl4, ddl5"
-	vext.8   d1, d0, d0, #3
-	vst1.32  d1[0], [r0], r1
-
-	//Save "ddl3, ddl4, ddl5, ddl6"
-	vst1.32  d0[1], [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredDDR_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row (4 bytes)
-	sub    r2,  r0, r1
-	vld1.32  {d0[1]}, [r2]
-
-	//Load the left column (5 bytes)
-	sub    r2,  #1
-	vld1.8 {d0[3]}, [r2], r1
-	vld1.8 {d0[2]}, [r2], r1
-	vld1.8 {d0[1]}, [r2], r1
-	vld1.8 {d0[0]}, [r2], r1
-	vld1.8 {d1[7]}, [r2] //For packing the right sequence to do SIMD processing
-
-
-	vext.8   d2, d1, d0, #7   //d0:{L2,L1,L0,LT,T0,T1,T2,T3}
-	                          //d2:{L3,L2,L1,L0,LT,T0,T1,T2}
-
-	//q2:{L2+L3,L1+L2,L0+L1...T1+T2,T2+T3}
-	vaddl.u8 q2, d2, d0
-
-	//q1:{TL0+LT0,LT0+T01,...L12+L23}
-	vext.8   q3, q3, q2, #14
-	vadd.u16 q1, q2, q3
-
-	//right shift 2 bits and rounding
-	vqrshrn.u16 d0, q1, #2
-
-	//Adjust the data sequence for setting luma MB of 'pred'
-	vst1.32   d0[1], [r0], r1
-	vext.8    d0, d0, d0, #7
-	vst1.32   d0[1], [r0], r1
-	vext.8    d0, d0, d0, #7
-	vst1.32   d0[1], [r0], r1
-	vext.8    d0, d0, d0, #7
-	vst1.32   d0[1], [r0]
-
-WELS_ASM_FUNC_END
-
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredVL_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row (8 bytes)
-	sub    r2,  r0, r1
-	vld1.32  {d0}, [r2]
-
-
-	vext.8   d1,  d0, d0, #1
-	vaddl.u8 q1,  d1, d0     //q1:{t0+t1,t1+t2,t2+t3...t5+t6,x,x}
-
-	vext.8   q2,  q1, q1, #2
-	vadd.u16 q2,  q1, q2     //q2:{t0+t1+t1+t2,t1+t2+t2+t3,...t4+t5+t5+t6,x,x}
-
-	//calculate the "vl0,vl1,vl2,vl3,vl4"
-	vqrshrn.u16  d0,  q1, #1
-
-	//calculate the "vl5,vl6,vl7,vl8,vl9"
-	vqrshrn.u16  d1,  q2, #2
-
-	//Adjust the data sequence for setting the luma MB
-	vst1.32  d0[0], [r0], r1
-	vst1.32  d1[0], [r0], r1
-	vext.8   d0,  d0, d0, #1
-	vext.8   d1,  d1, d1, #1
-	vst1.32  d0[0], [r0], r1
-	vst1.32  d1[0], [r0]
-
-WELS_ASM_FUNC_END
-
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredVR_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row (4 bytes)
-	sub       r2,  r0, r1
-	vld1.32   {d0[1]}, [r2]
-
-	//Load the left column (4 bytes)
-	sub       r2,  #1
-	vld1.8    {d0[3]}, [r2], r1
-	vld1.8    {d0[2]}, [r2], r1
-	vld1.8    {d0[1]}, [r2], r1
-	vld1.8    {d0[0]}, [r2]
-
-
-	vext.8    d1, d0, d0, #7
-	vaddl.u8  q1, d0, d1      //q1:{X,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1,T1+T2,T2+T3}
-
-	vext.u8   q2, q1, q1, #14
-	vadd.u16  q2, q2, q1      //q2:{X,L2+L1+L1+L0,L1+L0+L0+LT,...T1+T2+T2+T3}
-
-	//Calculate the vr0 ~ vr9
-	vqrshrn.u16 d1, q2, #2
-	vqrshrn.u16 d0, q1, #1
-
-	//Adjust the data sequence for setting the luma MB
-	vst1.32  d0[1], [r0], r1
-	vst1.32  d1[1], [r0], r1
-	add    r2, r0, r1
-	vst1.8   d1[3], [r0]!
-	vst1.16  d0[2], [r0]!
-	vst1.8   d0[6], [r0]!
-	vst1.8   d1[2], [r2]!
-	vst1.16  d1[2], [r2]!
-	vst1.8   d1[6], [r2]
-WELS_ASM_FUNC_END
-
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredHU_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the left column data
-	sub       r2,  r0, #1
-	mov       r3,  #3
-	mul       r3,  r1
-	add       r3,  r2
-	vld1.8    {d0[]},  [r3]
-	vld1.8    {d0[4]}, [r2], r1
-	vld1.8    {d0[5]}, [r2], r1
-	vld1.8    {d0[6]}, [r2], r1 //d0:{L3,L3,L3,L3,L0,L1,L2,L3}
-
-	vext.8    d1, d0, d0, #1
-	vaddl.u8  q2, d0, d1        //q2:{L3+L3,L3+L3,L3+L3,L3+L0,L0+L1,L1+L2,L2+L3,L3+L3}
-
-	vext.u8   d2, d5, d4, #2
-	vadd.u16  d3, d2, d5        //d3:{L0+L1+L1+L2,L1+L2+L2+L3,L2+L3+L3+L3,L3+L3+L3+L3}
-
-	//Calculate the hu0 ~ hu5
-	vqrshrn.u16 d2, q2, #1
-	vqrshrn.u16 d1, q1, #2
-
-	//Adjust the data sequence for setting the luma MB
-	vzip.8   d2, d1
-	vst1.32  d1[0], [r0], r1
-	vext.8   d2, d1, d1, #2
-	vst1.32  d2[0], [r0], r1
-	vst1.32  d1[1], [r0], r1
-	vst1.32  d0[0], [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredHD_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the data
-	sub       r2,  r0, r1
-	sub       r2,  #1
-	vld1.32   {d0[1]}, [r2], r1
-	vld1.8    {d0[3]}, [r2], r1
-	vld1.8    {d0[2]}, [r2], r1
-	vld1.8    {d0[1]}, [r2], r1
-	vld1.8    {d0[0]}, [r2]	    //d0:{L3,L2,L1,L0,LT,T0,T1,T2}
-
-
-	vext.8    d1, d0, d0, #7
-	vaddl.u8  q1, d0, d1        //q1:{x,L3+L2,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1,T1+T2}
-
-	vext.u8   q2, q1, q1, #14   //q2:{x,x, L3+L2,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1}
-	vadd.u16  q3, q2, q1        //q3:{x,x,L3+L2+L2+L1,L2+L1+L1+L0,L1+L0+L0+LT,L0+LT+LT+T0,LT+T0+T0+T1,T0+T1+T1+T2}
-
-	//Calculate the hd0~hd9
-	vqrshrn.u16 d1, q3, #2
-	vqrshrn.u16 d0, q2, #1
-
-	//Adjust the data sequence for setting the luma MB
-	vmov      d3, d1
-	vtrn.8    d0, d1
-	vext.u8   d2, d1, d1, #6
-	vst2.16  {d2[3], d3[3]}, [r0], r1
-	vst2.16  {d0[2], d1[2]}, [r0], r1
-	vmov     d3, d0
-	vst2.16  {d2[2], d3[2]}, [r0], r1
-	vst2.16  {d0[1], d1[1]}, [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredV_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Get the top row (8 byte)
-	sub  r2, r0, r1
-	vldr d0, [r2]
-
-	//Set the chroma MB using top row data
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d0}, [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredH_neon
-	//stmdb sp!, { r2-r5, lr}
-	////Get the left column (8 byte)
-	sub  r2, r0, #1
-	vld1.8 {d0[]}, [r2], r1
-	vld1.8 {d1[]}, [r2], r1
-	vld1.8 {d2[]}, [r2], r1
-	vld1.8 {d3[]}, [r2], r1
-	vld1.8 {d4[]}, [r2], r1
-	vld1.8 {d5[]}, [r2], r1
-	vld1.8 {d6[]}, [r2], r1
-	vld1.8 {d7[]}, [r2]
-
-	//Set the chroma MB using left column data
-	vst1.8 {d0}, [r0], r1
-	vst1.8 {d1}, [r0], r1
-	vst1.8 {d2}, [r0], r1
-	vst1.8 {d3}, [r0], r1
-	vst1.8 {d4}, [r0], r1
-	vst1.8 {d5}, [r0], r1
-	vst1.8 {d6}, [r0], r1
-	vst1.8 {d7}, [r0]
-
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredDC_neon
-    //stmdb sp!, { r2-r5, lr}
-    //Load the left column data (8 bytes)
-    sub r2, r0, #1
-    GET_8BYTE_DATA d0, r2, r1
-
-    //Load the top row data (8 bytes)
-    sub  r2, r0, r1
-    vldr d1, [r2]
-
-    //Calculate the sum of left column and top row
-    vpaddl.u8  q0, q0
-    vpaddl.u16 q0, q0
-    vadd.u32   d2, d0, d1 //'m1' save to d2
-
-    vrshr.u32  q0, q0, #2 //calculate 'm2','m3'
-    vrshr.u32  d2, d2, #3 //calculate 'm4'
-
-    //duplicate the 'mx' to a vector line
-    vdup.8     d4, d2[0]
-    vdup.8     d5, d1[4]
-    vdup.8     d6, d0[4]
-    vdup.8     d7, d2[4]
-
-    //Set the chroma MB
-    vst2.32 {d4[0],d5[0]}, [r0], r1
-    vst2.32 {d4[0],d5[0]}, [r0], r1
-    vst2.32 {d4[0],d5[0]}, [r0], r1
-    vst2.32 {d4[0],d5[0]}, [r0], r1
-    vst2.32 {d6[0],d7[0]}, [r0], r1
-    vst2.32 {d6[0],d7[0]}, [r0], r1
-    vst2.32 {d6[0],d7[0]}, [r0], r1
-    vst2.32 {d6[0],d7[0]}, [r0]
-
-WELS_ASM_FUNC_END
-
-
-//Table {{1,2,3,4,1,2,3,4}*17}
-CONST0_GET_I_CHROMA_PRED_PLANE: .long 0x44332211, 0x44332211//0x140f0a05, 0x28231e19
-//Table {-3,-2,-1,0,1,2,3,4}
-CONST1_GET_I_CHROMA_PRED_PLANE: .long 0xfffefffd, 0x0000ffff,0x00020001,0x00040003
-
-WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredPlane_neon
-	//stmdb sp!, { r2-r5, lr}
-	//Load the top row data
-	sub  r2, r0, #1
-	sub  r2, r1
-	vld1.32 {d1[0]}, [r2]
-	add  r2, #5
-	vld1.32 {d0[0]}, [r2]
-
-	//Load the left column data
-	sub  r2, #5
-	vld1.8 {d1[4]}, [r2], r1
-	vld1.8 {d1[5]}, [r2], r1
-	vld1.8 {d1[6]}, [r2], r1
-	vld1.8 {d1[7]}, [r2], r1 //d1:{LT,T0,T1,T2,LT,L0,L1,L2}
-	add  r2, r1
-	vld1.8 {d0[4]}, [r2], r1
-	vld1.8 {d0[5]}, [r2], r1
-	vld1.8 {d0[6]}, [r2], r1
-	vld1.8 {d0[7]}, [r2]     //d0:{T4,T5,T6,T7,L4,L5,L6.L7}
-
-
-	//Save T7 to d3 for next step
-	vdup.u8   d3,   d0[3]
-	//Save L7 to d4 for next step
-	vdup.u8   d4,   d0[7]
-
-	//Calculate the value of 'a' and save to q2
-	vaddl.u8  q2, d3, d4
-	vshl.u16  q2, #4
-
-	//Load the table {{1,2,3,4,1,2,3,4}*17}
-	adr r2, CONST0_GET_I_CHROMA_PRED_PLANE
-	vld1.32   {d2}, [r2]
-
-	//Calculate the 'b','c', and save to q0
-	vrev32.8  d1, d1
-	vsubl.u8  q0, d0, d1
-	vmovl.u8   q1, d2
-	vmul.s16   q0, q1
-	vpaddl.s16 q0, q0
-	vpaddl.s32 q0, q0
-	vrshr.s64  q0, #5
-
-	//Load the table {-3,-2,-1,0,1,2,3,4} to q3
-	adr r2, CONST1_GET_I_CHROMA_PRED_PLANE
-	vld1.32   {d6, d7}, [r2]
-
-	//Duplicate the 'b','c' to q0, q1 for SIMD instruction
-	vdup.s16   q1, d1[0]
-	vdup.s16   q0, d0[0]
-
-	//Calculate the "(a + b * (j - 3) + c * (- 3) + 16) >> 5;"
-	vmla.s16   q2, q0, q3
-	vmla.s16   q2, q1, d6[0]
-	vqrshrun.s16 d0, q2, #5
-
-	//Set a line of chroma MB
-	vst1.u32  {d0}, [r0], r1
-
-	//Do the same processing for each line.
-	mov  r2, #7
-loop_0_get_i_chroma_pred_plane:
-	vadd.s16   q2, q1
-	vqrshrun.s16 d0, q2, #5
-	vst1.u32  {d0}, [r0], r1
-	subs  r2, #1
-	bne  loop_0_get_i_chroma_pred_plane
-
-WELS_ASM_FUNC_END
-
-#endif
+/*!
+ * \copy
+ *     Copyright (c)  2013, Cisco Systems
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef  HAVE_NEON
+//Global macro
+.text
+#include "arm_arch_common_macro.S"
+
+#ifdef APPLE_IOS
+//Global macro
+.macro GET_8BYTE_DATA
+	vld1.8 {$0[0]}, [$1], $2
+	vld1.8 {$0[1]}, [$1], $2
+	vld1.8 {$0[2]}, [$1], $2
+	vld1.8 {$0[3]}, [$1], $2
+	vld1.8 {$0[4]}, [$1], $2
+	vld1.8 {$0[5]}, [$1], $2
+	vld1.8 {$0[6]}, [$1], $2
+	vld1.8 {$0[7]}, [$1], $2
+.endmacro
+#else
+//Global macro
+.macro GET_8BYTE_DATA arg0, arg1, arg2
+	vld1.8 {\arg0[0]}, [\arg1], \arg2
+	vld1.8 {\arg0[1]}, [\arg1], \arg2
+	vld1.8 {\arg0[2]}, [\arg1], \arg2
+	vld1.8 {\arg0[3]}, [\arg1], \arg2
+	vld1.8 {\arg0[4]}, [\arg1], \arg2
+	vld1.8 {\arg0[5]}, [\arg1], \arg2
+	vld1.8 {\arg0[6]}, [\arg1], \arg2
+	vld1.8 {\arg0[7]}, [\arg1], \arg2
+.endm
+#endif
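
GET_8BYTE_DATA gathers one byte per row into a single D register and leaves the pointer advanced past the eighth row; the prediction kernels below use it to read a column of left neighbours. A minimal C sketch of the same gather (hypothetical names):

#include <stdint.h>

static void Get8ByteColumn(uint8_t dst[8], const uint8_t** src, int32_t stride) {
    for (int i = 0; i < 8; i++) {
        dst[i] = **src;   // one byte per row
        *src += stride;   // the macro also post-increments the source pointer
    }
}
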
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredV_neon
+	//Get the top line data to 'q0'
+	sub  r2, r0, r1
+	vldm r2, {d0, d1}
+
+	mov  r2, r0
+	mov  r3, #4
+	//Set the top line to the each line of MB(16*16)
+loop_0_get_i16x16_luma_pred_v:
+	vst1.8 {d0,d1}, [r2], r1
+	vst1.8 {d0,d1}, [r2], r1
+	vst1.8 {d0,d1}, [r2], r1
+	vst1.8 {d0,d1}, [r2], r1
+	subs  r3, #1
+	bne  loop_0_get_i16x16_luma_pred_v
+
+WELS_ASM_FUNC_END
+
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredH_neon
+	sub  r2, r0, #1
+	mov  r3, #4
+loop_0_get_i16x16_luma_pred_h:
+	//Get one byte data from left side
+	vld1.8 {d0[],d1[]}, [r2], r1
+	vld1.8 {d2[],d3[]}, [r2], r1
+	vld1.8 {d4[],d5[]}, [r2], r1
+	vld1.8 {d6[],d7[]}, [r2], r1
+
+	//Set the line of MB using the left side byte data
+	vst1.8 {d0,d1}, [r0], r1
+	vst1.8 {d2,d3}, [r0], r1
+	vst1.8 {d4,d5}, [r0], r1
+	vst1.8 {d6,d7}, [r0], r1
+
+	subs  r3, #1
+	bne  loop_0_get_i16x16_luma_pred_h
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredDc_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Get the left vertical line data
+	sub r2, r0, #1
+	GET_8BYTE_DATA d0, r2, r1
+	GET_8BYTE_DATA d1, r2, r1
+
+	//Get the top horizontal line data
+	sub  r2, r0, r1
+	vldm r2, {d2, d3}
+
+	//Calculate the sum of top horizontal line data and vertical line data
+	vpaddl.u8 q0, q0
+	vpaddl.u8 q1, q1
+	vadd.u16  q0, q0, q1
+	vadd.u16  d0, d0, d1
+	vpaddl.u16 d0, d0
+	vpaddl.u32 d0, d0
+
+	//Calculate the mean value
+	vrshr.u16  d0, d0, #5
+	vdup.8     q0, d0[0]
+
+	//Set the mean value to the all of member of MB
+	mov  r2, #4
+loop_0_get_i16x16_luma_pred_dc_both:
+	vst1.8 {d0,d1}, [r0], r1
+	vst1.8 {d0,d1}, [r0], r1
+	vst1.8 {d0,d1}, [r0], r1
+	vst1.8 {d0,d1}, [r0], r1
+	subs  r2, #1
+	bne  loop_0_get_i16x16_luma_pred_dc_both
+
+WELS_ASM_FUNC_END
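
The DC kernel sums the 16 top and 16 left neighbours and fills the macroblock with the rounded mean; it only covers the case where both neighbours are available (the top-only, left-only and constant-128 fallbacks are presumably handled elsewhere). A minimal scalar sketch, with pred pointing into the reconstructed picture so the neighbours sit at negative offsets:

#include <stdint.h>

void I16x16PredDc_c(uint8_t* pred, int32_t stride) {
    int32_t sum = 0;
    for (int i = 0; i < 16; i++)
        sum += pred[i - stride] + pred[i * stride - 1]; // top row + left column
    uint8_t dc = (uint8_t)((sum + 16) >> 5);            // the vrshr.u16 #5
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            pred[y * stride + x] = dc;
}
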
+
+
+
+//The table for SIMD instruction {(8,7,6,5,4,3,2,1) * 5}
+CONST0_GET_I16X16_LUMA_PRED_PLANE: .long 0x191e2328, 0x050a0f14
+
+//The table for SIMD instruction {-7,-6,-5,-4,-3,-2,-1,0}
+CONST1_GET_I16X16_LUMA_PRED_PLANE: .long 0xfcfbfaf9, 0x00fffefd
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI16x16LumaPredPlane_neon
+	//stmdb sp!, { r2-r5, lr}
+
+	//Load the table {(8,7,6,5,4,3,2,1) * 5}
+	adr r2, CONST0_GET_I16X16_LUMA_PRED_PLANE
+	vldr    d0, [r2]
+
+	//Pack the top[-1] ~ top[6] to d1
+	sub       r2,  r0, r1
+	sub       r3,  r2, #1
+	vld1.8    d1, [r3]
+
+	//Pack the top[8] ~ top[15] to d2
+	add       r3, #9
+	vld1.8    d2, [r3]
+
+	//Save the top[15] to d6 for next step
+	vdup.u8   d6,   d2[7]
+
+	//Get and pack left[-1] ~ left[6] to d4
+	sub       r3,  r2, #1
+	GET_8BYTE_DATA d4, r3, r1
+
+	//Get and pack left[8] ~ left[15] to d3
+	add       r3,  r1
+	GET_8BYTE_DATA d3, r3, r1
+
+	//Save the left[15] to d7 for next step
+	vdup.u8   d7,   d3[7]
+
+	//revert the sequence of d2,d3
+	vrev64.8   q1, q1
+
+	vsubl.u8   q2, d3, d4 //q2={left[8]-left[6],left[9]-left[5],left[10]-left[4], ...}
+	vsubl.u8   q1, d2, d1 //q1={top[8]-top[6],top[9]-top[5],top[10]-top[4], ...}
+
+
+	vmovl.u8   q0, d0
+	vmul.s16   q1, q0, q1 //q1 = q1*{(8,7,6,5,4,3,2,1) * 5}
+	vmul.s16   q2, q0, q2 //q2 = q2*{(8,7,6,5,4,3,2,1) * 5}
+
+	//Calculate the sum of items of q1, q2
+	vpadd.s16  d0, d2, d3
+	vpadd.s16  d1, d4, d5
+	vpaddl.s16 q0, q0
+	vpaddl.s32 q0, q0
+
+	//Get the value of 'b', 'c' and extend to q1, q2.
+	vrshr.s64  q0, #6
+	vdup.s16   q1, d0[0]
+	vdup.s16   q2, d1[0]
+
+	//Load the table {-7,-6,-5,-4,-3,-2,-1,0} to d0
+	adr r2, CONST1_GET_I16X16_LUMA_PRED_PLANE
+	vld1.32   {d0}, [r2]
+
+	//Get the value of 'a' and save to q3
+	vaddl.u8  q3, d6, d7
+	vshl.u16  q3, #4
+
+	//calculate a+'b'*{-7,-6,-5,-4,-3,-2,-1,0} + c*{-7}
+	vmovl.s8  q0, d0
+	vmla.s16  q3, q0, q1
+	vmla.s16  q3, q2, d0[0]
+
+	//Calculate a+'b'*{1,2,3,4,5,6,7,8} + c*{-7}
+	vshl.s16  q5, q1, #3
+	vadd.s16  q5, q3
+
+	//right shift 5 bits and rounding
+	vqrshrun.s16 d0, q3, #5
+	vqrshrun.s16 d1, q5, #5
+
+	//Set the line of MB
+	vst1.u32  {d0,d1}, [r0], r1
+
+
+	//Do the same processing for setting other lines
+	mov  r2, #15
+loop_0_get_i16x16_luma_pred_plane:
+	vadd.s16  q3, q2
+	vadd.s16  q5, q2
+	vqrshrun.s16 d0, q3, #5
+	vqrshrun.s16 d1, q5, #5
+	vst1.u32  {d0,d1}, [r0], r1
+	subs  r2, #1
+	bne  loop_0_get_i16x16_luma_pred_plane
+
+WELS_ASM_FUNC_END
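
The plane kernel computes the standard H.264 16x16 plane parameters: H and V are weighted sums of symmetric neighbour differences (the {8,7,...,1}*5 table folds the factor 5 of b = (5H + 32) >> 6 into the weights), a is 16*(top[15] + left[15]), and each pixel is clip((a + b*(x-7) + c*(y-7) + 16) >> 5). A minimal scalar sketch (Clip1 is a hypothetical clamp to [0, 255]):

#include <stdint.h>

static uint8_t Clip1(int32_t v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

void I16x16PredPlane_c(uint8_t* pred, int32_t stride) {
    const uint8_t* top  = pred - stride;  // top[-1..15] available
    const uint8_t* left = pred - 1;       // left column, step = stride
    int32_t H = 0, V = 0;
    for (int i = 1; i <= 8; i++) {
        H += i * (top[7 + i] - top[7 - i]);
        V += i * (left[(7 + i) * stride] - left[(7 - i) * stride]);
    }
    int32_t a = (top[15] + left[15 * stride]) << 4;
    int32_t b = (5 * H + 32) >> 6;
    int32_t c = (5 * V + 32) >> 6;
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            pred[y * stride + x] = Clip1((a + b * (x - 7) + c * (y - 7) + 16) >> 5);
}
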
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredV_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row (4 bytes)
+	sub  r2, r0, r1
+	ldr  r2, [r2]
+
+	//Set the luma MB using top line
+	str  r2, [r0], r1
+	str  r2, [r0], r1
+	str  r2, [r0], r1
+	str  r2, [r0]
+
+WELS_ASM_FUNC_END
+
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredH_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the left column (4 bytes)
+	sub  r2, r0, #1
+	vld1.8 {d0[]}, [r2], r1
+	vld1.8 {d1[]}, [r2], r1
+	vld1.8 {d2[]}, [r2], r1
+	vld1.8 {d3[]}, [r2]
+
+	//Set the luma MB using the left side byte
+	vst1.32 {d0[0]}, [r0], r1
+	vst1.32 {d1[0]}, [r0], r1
+	vst1.32 {d2[0]}, [r0], r1
+	vst1.32 {d3[0]}, [r0]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredDDL_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row data(8 bytes)
+	sub    r2,  r0, r1
+	vld1.32  {d0}, [r2]
+
+	//For "t7 + (t7<<1)"
+	vdup.8   d1,  d0[7]
+
+	//calculate "t0+t1,t1+t2,t2+t3...t6+t7,t7+t7"
+	vext.8   d1,  d0, d1, #1
+	vaddl.u8 q1,  d1, d0
+
+	//calculate "x,t0+t1+t1+t2,t1+t2+t2+t3,...t5+t6+t6+t7,t6+t7+t7+t7"
+	vext.8   q2,  q1, q1, #14
+	vadd.u16 q0,  q1, q2
+
+	//right shift 2 bits and rounding
+	vqrshrn.u16  d0,  q0, #2
+
+	//Save "ddl0, ddl1, ddl2, ddl3"
+	vext.8   d1, d0, d0, #1
+	vst1.32  d1[0], [r0], r1
+
+	//Save "ddl1, ddl2, ddl3, ddl4"
+	vext.8   d1, d0, d0, #2
+	vst1.32  d1[0], [r0], r1
+
+	//Save "ddl2, ddl3, ddl4, ddl5"
+	vext.8   d1, d0, d0, #3
+	vst1.32  d1[0], [r0], r1
+
+	//Save "ddl3, ddl4, ddl5, ddl6"
+	vst1.32  d0[1], [r0]
+
+WELS_ASM_FUNC_END
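
Diagonal-down-left builds seven (1,2,1)/4-filtered samples from the top row, with top[7] repeated past the end (that is what the vdup of d0[7] supplies), and writes pred[y][x] = ddl[x + y]. A minimal scalar sketch:

#include <stdint.h>

void I4x4PredDDL_c(uint8_t* pred, int32_t stride) {
    const uint8_t* top = pred - stride;   // top[0..7] must be available
    uint8_t ddl[7];
    for (int k = 0; k < 7; k++) {
        int32_t t2 = (k + 2 < 8) ? top[k + 2] : top[7];  // repeat top[7]
        ddl[k] = (uint8_t)((top[k] + 2 * top[k + 1] + t2 + 2) >> 2);
    }
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++)
            pred[y * stride + x] = ddl[x + y];
}
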
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredDDR_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row (4 bytes)
+	sub    r2,  r0, r1
+	vld1.32  {d0[1]}, [r2]
+
+	//Load the left column (5 bytes)
+	sub    r2,  #1
+	vld1.8 {d0[3]}, [r2], r1
+	vld1.8 {d0[2]}, [r2], r1
+	vld1.8 {d0[1]}, [r2], r1
+	vld1.8 {d0[0]}, [r2], r1
+	vld1.8 {d1[7]}, [r2] //For packing the right sequence to do SIMD processing
+
+
+	vext.8   d2, d1, d0, #7   //d0:{L2,L1,L0,LT,T0,T1,T2,T3}
+	                          //d2:{L3,L2,L1,L0,LT,T0,T1,T2}
+
+	//q2:{L2+L3,L1+L2,L0+L1...T1+T2,T2+T3}
+	vaddl.u8 q2, d2, d0
+
+	//q1:{TL0+LT0,LT0+T01,...L12+L23}
+	vext.8   q3, q3, q2, #14
+	vadd.u16 q1, q2, q3
+
+	//right shift 2 bits and rounding
+	vqrshrn.u16 d0, q1, #2
+
+	//Adjust the data sequence for setting luma MB of 'pred'
+	vst1.32   d0[1], [r0], r1
+	vext.8    d0, d0, d0, #7
+	vst1.32   d0[1], [r0], r1
+	vext.8    d0, d0, d0, #7
+	vst1.32   d0[1], [r0], r1
+	vext.8    d0, d0, d0, #7
+	vst1.32   d0[1], [r0]
+
+WELS_ASM_FUNC_END
+
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredVL_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row (8 bytes)
+	sub    r2,  r0, r1
+	vld1.32  {d0}, [r2]
+
+
+	vext.8   d1,  d0, d0, #1
+	vaddl.u8 q1,  d1, d0     //q1:{t0+t1,t1+t2,t2+t3...t5+t6,x,x}
+
+	vext.8   q2,  q1, q1, #2
+	vadd.u16 q2,  q1, q2     //q2:{t0+t1+t1+t2,t1+t2+t2+t3,...t4+t5+t5+t6,x,x}
+
+	//calculate the "vl0,vl1,vl2,vl3,vl4"
+	vqrshrn.u16  d0,  q1, #1
+
+	//calculate the "vl5,vl6,vl7,vl8,vl9"
+	vqrshrn.u16  d1,  q2, #2
+
+	//Adjust the data sequence for setting the luma MB
+	vst1.32  d0[0], [r0], r1
+	vst1.32  d1[0], [r0], r1
+	vext.8   d0,  d0, d0, #1
+	vext.8   d1,  d1, d1, #1
+	vst1.32  d0[0], [r0], r1
+	vst1.32  d1[0], [r0]
+
+WELS_ASM_FUNC_END
+
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredVR_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row (4 bytes)
+	sub       r2,  r0, r1
+	vld1.32   {d0[1]}, [r2]
+
+	//Load the left column (4 bytes)
+	sub       r2,  #1
+	vld1.8    {d0[3]}, [r2], r1
+	vld1.8    {d0[2]}, [r2], r1
+	vld1.8    {d0[1]}, [r2], r1
+	vld1.8    {d0[0]}, [r2]
+
+
+	vext.8    d1, d0, d0, #7
+	vaddl.u8  q1, d0, d1      //q1:{X,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1,T1+T2,T2+T3}
+
+	vext.u8   q2, q1, q1, #14
+	vadd.u16  q2, q2, q1      //q2:{X,L2+L1+L1+L0,L1+L0+L0+LT,...T1+T2+T2+T3}
+
+	//Calculate the vr0 ~ vr9
+	vqrshrn.u16 d1, q2, #2
+	vqrshrn.u16 d0, q1, #1
+
+	//Adjust the data sequence for setting the luma MB
+	vst1.32  d0[1], [r0], r1
+	vst1.32  d1[1], [r0], r1
+	add    r2, r0, r1
+	vst1.8   d1[3], [r0]!
+	vst1.16  d0[2], [r0]!
+	vst1.8   d0[6], [r0]!
+	vst1.8   d1[2], [r2]!
+	vst1.16  d1[2], [r2]!
+	vst1.8   d1[6], [r2]
+WELS_ASM_FUNC_END
+
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredHU_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the left column data
+	sub       r2,  r0, #1
+	mov       r3,  #3
+	mul       r3,  r1
+	add       r3,  r2
+	vld1.8    {d0[]},  [r3]
+	vld1.8    {d0[4]}, [r2], r1
+	vld1.8    {d0[5]}, [r2], r1
+	vld1.8    {d0[6]}, [r2], r1 //d0:{L3,L3,L3,L3,L0,L1,L2,L3}
+
+	vext.8    d1, d0, d0, #1
+	vaddl.u8  q2, d0, d1        //q2:{L3+L3,L3+L3,L3+L3,L3+L0,L0+L1,L1+L2,L2+L3,L3+L3}
+
+	vext.u8   d2, d5, d4, #2
+	vadd.u16  d3, d2, d5        //d3:{L0+L1+L1+L2,L1+L2+L2+L3,L2+L3+L3+L3,L3+L3+L3+L3}
+
+	//Calculate the hu0 ~ hu5
+	vqrshrn.u16 d2, q2, #1
+	vqrshrn.u16 d1, q1, #2
+
+	//Adjust the data sequence for setting the luma MB
+	vzip.8   d2, d1
+	vst1.32  d1[0], [r0], r1
+	vext.8   d2, d1, d1, #2
+	vst1.32  d2[0], [r0], r1
+	vst1.32  d1[1], [r0], r1
+	vst1.32  d0[0], [r0]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderI4x4LumaPredHD_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the data
+	sub       r2,  r0, r1
+	sub       r2,  #1
+	vld1.32   {d0[1]}, [r2], r1
+	vld1.8    {d0[3]}, [r2], r1
+	vld1.8    {d0[2]}, [r2], r1
+	vld1.8    {d0[1]}, [r2], r1
+	vld1.8    {d0[0]}, [r2]	    //d0:{L3,L2,L1,L0,LT,T0,T1,T2}
+
+
+	vext.8    d1, d0, d0, #7
+	vaddl.u8  q1, d0, d1        //q1:{x,L3+L2,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1,T1+T2}
+
+	vext.u8   q2, q1, q1, #14   //q2:{x,x, L3+L2,L2+L1,L1+L0,L0+LT,LT+T0,T0+T1}
+	vadd.u16  q3, q2, q1        //q3:{x,x,L3+L2+L2+L1,L2+L1+L1+L0,L1+L0+L0+LT,L0+LT+LT+T0,LT+T0+T0+T1,T0+T1+T1+T2}
+
+	//Calculate the hd0~hd9
+	vqrshrn.u16 d1, q3, #2
+	vqrshrn.u16 d0, q2, #1
+
+	//Adjust the data sequence for setting the luma MB
+	vmov      d3, d1
+	vtrn.8    d0, d1
+	vext.u8   d2, d1, d1, #6
+	vst2.16  {d2[3], d3[3]}, [r0], r1
+	vst2.16  {d0[2], d1[2]}, [r0], r1
+	vmov     d3, d0
+	vst2.16  {d2[2], d3[2]}, [r0], r1
+	vst2.16  {d0[1], d1[1]}, [r0]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredV_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Get the top row (8 byte)
+	sub  r2, r0, r1
+	vldr d0, [r2]
+
+	//Set the chroma MB using top row data
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d0}, [r0]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredH_neon
+	//stmdb sp!, { r2-r5, lr}
+	////Get the left column (8 byte)
+	sub  r2, r0, #1
+	vld1.8 {d0[]}, [r2], r1
+	vld1.8 {d1[]}, [r2], r1
+	vld1.8 {d2[]}, [r2], r1
+	vld1.8 {d3[]}, [r2], r1
+	vld1.8 {d4[]}, [r2], r1
+	vld1.8 {d5[]}, [r2], r1
+	vld1.8 {d6[]}, [r2], r1
+	vld1.8 {d7[]}, [r2]
+
+	//Set the chroma MB using left column data
+	vst1.8 {d0}, [r0], r1
+	vst1.8 {d1}, [r0], r1
+	vst1.8 {d2}, [r0], r1
+	vst1.8 {d3}, [r0], r1
+	vst1.8 {d4}, [r0], r1
+	vst1.8 {d5}, [r0], r1
+	vst1.8 {d6}, [r0], r1
+	vst1.8 {d7}, [r0]
+
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredDC_neon
+    //stmdb sp!, { r2-r5, lr}
+    //Load the left column data (8 bytes)
+    sub r2, r0, #1
+    GET_8BYTE_DATA d0, r2, r1
+
+    //Load the top row data (8 bytes)
+    sub  r2, r0, r1
+    vldr d1, [r2]
+
+    //Calculate the sum of left column and top row
+    vpaddl.u8  q0, q0
+    vpaddl.u16 q0, q0
+    vadd.u32   d2, d0, d1 //'m1' save to d2
+
+    vrshr.u32  q0, q0, #2 //calculate 'm2','m3'
+    vrshr.u32  d2, d2, #3 //calculate 'm4'
+
+    //duplicate the 'mx' to a vector line
+    vdup.8     d4, d2[0]
+    vdup.8     d5, d1[4]
+    vdup.8     d6, d0[4]
+    vdup.8     d7, d2[4]
+
+    //Set the chroma MB
+    vst2.32 {d4[0],d5[0]}, [r0], r1
+    vst2.32 {d4[0],d5[0]}, [r0], r1
+    vst2.32 {d4[0],d5[0]}, [r0], r1
+    vst2.32 {d4[0],d5[0]}, [r0], r1
+    vst2.32 {d6[0],d7[0]}, [r0], r1
+    vst2.32 {d6[0],d7[0]}, [r0], r1
+    vst2.32 {d6[0],d7[0]}, [r0], r1
+    vst2.32 {d6[0],d7[0]}, [r0]
+
+WELS_ASM_FUNC_END
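
The chroma DC kernel produces one DC value per 4x4 quadrant: the top-left and bottom-right quadrants average their adjacent top and left sums ((t + l + 4) >> 3), while the top-right and bottom-left quadrants use only their own top or left sum ((s + 2) >> 2); the paired vst2.32 stores then paint the left and right halves of each row. A minimal scalar sketch:

#include <stdint.h>

void IChromaPredDc_c(uint8_t* pred, int32_t stride) {
    const uint8_t* top  = pred - stride;
    const uint8_t* left = pred - 1;
    int32_t t0 = 0, t1 = 0, l0 = 0, l1 = 0;
    for (int i = 0; i < 4; i++) {
        t0 += top[i];            t1 += top[i + 4];
        l0 += left[i * stride];  l1 += left[(i + 4) * stride];
    }
    uint8_t dc[2][2] = {
        { (uint8_t)((t0 + l0 + 4) >> 3), (uint8_t)((t1 + 2) >> 2) },
        { (uint8_t)((l1 + 2) >> 2),      (uint8_t)((t1 + l1 + 4) >> 3) }
    };
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            pred[y * stride + x] = dc[y >> 2][x >> 2];
}
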
+
+
+//Table {{1,2,3,4,1,2,3,4}*17}
+CONST0_GET_I_CHROMA_PRED_PLANE: .long 0x44332211, 0x44332211//0x140f0a05, 0x28231e19
+//Table {-3,-2,-1,0,1,2,3,4}
+CONST1_GET_I_CHROMA_PRED_PLANE: .long 0xfffefffd, 0x0000ffff,0x00020001,0x00040003
+
+WELS_ASM_FUNC_BEGIN WelsDecoderIChromaPredPlane_neon
+	//stmdb sp!, { r2-r5, lr}
+	//Load the top row data
+	sub  r2, r0, #1
+	sub  r2, r1
+	vld1.32 {d1[0]}, [r2]
+	add  r2, #5
+	vld1.32 {d0[0]}, [r2]
+
+	//Load the left column data
+	sub  r2, #5
+	vld1.8 {d1[4]}, [r2], r1
+	vld1.8 {d1[5]}, [r2], r1
+	vld1.8 {d1[6]}, [r2], r1
+	vld1.8 {d1[7]}, [r2], r1 //d1:{LT,T0,T1,T2,LT,L0,L1,L2}
+	add  r2, r1
+	vld1.8 {d0[4]}, [r2], r1
+	vld1.8 {d0[5]}, [r2], r1
+	vld1.8 {d0[6]}, [r2], r1
+	vld1.8 {d0[7]}, [r2]     //d0:{T4,T5,T6,T7,L4,L5,L6.L7}
+
+
+	//Save T7 to d3 for next step
+	vdup.u8   d3,   d0[3]
+	//Save L7 to d4 for next step
+	vdup.u8   d4,   d0[7]
+
+	//Calculate the value of 'a' and save to q2
+	vaddl.u8  q2, d3, d4
+	vshl.u16  q2, #4
+
+	//Load the table {{1,2,3,4,1,2,3,4}*17}
+	adr r2, CONST0_GET_I_CHROMA_PRED_PLANE
+	vld1.32   {d2}, [r2]
+
+	//Calculate the 'b','c', and save to q0
+	vrev32.8  d1, d1
+	vsubl.u8  q0, d0, d1
+	vmovl.u8   q1, d2
+	vmul.s16   q0, q1
+	vpaddl.s16 q0, q0
+	vpaddl.s32 q0, q0
+	vrshr.s64  q0, #5
+
+	//Load the table {-3,-2,-1,0,1,2,3,4} to q3
+	adr r2, CONST1_GET_I_CHROMA_PRED_PLANE
+	vld1.32   {d6, d7}, [r2]
+
+	//Duplicate the 'b','c' to q0, q1 for SIMD instruction
+	vdup.s16   q1, d1[0]
+	vdup.s16   q0, d0[0]
+
+	//Calculate the "(a + b * (j - 3) + c * (- 3) + 16) >> 5;"
+	vmla.s16   q2, q0, q3
+	vmla.s16   q2, q1, d6[0]
+	vqrshrun.s16 d0, q2, #5
+
+	//Set a line of chroma MB
+	vst1.u32  {d0}, [r0], r1
+
+	//Do the same processing for each line.
+	mov  r2, #7
+loop_0_get_i_chroma_pred_plane:
+	vadd.s16   q2, q1
+	vqrshrun.s16 d0, q2, #5
+	vst1.u32  {d0}, [r0], r1
+	subs  r2, #1
+	bne  loop_0_get_i_chroma_pred_plane
+
+WELS_ASM_FUNC_END
+
+#endif
--- a/codec/decoder/core/arm/mc_neon.S
+++ b/codec/decoder/core/arm/mc_neon.S
@@ -1,1602 +1,1602 @@
-/*!
- * \copy
- *     Copyright (c)  2013, Cisco Systems
- *     All rights reserved.
- *
- *     Redistribution and use in source and binary forms, with or without
- *     modification, are permitted provided that the following conditions
- *     are met:
- *
- *        * Redistributions of source code must retain the above copyright
- *          notice, this list of conditions and the following disclaimer.
- *
- *        * Redistributions in binary form must reproduce the above copyright
- *          notice, this list of conditions and the following disclaimer in
- *          the documentation and/or other materials provided with the
- *          distribution.
- *
- *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- *     POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifdef  HAVE_NEON
-.text
-#include "arm_arch_common_macro.S"
-
-#ifdef APPLE_IOS
-.macro	AVERAGE_TWO_8BITS
-//	{	// input:dst_d, src_d A and B; working: q13
-    vaddl.u8	q13, $2, $1
-    vrshrn.u16		$0, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, $2, $3	//src[0]+src[1]
-    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
-    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		$6, q12, #5
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_0
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, $2, $3	//src[0]+src[1]
-    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
-    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		$6, q12, #5
-    vaddl.u8	q13, $2, $6
-    vrshrn.u16		$6, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_1
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, $2, $3	//src[0]+src[1]
-    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
-    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		$6, q12, #5
-    vaddl.u8	q13, $3, $6
-    vrshrn.u16		$6, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_TO_16BITS
-//	{	// input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3], dst_q, multiplier a/b; working:q13
-    vaddl.u8	$6, $0, $5		//dst_q=src[-2]+src[3]
-    vaddl.u8	q13, $2, $3	//src[0]+src[1]
-    vmla.u16	$6, q13, $7	//dst_q += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
-    vmls.s16	$6, q13, $8	//dst_q -= 5*(src[-1]+src[2]), 2 cycles
-//	}
-.endm
-
-.macro	FILTER_3_IN_16BITS_TO_8BITS
-//	{	// input:a, b, c, dst_d;
-    vsub.s16	$0, $0, $1			//a-b
-    vshr.s16	$0, $0, #2			//(a-b)/4
-    vsub.s16	$0, $0, $1			//(a-b)/4-b
-    vadd.s16	$0, $0, $2			//(a-b)/4-b+c
-    vshr.s16	$0, $0, #2			//((a-b)/4-b+c)/4
-    vadd.s16	$0, $0, $2			//((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
-    vqrshrun.s16	$3, $0, #6		//(+32)>>6
-//	}
-.endm
-
-.macro	UNPACK_2_16BITS_TO_ABC
-//	{	// input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5), dst_a, dst_b, dst_c;
-    vext.16	$4, $0, $1, #2		//src[0]
-    vext.16	$3, $0, $1, #3		//src[1]
-    vadd.s16	$4, $3					//c=src[0]+src[1]
-
-    vext.16	$3, $0, $1, #1		//src[-1]
-    vext.16	$2, $0, $1, #4		//src[2]
-    vadd.s16	$3, $2					//b=src[-1]+src[2]
-
-    vext.16	$2, $0, $1, #5		//src[3]
-    vadd.s16	$2, $0					//a=src[-2]+src[3]
-//	}
-.endm
-#else
-.macro	AVERAGE_TWO_8BITS arg0, arg1, arg2
-//	{	// input:dst_d, src_d A and B; working: q13
-    vaddl.u8	q13, \arg2, \arg1
-    vrshrn.u16		\arg0, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
-    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
-    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		\arg6, q12, #5
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
-    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
-    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		\arg6, q12, #5
-    vaddl.u8	q13, \arg2, \arg6
-    vrshrn.u16		\arg6, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_1 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
-    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
-    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
-    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
-    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
-    vqrshrun.s16		\arg6, q12, #5
-    vaddl.u8	q13, \arg3, \arg6
-    vrshrn.u16		\arg6, q13, #1
-//	}
-.endm
-
-.macro	FILTER_6TAG_8BITS_TO_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
-//	{	// input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3], dst_q, multiplier a/b; working:q13
-    vaddl.u8	\arg6, \arg0, \arg5		//dst_q=src[-2]+src[3]
-    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
-    vmla.u16	\arg6, q13, \arg7	//dst_q += 20*(src[0]+src[1]), 2 cycles
-    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
-    vmls.s16	\arg6, q13, \arg8	//dst_q -= 5*(src[-1]+src[2]), 2 cycles
-//	}
-.endm
-
-.macro	FILTER_3_IN_16BITS_TO_8BITS arg0, arg1, arg2, arg3
-//	{	// input:a, b, c, dst_d;
-    vsub.s16	\arg0, \arg0, \arg1			//a-b
-    vshr.s16	\arg0, \arg0, #2			//(a-b)/4
-    vsub.s16	\arg0, \arg0, \arg1			//(a-b)/4-b
-    vadd.s16	\arg0, \arg0, \arg2			//(a-b)/4-b+c
-    vshr.s16	\arg0, \arg0, #2			//((a-b)/4-b+c)/4
-    vadd.s16	\arg0, \arg0, \arg2			//((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
-    vqrshrun.s16	\arg3, \arg0, #6		//(+32)>>6
-//	}
-.endm
-
-.macro	UNPACK_2_16BITS_TO_ABC arg0, arg1, arg2, arg3, arg4
-//	{	// input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5), dst_a, dst_b, dst_c;
-    vext.16	\arg4, \arg0, \arg1, #2		//src[0]
-    vext.16	\arg3, \arg0, \arg1, #3		//src[1]
-    vadd.s16	\arg4, \arg3					//c=src[0]+src[1]
-
-    vext.16	\arg3, \arg0, \arg1, #1		//src[-1]
-    vext.16	\arg2, \arg0, \arg1, #4		//src[2]
-    vadd.s16	\arg3,\arg2					//b=src[-1]+src[2]
-
-    vext.16	\arg2, \arg0, \arg1, #5		//src[3]
-    vadd.s16	\arg2, \arg0					//a=src[-2]+src[3]
-//	}
-.endm
-#endif
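
These macros implement the H.264 six-tap (1,-5,20,20,-5,1) luma interpolation: FILTER_6TAG_8BITS produces a half-pel sample, and the _AVERAGE_WITH_0/_1 variants round-average it with src[0] or src[1] to obtain the neighbouring quarter-pel samples, which appears to be what the McHorVer{xy} naming below encodes (x and y as quarter-pel offsets). FILTER_3_IN_16BITS_TO_8BITS evaluates (a - 5b + 20c)/16 as ((a - b)/4 - b + c)/4 + c, so the 16-bit intermediates of the two-pass centre position never need a widening multiply. A minimal per-pixel scalar sketch (hypothetical helper names):

#include <stdint.h>

static uint8_t Clip255(int32_t v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

// Half-pel sample: six-tap filter with rounding, as in FILTER_6TAG_8BITS.
static uint8_t Filter6Tap(const uint8_t* s) {   // s points at src[0]
    int32_t v = (s[-2] + s[3]) - 5 * (s[-1] + s[2]) + 20 * (s[0] + s[1]);
    return Clip255((v + 16) >> 5);
}

// Quarter-pel sample: average with the nearer full-pel pixel (nearer = 0 or 1),
// as in FILTER_6TAG_8BITS_AVERAGE_WITH_0 / _1.
static uint8_t QuarterPel(const uint8_t* s, int nearer) {
    return (uint8_t)((Filter6Tap(s) + s[nearer] + 1) >> 1);
}
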
-
-WELS_ASM_FUNC_BEGIN McHorVer20WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w16_h_mc_luma_loop:
-	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
-	pld			[r0]
-	pld			[r0, #16]
-
-	vext.8		q2, q0, q1, #1		//q2=src[-1]
-	vext.8		q3, q0, q1, #2		//q3=src[0]
-	vext.8		q4, q0, q1, #3		//q4=src[1]
-	vext.8		q5, q0, q1, #4		//q5=src[2]
-	vext.8		q6, q0, q1, #5		//q6=src[3]
-
-	FILTER_6TAG_8BITS 	d0, d4, d6, d8, d10, d12, d2, q14, q15
-
-	FILTER_6TAG_8BITS 	d1, d5, d7, d9, d11, d13, d3, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
-
-	cmp		r4, #0
-	bne		w16_h_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer20WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w8_h_mc_luma_loop:
-	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
-	pld			[r0]
-
-	vext.8		d2, d0, d1, #1		//d2=src[-1]
-	vext.8		d3, d0, d1, #2		//d3=src[0]
-	vext.8		d4, d0, d1, #3		//d4=src[1]
-	vext.8		d5, d0, d1, #4		//d5=src[2]
-	vext.8		d6, d0, d1, #5		//d6=src[3]
-
-	FILTER_6TAG_8BITS 	d0, d2, d3, d4, d5, d6, d1, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d1}, [r2], r3
-
-	cmp		r4, #0
-	bne		w8_h_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer20WidthEq4_neon
-	push		{r4, r5, r6}
-	ldr			r6, [sp, #12]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w4_h_mc_luma_loop:
-	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
-	pld			[r0]
-	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
-	pld			[r0]
-
-	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
-	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
-	vext.8		q3, q2, q2, #1		//src[0:6 *]
-	vext.8		q4, q2, q2, #2		//src[1:6 * *]
-
-	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
-	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
-	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
-	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
-
-	FILTER_6TAG_8BITS 	d0, d4, d6, d7, d2, d5, d1, q14, q15
-
-	vmov		r4, r5, d1
-	str	r4, [r2], r3
-	str	r5, [r2], r3
-
-	sub		r6, #2
-	cmp		r6, #0
-	bne		w4_h_mc_luma_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer10WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w16_xy_10_mc_luma_loop:
-	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
-	pld			[r0]
-	pld			[r0, #16]
-
-	vext.8		q2, q0, q1, #1		//q2=src[-1]
-	vext.8		q3, q0, q1, #2		//q3=src[0]
-	vext.8		q4, q0, q1, #3		//q4=src[1]
-	vext.8		q5, q0, q1, #4		//q5=src[2]
-	vext.8		q6, q0, q1, #5		//q6=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d4, d6, d8, d10, d12, d2, q14, q15
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d5, d7, d9, d11, d13, d3, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
-
-	cmp		r4, #0
-	bne		w16_xy_10_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer10WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w8_xy_10_mc_luma_loop:
-	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
-	pld			[r0]
-
-	vext.8		d2, d0, d1, #1		//d2=src[-1]
-	vext.8		d3, d0, d1, #2		//d3=src[0]
-	vext.8		d4, d0, d1, #3		//d4=src[1]
-	vext.8		d5, d0, d1, #4		//d5=src[2]
-	vext.8		d6, d0, d1, #5		//d6=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d3, d4, d5, d6, d1, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d1}, [r2], r3
-
-	cmp		r4, #0
-	bne		w8_xy_10_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer10WidthEq4_neon
-	push		{r4, r5, r6}
-	ldr			r6, [sp, #12]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w4_xy_10_mc_luma_loop:
-	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
-	pld			[r0]
-	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
-	pld			[r0]
-
-	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
-	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
-	vext.8		q3, q2, q2, #1		//src[0:6 *]
-	vext.8		q4, q2, q2, #2		//src[1:6 * *]
-
-	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
-	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
-	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
-	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d4, d6, d7, d2, d5, d1, q14, q15
-
-	vmov		r4, r5, d1
-	str	r4, [r2], r3
-	str	r5, [r2], r3
-
-	sub		r6, #2
-	cmp		r6, #0
-	bne		w4_xy_10_mc_luma_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer30WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w16_xy_30_mc_luma_loop:
-	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
-	pld			[r0]
-	pld			[r0, #16]
-
-	vext.8		q2, q0, q1, #1		//q2=src[-1]
-	vext.8		q3, q0, q1, #2		//q3=src[0]
-	vext.8		q4, q0, q1, #3		//q4=src[1]
-	vext.8		q5, q0, q1, #4		//q5=src[2]
-	vext.8		q6, q0, q1, #5		//q6=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d4, d6, d8, d10, d12, d2, q14, q15
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d5, d7, d9, d11, d13, d3, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
-
-	cmp		r4, #0
-	bne		w16_xy_30_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer30WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w8_xy_30_mc_luma_loop:
-	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
-	pld			[r0]
-
-	vext.8		d2, d0, d1, #1		//d2=src[-1]
-	vext.8		d3, d0, d1, #2		//d3=src[0]
-	vext.8		d4, d0, d1, #3		//d4=src[1]
-	vext.8		d5, d0, d1, #4		//d5=src[2]
-	vext.8		d6, d0, d1, #5		//d6=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d3, d4, d5, d6, d1, q14, q15
-
-	sub		r4, #1
-	vst1.u8	{d1}, [r2], r3
-
-	cmp		r4, #0
-	bne		w8_xy_30_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer30WidthEq4_neon
-	push		{r4, r5, r6}
-	ldr			r6, [sp, #12]
-
-	sub			r0, #2
-	vmov.u16	q14, #0x0014				// 20
-	vshr.u16	q15, q14, #2				// 5
-
-w4_xy_30_mc_luma_loop:
-	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
-	pld			[r0]
-	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
-	pld			[r0]
-
-	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
-	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
-	vext.8		q3, q2, q2, #1		//src[0:6 *]
-	vext.8		q4, q2, q2, #2		//src[1:6 * *]
-
-	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
-	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
-	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
-	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d4, d6, d7, d2, d5, d1, q14, q15
-
-	vmov		r4, r5, d1
-	str	r4, [r2], r3
-	str	r5, [r2], r3
-
-	sub		r6, #2
-	cmp		r6, #0
-	bne		w4_xy_30_mc_luma_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer01WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
-	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{q2}, [r0], r1		//q2=src[0]
-	vld1.u8	{q3}, [r0], r1		//q3=src[1]
-	vld1.u8	{q4}, [r0], r1		//q4=src[2]
-
-w16_xy_01_luma_loop:
-
-	vld1.u8	{q5}, [r0], r1		//q5=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 2nd row
-	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vld1.u8	{q1}, [r0], r1		//read 3rd row
-	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d4, d6, d8, d10, d0, d2, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d5, d7, d9, d11, d1, d3, d13, q14, q15
-	vld1.u8	{q2}, [r0], r1		//read 4th row
-	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d6, d8, d10, d0, d2, d4, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d7, d9, d11, d1, d3, d5, d13, q14, q15
-	vld1.u8	{q3}, [r0], r1		//read 5th row
-	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d8, d10, d0, d2, d4, d6, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d9, d11, d1, d3, d5, d7, d13, q14, q15
-	vld1.u8	{q4}, [r0], r1		//read 6th row
-	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d10, d0, d2, d4, d6, d8, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d11, d1, d3, d5, d7, d9, d13, q14, q15
-	vld1.u8	{q5}, [r0], r1		//read 7th row
-	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 8th row
-	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
-
-	//q2, q3, q4, q5, q0 --> q0~q4
-	vswp	q0, q4
-	vswp	q0, q2
-	vmov	q1, q3
-	vmov	q3, q5						//q0~q4
-
-	sub		r4, #8
-	cmp		r4, #0
-	bne		w16_xy_01_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer01WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
-	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{d2}, [r0], r1		//d2=src[0]
-	vld1.u8	{d3}, [r0], r1		//d3=src[1]
-
-	vld1.u8	{d4}, [r0], r1		//d4=src[2]
-	vld1.u8	{d5}, [r0], r1		//d5=src[3]
-
-w8_xy_01_mc_luma_loop:
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vld1.u8	{d0}, [r0], r1		//read 2nd row
-	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d2, d3, d4, d5, d0, d12, q14, q15
-	vld1.u8	{d1}, [r0], r1		//read 3rd row
-	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vld1.u8	{d2}, [r0], r1		//read 4th row
-	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d4, d5, d0, d1, d2, d12, q14, q15
-	vld1.u8	{d3}, [r0], r1		//read 5th row
-	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
-
-	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
-	vswp	q0, q2
-	vswp	q1, q2
-
-	sub		r4, #4
-	cmp		r4, #0
-	bne		w8_xy_01_mc_luma_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer01WidthEq4_neon
-	push		{r4, r5, r6, r7}
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	ldr		r4, [r0], r1		//r4=src[-2]
-	ldr		r5, [r0], r1		//r5=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	ldr		r6, [r0], r1		//r6=src[0]
-	ldr		r7, [r0], r1		//r7=src[1]
-
-	vmov		d0, r4, r5
-	vmov		d1, r5, r6
-	vmov		d2, r6, r7
-
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d3, r7, r4
-	ldr			r7, [sp, #16]
-
-w4_xy_01_mc_luma_loop:
-
-//	pld			[r0]
-	//using reserving r4
-	ldr		r5, [r0], r1		//r5=src[3]
-	ldr		r6, [r0], r1		//r6=src[0]
-	vmov		d4, r4, r5
-	vmov		d5, r5, r6			//reserved r6
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vmov		r4, r5, d12
-	str	r4, [r2], r3			//write 1st 4Byte
-	str	r5, [r2], r3			//write 2nd 4Byte
-
-	ldr		r5, [r0], r1		//r5=src[1]
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d0, r6, r5
-	vmov		d1, r5, r4			//reserved r4
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vmov		r5, r6, d12
-	str	r5, [r2], r3			//write 3rd 4Byte
-	str	r6, [r2], r3			//write 4th 4Byte
-
-	//d4, d5, d0, d1 --> d0, d1, d2, d3
-	vmov	q1, q0
-	vmov	q0, q2
-
-	sub		r7, #4
-	cmp		r7, #0
-	bne		w4_xy_01_mc_luma_loop
-
-	pop		{r4, r5, r6, r7}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer03WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
-	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{q2}, [r0], r1		//q2=src[0]
-	vld1.u8	{q3}, [r0], r1		//q3=src[1]
-	vld1.u8	{q4}, [r0], r1		//q4=src[2]
-
-w16_xy_03_luma_loop:
-
-	vld1.u8	{q5}, [r0], r1		//q5=src[3]
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 2nd row
-	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vld1.u8	{q1}, [r0], r1		//read 3rd row
-	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d4, d6, d8, d10, d0, d2, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d5, d7, d9, d11, d1, d3, d13, q14, q15
-	vld1.u8	{q2}, [r0], r1		//read 4th row
-	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d6, d8, d10, d0, d2, d4, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d7, d9, d11, d1, d3, d5, d13, q14, q15
-	vld1.u8	{q3}, [r0], r1		//read 5th row
-	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d8, d10, d0, d2, d4, d6, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d9, d11, d1, d3, d5, d7, d13, q14, q15
-	vld1.u8	{q4}, [r0], r1		//read 6th row
-	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d10, d0, d2, d4, d6, d8, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d11, d1, d3, d5, d7, d9, d13, q14, q15
-	vld1.u8	{q5}, [r0], r1		//read 7th row
-	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 8th row
-	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
-
-	//q2, q3, q4, q5, q0 --> q0~q4
-	vswp	q0, q4
-	vswp	q0, q2
-	vmov	q1, q3
-	vmov	q3, q5						//q0~q4
-
-	sub		r4, #8
-	cmp		r4, #0
-	bne		w16_xy_03_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer03WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
-	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{d2}, [r0], r1		//d2=src[0]
-	vld1.u8	{d3}, [r0], r1		//d3=src[1]
-
-	vld1.u8	{d4}, [r0], r1		//d4=src[2]
-	vld1.u8	{d5}, [r0], r1		//d5=src[3]
-
-w8_xy_03_mc_luma_loop:
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vld1.u8	{d0}, [r0], r1		//read 2nd row
-	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d2, d3, d4, d5, d0, d12, q14, q15
-	vld1.u8	{d1}, [r0], r1		//read 3rd row
-	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vld1.u8	{d2}, [r0], r1		//read 4th row
-	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d4, d5, d0, d1, d2, d12, q14, q15
-	vld1.u8	{d3}, [r0], r1		//read 5th row
-	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
-
-	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
-	vswp	q0, q2
-	vswp	q1, q2
-
-	sub		r4, #4
-	cmp		r4, #0
-	bne		w8_xy_03_mc_luma_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer03WidthEq4_neon
-	push		{r4, r5, r6, r7}
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	ldr		r4, [r0], r1		//r4=src[-2]
-	ldr		r5, [r0], r1		//r5=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	ldr		r6, [r0], r1		//r6=src[0]
-	ldr		r7, [r0], r1		//r7=src[1]
-
-	vmov		d0, r4, r5
-	vmov		d1, r5, r6
-	vmov		d2, r6, r7
-
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d3, r7, r4
-	ldr			r7, [sp, #16]
-
-w4_xy_03_mc_luma_loop:
-
-//	pld			[r0]
-	//using reserving r4
-	ldr		r5, [r0], r1		//r5=src[3]
-	ldr		r6, [r0], r1		//r6=src[0]
-	vmov		d4, r4, r5
-	vmov		d5, r5, r6			//reserved r6
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vmov		r4, r5, d12
-	str	r4, [r2], r3			//write 1st 4Byte
-	str	r5, [r2], r3			//write 2nd 4Byte
-
-	ldr		r5, [r0], r1		//r5=src[1]
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d0, r6, r5
-	vmov		d1, r5, r4			//reserved r4
-
-	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vmov		r5, r6, d12
-	str	r5, [r2], r3			//write 3rd 4Byte
-	str	r6, [r2], r3			//write 4th 4Byte
-
-	//d4, d5, d0, d1 --> d0, d1, d2, d3
-	vmov	q1, q0
-	vmov	q0, q2
-
-	sub		r7, #4
-	cmp		r7, #0
-	bne		w4_xy_03_mc_luma_loop
-
-	pop		{r4, r5, r6, r7}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer02WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
-	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{q2}, [r0], r1		//q2=src[0]
-	vld1.u8	{q3}, [r0], r1		//q3=src[1]
-	vld1.u8	{q4}, [r0], r1		//q4=src[2]
-
-w16_v_mc_luma_loop:
-
-	vld1.u8	{q5}, [r0], r1		//q5=src[3]
-
-	FILTER_6TAG_8BITS 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 2nd row
-	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
-
-	FILTER_6TAG_8BITS 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vld1.u8	{q1}, [r0], r1		//read 3rd row
-	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
-
-	FILTER_6TAG_8BITS 	d4, d6, d8, d10, d0, d2, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d5, d7, d9, d11, d1, d3, d13, q14, q15
-	vld1.u8	{q2}, [r0], r1		//read 4th row
-	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
-
-	FILTER_6TAG_8BITS 	d6, d8, d10, d0, d2, d4, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d7, d9, d11, d1, d3, d5, d13, q14, q15
-	vld1.u8	{q3}, [r0], r1		//read 5th row
-	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
-
-	FILTER_6TAG_8BITS 	d8, d10, d0, d2, d4, d6, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d9, d11, d1, d3, d5, d7, d13, q14, q15
-	vld1.u8	{q4}, [r0], r1		//read 6th row
-	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
-
-	FILTER_6TAG_8BITS 	d10, d0, d2, d4, d6, d8, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d11, d1, d3, d5, d7, d9, d13, q14, q15
-	vld1.u8	{q5}, [r0], r1		//read 7th row
-	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
-
-	FILTER_6TAG_8BITS 	d0, d2, d4, d6, d8, d10, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d1, d3, d5, d7, d9, d11, d13, q14, q15
-	vld1.u8	{q0}, [r0], r1		//read 8th row
-	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
-
-	FILTER_6TAG_8BITS 	d2, d4, d6, d8, d10, d0, d12, q14, q15
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d3, d5, d7, d9, d11, d1, d13, q14, q15
-	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
-
-	//q2, q3, q4, q5, q0 --> q0~q4
-	vswp	q0, q4
-	vswp	q0, q2
-	vmov	q1, q3
-	vmov	q3, q5						//q0~q4
-
-	sub		r4, #8
-	cmp		r4, #0
-	bne		w16_v_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer02WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
-	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	vld1.u8	{d2}, [r0], r1		//d2=src[0]
-	vld1.u8	{d3}, [r0], r1		//d3=src[1]
-
-	vld1.u8	{d4}, [r0], r1		//d4=src[2]
-	vld1.u8	{d5}, [r0], r1		//d5=src[3]
-
-w8_v_mc_luma_loop:
-
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vld1.u8	{d0}, [r0], r1		//read 2nd row
-	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d1, d2, d3, d4, d5, d0, d12, q14, q15
-	vld1.u8	{d1}, [r0], r1		//read 3rd row
-	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vld1.u8	{d2}, [r0], r1		//read 4th row
-	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
-
-	pld			[r0]
-	FILTER_6TAG_8BITS 	d3, d4, d5, d0, d1, d2, d12, q14, q15
-	vld1.u8	{d3}, [r0], r1		//read 5th row
-	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
-
-	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
-	vswp	q0, q2
-	vswp	q1, q2
-
-	sub		r4, #4
-	cmp		r4, #0
-	bne		w8_v_mc_luma_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer02WidthEq4_neon
-	push		{r4, r5, r6, r7}
-	sub			r0, r1, lsl #1		//src[-2*src_stride]
-	pld			[r0]
-	pld			[r0, r1]
-	vmov.u16	q14, #0x0014			// 20
-	ldr		r4, [r0], r1		//r4=src[-2]
-	ldr		r5, [r0], r1		//r5=src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-	ldr		r6, [r0], r1		//r6=src[0]
-	ldr		r7, [r0], r1		//r7=src[1]
-
-	vmov		d0, r4, r5
-	vmov		d1, r5, r6
-	vmov		d2, r6, r7
-
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d3, r7, r4
-	ldr			r7, [sp, #16]
-
-w4_v_mc_luma_loop:
-
-//	pld			[r0]
-	//using reserving r4
-	ldr		r5, [r0], r1		//r5=src[3]
-	ldr		r6, [r0], r1		//r6=src[0]
-	vmov		d4, r4, r5
-	vmov		d5, r5, r6			//reserved r6
-
-	FILTER_6TAG_8BITS 	d0, d1, d2, d3, d4, d5, d12, q14, q15
-	vmov		r4, r5, d12
-	str	r4, [r2], r3			//write 1st 4Byte
-	str	r5, [r2], r3			//write 2nd 4Byte
-
-	ldr		r5, [r0], r1		//r5=src[1]
-	ldr		r4, [r0], r1		//r4=src[2]
-	vmov		d0, r6, r5
-	vmov		d1, r5, r4			//reserved r4
-
-	FILTER_6TAG_8BITS 	d2, d3, d4, d5, d0, d1, d12, q14, q15
-	vmov		r5, r6, d12
-	str	r5, [r2], r3			//write 3rd 4Byte
-	str	r6, [r2], r3			//write 4th 4Byte
-
-	//d4, d5, d0, d1 --> d0, d1, d2, d3
-	vmov	q1, q0
-	vmov	q0, q2
-
-	sub		r7, #4
-	cmp		r7, #0
-	bne		w4_v_mc_luma_loop
-
-	pop		{r4, r5, r6, r7}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer22WidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2					//src[-2]
-	sub			r0, r1, lsl #1		//src[-2*src_stride-2]
-	pld			[r0]
-	pld			[r0, r1]
-
-	vmov.u16	q14, #0x0014			// 20
-	vld1.u8	{d0-d2}, [r0], r1		//use 21(16+5), =src[-2]
-	vld1.u8	{d3-d5}, [r0], r1		//use 21(16+5), =src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2			// 5
-
-	vld1.u8	{d6-d8}, [r0], r1		//use 21(16+5), =src[0]
-	vld1.u8	{d9-d11}, [r0], r1	//use 21(16+5), =src[1]
-	pld			[r0]
-	pld			[r0, r1]
-	vld1.u8	{d12-d14}, [r0], r1	//use 21(16+5), =src[2]
-
-w16_hv_mc_luma_loop:
-
-	vld1.u8	{d15-d17}, [r0], r1	//use 21(16+5), =src[3]
-	//the 1st row
-	pld			[r0]
-	// vertical filtered into q9/q10
-	FILTER_6TAG_8BITS_TO_16BITS 	d0, d3, d6, d9, d12, d15, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d1, d4, d7,d10, d13, d16,q10, q14, q15	// 8 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d0	//output to q0[0]
-
-	// vertical filtered into q10/q11
-	FILTER_6TAG_8BITS_TO_16BITS 	d2, d5, d8,d11, d14, d17,q11, q14, q15	// only 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d1	//output to q0[1]
-	vst1.u8	{q0}, [r2], r3		//write 16Byte
-
-
-	vld1.u8	{d0-d2}, [r0], r1		//read 2nd row
-	//the 2nd row
-	pld			[r0]
-	// vertical filtered into q9/q10
-	FILTER_6TAG_8BITS_TO_16BITS 	d3, d6, d9, d12, d15, d0, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d4, d7,d10, d13, d16, d1,q10, q14, q15	// 8 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d3	//output to d3
-
-	// vertical filtered into q10/q11
-	FILTER_6TAG_8BITS_TO_16BITS 	d5, d8,d11, d14, d17, d2,q11, q14, q15	// only 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d4	//output to d4
-
-	vst1.u8	{d3, d4}, [r2], r3		//write 16Byte
-
-	vld1.u8	{d3-d5}, [r0], r1		//read 3rd row
-	//the 3rd row
-	pld			[r0]
-	// vertical filtered into q9/q10
-	FILTER_6TAG_8BITS_TO_16BITS 	d6, d9, d12, d15, d0, d3, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d7,d10, d13, d16, d1, d4,q10, q14, q15	// 8 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d6	//output to d6
-
-	// vertical filtered into q10/q11
-	FILTER_6TAG_8BITS_TO_16BITS 	d8,d11, d14, d17, d2, d5,q11, q14, q15	// only 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d7	//output to d7
-	vst1.u8	{d6, d7}, [r2], r3		//write 16Byte
-
-	vld1.u8	{d6-d8}, [r0], r1		//read 4th row
-	//the 4th row
-	pld			[r0]
-	// vertical filtered into q9/q10
-	FILTER_6TAG_8BITS_TO_16BITS 	 d9, d12, d15, d0, d3, d6, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS		d10, d13, d16, d1, d4, d7,q10, q14, q15	// 8 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d9	//output to d9
-	// vertical filtered into q10/q11
-	FILTER_6TAG_8BITS_TO_16BITS 	d11, d14, d17, d2, d5, d8,q11, q14, q15	// only 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d10	//output to d10
-	vst1.u8	{d9, d10}, [r2], r3		//write 16Byte
-
-	//d12~d17(q6~q8), d0~d8(q0~q3+d8), --> d0~d14
-	vswp	q0, q6
-	vswp	q6, q3
-	vmov	q5, q2
-	vmov	q2, q8
-
-	vmov	d20,d8
-	vmov	q4, q1
-	vmov	q1, q7
-	vmov	d14,d20
-
-	sub		r4, #4
-	cmp		r4, #0
-	bne		w16_hv_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer22WidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-
-	sub			r0, #2				//src[-2]
-	sub			r0, r1, lsl #1	//src[-2*src_stride-2]
-	pld			[r0]
-	pld			[r0, r1]
-
-	vmov.u16	q14, #0x0014		// 20
-	vld1.u8	{q0}, [r0], r1	//use 13(8+5), =src[-2]
-	vld1.u8	{q1}, [r0], r1	//use 13(8+5), =src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2		// 5
-
-	vld1.u8	{q2}, [r0], r1	//use 13(8+5), =src[0]
-	vld1.u8	{q3}, [r0], r1	//use 13(8+5), =src[1]
-	pld			[r0]
-	pld			[r0, r1]
-	vld1.u8	{q4}, [r0], r1	//use 13(8+5), =src[2]
-
-w8_hv_mc_luma_loop:
-
-	vld1.u8	{q5}, [r0], r1	//use 13(8+5), =src[3]
-	//the 1st row
-	pld			[r0]
-	// vertical filtered into q6/q7
-	FILTER_6TAG_8BITS_TO_16BITS 	d0, d2, d4, d6, d8, d10, q6, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d1, d3, d5, d7, d9, d11, q7, q14, q15	// 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
-	vst1.u8	d12, [r2], r3			//write 8Byte
-
-	vld1.u8	{q0}, [r0], r1		//read 2nd row
-	//the 2nd row
-	pld			[r0]
-	// vertical filtered into q6/q7
-	FILTER_6TAG_8BITS_TO_16BITS 	d2, d4, d6, d8, d10, d0, q6, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d3, d5, d7, d9, d11, d1, q7, q14, q15	// 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
-	vst1.u8	d12, [r2], r3		//write 8Byte
-
-	vld1.u8	{q1}, [r0], r1		//read 3rd row
-	//the 3rd row
-	pld			[r0]
-	// vertical filtered into q6/q7
-	FILTER_6TAG_8BITS_TO_16BITS 	d4, d6, d8, d10, d0, d2, q6, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d5, d7, d9, d11, d1, d3, q7, q14, q15	// 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
-	vst1.u8	d12, [r2], r3			//write 8Byte
-
-	vld1.u8	{q2}, [r0], r1		//read 4th row
-	//the 4th row
-	pld			[r0]
-	// vertical filtered into q6/q7
-	FILTER_6TAG_8BITS_TO_16BITS 	d6, d8, d10, d0, d2, d4, q6, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d7, d9, d11, d1, d3, d5, q7, q14, q15	// 5 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
-	vst1.u8	d12, [r2], r3			//write 8Byte
-
-	//q4~q5, q0~q2, --> q0~q4
-	vswp	q0, q4
-	vswp	q2, q4
-	vmov	q3, q1
-	vmov	q1, q5
-
-	sub		r4, #4
-	cmp		r4, #0
-	bne		w8_hv_mc_luma_loop
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McHorVer22WidthEq4_neon
-	push		{r4 ,r5, r6}
-	ldr			r6, [sp, #12]
-
-	sub			r0, #2				//src[-2]
-	sub			r0, r1, lsl #1	//src[-2*src_stride-2]
-	pld			[r0]
-	pld			[r0, r1]
-
-	vmov.u16	q14, #0x0014		// 20
-	vld1.u8	{q0}, [r0], r1	//use 9(4+5), =src[-2]
-	vld1.u8	{q1}, [r0], r1	//use 9(4+5), =src[-1]
-
-	pld			[r0]
-	pld			[r0, r1]
-	vshr.u16	q15, q14, #2		// 5
-
-	vld1.u8	{q2}, [r0], r1	//use 9(4+5), =src[0]
-	vld1.u8	{q3}, [r0], r1	//use 9(4+5), =src[1]
-	pld			[r0]
-	pld			[r0, r1]
-	vld1.u8	{q4}, [r0], r1	//use 9(4+5), =src[2]
-
-w4_hv_mc_luma_loop:
-
-	vld1.u8	{q5}, [r0], r1	//use 9(4+5), =src[3]
-	vld1.u8	{q6}, [r0], r1	//use 9(4+5), =src[4]
-
-	//the 1st&2nd row
-	pld			[r0]
-	pld			[r0, r1]
-	// vertical filtered
-	FILTER_6TAG_8BITS_TO_16BITS 	d0, d2, d4, d6, d8, d10, q7, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d1, d3, d5, d7, d9, d11, q8, q14, q15	// 1 avail
-
-	FILTER_6TAG_8BITS_TO_16BITS 	d2, d4, d6, d8,d10, d12, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d3, d5, d7, d9,d11, d13,q10, q14, q15	// 1 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q7, q8, q11, q12, q13	//4 avail
-	UNPACK_2_16BITS_TO_ABC	q9,q10, q0, q7, q8		//4 avail
-
-	vmov	d23, d0
-	vmov	d25, d14
-	vmov	d27, d16
-
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d22	//output to q11[0]
-	vmov		r4 ,r5, d22
-	str		r4, [r2], r3				//write 4Byte
-	str		r5, [r2], r3				//write 4Byte
-
-	//the 3rd&4th row
-	vld1.u8	{q0}, [r0], r1	//use 9(4+5), =src[3]
-	vld1.u8	{q1}, [r0], r1	//use 9(4+5), =src[4]
-	pld			[r0]
-	pld			[r0, r1]
-	// vertical filtered
-	FILTER_6TAG_8BITS_TO_16BITS 	d4, d6, d8, d10, d12, d0, q7, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d5, d7, d9, d11, d13, d1, q8, q14, q15	// 1 avail
-
-	FILTER_6TAG_8BITS_TO_16BITS 	d6, d8,d10, d12, d0, d2, q9, q14, q15	// 8 avail
-	FILTER_6TAG_8BITS_TO_16BITS 	d7, d9,d11, d13, d1, d3,q10, q14, q15	// 1 avail
-	// horizon filtered
-	UNPACK_2_16BITS_TO_ABC	q7, q8, q11, q12, q13	//4 avail
-	UNPACK_2_16BITS_TO_ABC	q9,q10, q2, q7, q8		//4 avail
-
-	vmov	d23, d4
-	vmov	d25, d14
-	vmov	d27, d16
-
-	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d22	//output to q11[0]
-	vmov		r4 ,r5, d22
-	str		r4, [r2], r3				//write 4Byte
-	str		r5, [r2], r3				//write 4Byte
-
-	//q4~q6, q0~q1, --> q0~q4
-	vswp	q4, q0
-	vmov	q3, q4
-	vmov	q4, q1
-	vmov	q1, q5
-	vmov	q2, q6
-
-	sub		r6, #4
-	cmp		r6, #0
-	bne		w4_hv_mc_luma_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
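
The McHorVer22* functions above compute the (2,2) half-pel position in two stages: FILTER_6TAG_8BITS_TO_16BITS keeps the unshifted vertical 6-tap sums in 16-bit lanes, UNPACK_2_16BITS_TO_ABC regroups six neighbouring sums into the pair sums a/b/c, and FILTER_3_IN_16BITS_TO_8BITS folds them with the shift decomposition spelled out in its comments, presumably so the intermediates never leave 16 bits. A minimal scalar sketch of that arithmetic, with illustrative names only (nothing below is taken from the project's C sources):

#include <stdint.h>

static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* unshifted 6-tap sum: (m2 + p3) + 20*(p0 + p1) - 5*(m1 + p2) */
static int tap6(int m2, int m1, int p0, int p1, int p2, int p3) {
    return (m2 + p3) + 20 * (p0 + p1) - 5 * (m1 + p2);
}

/* one (2,2) half-pel output at (x, y); src needs the usual 2-pixel margin */
static uint8_t hv22_pixel(const uint8_t *src, int stride, int x, int y) {
    int t[6];                        /* vertical sums for columns x-2 .. x+3 */
    for (int i = -2; i <= 3; ++i) {
        const uint8_t *s = src + y * stride + x + i;
        t[i + 2] = tap6(s[-2 * stride], s[-stride], s[0],
                        s[stride], s[2 * stride], s[3 * stride]);
    }
    int a = t[0] + t[5], b = t[1] + t[4], c = t[2] + t[3];
    /* FILTER_3_IN_16BITS_TO_8BITS: ((((a-b)>>2) - b + c) >> 2) + c, which its
       comment equates to (a - 5*b + 20*c)/16 (arithmetic shifts, as in
       vshr.s16), then a rounded, saturated narrow of (v + 32) >> 6.        */
    int v = ((((a - b) >> 2) - b + c) >> 2) + c;
    return clip_u8((v + 32) >> 6);
}
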
-
-
-WELS_ASM_FUNC_BEGIN McCopyWidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-w16_copy_loop:
-	vld1.u8		{q0}, [r0], r1
-	sub			r4, #2
-	vld1.u8		{q1}, [r0], r1
-	vst1.u8		{q0}, [r2], r3
-	cmp			r4, #0
-	vst1.u8		{q1}, [r2], r3
-	bne			w16_copy_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McCopyWidthEq8_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-w8_copy_loop:
-	vld1.u8		{d0}, [r0], r1
-	vld1.u8		{d1}, [r0], r1
-	vst1.u8		{d0}, [r2], r3
-	vst1.u8		{d1}, [r2], r3
-	sub			r4, #2
-	cmp			r4, #0
-	bne			w8_copy_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McCopyWidthEq4_neon
-	push		{r4, r5, r6}
-	ldr			r4, [sp, #12]
-w4_copy_loop:
-	ldr		r5, [r0], r1
-	ldr		r6, [r0], r1
-	str		r5, [r2], r3
-	str		r6, [r2], r3
-
-	sub			r4, #2
-	cmp			r4, #0
-	bne			w4_copy_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN PixelAvgWidthEq16_neon
-	push		{r4}
-	ldr			r4, [sp, #4]
-w16_pix_avg_loop:
-	vld1.u8		{q0}, [r2]!
-	vld1.u8		{q1}, [r3]!
-	vld1.u8		{q2}, [r2]!
-	vld1.u8		{q3}, [r3]!
-
-	vld1.u8		{q4}, [r2]!
-	vld1.u8		{q5}, [r3]!
-	vld1.u8		{q6}, [r2]!
-	vld1.u8		{q7}, [r3]!
-
-	AVERAGE_TWO_8BITS		d0, d0, d2
-	AVERAGE_TWO_8BITS		d1, d1, d3
-	vst1.u8		{q0}, [r0], r1
-
-	AVERAGE_TWO_8BITS		d4, d4, d6
-	AVERAGE_TWO_8BITS		d5, d5, d7
-	vst1.u8		{q2}, [r0], r1
-
-	AVERAGE_TWO_8BITS		d8, d8, d10
-	AVERAGE_TWO_8BITS		d9, d9, d11
-	vst1.u8		{q4}, [r0], r1
-
-	AVERAGE_TWO_8BITS		d12, d12, d14
-	AVERAGE_TWO_8BITS		d13, d13, d15
-	vst1.u8		{q6}, [r0], r1
-
-	sub			r4, #4
-	cmp			r4, #0
-	bne			w16_pix_avg_loop
-
-	pop		{r4}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN PixelAvgWidthEq8_neon
-	push		{r4, r5}
-	ldr			r4, [sp, #8]
-	mov			r5, #16
-w8_pix_avg_loop:
-
-	vld1.u8		{d0}, [r2], r5
-	vld1.u8		{d2}, [r3], r5
-	vld1.u8		{d1}, [r2], r5
-	vld1.u8		{d3}, [r3], r5
-
-	AVERAGE_TWO_8BITS		d0, d0, d2
-	AVERAGE_TWO_8BITS		d1, d1, d3
-	vst1.u8		{d0}, [r0], r1
-	vst1.u8		{d1}, [r0], r1
-
-	vld1.u8		{d4}, [r2], r5
-	vld1.u8		{d6}, [r3], r5
-	vld1.u8		{d5}, [r2], r5
-	vld1.u8		{d7}, [r3], r5
-
-	AVERAGE_TWO_8BITS		d4, d4, d6
-	AVERAGE_TWO_8BITS		d5, d5, d7
-	vst1.u8		{d4}, [r0], r1
-	vst1.u8		{d5}, [r0], r1
-
-	sub			r4, #4
-	cmp			r4, #0
-	bne			w8_pix_avg_loop
-
-	pop		{r4, r5}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN PixelAvgWidthEq4_neon
-	push		{r4-r8}
-	ldr			r4, [sp, #20]
-w4_pix_avg_loop:
-
-	ldr		r5, [r2]
-	ldr		r6, [r2, #16]
-	ldr		r7, [r3]
-	ldr		r8, [r3, #16]
-	add		r2, #32
-	add		r3, #32
-
-	vmov		d0, r5, r6
-	vmov		d1, r7, r8
-	AVERAGE_TWO_8BITS		d0, d0, d1
-	vmov		r5, r6, d0
-
-	str		r5, [r0], r1
-	str		r6, [r0], r1
-
-	sub			r4, #2
-	cmp			r4, #0
-	bne			w4_pix_avg_loop
-
-	pop		{r4-r8}
-WELS_ASM_FUNC_END
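
The PixelAvg* functions above are the bi-prediction merge: AVERAGE_TWO_8BITS is a vaddl.u8 followed by vrshrn.u16 #1, i.e. a rounded average (a + b + 1) >> 1 of two prediction blocks. The Eq8/Eq4 variants step both source pointers by a fixed 16 bytes per row, which suggests the inputs are 16-byte-strided temporaries; the sketch below just takes a generic stride, and its prototype is illustrative rather than the repo's:

#include <stdint.h>

static void pixel_avg(uint8_t *dst, int dst_stride,
                      const uint8_t *a, const uint8_t *b, int src_stride,
                      int width, int height) {
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x)
            dst[x] = (uint8_t)((a[x] + b[x] + 1) >> 1);   /* rounded average */
        dst += dst_stride;
        a += src_stride;
        b += src_stride;
    }
}
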
-
-WELS_ASM_FUNC_BEGIN McChromaWidthEq8_neon
-	push		{r4, r5}
-	ldr			r4, [sp, #8]
-	ldr			r5, [sp, #12]
-//	normal case: {cA*src[x]  + cB*src[x+1]} + {cC*src[x+stride] + cD*srcp[x+stride+1]}
-//	we can opti it by adding vert only/ hori only cases, to be continue
-	vld1.u8	{d31}, [r4]		//load A/B/C/D
-	vld1.u8		{q0}, [r0], r1	//src[x]
-
-	vdup.u8	d28, d31[0]			//A
-	vdup.u8	d29, d31[1]			//B
-	vdup.u8	d30, d31[2]			//C
-	vdup.u8	d31, d31[3]			//D
-
-	vext.u8		d1, d0, d1, #1		//src[x+1]
-
-w8_mc_chroma_loop:	// each two pxl row
-	vld1.u8		{q1}, [r0], r1	//src[x+stride]
-	vld1.u8		{q2}, [r0], r1	//src[x+2*stride]
-	vext.u8		d3, d2, d3, #1		//src[x+stride+1]
-	vext.u8		d5, d4, d5, #1		//src[x+2*stride+1]
-
-	vmull.u8		q3, d0, d28			//(src[x] * A)
-	vmlal.u8		q3, d1, d29			//+=(src[x+1] * B)
-	vmlal.u8		q3, d2, d30			//+=(src[x+stride] * C)
-	vmlal.u8		q3, d3, d31			//+=(src[x+stride+1] * D)
-	vrshrn.u16		d6, q3, #6
-	vst1.u8	d6, [r2], r3
-
-	vmull.u8		q3, d2, d28			//(src[x] * A)
-	vmlal.u8		q3, d3, d29			//+=(src[x+1] * B)
-	vmlal.u8		q3, d4, d30			//+=(src[x+stride] * C)
-	vmlal.u8		q3, d5, d31			//+=(src[x+stride+1] * D)
-	vrshrn.u16		d6, q3, #6
-	vst1.u8	d6, [r2], r3
-
-	vmov		q0, q2
-	sub			r5, #2
-	cmp			r5, #0
-	bne			w8_mc_chroma_loop
-
-	pop		{r4, r5}
-WELS_ASM_FUNC_END
-
-
-WELS_ASM_FUNC_BEGIN McChromaWidthEq4_neon
-
-	push		{r4, r5, r6}
-	ldr			r4, [sp, #12]
-	ldr			r6, [sp, #16]
-//	normal case: {cA*src[x]  + cB*src[x+1]} + {cC*src[x+stride] + cD*srcp[x+stride+1]}
-//	we can opti it by adding vert only/ hori only cases, to be continue
-	vld1.u8	{d31}, [r4]		//load A/B/C/D
-
-	vdup.u8	d28, d31[0]			//A
-	vdup.u8	d29, d31[1]			//B
-	vdup.u8	d30, d31[2]			//C
-	vdup.u8	d31, d31[3]			//D
-
-w4_mc_chroma_loop:	// each two pxl row
-	vld1.u8		{d0}, [r0], r1	//a::src[x]
-	vld1.u8		{d2}, [r0], r1	//b::src[x+stride]
-	vld1.u8		{d4}, [r0]			//c::src[x+2*stride]
-
-	vshr.u64		d1, d0, #8
-	vshr.u64		d3, d2, #8
-	vshr.u64		d5, d4, #8
-
-	vmov			q3, q1				//b::[0:7]+b::[1~8]
-	vtrn.32		q0, q1				//d0{a::[0:3]+b::[0:3]}; d1{a::[1:4]+b::[1:4]}
-	vtrn.32		q3, q2				//d6{b::[0:3]+c::[0:3]}; d7{b::[1:4]+c::[1:4]}
-
-	vmull.u8		q1, d0, d28			//(src[x] * A)
-	vmlal.u8		q1, d1, d29			//+=(src[x+1] * B)
-	vmlal.u8		q1, d6, d30			//+=(src[x+stride] * C)
-	vmlal.u8		q1, d7, d31			//+=(src[x+stride+1] * D)
-
-	vrshrn.u16		d2, q1, #6
-	vmov		r4, r5, d2
-	str	r4, [r2], r3
-	str	r5, [r2], r3
-
-	sub			r6, #2
-	cmp			r6, #0
-	bne			w4_mc_chroma_loop
-
-	pop		{r4, r5, r6}
-WELS_ASM_FUNC_END
-#endif
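
McChromaWidthEq8_neon and McChromaWidthEq4_neon above implement the bilinear chroma interpolation stated in their comments, dst = A*src[x] + B*src[x+1] + C*src[x+stride] + D*src[x+stride+1], with the four byte weights loaded from the table passed in r4 and the result narrowed with vrshrn.u16 #6, i.e. (sum + 32) >> 6. A scalar sketch with an illustrative prototype (the H.264 chroma weights sum to 64, so the result already fits in a byte and, like the non-saturating vrshrn, needs no clamp):

#include <stdint.h>

static void mc_chroma(uint8_t *dst, int dst_stride,
                      const uint8_t *src, int src_stride,
                      const uint8_t w[4],          /* A, B, C, D */
                      int width, int height) {
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int sum = w[0] * src[x]              + w[1] * src[x + 1] +
                      w[2] * src[x + src_stride] + w[3] * src[x + src_stride + 1];
            dst[x] = (uint8_t)((sum + 32) >> 6); /* matches vrshrn.u16 #6 */
        }
        dst += dst_stride;
        src += src_stride;
    }
}
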
+/*!
+ * \copy
+ *     Copyright (c)  2013, Cisco Systems
+ *     All rights reserved.
+ *
+ *     Redistribution and use in source and binary forms, with or without
+ *     modification, are permitted provided that the following conditions
+ *     are met:
+ *
+ *        * Redistributions of source code must retain the above copyright
+ *          notice, this list of conditions and the following disclaimer.
+ *
+ *        * Redistributions in binary form must reproduce the above copyright
+ *          notice, this list of conditions and the following disclaimer in
+ *          the documentation and/or other materials provided with the
+ *          distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *     FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *     COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *     BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *     LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *     ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *     POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef  HAVE_NEON
+.text
+#include "arm_arch_common_macro.S"
+
+#ifdef APPLE_IOS
+.macro	AVERAGE_TWO_8BITS
+//	{	// input:dst_d, src_d A and B; working: q13
+    vaddl.u8	q13, $2, $1
+    vrshrn.u16		$0, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, $2, $3	//src[0]+src[1]
+    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
+    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		$6, q12, #5
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_0
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, $2, $3	//src[0]+src[1]
+    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
+    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		$6, q12, #5
+    vaddl.u8	q13, $2, $6
+    vrshrn.u16		$6, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_1
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, $0, $5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, $2, $3	//src[0]+src[1]
+    vmla.u16	q12, q13, $7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
+    vmls.s16	q12, q13, $8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		$6, q12, #5
+    vaddl.u8	q13, $3, $6
+    vrshrn.u16		$6, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_TO_16BITS
+//	{	// input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3], dst_q, multiplier a/b; working:q13
+    vaddl.u8	$6, $0, $5		//dst_q=src[-2]+src[3]
+    vaddl.u8	q13, $2, $3	//src[0]+src[1]
+    vmla.u16	$6, q13, $7	//dst_q += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, $1, $4	//src[-1]+src[2]
+    vmls.s16	$6, q13, $8	//dst_q -= 5*(src[-1]+src[2]), 2 cycles
+//	}
+.endm
+
+.macro	FILTER_3_IN_16BITS_TO_8BITS
+//	{	// input:a, b, c, dst_d;
+    vsub.s16	$0, $0, $1			//a-b
+    vshr.s16	$0, $0, #2			//(a-b)/4
+    vsub.s16	$0, $0, $1			//(a-b)/4-b
+    vadd.s16	$0, $0, $2			//(a-b)/4-b+c
+    vshr.s16	$0, $0, #2			//((a-b)/4-b+c)/4
+    vadd.s16	$0, $0, $2			//((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
+    vqrshrun.s16	$3, $0, #6		//(+32)>>6
+//	}
+.endm
+
+.macro	UNPACK_2_16BITS_TO_ABC
+//	{	// input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5), dst_a, dst_b, dst_c;
+    vext.16	$4, $0, $1, #2		//src[0]
+    vext.16	$3, $0, $1, #3		//src[1]
+    vadd.s16	$4, $3					//c=src[0]+src[1]
+
+    vext.16	$3, $0, $1, #1		//src[-1]
+    vext.16	$2, $0, $1, #4		//src[2]
+    vadd.s16	$3, $2					//b=src[-1]+src[2]
+
+    vext.16	$2, $0, $1, #5		//src[3]
+    vadd.s16	$2, $0					//a=src[-2]+src[3]
+//	}
+.endm
+#else
+.macro	AVERAGE_TWO_8BITS arg0, arg1, arg2
+//	{	// input:dst_d, src_d A and B; working: q13
+    vaddl.u8	q13, \arg2, \arg1
+    vrshrn.u16		\arg0, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
+    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
+    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		\arg6, q12, #5
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_0 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
+    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
+    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		\arg6, q12, #5
+    vaddl.u8	q13, \arg2, \arg6
+    vrshrn.u16		\arg6, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_AVERAGE_WITH_1 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+//	{	// input:src[-2], src[-1], src[0], src[1], src[2], src[3], dst_d, multiplier a/b; working: q12, q13
+    vaddl.u8	q12, \arg0, \arg5	//q12=src[-2]+src[3]
+    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
+    vmla.u16	q12, q13, \arg7	//q12 += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
+    vmls.s16	q12, q13, \arg8	//q12 -= 5*(src[-1]+src[2]), 2 cycles
+    vqrshrun.s16		\arg6, q12, #5
+    vaddl.u8	q13, \arg3, \arg6
+    vrshrn.u16		\arg6, q13, #1
+//	}
+.endm
+
+.macro	FILTER_6TAG_8BITS_TO_16BITS arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8
+//	{	// input:d_src[-2], d_src[-1], d_src[0], d_src[1], d_src[2], d_src[3], dst_q, multiplier a/b; working:q13
+    vaddl.u8	\arg6, \arg0, \arg5		//dst_q=src[-2]+src[3]
+    vaddl.u8	q13, \arg2, \arg3	//src[0]+src[1]
+    vmla.u16	\arg6, q13, \arg7	//dst_q += 20*(src[0]+src[1]), 2 cycles
+    vaddl.u8	q13, \arg1, \arg4	//src[-1]+src[2]
+    vmls.s16	\arg6, q13, \arg8	//dst_q -= 5*(src[-1]+src[2]), 2 cycles
+//	}
+.endm
+
+.macro	FILTER_3_IN_16BITS_TO_8BITS arg0, arg1, arg2, arg3
+//	{	// input:a, b, c, dst_d;
+    vsub.s16	\arg0, \arg0, \arg1			//a-b
+    vshr.s16	\arg0, \arg0, #2			//(a-b)/4
+    vsub.s16	\arg0, \arg0, \arg1			//(a-b)/4-b
+    vadd.s16	\arg0, \arg0, \arg2			//(a-b)/4-b+c
+    vshr.s16	\arg0, \arg0, #2			//((a-b)/4-b+c)/4
+    vadd.s16	\arg0, \arg0, \arg2			//((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
+    vqrshrun.s16	\arg3, \arg0, #6		//(+32)>>6
+//	}
+.endm
+
+.macro	UNPACK_2_16BITS_TO_ABC arg0, arg1, arg2, arg3, arg4
+//	{	// input:q_src[-2:5], q_src[6:13](avail 8+5)/q_src[6:**](avail 4+5), dst_a, dst_b, dst_c;
+    vext.16	\arg4, \arg0, \arg1, #2		//src[0]
+    vext.16	\arg3, \arg0, \arg1, #3		//src[1]
+    vadd.s16	\arg4, \arg3					//c=src[0]+src[1]
+
+    vext.16	\arg3, \arg0, \arg1, #1		//src[-1]
+    vext.16	\arg2, \arg0, \arg1, #4		//src[2]
+    vadd.s16	\arg3,\arg2					//b=src[-1]+src[2]
+
+    vext.16	\arg2, \arg0, \arg1, #5		//src[3]
+    vadd.s16	\arg2, \arg0					//a=src[-2]+src[3]
+//	}
+.endm
+#endif
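
The macros above carry the whole luma filter: FILTER_6TAG_8BITS is the 6-tap (1, -5, 20, 20, -5, 1) with a rounded, saturated (sum + 16) >> 5 narrow (vqrshrun.s16 #5), and the _AVERAGE_WITH_0 / _AVERAGE_WITH_1 variants then average that half-pel value with src[0] or src[1] for the quarter-pel positions. A scalar sketch, with illustrative helper names:

#include <stdint.h>

static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* FILTER_6TAG_8BITS: p points at src[-2] */
static uint8_t filter6tag(const uint8_t *p) {
    int sum = (p[0] + p[5]) + 20 * (p[2] + p[3]) - 5 * (p[1] + p[4]);
    return clip_u8((sum + 16) >> 5);
}

/* _AVERAGE_WITH_0 (with1 = 0) and _AVERAGE_WITH_1 (with1 = 1): rounded
   average of the half-pel value with src[0] or src[1], matching the
   trailing vaddl.u8 + vrshrn.u16 #1 in those macros. */
static uint8_t filter6tag_avg(const uint8_t *p, int with1) {
    return (uint8_t)((p[2 + with1] + filter6tag(p) + 1) >> 1);
}
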
+
+WELS_ASM_FUNC_BEGIN McHorVer20WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w16_h_mc_luma_loop:
+	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
+	pld			[r0]
+	pld			[r0, #16]
+
+	vext.8		q2, q0, q1, #1		//q2=src[-1]
+	vext.8		q3, q0, q1, #2		//q3=src[0]
+	vext.8		q4, q0, q1, #3		//q4=src[1]
+	vext.8		q5, q0, q1, #4		//q5=src[2]
+	vext.8		q6, q0, q1, #5		//q6=src[3]
+
+	FILTER_6TAG_8BITS 	d0, d4, d6, d8, d10, d12, d2, q14, q15
+
+	FILTER_6TAG_8BITS 	d1, d5, d7, d9, d11, d13, d3, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
+
+	cmp		r4, #0
+	bne		w16_h_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer20WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w8_h_mc_luma_loop:
+	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
+	pld			[r0]
+
+	vext.8		d2, d0, d1, #1		//d2=src[-1]
+	vext.8		d3, d0, d1, #2		//d3=src[0]
+	vext.8		d4, d0, d1, #3		//d4=src[1]
+	vext.8		d5, d0, d1, #4		//d5=src[2]
+	vext.8		d6, d0, d1, #5		//d6=src[3]
+
+	FILTER_6TAG_8BITS 	d0, d2, d3, d4, d5, d6, d1, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d1}, [r2], r3
+
+	cmp		r4, #0
+	bne		w8_h_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer20WidthEq4_neon
+	push		{r4, r5, r6}
+	ldr			r6, [sp, #12]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w4_h_mc_luma_loop:
+	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
+	pld			[r0]
+	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
+	pld			[r0]
+
+	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
+	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
+	vext.8		q3, q2, q2, #1		//src[0:6 *]
+	vext.8		q4, q2, q2, #2		//src[1:6 * *]
+
+	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
+	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
+	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
+	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
+
+	FILTER_6TAG_8BITS 	d0, d4, d6, d7, d2, d5, d1, q14, q15
+
+	vmov		r4, r5, d1
+	str	r4, [r2], r3
+	str	r5, [r2], r3
+
+	sub		r6, #2
+	cmp		r6, #0
+	bne		w4_h_mc_luma_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
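
McHorVer20WidthEq16/8/4_neon above are the pure horizontal half-pel cases: load a row with a 5-byte overread, build the five shifted copies with vext, and run FILTER_6TAG_8BITS across the row; the width-4 version additionally folds two rows per iteration with the vtrn shuffles. The register use (r0/r1 source and stride, r2/r3 destination and stride, height on the stack) suggests a signature like the one below, but the exact C prototype is an assumption, not taken from this file:

#include <stdint.h>

static void mc_hor_ver20(const uint8_t *src, int src_stride,
                         uint8_t *dst, int dst_stride,
                         int width, int height) {
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int sum = (src[x - 2] + src[x + 3])
                    + 20 * (src[x] + src[x + 1])
                    - 5 * (src[x - 1] + src[x + 2]);
            int v = (sum + 16) >> 5;               /* vqrshrun.s16 #5 */
            dst[x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
        }
        src += src_stride;
        dst += dst_stride;
    }
}
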
+
+
+WELS_ASM_FUNC_BEGIN McHorVer10WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w16_xy_10_mc_luma_loop:
+	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
+	pld			[r0]
+	pld			[r0, #16]
+
+	vext.8		q2, q0, q1, #1		//q2=src[-1]
+	vext.8		q3, q0, q1, #2		//q3=src[0]
+	vext.8		q4, q0, q1, #3		//q4=src[1]
+	vext.8		q5, q0, q1, #4		//q5=src[2]
+	vext.8		q6, q0, q1, #5		//q6=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d4, d6, d8, d10, d12, d2, q14, q15
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d5, d7, d9, d11, d13, d3, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
+
+	cmp		r4, #0
+	bne		w16_xy_10_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer10WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w8_xy_10_mc_luma_loop:
+	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
+	pld			[r0]
+
+	vext.8		d2, d0, d1, #1		//d2=src[-1]
+	vext.8		d3, d0, d1, #2		//d3=src[0]
+	vext.8		d4, d0, d1, #3		//d4=src[1]
+	vext.8		d5, d0, d1, #4		//d5=src[2]
+	vext.8		d6, d0, d1, #5		//d6=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d3, d4, d5, d6, d1, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d1}, [r2], r3
+
+	cmp		r4, #0
+	bne		w8_xy_10_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer10WidthEq4_neon
+	push		{r4, r5, r6}
+	ldr			r6, [sp, #12]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w4_xy_10_mc_luma_loop:
+	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
+	pld			[r0]
+	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
+	pld			[r0]
+
+	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
+	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
+	vext.8		q3, q2, q2, #1		//src[0:6 *]
+	vext.8		q4, q2, q2, #2		//src[1:6 * *]
+
+	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
+	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
+	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
+	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d4, d6, d7, d2, d5, d1, q14, q15
+
+	vmov		r4, r5, d1
+	str	r4, [r2], r3
+	str	r5, [r2], r3
+
+	sub		r6, #2
+	cmp		r6, #0
+	bne		w4_xy_10_mc_luma_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer30WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w16_xy_30_mc_luma_loop:
+	vld1.u8	{d0,d1,d2}, [r0], r1	//only use 21(16+5); q0=src[-2]
+	pld			[r0]
+	pld			[r0, #16]
+
+	vext.8		q2, q0, q1, #1		//q2=src[-1]
+	vext.8		q3, q0, q1, #2		//q3=src[0]
+	vext.8		q4, q0, q1, #3		//q4=src[1]
+	vext.8		q5, q0, q1, #4		//q5=src[2]
+	vext.8		q6, q0, q1, #5		//q6=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d4, d6, d8, d10, d12, d2, q14, q15
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d5, d7, d9, d11, d13, d3, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d2, d3}, [r2], r3		//write 16Byte
+
+	cmp		r4, #0
+	bne		w16_xy_30_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer30WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w8_xy_30_mc_luma_loop:
+	vld1.u8	{d0,d1}, [r0], r1	//only use 13(8+5); q0=src[-2]
+	pld			[r0]
+
+	vext.8		d2, d0, d1, #1		//d2=src[-1]
+	vext.8		d3, d0, d1, #2		//d3=src[0]
+	vext.8		d4, d0, d1, #3		//d4=src[1]
+	vext.8		d5, d0, d1, #4		//d5=src[2]
+	vext.8		d6, d0, d1, #5		//d6=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d3, d4, d5, d6, d1, q14, q15
+
+	sub		r4, #1
+	vst1.u8	{d1}, [r2], r3
+
+	cmp		r4, #0
+	bne		w8_xy_30_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer30WidthEq4_neon
+	push		{r4, r5, r6}
+	ldr			r6, [sp, #12]
+
+	sub			r0, #2
+	vmov.u16	q14, #0x0014				// 20
+	vshr.u16	q15, q14, #2				// 5
+
+w4_xy_30_mc_luma_loop:
+	vld1.u8	{d0, d1}, [r0], r1	//only use 9(4+5);d0: 1st row src[-2:5]
+	pld			[r0]
+	vld1.u8	{d2, d3}, [r0], r1	//d2: 2nd row src[-2:5]
+	pld			[r0]
+
+	vext.8		d4, d0, d1, #1		//d4: 1st row src[-1:6]
+	vext.8		d5, d2, d3, #1		//d5: 2nd row src[-1:6]
+	vext.8		q3, q2, q2, #1		//src[0:6 *]
+	vext.8		q4, q2, q2, #2		//src[1:6 * *]
+
+	vtrn.32	q3, q4					//q3::d6:1st row [0:3]+[1:4]; d7:2nd row [0:3]+[1:4]
+	vtrn.32	d6, d7					//d6:[0:3]; d7[1:4]
+	vtrn.32		d0, d2				//d0:[-2:1]; d2[2:5]
+	vtrn.32		d4, d5				//d4:[-1:2]; d5[3:6]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d4, d6, d7, d2, d5, d1, q14, q15
+
+	vmov		r4, r5, d1
+	str	r4, [r2], r3
+	str	r5, [r2], r3
+
+	sub		r6, #2
+	cmp		r6, #0
+	bne		w4_xy_30_mc_luma_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer01WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
+	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{q2}, [r0], r1		//q2=src[0]
+	vld1.u8	{q3}, [r0], r1		//q3=src[1]
+	vld1.u8	{q4}, [r0], r1		//q4=src[2]
+
+w16_xy_01_luma_loop:
+
+	vld1.u8	{q5}, [r0], r1		//q5=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 2nd row
+	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vld1.u8	{q1}, [r0], r1		//read 3rd row
+	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d4, d6, d8, d10, d0, d2, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d5, d7, d9, d11, d1, d3, d13, q14, q15
+	vld1.u8	{q2}, [r0], r1		//read 4th row
+	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d6, d8, d10, d0, d2, d4, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d7, d9, d11, d1, d3, d5, d13, q14, q15
+	vld1.u8	{q3}, [r0], r1		//read 5th row
+	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d8, d10, d0, d2, d4, d6, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d9, d11, d1, d3, d5, d7, d13, q14, q15
+	vld1.u8	{q4}, [r0], r1		//read 6th row
+	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d10, d0, d2, d4, d6, d8, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d11, d1, d3, d5, d7, d9, d13, q14, q15
+	vld1.u8	{q5}, [r0], r1		//read 7th row
+	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 8th row
+	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
+
+	//q2, q3, q4, q5, q0 --> q0~q4
+	vswp	q0, q4
+	vswp	q0, q2
+	vmov	q1, q3
+	vmov	q3, q5						//q0~q4
+
+	sub		r4, #8
+	cmp		r4, #0
+	bne		w16_xy_01_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer01WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
+	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{d2}, [r0], r1		//d2=src[0]
+	vld1.u8	{d3}, [r0], r1		//d3=src[1]
+
+	vld1.u8	{d4}, [r0], r1		//d4=src[2]
+	vld1.u8	{d5}, [r0], r1		//d5=src[3]
+
+w8_xy_01_mc_luma_loop:
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vld1.u8	{d0}, [r0], r1		//read 2nd row
+	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d1, d2, d3, d4, d5, d0, d12, q14, q15
+	vld1.u8	{d1}, [r0], r1		//read 3rd row
+	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vld1.u8	{d2}, [r0], r1		//read 4th row
+	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d3, d4, d5, d0, d1, d2, d12, q14, q15
+	vld1.u8	{d3}, [r0], r1		//read 5th row
+	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
+
+	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
+	vswp	q0, q2
+	vswp	q1, q2
+
+	sub		r4, #4
+	cmp		r4, #0
+	bne		w8_xy_01_mc_luma_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer01WidthEq4_neon
+	push		{r4, r5, r6, r7}
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	ldr		r4, [r0], r1		//r4=src[-2]
+	ldr		r5, [r0], r1		//r5=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	ldr		r6, [r0], r1		//r6=src[0]
+	ldr		r7, [r0], r1		//r7=src[1]
+
+	vmov		d0, r4, r5
+	vmov		d1, r5, r6
+	vmov		d2, r6, r7
+
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d3, r7, r4
+	ldr			r7, [sp, #16]
+
+w4_xy_01_mc_luma_loop:
+
+//	pld			[r0]
+	//using reserving r4
+	ldr		r5, [r0], r1		//r5=src[3]
+	ldr		r6, [r0], r1		//r6=src[0]
+	vmov		d4, r4, r5
+	vmov		d5, r5, r6			//reserved r6
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vmov		r4, r5, d12
+	str	r4, [r2], r3			//write 1st 4Byte
+	str	r5, [r2], r3			//write 2nd 4Byte
+
+	ldr		r5, [r0], r1		//r5=src[1]
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d0, r6, r5
+	vmov		d1, r5, r4			//reserved r4
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_0 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vmov		r5, r6, d12
+	str	r5, [r2], r3			//write 3rd 4Byte
+	str	r6, [r2], r3			//write 4th 4Byte
+
+	//d4, d5, d0, d1 --> d0, d1, d2, d3
+	vmov	q1, q0
+	vmov	q0, q2
+
+	sub		r7, #4
+	cmp		r7, #0
+	bne		w4_xy_01_mc_luma_loop
+
+	pop		{r4, r5, r6, r7}
+WELS_ASM_FUNC_END
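
The McHorVer01* family above is the vertical (0,1) quarter-pel: a vertical 6-tap per column, after which AVERAGE_WITH_0 blends the result with the src[0] row. The Eq16/Eq8 loops keep a six-row window resident in q-registers and rotate it with the vswp/vmov block at the bottom of each iteration, so only one new row is loaded per output row; the Eq4 version packs two 4-pixel rows per d-register via ldr/vmov. A per-pixel scalar sketch (illustrative names; src points at the column's src[0] sample):

#include <stdint.h>

static uint8_t mc_ver01_pixel(const uint8_t *src, int stride) {
    int sum = (src[-2 * stride] + src[3 * stride])
            + 20 * (src[0] + src[stride])
            - 5 * (src[-stride] + src[2 * stride]);
    int h = (sum + 16) >> 5;
    h = h < 0 ? 0 : h > 255 ? 255 : h;             /* vqrshrun.s16 #5 */
    return (uint8_t)((src[0] + h + 1) >> 1);       /* average with src[0] */
}
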
+
+
+WELS_ASM_FUNC_BEGIN McHorVer03WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
+	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{q2}, [r0], r1		//q2=src[0]
+	vld1.u8	{q3}, [r0], r1		//q3=src[1]
+	vld1.u8	{q4}, [r0], r1		//q4=src[2]
+
+w16_xy_03_luma_loop:
+
+	vld1.u8	{q5}, [r0], r1		//q5=src[3]
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 2nd row
+	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vld1.u8	{q1}, [r0], r1		//read 3rd row
+	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d4, d6, d8, d10, d0, d2, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d5, d7, d9, d11, d1, d3, d13, q14, q15
+	vld1.u8	{q2}, [r0], r1		//read 4th row
+	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d6, d8, d10, d0, d2, d4, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d7, d9, d11, d1, d3, d5, d13, q14, q15
+	vld1.u8	{q3}, [r0], r1		//read 5th row
+	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d8, d10, d0, d2, d4, d6, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d9, d11, d1, d3, d5, d7, d13, q14, q15
+	vld1.u8	{q4}, [r0], r1		//read 6th row
+	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d10, d0, d2, d4, d6, d8, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d11, d1, d3, d5, d7, d9, d13, q14, q15
+	vld1.u8	{q5}, [r0], r1		//read 7th row
+	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 8th row
+	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
+
+	//q2, q3, q4, q5, q0 --> q0~q4
+	vswp	q0, q4
+	vswp	q0, q2
+	vmov	q1, q3
+	vmov	q3, q5						//q0~q4
+
+	sub		r4, #8
+	cmp		r4, #0
+	bne		w16_xy_03_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer03WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
+	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{d2}, [r0], r1		//d2=src[0]
+	vld1.u8	{d3}, [r0], r1		//d3=src[1]
+
+	vld1.u8	{d4}, [r0], r1		//d4=src[2]
+	vld1.u8	{d5}, [r0], r1		//d5=src[3]
+
+w8_xy_03_mc_luma_loop:
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vld1.u8	{d0}, [r0], r1		//read 2nd row
+	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d1, d2, d3, d4, d5, d0, d12, q14, q15
+	vld1.u8	{d1}, [r0], r1		//read 3rd row
+	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vld1.u8	{d2}, [r0], r1		//read 4th row
+	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d3, d4, d5, d0, d1, d2, d12, q14, q15
+	vld1.u8	{d3}, [r0], r1		//read 5th row
+	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
+
+	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
+	vswp	q0, q2
+	vswp	q1, q2
+
+	sub		r4, #4
+	cmp		r4, #0
+	bne		w8_xy_03_mc_luma_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer03WidthEq4_neon
+	push		{r4, r5, r6, r7}
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	ldr		r4, [r0], r1		//r4=src[-2]
+	ldr		r5, [r0], r1		//r5=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	ldr		r6, [r0], r1		//r6=src[0]
+	ldr		r7, [r0], r1		//r7=src[1]
+
+	vmov		d0, r4, r5
+	vmov		d1, r5, r6
+	vmov		d2, r6, r7
+
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d3, r7, r4
+	ldr			r7, [sp, #16]
+
+w4_xy_03_mc_luma_loop:
+
+//	pld			[r0]
+	//using reserving r4
+	ldr		r5, [r0], r1		//r5=src[3]
+	ldr		r6, [r0], r1		//r6=src[0]
+	vmov		d4, r4, r5
+	vmov		d5, r5, r6			//reserved r6
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vmov		r4, r5, d12
+	str	r4, [r2], r3			//write 1st 4Byte
+	str	r5, [r2], r3			//write 2nd 4Byte
+
+	ldr		r5, [r0], r1		//r5=src[1]
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d0, r6, r5
+	vmov		d1, r5, r4			//reserved r4
+
+	FILTER_6TAG_8BITS_AVERAGE_WITH_1 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vmov		r5, r6, d12
+	str	r5, [r2], r3			//write 3rd 4Byte
+	str	r6, [r2], r3			//write 4th 4Byte
+
+	//d4, d5, d0, d1 --> d0, d1, d2, d3
+	vmov	q1, q0
+	vmov	q0, q2
+
+	sub		r7, #4
+	cmp		r7, #0
+	bne		w4_xy_03_mc_luma_loop
+
+	pop		{r4, r5, r6, r7}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer02WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{q0}, [r0], r1		//q0=src[-2]
+	vld1.u8	{q1}, [r0], r1		//q1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{q2}, [r0], r1		//q2=src[0]
+	vld1.u8	{q3}, [r0], r1		//q3=src[1]
+	vld1.u8	{q4}, [r0], r1		//q4=src[2]
+
+w16_v_mc_luma_loop:
+
+	vld1.u8	{q5}, [r0], r1		//q5=src[3]
+
+	FILTER_6TAG_8BITS 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 2nd row
+	vst1.u8	{q6}, [r2], r3			//write 1st 16Byte
+
+	FILTER_6TAG_8BITS 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vld1.u8	{q1}, [r0], r1		//read 3rd row
+	vst1.u8	{q6}, [r2], r3			//write 2nd 16Byte
+
+	FILTER_6TAG_8BITS 	d4, d6, d8, d10, d0, d2, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d5, d7, d9, d11, d1, d3, d13, q14, q15
+	vld1.u8	{q2}, [r0], r1		//read 4th row
+	vst1.u8	{q6}, [r2], r3			//write 3rd 16Byte
+
+	FILTER_6TAG_8BITS 	d6, d8, d10, d0, d2, d4, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d7, d9, d11, d1, d3, d5, d13, q14, q15
+	vld1.u8	{q3}, [r0], r1		//read 5th row
+	vst1.u8	{q6}, [r2], r3			//write 4th 16Byte
+
+	FILTER_6TAG_8BITS 	d8, d10, d0, d2, d4, d6, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d9, d11, d1, d3, d5, d7, d13, q14, q15
+	vld1.u8	{q4}, [r0], r1		//read 6th row
+	vst1.u8	{q6}, [r2], r3			//write 5th 16Byte
+
+	FILTER_6TAG_8BITS 	d10, d0, d2, d4, d6, d8, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d11, d1, d3, d5, d7, d9, d13, q14, q15
+	vld1.u8	{q5}, [r0], r1		//read 7th row
+	vst1.u8	{q6}, [r2], r3			//write 6th 16Byte
+
+	FILTER_6TAG_8BITS 	d0, d2, d4, d6, d8, d10, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d1, d3, d5, d7, d9, d11, d13, q14, q15
+	vld1.u8	{q0}, [r0], r1		//read 8th row
+	vst1.u8	{q6}, [r2], r3			//write 7th 16Byte
+
+	FILTER_6TAG_8BITS 	d2, d4, d6, d8, d10, d0, d12, q14, q15
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d3, d5, d7, d9, d11, d1, d13, q14, q15
+	vst1.u8	{q6}, [r2], r3			//write 8th 16Byte
+
+	//q2, q3, q4, q5, q0 --> q0~q4
+	vswp	q0, q4
+	vswp	q0, q2
+	vmov	q1, q3
+	vmov	q3, q5						//q0~q4
+
+	sub		r4, #8
+	cmp		r4, #0
+	bne		w16_v_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer02WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{d0}, [r0], r1		//d0=src[-2]
+	vld1.u8	{d1}, [r0], r1		//d1=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	vld1.u8	{d2}, [r0], r1		//d2=src[0]
+	vld1.u8	{d3}, [r0], r1		//d3=src[1]
+
+	vld1.u8	{d4}, [r0], r1		//d4=src[2]
+	vld1.u8	{d5}, [r0], r1		//d5=src[3]
+
+w8_v_mc_luma_loop:
+
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vld1.u8	{d0}, [r0], r1		//read 2nd row
+	vst1.u8	{d12}, [r2], r3		//write 1st 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d1, d2, d3, d4, d5, d0, d12, q14, q15
+	vld1.u8	{d1}, [r0], r1		//read 3rd row
+	vst1.u8	{d12}, [r2], r3		//write 2nd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vld1.u8	{d2}, [r0], r1		//read 4th row
+	vst1.u8	{d12}, [r2], r3		//write 3rd 8Byte
+
+	pld			[r0]
+	FILTER_6TAG_8BITS 	d3, d4, d5, d0, d1, d2, d12, q14, q15
+	vld1.u8	{d3}, [r0], r1		//read 5th row
+	vst1.u8	{d12}, [r2], r3		//write 4th 8Byte
+
+	//d4, d5, d0, d1, d2, d3 --> d0, d1, d2, d3, d4, d5
+	vswp	q0, q2
+	vswp	q1, q2
+
+	sub		r4, #4
+	cmp		r4, #0
+	bne		w8_v_mc_luma_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer02WidthEq4_neon
+	push		{r4, r5, r6, r7}
+	sub			r0, r1, lsl #1		//src[-2*src_stride]
+	pld			[r0]
+	pld			[r0, r1]
+	vmov.u16	q14, #0x0014			// 20
+	ldr		r4, [r0], r1		//r4=src[-2]
+	ldr		r5, [r0], r1		//r5=src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+	ldr		r6, [r0], r1		//r6=src[0]
+	ldr		r7, [r0], r1		//r7=src[1]
+
+	vmov		d0, r4, r5
+	vmov		d1, r5, r6
+	vmov		d2, r6, r7
+
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d3, r7, r4
+	ldr			r7, [sp, #16]
+
+w4_v_mc_luma_loop:
+
+//	pld			[r0]
+	//using reserving r4
+	ldr		r5, [r0], r1		//r5=src[3]
+	ldr		r6, [r0], r1		//r6=src[0]
+	vmov		d4, r4, r5
+	vmov		d5, r5, r6			//reserved r6
+
+	FILTER_6TAG_8BITS 	d0, d1, d2, d3, d4, d5, d12, q14, q15
+	vmov		r4, r5, d12
+	str	r4, [r2], r3			//write 1st 4Byte
+	str	r5, [r2], r3			//write 2nd 4Byte
+
+	ldr		r5, [r0], r1		//r5=src[1]
+	ldr		r4, [r0], r1		//r4=src[2]
+	vmov		d0, r6, r5
+	vmov		d1, r5, r4			//reserved r4
+
+	FILTER_6TAG_8BITS 	d2, d3, d4, d5, d0, d1, d12, q14, q15
+	vmov		r5, r6, d12
+	str	r5, [r2], r3			//write 3rd 4Byte
+	str	r6, [r2], r3			//write 4th 4Byte
+
+	//d4, d5, d0, d1 --> d0, d1, d2, d3
+	vmov	q1, q0
+	vmov	q0, q2
+
+	sub		r7, #4
+	cmp		r7, #0
+	bne		w4_v_mc_luma_loop
+
+	pop		{r4, r5, r6, r7}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer22WidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2					//src[-2]
+	sub			r0, r1, lsl #1		//src[-2*src_stride-2]
+	pld			[r0]
+	pld			[r0, r1]
+
+	vmov.u16	q14, #0x0014			// 20
+	vld1.u8	{d0-d2}, [r0], r1		//use 21(16+5), =src[-2]
+	vld1.u8	{d3-d5}, [r0], r1		//use 21(16+5), =src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2			// 5
+
+	vld1.u8	{d6-d8}, [r0], r1		//use 21(16+5), =src[0]
+	vld1.u8	{d9-d11}, [r0], r1	//use 21(16+5), =src[1]
+	pld			[r0]
+	pld			[r0, r1]
+	vld1.u8	{d12-d14}, [r0], r1	//use 21(16+5), =src[2]
+
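+	// Centre (half-pel in both directions) interpolation is done separably below:
+	// each row is first filtered vertically into 16-bit intermediates
+	// (FILTER_6TAG_8BITS_TO_16BITS), then the same 6-tap kernel is applied
+	// horizontally across those intermediates. Per the H.264 spec the combined
+	// value is (sum + 512) >> 10, clipped to 8 bits; the UNPACK_2_16BITS_TO_ABC /
+	// FILTER_3_IN_16BITS_TO_8BITS pair (defined earlier) is assumed to implement
+	// that second stage by grouping the taps as (A+F), (B+E), (C+D).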
+w16_hv_mc_luma_loop:
+
+	vld1.u8	{d15-d17}, [r0], r1	//use 21(16+5), =src[3]
+	//the 1st row
+	pld			[r0]
+	// vertical filtered into q9/q10
+	FILTER_6TAG_8BITS_TO_16BITS 	d0, d3, d6, d9, d12, d15, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d1, d4, d7,d10, d13, d16,q10, q14, q15	// 8 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d0	//output to q0[0]
+
+	// vertical filtered into q10/q11
+	FILTER_6TAG_8BITS_TO_16BITS 	d2, d5, d8,d11, d14, d17,q11, q14, q15	// only 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d1	//output to q0[1]
+	vst1.u8	{q0}, [r2], r3		//write 16Byte
+
+
+	vld1.u8	{d0-d2}, [r0], r1		//read 2nd row
+	//the 2nd row
+	pld			[r0]
+	// vertical filtered into q9/q10
+	FILTER_6TAG_8BITS_TO_16BITS 	d3, d6, d9, d12, d15, d0, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d4, d7,d10, d13, d16, d1,q10, q14, q15	// 8 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d3	//output to d3
+
+	// vertical filtered into q10/q11
+	FILTER_6TAG_8BITS_TO_16BITS 	d5, d8,d11, d14, d17, d2,q11, q14, q15	// only 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d4	//output to d4
+
+	vst1.u8	{d3, d4}, [r2], r3		//write 16Byte
+
+	vld1.u8	{d3-d5}, [r0], r1		//read 3rd row
+	//the 3rd row
+	pld			[r0]
+	// vertical filtered into q9/q10
+	FILTER_6TAG_8BITS_TO_16BITS 	d6, d9, d12, d15, d0, d3, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d7,d10, d13, d16, d1, d4,q10, q14, q15	// 8 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d6	//output to d6
+
+	// vertical filtered into q10/q11
+	FILTER_6TAG_8BITS_TO_16BITS 	d8,d11, d14, d17, d2, d5,q11, q14, q15	// only 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d7	//output to d7
+	vst1.u8	{d6, d7}, [r2], r3		//write 16Byte
+
+	vld1.u8	{d6-d8}, [r0], r1		//read 4th row
+	//the 4th row
+	pld			[r0]
+	// vertical filtered into q9/q10
+	FILTER_6TAG_8BITS_TO_16BITS 	 d9, d12, d15, d0, d3, d6, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS		d10, d13, d16, d1, d4, d7,q10, q14, q15	// 8 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q9, q10, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d9	//output to d9
+	// vertical filtered into q10/q11
+	FILTER_6TAG_8BITS_TO_16BITS 	d11, d14, d17, d2, d5, d8,q11, q14, q15	// only 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q10, q11, q9, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q9, q12, q13, d10	//output to d10
+	vst1.u8	{d9, d10}, [r2], r3		//write 16Byte
+
+	//d12~d17(q6~q8), d0~d8(q0~q3+d8), --> d0~d14
+	vswp	q0, q6
+	vswp	q6, q3
+	vmov	q5, q2
+	vmov	q2, q8
+
+	vmov	d20,d8
+	vmov	q4, q1
+	vmov	q1, q7
+	vmov	d14,d20
+
+	sub		r4, #4
+	cmp		r4, #0
+	bne		w16_hv_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer22WidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+
+	sub			r0, #2				//src[-2]
+	sub			r0, r1, lsl #1	//src[-2*src_stride-2]
+	pld			[r0]
+	pld			[r0, r1]
+
+	vmov.u16	q14, #0x0014		// 20
+	vld1.u8	{q0}, [r0], r1	//use 13(8+5), =src[-2]
+	vld1.u8	{q1}, [r0], r1	//use 13(8+5), =src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2		// 5
+
+	vld1.u8	{q2}, [r0], r1	//use 13(8+5), =src[0]
+	vld1.u8	{q3}, [r0], r1	//use 13(8+5), =src[1]
+	pld			[r0]
+	pld			[r0, r1]
+	vld1.u8	{q4}, [r0], r1	//use 13(8+5), =src[2]
+
+w8_hv_mc_luma_loop:
+
+	vld1.u8	{q5}, [r0], r1	//use 13(8+5), =src[3]
+	//the 1st row
+	pld			[r0]
+	// vertical filtered into q6/q7
+	FILTER_6TAG_8BITS_TO_16BITS 	d0, d2, d4, d6, d8, d10, q6, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d1, d3, d5, d7, d9, d11, q7, q14, q15	// 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
+	vst1.u8	d12, [r2], r3			//write 8Byte
+
+	vld1.u8	{q0}, [r0], r1		//read 2nd row
+	//the 2nd row
+	pld			[r0]
+	// vertical filtered into q6/q7
+	FILTER_6TAG_8BITS_TO_16BITS 	d2, d4, d6, d8, d10, d0, q6, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d3, d5, d7, d9, d11, d1, q7, q14, q15	// 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
+	vst1.u8	d12, [r2], r3		//write 8Byte
+
+	vld1.u8	{q1}, [r0], r1		//read 3rd row
+	//the 3rd row
+	pld			[r0]
+	// vertical filtered into q6/q7
+	FILTER_6TAG_8BITS_TO_16BITS 	d4, d6, d8, d10, d0, d2, q6, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d5, d7, d9, d11, d1, d3, q7, q14, q15	// 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
+	vst1.u8	d12, [r2], r3			//write 8Byte
+
+	vld1.u8	{q2}, [r0], r1		//read 4th row
+	//the 4th row
+	pld			[r0]
+	// vertical filtered into q6/q7
+	FILTER_6TAG_8BITS_TO_16BITS 	d6, d8, d10, d0, d2, d4, q6, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d7, d9, d11, d1, d3, d5, q7, q14, q15	// 5 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q6, q7, q11, q12, q13
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d12	//output to q6[0]
+	vst1.u8	d12, [r2], r3			//write 8Byte
+
+	//q4~q5, q0~q2, --> q0~q4
+	vswp	q0, q4
+	vswp	q2, q4
+	vmov	q3, q1
+	vmov	q1, q5
+
+	sub		r4, #4
+	cmp		r4, #0
+	bne		w8_hv_mc_luma_loop
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McHorVer22WidthEq4_neon
+	push		{r4, r5, r6}
+	ldr			r6, [sp, #12]
+
+	sub			r0, #2				//src[-2]
+	sub			r0, r1, lsl #1	//src[-2*src_stride-2]
+	pld			[r0]
+	pld			[r0, r1]
+
+	vmov.u16	q14, #0x0014		// 20
+	vld1.u8	{q0}, [r0], r1	//use 9(4+5), =src[-2]
+	vld1.u8	{q1}, [r0], r1	//use 9(4+5), =src[-1]
+
+	pld			[r0]
+	pld			[r0, r1]
+	vshr.u16	q15, q14, #2		// 5
+
+	vld1.u8	{q2}, [r0], r1	//use 9(4+5), =src[0]
+	vld1.u8	{q3}, [r0], r1	//use 9(4+5), =src[1]
+	pld			[r0]
+	pld			[r0, r1]
+	vld1.u8	{q4}, [r0], r1	//use 9(4+5), =src[2]
+
+w4_hv_mc_luma_loop:
+
+	vld1.u8	{q5}, [r0], r1	//use 9(4+5), =src[3]
+	vld1.u8	{q6}, [r0], r1	//use 9(4+5), =src[4]
+
+	//the 1st&2nd row
+	pld			[r0]
+	pld			[r0, r1]
+	// vertical filtered
+	FILTER_6TAG_8BITS_TO_16BITS 	d0, d2, d4, d6, d8, d10, q7, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d1, d3, d5, d7, d9, d11, q8, q14, q15	// 1 avail
+
+	FILTER_6TAG_8BITS_TO_16BITS 	d2, d4, d6, d8,d10, d12, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d3, d5, d7, d9,d11, d13,q10, q14, q15	// 1 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q7, q8, q11, q12, q13	//4 avail
+	UNPACK_2_16BITS_TO_ABC	q9,q10, q0, q7, q8		//4 avail
+
+	vmov	d23, d0
+	vmov	d25, d14
+	vmov	d27, d16
+
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d22	//output to q11[0]
+	vmov		r4, r5, d22
+	str		r4, [r2], r3				//write 4Byte
+	str		r5, [r2], r3				//write 4Byte
+
+	//the 3rd&4th row
+	vld1.u8	{q0}, [r0], r1	//use 9(4+5), =src[5]
+	vld1.u8	{q1}, [r0], r1	//use 9(4+5), =src[6]
+	pld			[r0]
+	pld			[r0, r1]
+	// vertical filtered
+	FILTER_6TAG_8BITS_TO_16BITS 	d4, d6, d8, d10, d12, d0, q7, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d5, d7, d9, d11, d13, d1, q8, q14, q15	// 1 avail
+
+	FILTER_6TAG_8BITS_TO_16BITS 	d6, d8,d10, d12, d0, d2, q9, q14, q15	// 8 avail
+	FILTER_6TAG_8BITS_TO_16BITS 	d7, d9,d11, d13, d1, d3,q10, q14, q15	// 1 avail
+	// horizontally filtered
+	UNPACK_2_16BITS_TO_ABC	q7, q8, q11, q12, q13	//4 avail
+	UNPACK_2_16BITS_TO_ABC	q9,q10, q2, q7, q8		//4 avail
+
+	vmov	d23, d4
+	vmov	d25, d14
+	vmov	d27, d16
+
+	FILTER_3_IN_16BITS_TO_8BITS q11, q12, q13, d22	//output to q11[0]
+	vmov		r4, r5, d22
+	str		r4, [r2], r3				//write 4Byte
+	str		r5, [r2], r3				//write 4Byte
+
+	//q4~q6, q0~q1, --> q0~q4
+	vswp	q4, q0
+	vmov	q3, q4
+	vmov	q4, q1
+	vmov	q1, q5
+	vmov	q2, q6
+
+	sub		r6, #4
+	cmp		r6, #0
+	bne		w4_hv_mc_luma_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McCopyWidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+w16_copy_loop:
+	vld1.u8		{q0}, [r0], r1
+	sub			r4, #2
+	vld1.u8		{q1}, [r0], r1
+	vst1.u8		{q0}, [r2], r3
+	cmp			r4, #0
+	vst1.u8		{q1}, [r2], r3
+	bne			w16_copy_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McCopyWidthEq8_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
+w8_copy_loop:
+	vld1.u8		{d0}, [r0], r1
+	vld1.u8		{d1}, [r0], r1
+	vst1.u8		{d0}, [r2], r3
+	vst1.u8		{d1}, [r2], r3
+	sub			r4, #2
+	cmp			r4, #0
+	bne			w8_copy_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McCopyWidthEq4_neon
+	push		{r4, r5, r6}
+	ldr			r4, [sp, #12]
+w4_copy_loop:
+	ldr		r5, [r0], r1
+	ldr		r6, [r0], r1
+	str		r5, [r2], r3
+	str		r6, [r2], r3
+
+	sub			r4, #2
+	cmp			r4, #0
+	bne			w4_copy_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN PixelAvgWidthEq16_neon
+	push		{r4}
+	ldr			r4, [sp, #4]
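+	// r2/r3 are the two source blocks to be averaged (stored contiguously, 16 bytes
+	// per row, hence the post-incrementing loads below); the result goes to r0 with
+	// stride r1. AVERAGE_TWO_8BITS is assumed to be the rounding average
+	// (a + b + 1) >> 1 that H.264 uses when combining two interpolated predictions.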
+w16_pix_avg_loop:
+	vld1.u8		{q0}, [r2]!
+	vld1.u8		{q1}, [r3]!
+	vld1.u8		{q2}, [r2]!
+	vld1.u8		{q3}, [r3]!
+
+	vld1.u8		{q4}, [r2]!
+	vld1.u8		{q5}, [r3]!
+	vld1.u8		{q6}, [r2]!
+	vld1.u8		{q7}, [r3]!
+
+	AVERAGE_TWO_8BITS		d0, d0, d2
+	AVERAGE_TWO_8BITS		d1, d1, d3
+	vst1.u8		{q0}, [r0], r1
+
+	AVERAGE_TWO_8BITS		d4, d4, d6
+	AVERAGE_TWO_8BITS		d5, d5, d7
+	vst1.u8		{q2}, [r0], r1
+
+	AVERAGE_TWO_8BITS		d8, d8, d10
+	AVERAGE_TWO_8BITS		d9, d9, d11
+	vst1.u8		{q4}, [r0], r1
+
+	AVERAGE_TWO_8BITS		d12, d12, d14
+	AVERAGE_TWO_8BITS		d13, d13, d15
+	vst1.u8		{q6}, [r0], r1
+
+	sub			r4, #4
+	cmp			r4, #0
+	bne			w16_pix_avg_loop
+
+	pop		{r4}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN PixelAvgWidthEq8_neon
+	push		{r4, r5}
+	ldr			r4, [sp, #8]
+	mov			r5, #16
+w8_pix_avg_loop:
+
+	vld1.u8		{d0}, [r2], r5
+	vld1.u8		{d2}, [r3], r5
+	vld1.u8		{d1}, [r2], r5
+	vld1.u8		{d3}, [r3], r5
+
+	AVERAGE_TWO_8BITS		d0, d0, d2
+	AVERAGE_TWO_8BITS		d1, d1, d3
+	vst1.u8		{d0}, [r0], r1
+	vst1.u8		{d1}, [r0], r1
+
+	vld1.u8		{d4}, [r2], r5
+	vld1.u8		{d6}, [r3], r5
+	vld1.u8		{d5}, [r2], r5
+	vld1.u8		{d7}, [r3], r5
+
+	AVERAGE_TWO_8BITS		d4, d4, d6
+	AVERAGE_TWO_8BITS		d5, d5, d7
+	vst1.u8		{d4}, [r0], r1
+	vst1.u8		{d5}, [r0], r1
+
+	sub			r4, #4
+	cmp			r4, #0
+	bne			w8_pix_avg_loop
+
+	pop		{r4, r5}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN PixelAvgWidthEq4_neon
+	push		{r4-r8}
+	ldr			r4, [sp, #20]
+w4_pix_avg_loop:
+
+	ldr		r5, [r2]
+	ldr		r6, [r2, #16]
+	ldr		r7, [r3]
+	ldr		r8, [r3, #16]
+	add		r2, #32
+	add		r3, #32
+
+	vmov		d0, r5, r6
+	vmov		d1, r7, r8
+	AVERAGE_TWO_8BITS		d0, d0, d1
+	vmov		r5, r6, d0
+
+	str		r5, [r0], r1
+	str		r6, [r0], r1
+
+	sub			r4, #2
+	cmp			r4, #0
+	bne			w4_pix_avg_loop
+
+	pop		{r4-r8}
+WELS_ASM_FUNC_END
+
+WELS_ASM_FUNC_BEGIN McChromaWidthEq8_neon
+	push		{r4, r5}
+	ldr			r4, [sp, #8]
+	ldr			r5, [sp, #12]
+//	normal case: {cA*src[x] + cB*src[x+1]} + {cC*src[x+stride] + cD*src[x+stride+1]}
+//	this could be optimized further by adding vertical-only / horizontal-only special cases; to be continued
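+//	The four weights in the table at r4 are expected to follow the H.264 chroma
+//	interpolation, i.e. for fractional offsets (xFrac, yFrac):
+//	    A = (8-xFrac)*(8-yFrac),  B = xFrac*(8-yFrac),
+//	    C = (8-xFrac)*yFrac,      D = xFrac*yFrac
+//	so that A+B+C+D = 64, which is why each accumulation below ends with
+//	vrshrn.u16 #6 (add 32 and divide by 64).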
+	vld1.u8	{d31}, [r4]		//load A/B/C/D
+	vld1.u8		{q0}, [r0], r1	//src[x]
+
+	vdup.u8	d28, d31[0]			//A
+	vdup.u8	d29, d31[1]			//B
+	vdup.u8	d30, d31[2]			//C
+	vdup.u8	d31, d31[3]			//D
+
+	vext.u8		d1, d0, d1, #1		//src[x+1]
+
+w8_mc_chroma_loop:	// each iteration handles two pixel rows
+	vld1.u8		{q1}, [r0], r1	//src[x+stride]
+	vld1.u8		{q2}, [r0], r1	//src[x+2*stride]
+	vext.u8		d3, d2, d3, #1		//src[x+stride+1]
+	vext.u8		d5, d4, d5, #1		//src[x+2*stride+1]
+
+	vmull.u8		q3, d0, d28			//(src[x] * A)
+	vmlal.u8		q3, d1, d29			//+=(src[x+1] * B)
+	vmlal.u8		q3, d2, d30			//+=(src[x+stride] * C)
+	vmlal.u8		q3, d3, d31			//+=(src[x+stride+1] * D)
+	vrshrn.u16		d6, q3, #6
+	vst1.u8	d6, [r2], r3
+
+	vmull.u8		q3, d2, d28			//(src[x] * A)
+	vmlal.u8		q3, d3, d29			//+=(src[x+1] * B)
+	vmlal.u8		q3, d4, d30			//+=(src[x+stride] * C)
+	vmlal.u8		q3, d5, d31			//+=(src[x+stride+1] * D)
+	vrshrn.u16		d6, q3, #6
+	vst1.u8	d6, [r2], r3
+
+	vmov		q0, q2
+	sub			r5, #2
+	cmp			r5, #0
+	bne			w8_mc_chroma_loop
+
+	pop		{r4, r5}
+WELS_ASM_FUNC_END
+
+
+WELS_ASM_FUNC_BEGIN McChromaWidthEq4_neon
+
+	push		{r4, r5, r6}
+	ldr			r4, [sp, #12]
+	ldr			r6, [sp, #16]
+//	normal case: {cA*src[x] + cB*src[x+1]} + {cC*src[x+stride] + cD*src[x+stride+1]}
+//	this could be optimized further by adding vertical-only / horizontal-only special cases; to be continued
+	vld1.u8	{d31}, [r4]		//load A/B/C/D
+
+	vdup.u8	d28, d31[0]			//A
+	vdup.u8	d29, d31[1]			//B
+	vdup.u8	d30, d31[2]			//C
+	vdup.u8	d31, d31[3]			//D
+
+w4_mc_chroma_loop:	// each iteration handles two pixel rows
+	vld1.u8		{d0}, [r0], r1	//a::src[x]
+	vld1.u8		{d2}, [r0], r1	//b::src[x+stride]
+	vld1.u8		{d4}, [r0]			//c::src[x+2*stride]
+
+	vshr.u64		d1, d0, #8
+	vshr.u64		d3, d2, #8
+	vshr.u64		d5, d4, #8
+
+	vmov			q3, q1				//b::[0:7]+b::[1~8]
+	vtrn.32		q0, q1				//d0{a::[0:3]+b::[0:3]}; d1{a::[1:4]+b::[1:4]}
+	vtrn.32		q3, q2				//d6{b::[0:3]+c::[0:3]}; d7{b::[1:4]+c::[1:4]}
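+	// Lane layout after the vtrn.32 pair: the low half of each operand belongs to
+	// the first output row, the high half to the second:
+	//   d0 = {a[0..3], b[0..3]}   -> src[x]
+	//   d1 = {a[1..4], b[1..4]}   -> src[x+1]
+	//   d6 = {b[0..3], c[0..3]}   -> src[x+stride]
+	//   d7 = {b[1..4], c[1..4]}   -> src[x+stride+1]
+	// so the single multiply-accumulate chain below yields 4+4 pixels covering two rows.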
+
+	vmull.u8		q1, d0, d28			//(src[x] * A)
+	vmlal.u8		q1, d1, d29			//+=(src[x+1] * B)
+	vmlal.u8		q1, d6, d30			//+=(src[x+stride] * C)
+	vmlal.u8		q1, d7, d31			//+=(src[x+stride+1] * D)
+
+	vrshrn.u16		d2, q1, #6
+	vmov		r4, r5, d2
+	str	r4, [r2], r3
+	str	r5, [r2], r3
+
+	sub			r6, #2
+	cmp			r6, #0
+	bne			w4_mc_chroma_loop
+
+	pop		{r4, r5, r6}
+WELS_ASM_FUNC_END
+#endif