ref: 931c0a954f659cf0210a6abde660e466c01b91e5
parent: 8b17f7f4eb86107ac9ed9e31a2a8bba238a1247a
 parent: 66b9933b8d2b329a8555be0c53a409cb548605ed
	author: Johann <johannkoenig@google.com>
	date: Tue Apr 21 11:45:29 EDT 2015
	
Merge "Rename neon convolve avg file"
--- a/vp9/common/arm/neon/vp9_avg_neon.c
+++ /dev/null
@@ -1,145 +1,0 @@
-/*
- * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stddef.h>
-#include <arm_neon.h>
-
-void vp9_convolve_avg_neon(
- const uint8_t *src, // r0
- ptrdiff_t src_stride, // r1
- uint8_t *dst, // r2
- ptrdiff_t dst_stride, // r3
- const int16_t *filter_x,
- int filter_x_stride,
- const int16_t *filter_y,
- int filter_y_stride,
- int w,
-        int h) {
- uint8_t *d;
- uint8x8_t d0u8, d1u8, d2u8, d3u8;
- uint32x2_t d0u32, d2u32;
- uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8;
- (void)filter_x; (void)filter_x_stride;
- (void)filter_y; (void)filter_y_stride;
-
- d = dst;
-    if (w > 32) {  // avg64
-        for (; h > 0; h -= 1) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
- q2u8 = vld1q_u8(src + 32);
- q3u8 = vld1q_u8(src + 48);
- src += src_stride;
- q8u8 = vld1q_u8(d);
- q9u8 = vld1q_u8(d + 16);
- q10u8 = vld1q_u8(d + 32);
- q11u8 = vld1q_u8(d + 48);
- d += dst_stride;
-
- q0u8 = vrhaddq_u8(q0u8, q8u8);
- q1u8 = vrhaddq_u8(q1u8, q9u8);
- q2u8 = vrhaddq_u8(q2u8, q10u8);
- q3u8 = vrhaddq_u8(q3u8, q11u8);
-
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
- vst1q_u8(dst + 32, q2u8);
- vst1q_u8(dst + 48, q3u8);
- dst += dst_stride;
- }
-    } else if (w == 32) {  // avg32
-        for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
- src += src_stride;
- q2u8 = vld1q_u8(src);
- q3u8 = vld1q_u8(src + 16);
- src += src_stride;
- q8u8 = vld1q_u8(d);
- q9u8 = vld1q_u8(d + 16);
- d += dst_stride;
- q10u8 = vld1q_u8(d);
- q11u8 = vld1q_u8(d + 16);
- d += dst_stride;
-
- q0u8 = vrhaddq_u8(q0u8, q8u8);
- q1u8 = vrhaddq_u8(q1u8, q9u8);
- q2u8 = vrhaddq_u8(q2u8, q10u8);
- q3u8 = vrhaddq_u8(q3u8, q11u8);
-
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
- dst += dst_stride;
- vst1q_u8(dst, q2u8);
- vst1q_u8(dst + 16, q3u8);
- dst += dst_stride;
- }
-    } else if (w > 8) {  // avg16
-        for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
- src += src_stride;
- q1u8 = vld1q_u8(src);
- src += src_stride;
- q2u8 = vld1q_u8(d);
- d += dst_stride;
- q3u8 = vld1q_u8(d);
- d += dst_stride;
-
- q0u8 = vrhaddq_u8(q0u8, q2u8);
- q1u8 = vrhaddq_u8(q1u8, q3u8);
-
- vst1q_u8(dst, q0u8);
- dst += dst_stride;
- vst1q_u8(dst, q1u8);
- dst += dst_stride;
- }
-    } else if (w == 8) {  // avg8
-        for (; h > 0; h -= 2) {
- d0u8 = vld1_u8(src);
- src += src_stride;
- d1u8 = vld1_u8(src);
- src += src_stride;
- d2u8 = vld1_u8(d);
- d += dst_stride;
- d3u8 = vld1_u8(d);
- d += dst_stride;
-
- q0u8 = vcombine_u8(d0u8, d1u8);
- q1u8 = vcombine_u8(d2u8, d3u8);
- q0u8 = vrhaddq_u8(q0u8, q1u8);
-
- vst1_u8(dst, vget_low_u8(q0u8));
- dst += dst_stride;
- vst1_u8(dst, vget_high_u8(q0u8));
- dst += dst_stride;
- }
-    } else {  // avg4
-        for (; h > 0; h -= 2) {
- d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 0);
- src += src_stride;
- d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 1);
- src += src_stride;
- d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 0);
- d += dst_stride;
- d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 1);
- d += dst_stride;
-
- d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32),
- vreinterpret_u8_u32(d2u32));
-
- d0u32 = vreinterpret_u32_u8(d0u8);
- vst1_lane_u32((uint32_t *)dst, d0u32, 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, d0u32, 1);
- dst += dst_stride;
- }
- }
- return;
-}
--- a/vp9/common/arm/neon/vp9_avg_neon_asm.asm
+++ /dev/null
@@ -1,116 +1,0 @@
-;
-; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license
-; that can be found in the LICENSE file in the root of the source
-; tree. An additional intellectual property rights grant can be found
-; in the file PATENTS. All contributing project authors may
-; be found in the AUTHORS file in the root of the source tree.
-;
-
- EXPORT |vp9_convolve_avg_neon|
- ARM
- REQUIRE8
- PRESERVE8
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-|vp9_convolve_avg_neon| PROC
-    push                {r4-r6, lr}
- ldrd r4, r5, [sp, #32]
- mov r6, r2
-
- cmp r4, #32
- bgt avg64
- beq avg32
- cmp r4, #8
- bgt avg16
- beq avg8
- b avg4
-
-avg64
- sub lr, r1, #32
- sub r4, r3, #32
-avg64_h
- pld [r0, r1, lsl #1]
-    vld1.8              {q0-q1}, [r0]!
-    vld1.8              {q2-q3}, [r0], lr
- pld [r2, r3]
-    vld1.8              {q8-q9},   [r6@128]!
-    vld1.8              {q10-q11}, [r6@128], r4
- vrhadd.u8 q0, q0, q8
- vrhadd.u8 q1, q1, q9
- vrhadd.u8 q2, q2, q10
- vrhadd.u8 q3, q3, q11
-    vst1.8              {q0-q1}, [r2@128]!
-    vst1.8              {q2-q3}, [r2@128], r4
- subs r5, r5, #1
- bgt avg64_h
-    pop                 {r4-r6, pc}
-
-avg32
-    vld1.8              {q0-q1}, [r0], r1
-    vld1.8              {q2-q3}, [r0], r1
-    vld1.8              {q8-q9},   [r6@128], r3
-    vld1.8              {q10-q11}, [r6@128], r3
- pld [r0]
- vrhadd.u8 q0, q0, q8
- pld [r0, r1]
- vrhadd.u8 q1, q1, q9
- pld [r6]
- vrhadd.u8 q2, q2, q10
- pld [r6, r3]
- vrhadd.u8 q3, q3, q11
-    vst1.8              {q0-q1}, [r2@128], r3
-    vst1.8              {q2-q3}, [r2@128], r3
- subs r5, r5, #2
- bgt avg32
-    pop                 {r4-r6, pc}
-
-avg16
-    vld1.8              {q0}, [r0], r1
-    vld1.8              {q1}, [r0], r1
-    vld1.8              {q2}, [r6@128], r3
-    vld1.8              {q3}, [r6@128], r3
- pld [r0]
- pld [r0, r1]
- vrhadd.u8 q0, q0, q2
- pld [r6]
- pld [r6, r3]
- vrhadd.u8 q1, q1, q3
-    vst1.8              {q0}, [r2@128], r3
-    vst1.8              {q1}, [r2@128], r3
- subs r5, r5, #2
- bgt avg16
-    pop                 {r4-r6, pc}
-
-avg8
-    vld1.8              {d0}, [r0], r1
-    vld1.8              {d1}, [r0], r1
-    vld1.8              {d2}, [r6@64], r3
-    vld1.8              {d3}, [r6@64], r3
- pld [r0]
- pld [r0, r1]
- vrhadd.u8 q0, q0, q1
- pld [r6]
- pld [r6, r3]
-    vst1.8              {d0}, [r2@64], r3
-    vst1.8              {d1}, [r2@64], r3
- subs r5, r5, #2
- bgt avg8
-    pop                 {r4-r6, pc}
-
-avg4
-    vld1.32             {d0[0]}, [r0], r1
-    vld1.32             {d0[1]}, [r0], r1
-    vld1.32             {d2[0]}, [r6@32], r3
-    vld1.32             {d2[1]}, [r6@32], r3
- vrhadd.u8 d0, d0, d2
-    vst1.32             {d0[0]}, [r2@32], r3
-    vst1.32             {d0[1]}, [r2@32], r3
- subs r5, r5, #2
- bgt avg4
-    pop                 {r4-r6, pc}
- ENDP
-
- END
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_convolve_avg_neon.c
@@ -1,0 +1,145 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <arm_neon.h>
+
+void vp9_convolve_avg_neon(
+ const uint8_t *src, // r0
+ ptrdiff_t src_stride, // r1
+ uint8_t *dst, // r2
+ ptrdiff_t dst_stride, // r3
+ const int16_t *filter_x,
+ int filter_x_stride,
+ const int16_t *filter_y,
+ int filter_y_stride,
+ int w,
+        int h) {
+ uint8_t *d;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8;
+ uint32x2_t d0u32, d2u32;
+ uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8;
+ (void)filter_x; (void)filter_x_stride;
+ (void)filter_y; (void)filter_y_stride;
+
+ d = dst;
+    if (w > 32) {  // avg64
+        for (; h > 0; h -= 1) {
+ q0u8 = vld1q_u8(src);
+ q1u8 = vld1q_u8(src + 16);
+ q2u8 = vld1q_u8(src + 32);
+ q3u8 = vld1q_u8(src + 48);
+ src += src_stride;
+ q8u8 = vld1q_u8(d);
+ q9u8 = vld1q_u8(d + 16);
+ q10u8 = vld1q_u8(d + 32);
+ q11u8 = vld1q_u8(d + 48);
+ d += dst_stride;
+
+ q0u8 = vrhaddq_u8(q0u8, q8u8);
+ q1u8 = vrhaddq_u8(q1u8, q9u8);
+ q2u8 = vrhaddq_u8(q2u8, q10u8);
+ q3u8 = vrhaddq_u8(q3u8, q11u8);
+
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q1u8);
+ vst1q_u8(dst + 32, q2u8);
+ vst1q_u8(dst + 48, q3u8);
+ dst += dst_stride;
+ }
+    } else if (w == 32) {  // avg32
+        for (; h > 0; h -= 2) {
+ q0u8 = vld1q_u8(src);
+ q1u8 = vld1q_u8(src + 16);
+ src += src_stride;
+ q2u8 = vld1q_u8(src);
+ q3u8 = vld1q_u8(src + 16);
+ src += src_stride;
+ q8u8 = vld1q_u8(d);
+ q9u8 = vld1q_u8(d + 16);
+ d += dst_stride;
+ q10u8 = vld1q_u8(d);
+ q11u8 = vld1q_u8(d + 16);
+ d += dst_stride;
+
+ q0u8 = vrhaddq_u8(q0u8, q8u8);
+ q1u8 = vrhaddq_u8(q1u8, q9u8);
+ q2u8 = vrhaddq_u8(q2u8, q10u8);
+ q3u8 = vrhaddq_u8(q3u8, q11u8);
+
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q1u8);
+ dst += dst_stride;
+ vst1q_u8(dst, q2u8);
+ vst1q_u8(dst + 16, q3u8);
+ dst += dst_stride;
+ }
+    } else if (w > 8) {  // avg16
+        for (; h > 0; h -= 2) {
+ q0u8 = vld1q_u8(src);
+ src += src_stride;
+ q1u8 = vld1q_u8(src);
+ src += src_stride;
+ q2u8 = vld1q_u8(d);
+ d += dst_stride;
+ q3u8 = vld1q_u8(d);
+ d += dst_stride;
+
+ q0u8 = vrhaddq_u8(q0u8, q2u8);
+ q1u8 = vrhaddq_u8(q1u8, q3u8);
+
+ vst1q_u8(dst, q0u8);
+ dst += dst_stride;
+ vst1q_u8(dst, q1u8);
+ dst += dst_stride;
+ }
+    } else if (w == 8) {  // avg8
+        for (; h > 0; h -= 2) {
+ d0u8 = vld1_u8(src);
+ src += src_stride;
+ d1u8 = vld1_u8(src);
+ src += src_stride;
+ d2u8 = vld1_u8(d);
+ d += dst_stride;
+ d3u8 = vld1_u8(d);
+ d += dst_stride;
+
+ q0u8 = vcombine_u8(d0u8, d1u8);
+ q1u8 = vcombine_u8(d2u8, d3u8);
+ q0u8 = vrhaddq_u8(q0u8, q1u8);
+
+ vst1_u8(dst, vget_low_u8(q0u8));
+ dst += dst_stride;
+ vst1_u8(dst, vget_high_u8(q0u8));
+ dst += dst_stride;
+ }
+    } else {  // avg4
+        for (; h > 0; h -= 2) {
+ d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 0);
+ src += src_stride;
+ d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 1);
+ src += src_stride;
+ d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 0);
+ d += dst_stride;
+ d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 1);
+ d += dst_stride;
+
+ d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32),
+ vreinterpret_u8_u32(d2u32));
+
+ d0u32 = vreinterpret_u32_u8(d0u8);
+ vst1_lane_u32((uint32_t *)dst, d0u32, 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, d0u32, 1);
+ dst += dst_stride;
+ }
+ }
+ return;
+}
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_convolve_avg_neon_asm.asm
@@ -1,0 +1,116 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_convolve_avg_neon|
+ ARM
+ REQUIRE8
+ PRESERVE8
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+|vp9_convolve_avg_neon| PROC
+    push                {r4-r6, lr}
+ ldrd r4, r5, [sp, #32]
+ mov r6, r2
+
+ cmp r4, #32
+ bgt avg64
+ beq avg32
+ cmp r4, #8
+ bgt avg16
+ beq avg8
+ b avg4
+
+avg64
+ sub lr, r1, #32
+ sub r4, r3, #32
+avg64_h
+ pld [r0, r1, lsl #1]
+    vld1.8              {q0-q1}, [r0]!
+    vld1.8              {q2-q3}, [r0], lr
+ pld [r2, r3]
+    vld1.8              {q8-q9},   [r6@128]!
+    vld1.8              {q10-q11}, [r6@128], r4
+ vrhadd.u8 q0, q0, q8
+ vrhadd.u8 q1, q1, q9
+ vrhadd.u8 q2, q2, q10
+ vrhadd.u8 q3, q3, q11
+    vst1.8              {q0-q1}, [r2@128]!
+    vst1.8              {q2-q3}, [r2@128], r4
+ subs r5, r5, #1
+ bgt avg64_h
+    pop                 {r4-r6, pc}
+
+avg32
+    vld1.8              {q0-q1}, [r0], r1
+    vld1.8              {q2-q3}, [r0], r1
+    vld1.8              {q8-q9},   [r6@128], r3
+    vld1.8              {q10-q11}, [r6@128], r3
+ pld [r0]
+ vrhadd.u8 q0, q0, q8
+ pld [r0, r1]
+ vrhadd.u8 q1, q1, q9
+ pld [r6]
+ vrhadd.u8 q2, q2, q10
+ pld [r6, r3]
+ vrhadd.u8 q3, q3, q11
+    vst1.8              {q0-q1}, [r2@128], r3
+    vst1.8              {q2-q3}, [r2@128], r3
+ subs r5, r5, #2
+ bgt avg32
+    pop                 {r4-r6, pc}
+
+avg16
+    vld1.8              {q0}, [r0], r1
+    vld1.8              {q1}, [r0], r1
+    vld1.8              {q2}, [r6@128], r3
+    vld1.8              {q3}, [r6@128], r3
+ pld [r0]
+ pld [r0, r1]
+ vrhadd.u8 q0, q0, q2
+ pld [r6]
+ pld [r6, r3]
+ vrhadd.u8 q1, q1, q3
+    vst1.8              {q0}, [r2@128], r3
+    vst1.8              {q1}, [r2@128], r3
+ subs r5, r5, #2
+ bgt avg16
+    pop                 {r4-r6, pc}
+
+avg8
+    vld1.8              {d0}, [r0], r1
+    vld1.8              {d1}, [r0], r1
+    vld1.8              {d2}, [r6@64], r3
+    vld1.8              {d3}, [r6@64], r3
+ pld [r0]
+ pld [r0, r1]
+ vrhadd.u8 q0, q0, q1
+ pld [r6]
+ pld [r6, r3]
+    vst1.8              {d0}, [r2@64], r3
+    vst1.8              {d1}, [r2@64], r3
+ subs r5, r5, #2
+ bgt avg8
+    pop                 {r4-r6, pc}
+
+avg4
+    vld1.32             {d0[0]}, [r0], r1
+    vld1.32             {d0[1]}, [r0], r1
+    vld1.32             {d2[0]}, [r6@32], r3
+    vld1.32             {d2[1]}, [r6@32], r3
+ vrhadd.u8 d0, d0, d2
+    vst1.32             {d0[0]}, [r2@32], r3
+    vst1.32             {d0[1]}, [r2@32], r3
+ subs r5, r5, #2
+ bgt avg4
+    pop                 {r4-r6, pc}
+ ENDP
+
+ END
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -156,9 +156,9 @@
# neon with assembly and intrinsics implementations. If both are available
# prefer assembly.
ifeq ($(HAVE_NEON_ASM), yes)
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_avg_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon_asm$(ASM)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_avg_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct16x16_1_add_neon_asm$(ASM)
@@ -174,9 +174,9 @@
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_reconintra_neon_asm$(ASM)
else
ifeq ($(HAVE_NEON), yes)
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_avg_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_avg_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct16x16_1_add_neon.c
--