ref: a1fdfbb174487e5efb76e6e77119d2e50840086e
dir: /vpx_dsp/arm/vpx_convolve8_neon.h/
/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_
#define VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

static INLINE void load_u8_8x4(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
}

static INLINE void load_u8_8x8(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3,
                               uint8x8_t *const s4, uint8x8_t *const s5,
                               uint8x8_t *const s6, uint8x8_t *const s7) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
  s += p;
  *s4 = vld1_u8(s);
  s += p;
  *s5 = vld1_u8(s);
  s += p;
  *s6 = vld1_u8(s);
  s += p;
  *s7 = vld1_u8(s);
}

static INLINE void load_u8_16x8(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2, uint8x16_t *const s3,
                                uint8x16_t *const s4, uint8x16_t *const s5,
                                uint8x16_t *const s6, uint8x16_t *const s7) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
  s += p;
  *s3 = vld1q_u8(s);
  s += p;
  *s4 = vld1q_u8(s);
  s += p;
  *s5 = vld1q_u8(s);
  s += p;
  *s6 = vld1q_u8(s);
  s += p;
  *s7 = vld1q_u8(s);
}
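/* Explanatory note (not part of the upstream header): the helpers below are
 * compiled only when the Armv8.2-A dot-product extension
 * (__ARM_FEATURE_DOTPROD) is available. vdotq_lane_s32 operates on signed
 * 8-bit inputs, so the unsigned pixels are first shifted into int8 range by
 * subtracting 128 (the 'range_limit' vector). Because
 *
 *   dot(s - 128, f) = dot(s, f) - 128 * sum(f)
 *
 * the caller is expected to seed the accumulator with a 'correction' equal
 * to 128 * sum(f), which restores the unshifted result. A plausible
 * caller-side setup (illustrative only; the exact code lives in the
 * corresponding .c files):
 *
 *   const int16x8_t filter_s16 = vld1q_s16(filter);    // the 8 taps
 *   const int8x8_t filters = vmovn_s16(filter_s16);    // taps fit in int8
 *   // 128 * sum(f), computed as sum(f << 7)
 *   const int32x4_t correction =
 *       vdupq_n_s32(vaddlvq_s16(vshlq_n_s16(filter_s16, 7)));
 *   const uint8x16_t range_limit = vdupq_n_u8(128);
 */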
#if defined(__aarch64__) && defined(__ARM_FEATURE_DOTPROD) && \
    (__ARM_FEATURE_DOTPROD == 1)

static INLINE int32x4_t convolve8_4_dot_partial(const int8x16_t samples_lo,
                                                const int8x16_t samples_hi,
                                                const int32x4_t correction,
                                                const int8x8_t filters) {
  /* Sample range-clamping and permutation are performed by the caller. */
  int32x4_t sum;

  /* Accumulate dot product into 'correction' to account for range clamp. */
  sum = vdotq_lane_s32(correction, samples_lo, filters, 0);
  sum = vdotq_lane_s32(sum, samples_hi, filters, 1);

  /* Narrowing and packing is performed by the caller. */
  return sum;
}

static INLINE int32x4_t convolve8_4_dot(uint8x16_t samples,
                                        const int8x8_t filters,
                                        const int32x4_t correction,
                                        const uint8x16_t range_limit,
                                        const uint8x16x2_t permute_tbl) {
  int8x16_t clamped_samples, permuted_samples[2];
  int32x4_t sum;

  /* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
  clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));

  /* Permute samples ready for dot product. */
  /* { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } */
  permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]);
  /* { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } */
  permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]);

  /* Accumulate dot product into 'correction' to account for range clamp. */
  sum = vdotq_lane_s32(correction, permuted_samples[0], filters, 0);
  sum = vdotq_lane_s32(sum, permuted_samples[1], filters, 1);

  /* Narrowing and packing is performed by the caller. */
  return sum;
}

static INLINE uint8x8_t convolve8_8_dot_partial(const int8x16_t samples0_lo,
                                                const int8x16_t samples0_hi,
                                                const int8x16_t samples1_lo,
                                                const int8x16_t samples1_hi,
                                                const int32x4_t correction,
                                                const int8x8_t filters) {
  /* Sample range-clamping and permutation are performed by the caller. */
  int32x4_t sum0, sum1;
  int16x8_t sum;

  /* Accumulate dot product into 'correction' to account for range clamp. */
  /* First 4 output values. */
  sum0 = vdotq_lane_s32(correction, samples0_lo, filters, 0);
  sum0 = vdotq_lane_s32(sum0, samples0_hi, filters, 1);
  /* Second 4 output values. */
  sum1 = vdotq_lane_s32(correction, samples1_lo, filters, 0);
  sum1 = vdotq_lane_s32(sum1, samples1_hi, filters, 1);

  /* Narrow and re-pack. */
  sum = vcombine_s16(vqmovn_s32(sum0), vqmovn_s32(sum1));
  return vqrshrun_n_s16(sum, 7);
}

static INLINE uint8x8_t convolve8_8_dot(uint8x16_t samples,
                                        const int8x8_t filters,
                                        const int32x4_t correction,
                                        const uint8x16_t range_limit,
                                        const uint8x16x3_t permute_tbl) {
  int8x16_t clamped_samples, permuted_samples[3];
  int32x4_t sum0, sum1;
  int16x8_t sum;

  /* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
  clamped_samples = vreinterpretq_s8_u8(vsubq_u8(samples, range_limit));

  /* Permute samples ready for dot product. */
  /* { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 } */
  permuted_samples[0] = vqtbl1q_s8(clamped_samples, permute_tbl.val[0]);
  /* { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 } */
  permuted_samples[1] = vqtbl1q_s8(clamped_samples, permute_tbl.val[1]);
  /* { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 } */
  permuted_samples[2] = vqtbl1q_s8(clamped_samples, permute_tbl.val[2]);

  /* Accumulate dot product into 'correction' to account for range clamp. */
  /* First 4 output values. */
  sum0 = vdotq_lane_s32(correction, permuted_samples[0], filters, 0);
  sum0 = vdotq_lane_s32(sum0, permuted_samples[1], filters, 1);
  /* Second 4 output values. */
  sum1 = vdotq_lane_s32(correction, permuted_samples[1], filters, 0);
  sum1 = vdotq_lane_s32(sum1, permuted_samples[2], filters, 1);

  /* Narrow and re-pack. */
  sum = vcombine_s16(vqmovn_s32(sum0), vqmovn_s32(sum1));
  return vqrshrun_n_s16(sum, 7);
}

#endif
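/* Explanatory note (not in the upstream header): in the generic helpers
 * below, the centre taps 3 and 4 carry the largest coefficients of the
 * 8-tap filters, so their products are accumulated with saturating adds
 * (vqadd) rather than folded into the vmla chain, guarding the int16
 * intermediate sums against overflow.
 */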
static INLINE int16x4_t convolve8_4(const int16x4_t s0, const int16x4_t s1,
                                    const int16x4_t s2, const int16x4_t s3,
                                    const int16x4_t s4, const int16x4_t s5,
                                    const int16x4_t s6, const int16x4_t s7,
                                    const int16x8_t filters,
                                    const int16x4_t filter3,
                                    const int16x4_t filter4) {
  const int16x4_t filters_lo = vget_low_s16(filters);
  const int16x4_t filters_hi = vget_high_s16(filters);
  int16x4_t sum;

  sum = vmul_lane_s16(s0, filters_lo, 0);
  sum = vmla_lane_s16(sum, s1, filters_lo, 1);
  sum = vmla_lane_s16(sum, s2, filters_lo, 2);
  sum = vmla_lane_s16(sum, s5, filters_hi, 1);
  sum = vmla_lane_s16(sum, s6, filters_hi, 2);
  sum = vmla_lane_s16(sum, s7, filters_hi, 3);
  sum = vqadd_s16(sum, vmul_s16(s3, filter3));
  sum = vqadd_s16(sum, vmul_s16(s4, filter4));
  return sum;
}

static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1,
                                    const int16x8_t s2, const int16x8_t s3,
                                    const int16x8_t s4, const int16x8_t s5,
                                    const int16x8_t s6, const int16x8_t s7,
                                    const int16x8_t filters,
                                    const int16x8_t filter3,
                                    const int16x8_t filter4) {
  const int16x4_t filters_lo = vget_low_s16(filters);
  const int16x4_t filters_hi = vget_high_s16(filters);
  int16x8_t sum;

  sum = vmulq_lane_s16(s0, filters_lo, 0);
  sum = vmlaq_lane_s16(sum, s1, filters_lo, 1);
  sum = vmlaq_lane_s16(sum, s2, filters_lo, 2);
  sum = vmlaq_lane_s16(sum, s5, filters_hi, 1);
  sum = vmlaq_lane_s16(sum, s6, filters_hi, 2);
  sum = vmlaq_lane_s16(sum, s7, filters_hi, 3);
  sum = vqaddq_s16(sum, vmulq_s16(s3, filter3));
  sum = vqaddq_s16(sum, vmulq_s16(s4, filter4));
  return vqrshrun_n_s16(sum, 7);
}

static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s,
                                       const int16x8_t filters) {
  const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
  const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
  int16x8_t ss[8];

  ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0]));
  ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1]));
  ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2]));
  ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3]));
  ss[4] = vreinterpretq_s16_u16(vmovl_u8(s[4]));
  ss[5] = vreinterpretq_s16_u16(vmovl_u8(s[5]));
  ss[6] = vreinterpretq_s16_u16(vmovl_u8(s[6]));
  ss[7] = vreinterpretq_s16_u16(vmovl_u8(s[7]));

  return convolve8_8(ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7],
                     filters, filter3, filter4);
}

#endif  // VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_
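/* A minimal usage sketch (hypothetical names; the real callers live in
 * vpx_convolve8_neon.c and related files): produce one 8-pixel-wide output
 * row of an 8-tap vertical filter, whose taps span rows y-3 .. y+4.
 *
 *   uint8x8_t s[8];
 *   load_u8_8x8(src - 3 * src_stride, src_stride, &s[0], &s[1], &s[2],
 *               &s[3], &s[4], &s[5], &s[6], &s[7]);
 *   vst1_u8(dst, scale_filter_8(s, vld1q_s16(filter)));
 */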