ref: 46d8660ce37b4d379325d5a76dea0208c8fa47f9
parent: 88d11f473c7020a5da63ff8cbc1e32a21dd27898
author: Johann <johannkoenig@google.com>
date: Tue Jun 27 08:44:54 EDT 2017
sad neon: rewrite 4x4 and add 4x8

The previous implementation of vpx_sad4x4_neon loaded 8 values per row
and discarded half of them. Load four unaligned rows at a time into a
quad register instead, and share the horizontal reduction between the
4x4 and the new 4x8 kernel.

BUG=webm:1425
Change-Id: Icb72a94e2557a4ee2db7091266ab58fd92f72158
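(Not part of the patch.) The rewrite leans on load_unaligned_u8q() from
vpx_dsp/arm/mem_neon.h, which packs four 4-byte rows into one quad
register. A minimal standalone sketch of such a helper, assuming a
memcpy-based lane load (the actual implementation in mem_neon.h may
differ):

    #include <string.h>
    #include <arm_neon.h>

    // Sketch: gather four (possibly unaligned) 4-byte rows into one
    // uint8x16_t. memcpy sidesteps unaligned-access issues; lane i of
    // the u32 view holds row i.
    static inline uint8x16_t load_unaligned_u8q(const uint8_t *buf,
                                                int stride) {
      uint32_t a;
      uint32x4_t a_u32 = vdupq_n_u32(0);
      memcpy(&a, buf, 4);
      a_u32 = vsetq_lane_u32(a, a_u32, 0);
      buf += stride;
      memcpy(&a, buf, 4);
      a_u32 = vsetq_lane_u32(a, a_u32, 1);
      buf += stride;
      memcpy(&a, buf, 4);
      a_u32 = vsetq_lane_u32(a, a_u32, 2);
      buf += stride;
      memcpy(&a, buf, 4);
      a_u32 = vsetq_lane_u32(a, a_u32, 3);
      return vreinterpretq_u8_u32(a_u32);
    }
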
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -649,6 +649,7 @@
SadMxNParam(16, 8, &vpx_sad16x8_neon),
SadMxNParam(8, 16, &vpx_sad8x16_neon),
SadMxNParam(8, 8, &vpx_sad8x8_neon),
+ SadMxNParam(4, 8, &vpx_sad4x8_neon),
SadMxNParam(4, 4, &vpx_sad4x4_neon),
};
INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
--- a/vpx_dsp/arm/sad_neon.c
+++ b/vpx_dsp/arm/sad_neon.c
@@ -13,6 +13,7 @@
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride,
unsigned char *ref_ptr, int ref_stride) {
@@ -45,32 +46,38 @@
return vget_lane_u32(d5, 0);
}
-unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride,
- unsigned char *ref_ptr, int ref_stride) {
- uint8x8_t d0, d8;
- uint16x8_t q12;
- uint32x2_t d1;
- uint64x1_t d3;
- int i;
+// TODO(johannkoenig): combine with avg_neon.h version.
+static INLINE uint32_t horizontal_add_16x8(const uint16x8_t vec_16x8) {
+ const uint32x4_t a = vpaddlq_u16(vec_16x8);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
- d0 = vld1_u8(src_ptr);
- src_ptr += src_stride;
- d8 = vld1_u8(ref_ptr);
- ref_ptr += ref_stride;
- q12 = vabdl_u8(d0, d8);
+uint32_t vpx_sad4x4_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride) {
+ const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+ uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(ref_u8));
+ abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
+ return horizontal_add_16x8(abs);
+}
- for (i = 0; i < 3; i++) {
- d0 = vld1_u8(src_ptr);
- src_ptr += src_stride;
- d8 = vld1_u8(ref_ptr);
- ref_ptr += ref_stride;
- q12 = vabal_u8(q12, d0, d8);
+uint32_t vpx_sad4x8_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride) {
+ int i;
+ uint16x8_t abs = vdupq_n_u16(0);
+ for (i = 0; i < 8; i += 4) {
+ const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+ src_ptr += 4 * src_stride;
+ ref_ptr += 4 * ref_stride;
+ abs = vabal_u8(abs, vget_low_u8(src_u8), vget_low_u8(ref_u8));
+ abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
}
- d1 = vpaddl_u16(vget_low_u16(q12));
- d3 = vpaddl_u32(d1);
-
- return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
+ return horizontal_add_16x8(abs);
}
unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride,
@@ -114,13 +121,6 @@
const uint32x4_t vec_l_hi =
vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
- const uint64x2_t b = vpaddlq_u32(a);
- const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
- vreinterpret_u32_u64(vget_high_u64(b)));
- return vget_lane_u32(c, 0);
-}
-static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) {
- const uint32x4_t a = vpaddlq_u16(vec_16x8);
const uint64x2_t b = vpaddlq_u32(a);
const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
vreinterpret_u32_u64(vget_high_u64(b)));
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -729,7 +729,7 @@
specialize qw/vpx_sad8x4 msa sse2/;
add_proto qw/unsigned int vpx_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x8 msa sse2/;
+specialize qw/vpx_sad4x8 neon msa sse2/;
add_proto qw/unsigned int vpx_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
specialize qw/vpx_sad4x4 neon msa sse2/;
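
(Reviewer note, not part of the patch.) The value every kernel in this
file computes is the plain sum of absolute differences; a scalar sketch
matching the prototypes above, useful for cross-checking the NEON paths
(libvpx's actual C fallback is generated from macros in vpx_dsp/sad.c,
so treat this only as an illustration):

    #include <stdint.h>
    #include <stdlib.h>

    // Scalar SAD over a width x height block; the NEON kernels must
    // return the same value for the same inputs.
    static unsigned int sad_ref(const uint8_t *src_ptr, int src_stride,
                                const uint8_t *ref_ptr, int ref_stride,
                                int width, int height) {
      unsigned int sad = 0;
      int r, c;
      for (r = 0; r < height; ++r) {
        for (c = 0; c < width; ++c) sad += abs(src_ptr[c] - ref_ptr[c]);
        src_ptr += src_stride;
        ref_ptr += ref_stride;
      }
      return sad;
    }

For example, sad_ref(src, src_stride, ref, ref_stride, 4, 8) should
agree with vpx_sad4x8_neon(src, src_stride, ref, ref_stride).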