ref: 6bac3f80ee21b754e17d0f32ddc18d1bbe9dfc6a
parent: af3cab7b245fd8f60ed6a0699f6321ac19986515
author: Johann <johannkoenig@google.com>
date: Wed Jun 28 09:20:13 EDT 2017
sad neon: avg for 4x4 and 4x8

BUG=webm:1425

Change-Id: Ifc685a96cb34f7fd9243b4c674027480564b84fb
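
Each new kernel computes the SAD between src and the rounded average of
ref and second_pred. For reference, a scalar sketch of the same
computation (sad_avg is an illustrative name, not a libvpx symbol;
assumes <stdint.h> for uint8_t):

  static unsigned int sad_avg(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              const uint8_t *second_pred, int w, int h) {
    unsigned int sad = 0;
    int r, c;
    for (r = 0; r < h; ++r) {
      for (c = 0; c < w; ++c) {
        /* Rounding average, as vrhaddq_u8: (a + b + 1) >> 1. */
        const int avg = (ref[c] + second_pred[c] + 1) >> 1;
        const int diff = src[c] - avg;
        sad += (unsigned int)(diff < 0 ? -diff : diff);
      }
      src += src_stride;
      ref += ref_stride;
      second_pred += w; /* second_pred is a contiguous w*h block. */
    }
    return sad;
  }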
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -657,6 +657,12 @@
};
INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));

+const SadMxNAvgParam avg_neon_tests[] = {
+  SadMxNAvgParam(4, 8, &vpx_sad4x8_avg_neon),
+  SadMxNAvgParam(4, 4, &vpx_sad4x4_avg_neon),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADavgTest, ::testing::ValuesIn(avg_neon_tests));
+
const SadMxNx4Param x4d_neon_tests[] = {
  SadMxNx4Param(64, 64, &vpx_sad64x64x4d_neon),
  SadMxNx4Param(32, 32, &vpx_sad32x32x4d_neon),
--- a/vpx_dsp/arm/sad_neon.c
+++ b/vpx_dsp/arm/sad_neon.c
@@ -33,6 +33,18 @@
  return horizontal_add_16x8(abs);
}

+uint32_t vpx_sad4x4_avg_neon(const uint8_t *src_ptr, int src_stride,
+                             const uint8_t *ref_ptr, int ref_stride,
+                             const uint8_t *second_pred) {
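+  // load_unaligned_u8q gathers the four 4-byte rows into one 16-byte vector.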
+  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+  const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);
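+  // vrhaddq_u8 is a rounding average: (ref + second_pred + 1) >> 1.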
+  const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8);
+  uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(avg));
+  abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(avg));
+  return horizontal_add_16x8(abs);
+}
+
uint32_t vpx_sad4x8_neon(const uint8_t *src_ptr, int src_stride,
                         const uint8_t *ref_ptr, int ref_stride) {
  int i;
@@ -44,6 +56,26 @@
    ref_ptr += 4 * ref_stride;
    abs = vabal_u8(abs, vget_low_u8(src_u8), vget_low_u8(ref_u8));
    abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
+  }
+
+  return horizontal_add_16x8(abs);
+}
+
+uint32_t vpx_sad4x8_avg_neon(const uint8_t *src_ptr, int src_stride,
+                             const uint8_t *ref_ptr, int ref_stride,
+                             const uint8_t *second_pred) {
+  int i;
+  uint16x8_t abs = vdupq_n_u16(0);
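+  // Each iteration covers four rows of four pixels (one 16-byte load).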
+  for (i = 0; i < 8; i += 4) {
+    const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+    const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+    const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);
+    const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8);
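+    // second_pred is contiguous, so it advances 16 bytes per four rows.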
+    src_ptr += 4 * src_stride;
+    ref_ptr += 4 * ref_stride;
+    second_pred += 16;
+    abs = vabal_u8(abs, vget_low_u8(src_u8), vget_low_u8(avg));
+    abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(avg));
  }

  return horizontal_add_16x8(abs);
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -813,10 +813,10 @@
specialize qw/vpx_sad8x4_avg msa sse2/;

add_proto qw/unsigned int vpx_sad4x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x8_avg msa sse2/;
+specialize qw/vpx_sad4x8_avg neon msa sse2/;

add_proto qw/unsigned int vpx_sad4x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x4_avg msa sse2/;
+specialize qw/vpx_sad4x4_avg neon msa sse2/;

#
# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally