shithub: libvpx

Download patch

ref: 98165ec0744ebf7a786954691489face68e3ab03
parent: 5487b6067c5f17ac246079e65d4b7eaa0df97aa8
author: Scott LaVarnway <slavarnway@google.com>
date: Fri Aug 1 07:35:55 EDT 2014

Neon version of vp9_sub_pixel_variance8x8(), vp9_variance8x8(), and vp9_get8x8var().

On a Nexus 7, vpxenc (in realtime mode, speed -12)
reported a performance improvement of ~1.2%.

Change-Id: I8a66ac2a0f550b407caa27816833bdc563395102

--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -757,13 +757,17 @@
                       make_tuple(6, 6, subpel_avg_variance64x64_avx2)));
 #endif  // HAVE_AVX2
 #if HAVE_NEON
+const vp9_variance_fn_t variance8x8_neon = vp9_variance8x8_neon;
 const vp9_variance_fn_t variance16x16_neon = vp9_variance16x16_neon;
 const vp9_variance_fn_t variance32x32_neon = vp9_variance32x32_neon;
 INSTANTIATE_TEST_CASE_P(
     NEON, VP9VarianceTest,
-    ::testing::Values(make_tuple(4, 4, variance16x16_neon),
+    ::testing::Values(make_tuple(3, 3, variance8x8_neon),
+                      make_tuple(4, 4, variance16x16_neon),
                       make_tuple(5, 5, variance32x32_neon)));
 
+const vp9_subpixvariance_fn_t subpel_variance8x8_neon =
+    vp9_sub_pixel_variance8x8_neon;
 const vp9_subpixvariance_fn_t subpel_variance16x16_neon =
     vp9_sub_pixel_variance16x16_neon;
 const vp9_subpixvariance_fn_t subpel_variance32x32_neon =
@@ -770,7 +774,8 @@
     vp9_sub_pixel_variance32x32_neon;
 INSTANTIATE_TEST_CASE_P(
     NEON, VP9SubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 4, subpel_variance16x16_neon),
+    ::testing::Values(make_tuple(3, 3, subpel_variance8x8_neon),
+                      make_tuple(4, 4, subpel_variance16x16_neon),
                       make_tuple(5, 5, subpel_variance32x32_neon)));
 #endif  // HAVE_NEON
 #endif  // CONFIG_VP9_ENCODER
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -429,10 +429,10 @@
 specialize qw/vp9_variance8x16 mmx/, "$sse2_x86inc";
 
 add_proto qw/unsigned int vp9_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_variance8x8 mmx/, "$sse2_x86inc";
+specialize qw/vp9_variance8x8 mmx neon/, "$sse2_x86inc";
 
 add_proto qw/void vp9_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
-specialize qw/vp9_get8x8var mmx/, "$sse2_x86inc";
+specialize qw/vp9_get8x8var mmx neon/, "$sse2_x86inc";
 
 add_proto qw/void vp9_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
 specialize qw/vp9_get16x16var avx2 neon/, "$sse2_x86inc";
@@ -501,7 +501,7 @@
 specialize qw/vp9_sub_pixel_avg_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_sub_pixel_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_variance8x8 neon/, "$sse2_x86inc", "$ssse3_x86inc";
 
 add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
 specialize qw/vp9_sub_pixel_avg_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc";
--- a/vp9/encoder/arm/neon/vp9_variance_neon.c
+++ b/vp9/encoder/arm/neon/vp9_variance_neon.c
@@ -19,7 +19,9 @@
 
 #include "vp9/encoder/vp9_variance.h"
 
-enum { kAlign16 = 16 };
+enum { kWidth8 = 8 };
+enum { kHeight8 = 8 };
+enum { kHeight8PlusOne = 9 };
 enum { kWidth16 = 16 };
 enum { kHeight16 = 16 };
 enum { kHeight16PlusOne = 17 };
@@ -27,6 +29,7 @@
 enum { kHeight32 = 32 };
 enum { kHeight32PlusOne = 33 };
 enum { kPixelStepOne = 1 };
+enum { kAlign16 = 16 };
 
 static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
   const int32x4_t a = vpaddlq_s16(v_16x8);
@@ -73,6 +76,21 @@
   *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
 }
 
+void vp9_get8x8var_neon(const uint8_t *src_ptr, int source_stride,
+                        const uint8_t *ref_ptr, int ref_stride,
+                        unsigned int *sse, int *sum) {
+  variance_neon_w8(src_ptr, source_stride, ref_ptr, ref_stride, kWidth8,
+                   kHeight8, sse, sum);
+}
+
+unsigned int vp9_variance8x8_neon(const uint8_t *a, int a_stride,
+                                  const uint8_t *b, int b_stride,
+                                  unsigned int *sse) {
+  int sum;
+  variance_neon_w8(a, a_stride, b, b_stride, kWidth8, kHeight8, sse, &sum);
+  return *sse - (((int64_t)sum * sum) / (kWidth8 * kHeight8));
+}
+
 void vp9_get16x16var_neon(const uint8_t *src_ptr, int source_stride,
                           const uint8_t *ref_ptr, int ref_stride,
                           unsigned int *sse, int *sum) {
@@ -88,6 +106,29 @@
   return *sse - (((int64_t)sum * sum) / (kWidth16 * kHeight16));
 }
 
+static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
+                                      uint8_t *output_ptr,
+                                      unsigned int src_pixels_per_line,
+                                      int pixel_step,
+                                      unsigned int output_height,
+                                      unsigned int output_width,
+                                      const int16_t *vp9_filter) {
+  const uint8x8_t f0 = vmov_n_u8((uint8_t)vp9_filter[0]);
+  const uint8x8_t f1 = vmov_n_u8((uint8_t)vp9_filter[1]);
+  unsigned int i;
+  for (i = 0; i < output_height; ++i) {
+    const uint8x8_t src_0 = vld1_u8(&src_ptr[0]);
+    const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]);
+    const uint16x8_t a = vmull_u8(src_0, f0);
+    const uint16x8_t b = vmlal_u8(a, src_1, f1);
+    const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);
+    vst1_u8(&output_ptr[0], out);
+    // Next row...
+    src_ptr += src_pixels_per_line;
+    output_ptr += output_width;
+  }
+}
+
 static void var_filter_block2d_bil_w16(const uint8_t *src_ptr,
                                        uint8_t *output_ptr,
                                        unsigned int src_pixels_per_line,
@@ -114,6 +155,24 @@
     src_ptr += src_pixels_per_line;
     output_ptr += output_width;
   }
+}
+
+unsigned int vp9_sub_pixel_variance8x8_neon(const uint8_t *src,
+                                            int src_stride,
+                                            int xoffset,
+                                            int yoffset,
+                                            const uint8_t *dst,
+                                            int dst_stride,
+                                            unsigned int *sse) {
+  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight8 * kWidth8);
+  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight8PlusOne * kWidth8);
+
+  var_filter_block2d_bil_w8(src, fdata3, src_stride, kPixelStepOne,
+                            kHeight8PlusOne, kWidth8,
+                            BILINEAR_FILTERS_2TAP(xoffset));
+  var_filter_block2d_bil_w8(fdata3, temp2, kWidth8, kWidth8, kHeight8,
+                            kWidth8, BILINEAR_FILTERS_2TAP(yoffset));
+  return vp9_variance8x8_neon(temp2, kWidth8, dst, dst_stride, sse);
 }
 
 unsigned int vp9_sub_pixel_variance16x16_neon(const uint8_t *src,