shithub: libvpx

ref: f4fcfe3075cabc47b47db9df74720e24016ff200
parent: 90a7723f8c172ce130e5d7d824dfc639a4e56a43
author: Yunqing Wang <yunqingwang@google.com>
date: Wed May 22 05:07:30 EDT 2013

Optimize variance functions

Added SSE2 versions of the variance functions for super blocks.

Change-Id: Ibeaae8771ca21c99d41dd74067574a51e97b412d
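
The core of the change: instead of one hand-unrolled wrapper per block size, a single helper, variance_sse2(), tiles a w x h block with block_size x block_size kernel calls and accumulates the partial SSE and sum results; each wrapper then computes variance = SSE - sum^2/(w*h), with the division implemented as a shift by log2(w*h). A minimal plain-C sketch of that scheme (hypothetical names; the real kernels are the SSE2/MMX vp9_get*var routines in the diff below):

#include <stdint.h>

/* Scalar model of the new variance_sse2() helper: cover a w x h block
 * with block_size x block_size tiles, measure each tile with a
 * get_var-style kernel, and accumulate the partial SSE/sum values.
 * get_var_c is a hypothetical plain-C stand-in for the patch's
 * vp9_get8x8var_sse2 / vp9_get16x16var_sse2 kernels. */
static void get_var_c(const uint8_t *src, int src_stride,
                      const uint8_t *ref, int ref_stride,
                      int size, unsigned int *sse, int *sum) {
  int r, c;
  *sse = 0;
  *sum = 0;
  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) {
      const int diff = src[r * src_stride + c] - ref[r * ref_stride + c];
      *sum += diff;
      *sse += diff * diff;
    }
  }
}

static unsigned int variance_c(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               int w, int h, int block_size,
                               unsigned int *sse_out) {
  unsigned int sse = 0, sse0;
  int sum = 0, sum0, i, j;

  for (i = 0; i < h; i += block_size) {
    for (j = 0; j < w; j += block_size) {
      get_var_c(src + i * src_stride + j, src_stride,
                ref + i * ref_stride + j, ref_stride,
                block_size, &sse0, &sum0);
      sse += sse0;
      sum += sum0;
    }
  }
  *sse_out = sse;
  /* variance = SSE - sum^2 / (w*h); the wrappers in the patch implement
   * the division as a right shift by log2(w*h), e.g. >> 10 for 32x32. */
  return sse - (unsigned int)(((int64_t)sum * sum) / (w * h));
}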

--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -188,11 +188,11 @@
 #endif
 
 #if HAVE_SSE2
-const vp9_variance_fn_t variance4x4_wmt = vp9_variance4x4_wmt;
-const vp9_variance_fn_t variance8x8_wmt = vp9_variance8x8_wmt;
-const vp9_variance_fn_t variance8x16_wmt = vp9_variance8x16_wmt;
-const vp9_variance_fn_t variance16x8_wmt = vp9_variance16x8_wmt;
-const vp9_variance_fn_t variance16x16_wmt = vp9_variance16x16_wmt;
+const vp9_variance_fn_t variance4x4_wmt = vp9_variance4x4_sse2;
+const vp9_variance_fn_t variance8x8_wmt = vp9_variance8x8_sse2;
+const vp9_variance_fn_t variance8x16_wmt = vp9_variance8x16_sse2;
+const vp9_variance_fn_t variance16x8_wmt = vp9_variance16x8_sse2;
+const vp9_variance_fn_t variance16x16_wmt = vp9_variance16x16_sse2;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VP9VarianceTest,
     ::testing::Values(make_tuple(4, 4, variance4x4_wmt),
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -236,53 +236,43 @@
 [ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2
 
 prototype unsigned int vp9_variance32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance32x16
+specialize vp9_variance32x16 sse2
 
 prototype unsigned int vp9_variance16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance16x32
+specialize vp9_variance16x32 sse2
 
 prototype unsigned int vp9_variance64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance64x32
+specialize vp9_variance64x32 sse2
 
 prototype unsigned int vp9_variance32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance32x64
+specialize vp9_variance32x64 sse2
 
 prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance32x32
+specialize vp9_variance32x32 sse2
 
 prototype unsigned int vp9_variance64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance64x64
+specialize vp9_variance64x64 sse2
 
 prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_variance16x16 mmx sse2
-vp9_variance16x16_sse2=vp9_variance16x16_wmt
-vp9_variance16x16_mmx=vp9_variance16x16_mmx
 
 prototype unsigned int vp9_variance16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_variance16x8 mmx sse2
-vp9_variance16x8_sse2=vp9_variance16x8_wmt
-vp9_variance16x8_mmx=vp9_variance16x8_mmx
 
 prototype unsigned int vp9_variance8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_variance8x16 mmx sse2
-vp9_variance8x16_sse2=vp9_variance8x16_wmt
-vp9_variance8x16_mmx=vp9_variance8x16_mmx
 
 prototype unsigned int vp9_variance8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_variance8x8 mmx sse2
-vp9_variance8x8_sse2=vp9_variance8x8_wmt
-vp9_variance8x8_mmx=vp9_variance8x8_mmx
 
 prototype unsigned int vp9_variance8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance8x4
+specialize vp9_variance8x4 sse2
 
 prototype unsigned int vp9_variance4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
-specialize vp9_variance4x8
+specialize vp9_variance4x8 sse2
 
 prototype unsigned int vp9_variance4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_variance4x4 mmx sse2
-vp9_variance4x4_sse2=vp9_variance4x4_wmt
-vp9_variance4x4_mmx=vp9_variance4x4_mmx
 
 prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
 specialize vp9_sub_pixel_variance64x64 sse2
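
In vp9_rtcd_defs.sh, adding a token such as sse2 to a specialize line tells the RTCD generator to declare a vp9_*_sse2 prototype and point the run-time dispatch at it when the CPU reports SSE2; the deleted vp9_variance*_sse2=vp9_variance*_wmt assignments were only needed while the optimized symbols still carried the legacy _wmt suffix. Roughly, the generated header behaves like the following sketch (an assumption about the generator's output, not part of the patch):

/* Hedged sketch of the RTCD dispatch generated from "specialize
 * vp9_variance32x32 sse2"; names follow libvpx's generated headers. */
RTCD_EXTERN unsigned int (*vp9_variance32x32)(const uint8_t *src_ptr,
    int source_stride, const uint8_t *ref_ptr, int ref_stride,
    unsigned int *sse);

static void setup_rtcd_internal(void) {
  int flags = x86_simd_caps();               /* query CPUID once */
  vp9_variance32x32 = vp9_variance32x32_c;   /* generic C fallback */
  if (flags & HAS_SSE2)
    vp9_variance32x32 = vp9_variance32x32_sse2;  /* added by this patch */
}
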
--- a/vp9/encoder/x86/vp9_variance_sse2.c
+++ b/vp9/encoder/x86/vp9_variance_sse2.c
@@ -139,8 +139,38 @@
 
 DECLARE_ALIGNED(16, extern const short, vp9_bilinear_filters_mmx[16][8]);
 
-unsigned int vp9_variance4x4_wmt(
+typedef unsigned int (*get_var_sse2) (
   const unsigned char *src_ptr,
+  int source_stride,
+  const unsigned char *ref_ptr,
+  int recon_stride,
+  unsigned int *SSE,
+  int *Sum
+);
+
+static void variance_sse2(const unsigned char *src_ptr, int  source_stride,
+                        const unsigned char *ref_ptr, int  recon_stride,
+                        int  w, int  h, unsigned int *sse, int *sum,
+                        get_var_sse2 var_fn, int block_size) {
+  unsigned int sse0;
+  int sum0;
+  int i, j;
+
+  *sse = 0;
+  *sum = 0;
+
+  for (i = 0; i < h; i += block_size) {
+    for (j = 0; j < w; j += block_size) {
+      var_fn(src_ptr + source_stride * i + j, source_stride,
+             ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);
+      *sse += sse0;
+      *sum += sum0;
+    }
+  }
+}
+
+unsigned int vp9_variance4x4_sse2(
+  const unsigned char *src_ptr,
   int  source_stride,
   const unsigned char *ref_ptr,
   int  recon_stride,
@@ -148,13 +178,41 @@
   unsigned int var;
   int avg;
 
-  vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4,
+                  &var, &avg, vp9_get4x4var_mmx, 4);
   *sse = var;
   return (var - (((unsigned int)avg * avg) >> 4));
+}
 
+unsigned int vp9_variance8x4_sse2(const uint8_t *src_ptr,
+                                  int  source_stride,
+                                  const uint8_t *ref_ptr,
+                                  int  recon_stride,
+                                  unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 4,
+                  &var, &avg, vp9_get4x4var_mmx, 4);
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 5));
 }
 
-unsigned int vp9_variance8x8_wmt
+unsigned int vp9_variance4x8_sse2(const uint8_t *src_ptr,
+                                  int  source_stride,
+                                  const uint8_t *ref_ptr,
+                                  int  recon_stride,
+                                  unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 8,
+                  &var, &avg, vp9_get4x4var_mmx, 4);
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance8x8_sse2
 (
   const unsigned char *src_ptr,
   int  source_stride,
@@ -164,14 +222,13 @@
   unsigned int var;
   int avg;
 
-  vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8,
+                  &var, &avg, vp9_get8x8var_sse2, 8);
   *sse = var;
   return (var - (((unsigned int)avg * avg) >> 6));
-
 }
 
-
-unsigned int vp9_variance16x16_wmt
+unsigned int vp9_variance16x8_sse2
 (
   const unsigned char *src_ptr,
   int  source_stride,
@@ -178,32 +235,32 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-  unsigned int sse0;
-  int sum0;
+  unsigned int var;
+  int avg;
 
-
-  vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
-  *sse = sse0;
-  return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8,
+                  &var, &avg, vp9_get8x8var_sse2, 8);
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 7));
 }
 
-unsigned int vp9_mse16x16_wmt(
+unsigned int vp9_variance8x16_sse2
+(
   const unsigned char *src_ptr,
   int  source_stride,
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
+  unsigned int var;
+  int avg;
 
-  unsigned int sse0;
-  int sum0;
-  vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
-  *sse = sse0;
-  return sse0;
-
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16,
+                &var, &avg, vp9_get8x8var_sse2, 8);
+  *sse = var;
+  return (var - (((unsigned int)avg * avg) >> 7));
 }
 
-
-unsigned int vp9_variance16x8_wmt
+unsigned int vp9_variance16x16_sse2
 (
   const unsigned char *src_ptr,
   int  source_stride,
@@ -210,37 +267,112 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-  unsigned int sse0, sse1, var;
-  int sum0, sum1, avg;
+  unsigned int var;
+  int avg;
 
-  vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
-  vp9_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-
-  var = sse0 + sse1;
-  avg = sum0 + sum1;
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16,
+                &var, &avg, vp9_get16x16var_sse2, 16);
   *sse = var;
-  return (var - (((unsigned int)avg * avg) >> 7));
-
+  return (var - (((unsigned int)avg * avg) >> 8));
 }
 
-unsigned int vp9_variance8x16_wmt
-(
+unsigned int vp9_mse16x16_wmt(
   const unsigned char *src_ptr,
   int  source_stride,
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-  unsigned int sse0, sse1, var;
-  int sum0, sum1, avg;
 
-  vp9_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
-  vp9_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
+  unsigned int sse0;
+  int sum0;
+  vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
+                       &sum0);
+  *sse = sse0;
+  return sse0;
+}
 
-  var = sse0 + sse1;
-  avg = sum0 + sum1;
+unsigned int vp9_variance32x32_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32,
+                &var, &avg, vp9_get16x16var_sse2, 16);
   *sse = var;
-  return (var - (((unsigned int)avg * avg) >> 7));
+  return (var - (((int64_t)avg * avg) >> 10));
+}
 
+unsigned int vp9_variance32x16_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16,
+                &var, &avg, vp9_get16x16var_sse2, 16);
+  *sse = var;
+  return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance16x32_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32,
+                &var, &avg, vp9_get16x16var_sse2, 16);
+  *sse = var;
+  return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance64x64_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64,
+                &var, &avg, vp9_get16x16var_sse2, 16);
+  *sse = var;
+  return (var - (((int64_t)avg * avg) >> 12));
+}
+
+unsigned int vp9_variance64x32_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32,
+                &var, &avg, vp9_get16x16var_sse2, 16);
+  *sse = var;
+  return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_variance32x64_sse2(const uint8_t *src_ptr,
+                                    int  source_stride,
+                                    const uint8_t *ref_ptr,
+                                    int  recon_stride,
+                                    unsigned int *sse) {
+  unsigned int var;
+  int avg;
+
+  variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64,
+                &var, &avg, vp9_get16x16var_sse2, 16);
+  *sse = var;
+  return (var - (((int64_t)avg * avg) >> 11));
 }
 
 unsigned int vp9_sub_pixel_variance4x4_wmt
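
One detail worth noting in the wrappers above: the 4x4 through 16x16 sizes keep the old (unsigned int)avg * avg square, while 32x16 and larger cast to int64_t first. That is an overflow guard, not a style change: the worst-case sum of differences for an N-pixel block is 255*N, and its square exceeds 32 bits once N passes 256 pixels. A small self-contained check (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int64_t max_sum_16x16 = 256 * 255;   /* 65280: square just fits in 32 bits */
  const int64_t max_sum_32x16 = 512 * 255;   /* 130560: square needs 64 bits */
  const int64_t max_sum_64x64 = 4096 * 255;  /* 1044480 */

  assert(max_sum_16x16 * max_sum_16x16 <= (int64_t)UINT32_MAX);
  assert(max_sum_32x16 * max_sum_32x16 >  (int64_t)UINT32_MAX);
  assert(max_sum_64x64 * max_sum_64x64 >  (int64_t)UINT32_MAX);
  printf("64x64 worst-case sum^2 = %lld\n",
         (long long)(max_sum_64x64 * max_sum_64x64));  /* 1090938470400 */
  return 0;
}
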
--- a/vp9/encoder/x86/vp9_variance_ssse3.c
+++ b/vp9/encoder/x86/vp9_variance_ssse3.c
@@ -15,15 +15,6 @@
 
 #define HALFNDX 8
 
-extern unsigned int vp9_get16x16var_sse2
-(
-  const unsigned char *src_ptr,
-  int source_stride,
-  const unsigned char *ref_ptr,
-  int recon_stride,
-  unsigned int *SSE,
-  int *Sum
-);
 extern void vp9_half_horiz_vert_variance16x_h_sse2
 (
   const unsigned char *ref_ptr,