shithub: libvpx

ref: 420e8d6d039c2224e00c13aba7f8908b68868359
parent: 908fbabe4ed6dc057a9d16ef5e9d575d57bd76be
parent: 837cea40fc683330f02845448f50a0360d58769d
author: James Zern <jzern@google.com>
date: Fri Nov 6 19:57:06 EST 2015

Merge changes I8c83b86d,Ic53b2ed5,I4acc8a84

* changes:
  variance_test: create fn pointers w/'&' ref
  sixtap_predict_test: create fn pointers w/'&' ref
  sad_test: create fn pointers w/'&' ref

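The three changes merged here apply one mechanical pattern: instead of declaring a single-use function-pointer constant per SIMD/C variant and passing that constant to make_tuple, the test tables take the address of the vpx_/vp8_ function directly. A minimal sketch of the before/after, using names taken from the sad_test.cc hunk below:

  // before: one intermediate constant per tested function
  const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
  make_tuple(16, 16, sad16x16_c, -1);

  // after: take the function's address in the tuple itself
  make_tuple(16, 16, &vpx_sad16x16_c, -1);

The instantiated test cases are unchanged; the diff only drops the intermediate constants and inlines the '&' references.
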
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -484,260 +484,176 @@
 
 //------------------------------------------------------------------------------
 // C functions
-const SadMxNFunc sad64x64_c = vpx_sad64x64_c;
-const SadMxNFunc sad64x32_c = vpx_sad64x32_c;
-const SadMxNFunc sad32x64_c = vpx_sad32x64_c;
-const SadMxNFunc sad32x32_c = vpx_sad32x32_c;
-const SadMxNFunc sad32x16_c = vpx_sad32x16_c;
-const SadMxNFunc sad16x32_c = vpx_sad16x32_c;
-const SadMxNFunc sad16x16_c = vpx_sad16x16_c;
-const SadMxNFunc sad16x8_c = vpx_sad16x8_c;
-const SadMxNFunc sad8x16_c = vpx_sad8x16_c;
-const SadMxNFunc sad8x8_c = vpx_sad8x8_c;
-const SadMxNFunc sad8x4_c = vpx_sad8x4_c;
-const SadMxNFunc sad4x8_c = vpx_sad4x8_c;
-const SadMxNFunc sad4x4_c = vpx_sad4x4_c;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNFunc highbd_sad64x64_c = vpx_highbd_sad64x64_c;
-const SadMxNFunc highbd_sad64x32_c = vpx_highbd_sad64x32_c;
-const SadMxNFunc highbd_sad32x64_c = vpx_highbd_sad32x64_c;
-const SadMxNFunc highbd_sad32x32_c = vpx_highbd_sad32x32_c;
-const SadMxNFunc highbd_sad32x16_c = vpx_highbd_sad32x16_c;
-const SadMxNFunc highbd_sad16x32_c = vpx_highbd_sad16x32_c;
-const SadMxNFunc highbd_sad16x16_c = vpx_highbd_sad16x16_c;
-const SadMxNFunc highbd_sad16x8_c = vpx_highbd_sad16x8_c;
-const SadMxNFunc highbd_sad8x16_c = vpx_highbd_sad8x16_c;
-const SadMxNFunc highbd_sad8x8_c = vpx_highbd_sad8x8_c;
-const SadMxNFunc highbd_sad8x4_c = vpx_highbd_sad8x4_c;
-const SadMxNFunc highbd_sad4x8_c = vpx_highbd_sad4x8_c;
-const SadMxNFunc highbd_sad4x4_c = vpx_highbd_sad4x4_c;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNParam c_tests[] = {
-  make_tuple(64, 64, sad64x64_c, -1),
-  make_tuple(64, 32, sad64x32_c, -1),
-  make_tuple(32, 64, sad32x64_c, -1),
-  make_tuple(32, 32, sad32x32_c, -1),
-  make_tuple(32, 16, sad32x16_c, -1),
-  make_tuple(16, 32, sad16x32_c, -1),
-  make_tuple(16, 16, sad16x16_c, -1),
-  make_tuple(16, 8, sad16x8_c, -1),
-  make_tuple(8, 16, sad8x16_c, -1),
-  make_tuple(8, 8, sad8x8_c, -1),
-  make_tuple(8, 4, sad8x4_c, -1),
-  make_tuple(4, 8, sad4x8_c, -1),
-  make_tuple(4, 4, sad4x4_c, -1),
+  make_tuple(64, 64, &vpx_sad64x64_c, -1),
+  make_tuple(64, 32, &vpx_sad64x32_c, -1),
+  make_tuple(32, 64, &vpx_sad32x64_c, -1),
+  make_tuple(32, 32, &vpx_sad32x32_c, -1),
+  make_tuple(32, 16, &vpx_sad32x16_c, -1),
+  make_tuple(16, 32, &vpx_sad16x32_c, -1),
+  make_tuple(16, 16, &vpx_sad16x16_c, -1),
+  make_tuple(16, 8, &vpx_sad16x8_c, -1),
+  make_tuple(8, 16, &vpx_sad8x16_c, -1),
+  make_tuple(8, 8, &vpx_sad8x8_c, -1),
+  make_tuple(8, 4, &vpx_sad8x4_c, -1),
+  make_tuple(4, 8, &vpx_sad4x8_c, -1),
+  make_tuple(4, 4, &vpx_sad4x4_c, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64_c, 8),
-  make_tuple(64, 32, highbd_sad64x32_c, 8),
-  make_tuple(32, 64, highbd_sad32x64_c, 8),
-  make_tuple(32, 32, highbd_sad32x32_c, 8),
-  make_tuple(32, 16, highbd_sad32x16_c, 8),
-  make_tuple(16, 32, highbd_sad16x32_c, 8),
-  make_tuple(16, 16, highbd_sad16x16_c, 8),
-  make_tuple(16, 8, highbd_sad16x8_c, 8),
-  make_tuple(8, 16, highbd_sad8x16_c, 8),
-  make_tuple(8, 8, highbd_sad8x8_c, 8),
-  make_tuple(8, 4, highbd_sad8x4_c, 8),
-  make_tuple(4, 8, highbd_sad4x8_c, 8),
-  make_tuple(4, 4, highbd_sad4x4_c, 8),
-  make_tuple(64, 64, highbd_sad64x64_c, 10),
-  make_tuple(64, 32, highbd_sad64x32_c, 10),
-  make_tuple(32, 64, highbd_sad32x64_c, 10),
-  make_tuple(32, 32, highbd_sad32x32_c, 10),
-  make_tuple(32, 16, highbd_sad32x16_c, 10),
-  make_tuple(16, 32, highbd_sad16x32_c, 10),
-  make_tuple(16, 16, highbd_sad16x16_c, 10),
-  make_tuple(16, 8, highbd_sad16x8_c, 10),
-  make_tuple(8, 16, highbd_sad8x16_c, 10),
-  make_tuple(8, 8, highbd_sad8x8_c, 10),
-  make_tuple(8, 4, highbd_sad8x4_c, 10),
-  make_tuple(4, 8, highbd_sad4x8_c, 10),
-  make_tuple(4, 4, highbd_sad4x4_c, 10),
-  make_tuple(64, 64, highbd_sad64x64_c, 12),
-  make_tuple(64, 32, highbd_sad64x32_c, 12),
-  make_tuple(32, 64, highbd_sad32x64_c, 12),
-  make_tuple(32, 32, highbd_sad32x32_c, 12),
-  make_tuple(32, 16, highbd_sad32x16_c, 12),
-  make_tuple(16, 32, highbd_sad16x32_c, 12),
-  make_tuple(16, 16, highbd_sad16x16_c, 12),
-  make_tuple(16, 8, highbd_sad16x8_c, 12),
-  make_tuple(8, 16, highbd_sad8x16_c, 12),
-  make_tuple(8, 8, highbd_sad8x8_c, 12),
-  make_tuple(8, 4, highbd_sad8x4_c, 12),
-  make_tuple(4, 8, highbd_sad4x8_c, 12),
-  make_tuple(4, 4, highbd_sad4x4_c, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 8),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 8),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 10),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 10),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_c, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_c, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_c, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_c, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_c, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_c, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_c, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_c, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_c, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_c, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_c, 12),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_c, 12),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_c, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
 
-const SadMxNAvgFunc sad64x64_avg_c = vpx_sad64x64_avg_c;
-const SadMxNAvgFunc sad64x32_avg_c = vpx_sad64x32_avg_c;
-const SadMxNAvgFunc sad32x64_avg_c = vpx_sad32x64_avg_c;
-const SadMxNAvgFunc sad32x32_avg_c = vpx_sad32x32_avg_c;
-const SadMxNAvgFunc sad32x16_avg_c = vpx_sad32x16_avg_c;
-const SadMxNAvgFunc sad16x32_avg_c = vpx_sad16x32_avg_c;
-const SadMxNAvgFunc sad16x16_avg_c = vpx_sad16x16_avg_c;
-const SadMxNAvgFunc sad16x8_avg_c = vpx_sad16x8_avg_c;
-const SadMxNAvgFunc sad8x16_avg_c = vpx_sad8x16_avg_c;
-const SadMxNAvgFunc sad8x8_avg_c = vpx_sad8x8_avg_c;
-const SadMxNAvgFunc sad8x4_avg_c = vpx_sad8x4_avg_c;
-const SadMxNAvgFunc sad4x8_avg_c = vpx_sad4x8_avg_c;
-const SadMxNAvgFunc sad4x4_avg_c = vpx_sad4x4_avg_c;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNAvgFunc highbd_sad64x64_avg_c = vpx_highbd_sad64x64_avg_c;
-const SadMxNAvgFunc highbd_sad64x32_avg_c = vpx_highbd_sad64x32_avg_c;
-const SadMxNAvgFunc highbd_sad32x64_avg_c = vpx_highbd_sad32x64_avg_c;
-const SadMxNAvgFunc highbd_sad32x32_avg_c = vpx_highbd_sad32x32_avg_c;
-const SadMxNAvgFunc highbd_sad32x16_avg_c = vpx_highbd_sad32x16_avg_c;
-const SadMxNAvgFunc highbd_sad16x32_avg_c = vpx_highbd_sad16x32_avg_c;
-const SadMxNAvgFunc highbd_sad16x16_avg_c = vpx_highbd_sad16x16_avg_c;
-const SadMxNAvgFunc highbd_sad16x8_avg_c = vpx_highbd_sad16x8_avg_c;
-const SadMxNAvgFunc highbd_sad8x16_avg_c = vpx_highbd_sad8x16_avg_c;
-const SadMxNAvgFunc highbd_sad8x8_avg_c = vpx_highbd_sad8x8_avg_c;
-const SadMxNAvgFunc highbd_sad8x4_avg_c = vpx_highbd_sad8x4_avg_c;
-const SadMxNAvgFunc highbd_sad4x8_avg_c = vpx_highbd_sad4x8_avg_c;
-const SadMxNAvgFunc highbd_sad4x4_avg_c = vpx_highbd_sad4x4_avg_c;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNAvgParam avg_c_tests[] = {
-  make_tuple(64, 64, sad64x64_avg_c, -1),
-  make_tuple(64, 32, sad64x32_avg_c, -1),
-  make_tuple(32, 64, sad32x64_avg_c, -1),
-  make_tuple(32, 32, sad32x32_avg_c, -1),
-  make_tuple(32, 16, sad32x16_avg_c, -1),
-  make_tuple(16, 32, sad16x32_avg_c, -1),
-  make_tuple(16, 16, sad16x16_avg_c, -1),
-  make_tuple(16, 8, sad16x8_avg_c, -1),
-  make_tuple(8, 16, sad8x16_avg_c, -1),
-  make_tuple(8, 8, sad8x8_avg_c, -1),
-  make_tuple(8, 4, sad8x4_avg_c, -1),
-  make_tuple(4, 8, sad4x8_avg_c, -1),
-  make_tuple(4, 4, sad4x4_avg_c, -1),
+  make_tuple(64, 64, &vpx_sad64x64_avg_c, -1),
+  make_tuple(64, 32, &vpx_sad64x32_avg_c, -1),
+  make_tuple(32, 64, &vpx_sad32x64_avg_c, -1),
+  make_tuple(32, 32, &vpx_sad32x32_avg_c, -1),
+  make_tuple(32, 16, &vpx_sad32x16_avg_c, -1),
+  make_tuple(16, 32, &vpx_sad16x32_avg_c, -1),
+  make_tuple(16, 16, &vpx_sad16x16_avg_c, -1),
+  make_tuple(16, 8, &vpx_sad16x8_avg_c, -1),
+  make_tuple(8, 16, &vpx_sad8x16_avg_c, -1),
+  make_tuple(8, 8, &vpx_sad8x8_avg_c, -1),
+  make_tuple(8, 4, &vpx_sad8x4_avg_c, -1),
+  make_tuple(4, 8, &vpx_sad4x8_avg_c, -1),
+  make_tuple(4, 4, &vpx_sad4x4_avg_c, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64_avg_c, 8),
-  make_tuple(64, 32, highbd_sad64x32_avg_c, 8),
-  make_tuple(32, 64, highbd_sad32x64_avg_c, 8),
-  make_tuple(32, 32, highbd_sad32x32_avg_c, 8),
-  make_tuple(32, 16, highbd_sad32x16_avg_c, 8),
-  make_tuple(16, 32, highbd_sad16x32_avg_c, 8),
-  make_tuple(16, 16, highbd_sad16x16_avg_c, 8),
-  make_tuple(16, 8, highbd_sad16x8_avg_c, 8),
-  make_tuple(8, 16, highbd_sad8x16_avg_c, 8),
-  make_tuple(8, 8, highbd_sad8x8_avg_c, 8),
-  make_tuple(8, 4, highbd_sad8x4_avg_c, 8),
-  make_tuple(4, 8, highbd_sad4x8_avg_c, 8),
-  make_tuple(4, 4, highbd_sad4x4_avg_c, 8),
-  make_tuple(64, 64, highbd_sad64x64_avg_c, 10),
-  make_tuple(64, 32, highbd_sad64x32_avg_c, 10),
-  make_tuple(32, 64, highbd_sad32x64_avg_c, 10),
-  make_tuple(32, 32, highbd_sad32x32_avg_c, 10),
-  make_tuple(32, 16, highbd_sad32x16_avg_c, 10),
-  make_tuple(16, 32, highbd_sad16x32_avg_c, 10),
-  make_tuple(16, 16, highbd_sad16x16_avg_c, 10),
-  make_tuple(16, 8, highbd_sad16x8_avg_c, 10),
-  make_tuple(8, 16, highbd_sad8x16_avg_c, 10),
-  make_tuple(8, 8, highbd_sad8x8_avg_c, 10),
-  make_tuple(8, 4, highbd_sad8x4_avg_c, 10),
-  make_tuple(4, 8, highbd_sad4x8_avg_c, 10),
-  make_tuple(4, 4, highbd_sad4x4_avg_c, 10),
-  make_tuple(64, 64, highbd_sad64x64_avg_c, 12),
-  make_tuple(64, 32, highbd_sad64x32_avg_c, 12),
-  make_tuple(32, 64, highbd_sad32x64_avg_c, 12),
-  make_tuple(32, 32, highbd_sad32x32_avg_c, 12),
-  make_tuple(32, 16, highbd_sad32x16_avg_c, 12),
-  make_tuple(16, 32, highbd_sad16x32_avg_c, 12),
-  make_tuple(16, 16, highbd_sad16x16_avg_c, 12),
-  make_tuple(16, 8, highbd_sad16x8_avg_c, 12),
-  make_tuple(8, 16, highbd_sad8x16_avg_c, 12),
-  make_tuple(8, 8, highbd_sad8x8_avg_c, 12),
-  make_tuple(8, 4, highbd_sad8x4_avg_c, 12),
-  make_tuple(4, 8, highbd_sad4x8_avg_c, 12),
-  make_tuple(4, 4, highbd_sad4x4_avg_c, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 8),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 8),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 10),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 10),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 12),
+  make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 12),
+  make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
 
-const SadMxNx4Func sad64x64x4d_c = vpx_sad64x64x4d_c;
-const SadMxNx4Func sad64x32x4d_c = vpx_sad64x32x4d_c;
-const SadMxNx4Func sad32x64x4d_c = vpx_sad32x64x4d_c;
-const SadMxNx4Func sad32x32x4d_c = vpx_sad32x32x4d_c;
-const SadMxNx4Func sad32x16x4d_c = vpx_sad32x16x4d_c;
-const SadMxNx4Func sad16x32x4d_c = vpx_sad16x32x4d_c;
-const SadMxNx4Func sad16x16x4d_c = vpx_sad16x16x4d_c;
-const SadMxNx4Func sad16x8x4d_c = vpx_sad16x8x4d_c;
-const SadMxNx4Func sad8x16x4d_c = vpx_sad8x16x4d_c;
-const SadMxNx4Func sad8x8x4d_c = vpx_sad8x8x4d_c;
-const SadMxNx4Func sad8x4x4d_c = vpx_sad8x4x4d_c;
-const SadMxNx4Func sad4x8x4d_c = vpx_sad4x8x4d_c;
-const SadMxNx4Func sad4x4x4d_c = vpx_sad4x4x4d_c;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNx4Func highbd_sad64x64x4d_c = vpx_highbd_sad64x64x4d_c;
-const SadMxNx4Func highbd_sad64x32x4d_c = vpx_highbd_sad64x32x4d_c;
-const SadMxNx4Func highbd_sad32x64x4d_c = vpx_highbd_sad32x64x4d_c;
-const SadMxNx4Func highbd_sad32x32x4d_c = vpx_highbd_sad32x32x4d_c;
-const SadMxNx4Func highbd_sad32x16x4d_c = vpx_highbd_sad32x16x4d_c;
-const SadMxNx4Func highbd_sad16x32x4d_c = vpx_highbd_sad16x32x4d_c;
-const SadMxNx4Func highbd_sad16x16x4d_c = vpx_highbd_sad16x16x4d_c;
-const SadMxNx4Func highbd_sad16x8x4d_c = vpx_highbd_sad16x8x4d_c;
-const SadMxNx4Func highbd_sad8x16x4d_c = vpx_highbd_sad8x16x4d_c;
-const SadMxNx4Func highbd_sad8x8x4d_c = vpx_highbd_sad8x8x4d_c;
-const SadMxNx4Func highbd_sad8x4x4d_c = vpx_highbd_sad8x4x4d_c;
-const SadMxNx4Func highbd_sad4x8x4d_c = vpx_highbd_sad4x8x4d_c;
-const SadMxNx4Func highbd_sad4x4x4d_c = vpx_highbd_sad4x4x4d_c;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNx4Param x4d_c_tests[] = {
-  make_tuple(64, 64, sad64x64x4d_c, -1),
-  make_tuple(64, 32, sad64x32x4d_c, -1),
-  make_tuple(32, 64, sad32x64x4d_c, -1),
-  make_tuple(32, 32, sad32x32x4d_c, -1),
-  make_tuple(32, 16, sad32x16x4d_c, -1),
-  make_tuple(16, 32, sad16x32x4d_c, -1),
-  make_tuple(16, 16, sad16x16x4d_c, -1),
-  make_tuple(16, 8, sad16x8x4d_c, -1),
-  make_tuple(8, 16, sad8x16x4d_c, -1),
-  make_tuple(8, 8, sad8x8x4d_c, -1),
-  make_tuple(8, 4, sad8x4x4d_c, -1),
-  make_tuple(4, 8, sad4x8x4d_c, -1),
-  make_tuple(4, 4, sad4x4x4d_c, -1),
+  make_tuple(64, 64, &vpx_sad64x64x4d_c, -1),
+  make_tuple(64, 32, &vpx_sad64x32x4d_c, -1),
+  make_tuple(32, 64, &vpx_sad32x64x4d_c, -1),
+  make_tuple(32, 32, &vpx_sad32x32x4d_c, -1),
+  make_tuple(32, 16, &vpx_sad32x16x4d_c, -1),
+  make_tuple(16, 32, &vpx_sad16x32x4d_c, -1),
+  make_tuple(16, 16, &vpx_sad16x16x4d_c, -1),
+  make_tuple(16, 8, &vpx_sad16x8x4d_c, -1),
+  make_tuple(8, 16, &vpx_sad8x16x4d_c, -1),
+  make_tuple(8, 8, &vpx_sad8x8x4d_c, -1),
+  make_tuple(8, 4, &vpx_sad8x4x4d_c, -1),
+  make_tuple(4, 8, &vpx_sad4x8x4d_c, -1),
+  make_tuple(4, 4, &vpx_sad4x4x4d_c, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64x4d_c, 8),
-  make_tuple(64, 32, highbd_sad64x32x4d_c, 8),
-  make_tuple(32, 64, highbd_sad32x64x4d_c, 8),
-  make_tuple(32, 32, highbd_sad32x32x4d_c, 8),
-  make_tuple(32, 16, highbd_sad32x16x4d_c, 8),
-  make_tuple(16, 32, highbd_sad16x32x4d_c, 8),
-  make_tuple(16, 16, highbd_sad16x16x4d_c, 8),
-  make_tuple(16, 8, highbd_sad16x8x4d_c, 8),
-  make_tuple(8, 16, highbd_sad8x16x4d_c, 8),
-  make_tuple(8, 8, highbd_sad8x8x4d_c, 8),
-  make_tuple(8, 4, highbd_sad8x4x4d_c, 8),
-  make_tuple(4, 8, highbd_sad4x8x4d_c, 8),
-  make_tuple(4, 4, highbd_sad4x4x4d_c, 8),
-  make_tuple(64, 64, highbd_sad64x64x4d_c, 10),
-  make_tuple(64, 32, highbd_sad64x32x4d_c, 10),
-  make_tuple(32, 64, highbd_sad32x64x4d_c, 10),
-  make_tuple(32, 32, highbd_sad32x32x4d_c, 10),
-  make_tuple(32, 16, highbd_sad32x16x4d_c, 10),
-  make_tuple(16, 32, highbd_sad16x32x4d_c, 10),
-  make_tuple(16, 16, highbd_sad16x16x4d_c, 10),
-  make_tuple(16, 8, highbd_sad16x8x4d_c, 10),
-  make_tuple(8, 16, highbd_sad8x16x4d_c, 10),
-  make_tuple(8, 8, highbd_sad8x8x4d_c, 10),
-  make_tuple(8, 4, highbd_sad8x4x4d_c, 10),
-  make_tuple(4, 8, highbd_sad4x8x4d_c, 10),
-  make_tuple(4, 4, highbd_sad4x4x4d_c, 10),
-  make_tuple(64, 64, highbd_sad64x64x4d_c, 12),
-  make_tuple(64, 32, highbd_sad64x32x4d_c, 12),
-  make_tuple(32, 64, highbd_sad32x64x4d_c, 12),
-  make_tuple(32, 32, highbd_sad32x32x4d_c, 12),
-  make_tuple(32, 16, highbd_sad32x16x4d_c, 12),
-  make_tuple(16, 32, highbd_sad16x32x4d_c, 12),
-  make_tuple(16, 16, highbd_sad16x16x4d_c, 12),
-  make_tuple(16, 8, highbd_sad16x8x4d_c, 12),
-  make_tuple(8, 16, highbd_sad8x16x4d_c, 12),
-  make_tuple(8, 8, highbd_sad8x8x4d_c, 12),
-  make_tuple(8, 4, highbd_sad8x4x4d_c, 12),
-  make_tuple(4, 8, highbd_sad4x8x4d_c, 12),
-  make_tuple(4, 4, highbd_sad4x4x4d_c, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 8),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 8),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 10),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 10),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 12),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 12),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
@@ -745,40 +661,28 @@
 //------------------------------------------------------------------------------
 // ARM functions
 #if HAVE_MEDIA
-const SadMxNFunc sad16x16_media = vpx_sad16x16_media;
 const SadMxNParam media_tests[] = {
-  make_tuple(16, 16, sad16x16_media, -1),
+  make_tuple(16, 16, &vpx_sad16x16_media, -1),
 };
 INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests));
 #endif  // HAVE_MEDIA
 
 #if HAVE_NEON
-const SadMxNFunc sad64x64_neon = vpx_sad64x64_neon;
-const SadMxNFunc sad32x32_neon = vpx_sad32x32_neon;
-const SadMxNFunc sad16x16_neon = vpx_sad16x16_neon;
-const SadMxNFunc sad16x8_neon = vpx_sad16x8_neon;
-const SadMxNFunc sad8x16_neon = vpx_sad8x16_neon;
-const SadMxNFunc sad8x8_neon = vpx_sad8x8_neon;
-const SadMxNFunc sad4x4_neon = vpx_sad4x4_neon;
-
 const SadMxNParam neon_tests[] = {
-  make_tuple(64, 64, sad64x64_neon, -1),
-  make_tuple(32, 32, sad32x32_neon, -1),
-  make_tuple(16, 16, sad16x16_neon, -1),
-  make_tuple(16, 8, sad16x8_neon, -1),
-  make_tuple(8, 16, sad8x16_neon, -1),
-  make_tuple(8, 8, sad8x8_neon, -1),
-  make_tuple(4, 4, sad4x4_neon, -1),
+  make_tuple(64, 64, &vpx_sad64x64_neon, -1),
+  make_tuple(32, 32, &vpx_sad32x32_neon, -1),
+  make_tuple(16, 16, &vpx_sad16x16_neon, -1),
+  make_tuple(16, 8, &vpx_sad16x8_neon, -1),
+  make_tuple(8, 16, &vpx_sad8x16_neon, -1),
+  make_tuple(8, 8, &vpx_sad8x8_neon, -1),
+  make_tuple(4, 4, &vpx_sad4x4_neon, -1),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
 
-const SadMxNx4Func sad64x64x4d_neon = vpx_sad64x64x4d_neon;
-const SadMxNx4Func sad32x32x4d_neon = vpx_sad32x32x4d_neon;
-const SadMxNx4Func sad16x16x4d_neon = vpx_sad16x16x4d_neon;
 const SadMxNx4Param x4d_neon_tests[] = {
-  make_tuple(64, 64, sad64x64x4d_neon, -1),
-  make_tuple(32, 32, sad32x32x4d_neon, -1),
-  make_tuple(16, 16, sad16x16x4d_neon, -1),
+  make_tuple(64, 64, &vpx_sad64x64x4d_neon, -1),
+  make_tuple(32, 32, &vpx_sad32x32x4d_neon, -1),
+  make_tuple(16, 16, &vpx_sad16x16x4d_neon, -1),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
 #endif  // HAVE_NEON
@@ -786,17 +690,12 @@
 //------------------------------------------------------------------------------
 // x86 functions
 #if HAVE_MMX
-const SadMxNFunc sad16x16_mmx = vpx_sad16x16_mmx;
-const SadMxNFunc sad16x8_mmx = vpx_sad16x8_mmx;
-const SadMxNFunc sad8x16_mmx = vpx_sad8x16_mmx;
-const SadMxNFunc sad8x8_mmx = vpx_sad8x8_mmx;
-const SadMxNFunc sad4x4_mmx = vpx_sad4x4_mmx;
 const SadMxNParam mmx_tests[] = {
-  make_tuple(16, 16, sad16x16_mmx, -1),
-  make_tuple(16, 8, sad16x8_mmx, -1),
-  make_tuple(8, 16, sad8x16_mmx, -1),
-  make_tuple(8, 8, sad8x8_mmx, -1),
-  make_tuple(4, 4, sad4x4_mmx, -1),
+  make_tuple(16, 16, &vpx_sad16x16_mmx, -1),
+  make_tuple(16, 8, &vpx_sad16x8_mmx, -1),
+  make_tuple(8, 16, &vpx_sad8x16_mmx, -1),
+  make_tuple(8, 8, &vpx_sad8x8_mmx, -1),
+  make_tuple(4, 4, &vpx_sad4x4_mmx, -1),
 };
 INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
 #endif  // HAVE_MMX
@@ -803,27 +702,21 @@
 
 #if HAVE_SSE
 #if CONFIG_USE_X86INC
-const SadMxNFunc sad4x8_sse = vpx_sad4x8_sse;
-const SadMxNFunc sad4x4_sse = vpx_sad4x4_sse;
 const SadMxNParam sse_tests[] = {
-  make_tuple(4, 8, sad4x8_sse, -1),
-  make_tuple(4, 4, sad4x4_sse, -1),
+  make_tuple(4, 8, &vpx_sad4x8_sse, -1),
+  make_tuple(4, 4, &vpx_sad4x4_sse, -1),
 };
 INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::ValuesIn(sse_tests));
 
-const SadMxNAvgFunc sad4x8_avg_sse = vpx_sad4x8_avg_sse;
-const SadMxNAvgFunc sad4x4_avg_sse = vpx_sad4x4_avg_sse;
 const SadMxNAvgParam avg_sse_tests[] = {
-  make_tuple(4, 8, sad4x8_avg_sse, -1),
-  make_tuple(4, 4, sad4x4_avg_sse, -1),
+  make_tuple(4, 8, &vpx_sad4x8_avg_sse, -1),
+  make_tuple(4, 4, &vpx_sad4x4_avg_sse, -1),
 };
 INSTANTIATE_TEST_CASE_P(SSE, SADavgTest, ::testing::ValuesIn(avg_sse_tests));
 
-const SadMxNx4Func sad4x8x4d_sse = vpx_sad4x8x4d_sse;
-const SadMxNx4Func sad4x4x4d_sse = vpx_sad4x4x4d_sse;
 const SadMxNx4Param x4d_sse_tests[] = {
-  make_tuple(4, 8, sad4x8x4d_sse, -1),
-  make_tuple(4, 4, sad4x4x4d_sse, -1),
+  make_tuple(4, 8, &vpx_sad4x8x4d_sse, -1),
+  make_tuple(4, 4, &vpx_sad4x4x4d_sse, -1),
 };
 INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::ValuesIn(x4d_sse_tests));
 #endif  // CONFIG_USE_X86INC
@@ -831,232 +724,158 @@
 
 #if HAVE_SSE2
 #if CONFIG_USE_X86INC
-const SadMxNFunc sad64x64_sse2 = vpx_sad64x64_sse2;
-const SadMxNFunc sad64x32_sse2 = vpx_sad64x32_sse2;
-const SadMxNFunc sad32x64_sse2 = vpx_sad32x64_sse2;
-const SadMxNFunc sad32x32_sse2 = vpx_sad32x32_sse2;
-const SadMxNFunc sad32x16_sse2 = vpx_sad32x16_sse2;
-const SadMxNFunc sad16x32_sse2 = vpx_sad16x32_sse2;
-const SadMxNFunc sad16x16_sse2 = vpx_sad16x16_sse2;
-const SadMxNFunc sad16x8_sse2 = vpx_sad16x8_sse2;
-const SadMxNFunc sad8x16_sse2 = vpx_sad8x16_sse2;
-const SadMxNFunc sad8x8_sse2 = vpx_sad8x8_sse2;
-const SadMxNFunc sad8x4_sse2 = vpx_sad8x4_sse2;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNFunc highbd_sad64x64_sse2 = vpx_highbd_sad64x64_sse2;
-const SadMxNFunc highbd_sad64x32_sse2 = vpx_highbd_sad64x32_sse2;
-const SadMxNFunc highbd_sad32x64_sse2 = vpx_highbd_sad32x64_sse2;
-const SadMxNFunc highbd_sad32x32_sse2 = vpx_highbd_sad32x32_sse2;
-const SadMxNFunc highbd_sad32x16_sse2 = vpx_highbd_sad32x16_sse2;
-const SadMxNFunc highbd_sad16x32_sse2 = vpx_highbd_sad16x32_sse2;
-const SadMxNFunc highbd_sad16x16_sse2 = vpx_highbd_sad16x16_sse2;
-const SadMxNFunc highbd_sad16x8_sse2 = vpx_highbd_sad16x8_sse2;
-const SadMxNFunc highbd_sad8x16_sse2 = vpx_highbd_sad8x16_sse2;
-const SadMxNFunc highbd_sad8x8_sse2 = vpx_highbd_sad8x8_sse2;
-const SadMxNFunc highbd_sad8x4_sse2 = vpx_highbd_sad8x4_sse2;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNParam sse2_tests[] = {
-  make_tuple(64, 64, sad64x64_sse2, -1),
-  make_tuple(64, 32, sad64x32_sse2, -1),
-  make_tuple(32, 64, sad32x64_sse2, -1),
-  make_tuple(32, 32, sad32x32_sse2, -1),
-  make_tuple(32, 16, sad32x16_sse2, -1),
-  make_tuple(16, 32, sad16x32_sse2, -1),
-  make_tuple(16, 16, sad16x16_sse2, -1),
-  make_tuple(16, 8, sad16x8_sse2, -1),
-  make_tuple(8, 16, sad8x16_sse2, -1),
-  make_tuple(8, 8, sad8x8_sse2, -1),
-  make_tuple(8, 4, sad8x4_sse2, -1),
+  make_tuple(64, 64, &vpx_sad64x64_sse2, -1),
+  make_tuple(64, 32, &vpx_sad64x32_sse2, -1),
+  make_tuple(32, 64, &vpx_sad32x64_sse2, -1),
+  make_tuple(32, 32, &vpx_sad32x32_sse2, -1),
+  make_tuple(32, 16, &vpx_sad32x16_sse2, -1),
+  make_tuple(16, 32, &vpx_sad16x32_sse2, -1),
+  make_tuple(16, 16, &vpx_sad16x16_sse2, -1),
+  make_tuple(16, 8, &vpx_sad16x8_sse2, -1),
+  make_tuple(8, 16, &vpx_sad8x16_sse2, -1),
+  make_tuple(8, 8, &vpx_sad8x8_sse2, -1),
+  make_tuple(8, 4, &vpx_sad8x4_sse2, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64_sse2, 8),
-  make_tuple(64, 32, highbd_sad64x32_sse2, 8),
-  make_tuple(32, 64, highbd_sad32x64_sse2, 8),
-  make_tuple(32, 32, highbd_sad32x32_sse2, 8),
-  make_tuple(32, 16, highbd_sad32x16_sse2, 8),
-  make_tuple(16, 32, highbd_sad16x32_sse2, 8),
-  make_tuple(16, 16, highbd_sad16x16_sse2, 8),
-  make_tuple(16, 8, highbd_sad16x8_sse2, 8),
-  make_tuple(8, 16, highbd_sad8x16_sse2, 8),
-  make_tuple(8, 8, highbd_sad8x8_sse2, 8),
-  make_tuple(8, 4, highbd_sad8x4_sse2, 8),
-  make_tuple(64, 64, highbd_sad64x64_sse2, 10),
-  make_tuple(64, 32, highbd_sad64x32_sse2, 10),
-  make_tuple(32, 64, highbd_sad32x64_sse2, 10),
-  make_tuple(32, 32, highbd_sad32x32_sse2, 10),
-  make_tuple(32, 16, highbd_sad32x16_sse2, 10),
-  make_tuple(16, 32, highbd_sad16x32_sse2, 10),
-  make_tuple(16, 16, highbd_sad16x16_sse2, 10),
-  make_tuple(16, 8, highbd_sad16x8_sse2, 10),
-  make_tuple(8, 16, highbd_sad8x16_sse2, 10),
-  make_tuple(8, 8, highbd_sad8x8_sse2, 10),
-  make_tuple(8, 4, highbd_sad8x4_sse2, 10),
-  make_tuple(64, 64, highbd_sad64x64_sse2, 12),
-  make_tuple(64, 32, highbd_sad64x32_sse2, 12),
-  make_tuple(32, 64, highbd_sad32x64_sse2, 12),
-  make_tuple(32, 32, highbd_sad32x32_sse2, 12),
-  make_tuple(32, 16, highbd_sad32x16_sse2, 12),
-  make_tuple(16, 32, highbd_sad16x32_sse2, 12),
-  make_tuple(16, 16, highbd_sad16x16_sse2, 12),
-  make_tuple(16, 8, highbd_sad16x8_sse2, 12),
-  make_tuple(8, 16, highbd_sad8x16_sse2, 12),
-  make_tuple(8, 8, highbd_sad8x8_sse2, 12),
-  make_tuple(8, 4, highbd_sad8x4_sse2, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
 
-const SadMxNAvgFunc sad64x64_avg_sse2 = vpx_sad64x64_avg_sse2;
-const SadMxNAvgFunc sad64x32_avg_sse2 = vpx_sad64x32_avg_sse2;
-const SadMxNAvgFunc sad32x64_avg_sse2 = vpx_sad32x64_avg_sse2;
-const SadMxNAvgFunc sad32x32_avg_sse2 = vpx_sad32x32_avg_sse2;
-const SadMxNAvgFunc sad32x16_avg_sse2 = vpx_sad32x16_avg_sse2;
-const SadMxNAvgFunc sad16x32_avg_sse2 = vpx_sad16x32_avg_sse2;
-const SadMxNAvgFunc sad16x16_avg_sse2 = vpx_sad16x16_avg_sse2;
-const SadMxNAvgFunc sad16x8_avg_sse2 = vpx_sad16x8_avg_sse2;
-const SadMxNAvgFunc sad8x16_avg_sse2 = vpx_sad8x16_avg_sse2;
-const SadMxNAvgFunc sad8x8_avg_sse2 = vpx_sad8x8_avg_sse2;
-const SadMxNAvgFunc sad8x4_avg_sse2 = vpx_sad8x4_avg_sse2;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNAvgFunc highbd_sad64x64_avg_sse2 = vpx_highbd_sad64x64_avg_sse2;
-const SadMxNAvgFunc highbd_sad64x32_avg_sse2 = vpx_highbd_sad64x32_avg_sse2;
-const SadMxNAvgFunc highbd_sad32x64_avg_sse2 = vpx_highbd_sad32x64_avg_sse2;
-const SadMxNAvgFunc highbd_sad32x32_avg_sse2 = vpx_highbd_sad32x32_avg_sse2;
-const SadMxNAvgFunc highbd_sad32x16_avg_sse2 = vpx_highbd_sad32x16_avg_sse2;
-const SadMxNAvgFunc highbd_sad16x32_avg_sse2 = vpx_highbd_sad16x32_avg_sse2;
-const SadMxNAvgFunc highbd_sad16x16_avg_sse2 = vpx_highbd_sad16x16_avg_sse2;
-const SadMxNAvgFunc highbd_sad16x8_avg_sse2 = vpx_highbd_sad16x8_avg_sse2;
-const SadMxNAvgFunc highbd_sad8x16_avg_sse2 = vpx_highbd_sad8x16_avg_sse2;
-const SadMxNAvgFunc highbd_sad8x8_avg_sse2 = vpx_highbd_sad8x8_avg_sse2;
-const SadMxNAvgFunc highbd_sad8x4_avg_sse2 = vpx_highbd_sad8x4_avg_sse2;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNAvgParam avg_sse2_tests[] = {
-  make_tuple(64, 64, sad64x64_avg_sse2, -1),
-  make_tuple(64, 32, sad64x32_avg_sse2, -1),
-  make_tuple(32, 64, sad32x64_avg_sse2, -1),
-  make_tuple(32, 32, sad32x32_avg_sse2, -1),
-  make_tuple(32, 16, sad32x16_avg_sse2, -1),
-  make_tuple(16, 32, sad16x32_avg_sse2, -1),
-  make_tuple(16, 16, sad16x16_avg_sse2, -1),
-  make_tuple(16, 8, sad16x8_avg_sse2, -1),
-  make_tuple(8, 16, sad8x16_avg_sse2, -1),
-  make_tuple(8, 8, sad8x8_avg_sse2, -1),
-  make_tuple(8, 4, sad8x4_avg_sse2, -1),
+  make_tuple(64, 64, &vpx_sad64x64_avg_sse2, -1),
+  make_tuple(64, 32, &vpx_sad64x32_avg_sse2, -1),
+  make_tuple(32, 64, &vpx_sad32x64_avg_sse2, -1),
+  make_tuple(32, 32, &vpx_sad32x32_avg_sse2, -1),
+  make_tuple(32, 16, &vpx_sad32x16_avg_sse2, -1),
+  make_tuple(16, 32, &vpx_sad16x32_avg_sse2, -1),
+  make_tuple(16, 16, &vpx_sad16x16_avg_sse2, -1),
+  make_tuple(16, 8, &vpx_sad16x8_avg_sse2, -1),
+  make_tuple(8, 16, &vpx_sad8x16_avg_sse2, -1),
+  make_tuple(8, 8, &vpx_sad8x8_avg_sse2, -1),
+  make_tuple(8, 4, &vpx_sad8x4_avg_sse2, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64_avg_sse2, 8),
-  make_tuple(64, 32, highbd_sad64x32_avg_sse2, 8),
-  make_tuple(32, 64, highbd_sad32x64_avg_sse2, 8),
-  make_tuple(32, 32, highbd_sad32x32_avg_sse2, 8),
-  make_tuple(32, 16, highbd_sad32x16_avg_sse2, 8),
-  make_tuple(16, 32, highbd_sad16x32_avg_sse2, 8),
-  make_tuple(16, 16, highbd_sad16x16_avg_sse2, 8),
-  make_tuple(16, 8, highbd_sad16x8_avg_sse2, 8),
-  make_tuple(8, 16, highbd_sad8x16_avg_sse2, 8),
-  make_tuple(8, 8, highbd_sad8x8_avg_sse2, 8),
-  make_tuple(8, 4, highbd_sad8x4_avg_sse2, 8),
-  make_tuple(64, 64, highbd_sad64x64_avg_sse2, 10),
-  make_tuple(64, 32, highbd_sad64x32_avg_sse2, 10),
-  make_tuple(32, 64, highbd_sad32x64_avg_sse2, 10),
-  make_tuple(32, 32, highbd_sad32x32_avg_sse2, 10),
-  make_tuple(32, 16, highbd_sad32x16_avg_sse2, 10),
-  make_tuple(16, 32, highbd_sad16x32_avg_sse2, 10),
-  make_tuple(16, 16, highbd_sad16x16_avg_sse2, 10),
-  make_tuple(16, 8, highbd_sad16x8_avg_sse2, 10),
-  make_tuple(8, 16, highbd_sad8x16_avg_sse2, 10),
-  make_tuple(8, 8, highbd_sad8x8_avg_sse2, 10),
-  make_tuple(8, 4, highbd_sad8x4_avg_sse2, 10),
-  make_tuple(64, 64, highbd_sad64x64_avg_sse2, 12),
-  make_tuple(64, 32, highbd_sad64x32_avg_sse2, 12),
-  make_tuple(32, 64, highbd_sad32x64_avg_sse2, 12),
-  make_tuple(32, 32, highbd_sad32x32_avg_sse2, 12),
-  make_tuple(32, 16, highbd_sad32x16_avg_sse2, 12),
-  make_tuple(16, 32, highbd_sad16x32_avg_sse2, 12),
-  make_tuple(16, 16, highbd_sad16x16_avg_sse2, 12),
-  make_tuple(16, 8, highbd_sad16x8_avg_sse2, 12),
-  make_tuple(8, 16, highbd_sad8x16_avg_sse2, 12),
-  make_tuple(8, 8, highbd_sad8x8_avg_sse2, 12),
-  make_tuple(8, 4, highbd_sad8x4_avg_sse2, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
 
-const SadMxNx4Func sad64x64x4d_sse2 = vpx_sad64x64x4d_sse2;
-const SadMxNx4Func sad64x32x4d_sse2 = vpx_sad64x32x4d_sse2;
-const SadMxNx4Func sad32x64x4d_sse2 = vpx_sad32x64x4d_sse2;
-const SadMxNx4Func sad32x32x4d_sse2 = vpx_sad32x32x4d_sse2;
-const SadMxNx4Func sad32x16x4d_sse2 = vpx_sad32x16x4d_sse2;
-const SadMxNx4Func sad16x32x4d_sse2 = vpx_sad16x32x4d_sse2;
-const SadMxNx4Func sad16x16x4d_sse2 = vpx_sad16x16x4d_sse2;
-const SadMxNx4Func sad16x8x4d_sse2 = vpx_sad16x8x4d_sse2;
-const SadMxNx4Func sad8x16x4d_sse2 = vpx_sad8x16x4d_sse2;
-const SadMxNx4Func sad8x8x4d_sse2 = vpx_sad8x8x4d_sse2;
-const SadMxNx4Func sad8x4x4d_sse2 = vpx_sad8x4x4d_sse2;
-#if CONFIG_VP9_HIGHBITDEPTH
-const SadMxNx4Func highbd_sad64x64x4d_sse2 = vpx_highbd_sad64x64x4d_sse2;
-const SadMxNx4Func highbd_sad64x32x4d_sse2 = vpx_highbd_sad64x32x4d_sse2;
-const SadMxNx4Func highbd_sad32x64x4d_sse2 = vpx_highbd_sad32x64x4d_sse2;
-const SadMxNx4Func highbd_sad32x32x4d_sse2 = vpx_highbd_sad32x32x4d_sse2;
-const SadMxNx4Func highbd_sad32x16x4d_sse2 = vpx_highbd_sad32x16x4d_sse2;
-const SadMxNx4Func highbd_sad16x32x4d_sse2 = vpx_highbd_sad16x32x4d_sse2;
-const SadMxNx4Func highbd_sad16x16x4d_sse2 = vpx_highbd_sad16x16x4d_sse2;
-const SadMxNx4Func highbd_sad16x8x4d_sse2 = vpx_highbd_sad16x8x4d_sse2;
-const SadMxNx4Func highbd_sad8x16x4d_sse2 = vpx_highbd_sad8x16x4d_sse2;
-const SadMxNx4Func highbd_sad8x8x4d_sse2 = vpx_highbd_sad8x8x4d_sse2;
-const SadMxNx4Func highbd_sad8x4x4d_sse2 = vpx_highbd_sad8x4x4d_sse2;
-const SadMxNx4Func highbd_sad4x8x4d_sse2 = vpx_highbd_sad4x8x4d_sse2;
-const SadMxNx4Func highbd_sad4x4x4d_sse2 = vpx_highbd_sad4x4x4d_sse2;
-#endif  // CONFIG_VP9_HIGHBITDEPTH
 const SadMxNx4Param x4d_sse2_tests[] = {
-  make_tuple(64, 64, sad64x64x4d_sse2, -1),
-  make_tuple(64, 32, sad64x32x4d_sse2, -1),
-  make_tuple(32, 64, sad32x64x4d_sse2, -1),
-  make_tuple(32, 32, sad32x32x4d_sse2, -1),
-  make_tuple(32, 16, sad32x16x4d_sse2, -1),
-  make_tuple(16, 32, sad16x32x4d_sse2, -1),
-  make_tuple(16, 16, sad16x16x4d_sse2, -1),
-  make_tuple(16, 8, sad16x8x4d_sse2, -1),
-  make_tuple(8, 16, sad8x16x4d_sse2, -1),
-  make_tuple(8, 8, sad8x8x4d_sse2, -1),
-  make_tuple(8, 4, sad8x4x4d_sse2, -1),
+  make_tuple(64, 64, &vpx_sad64x64x4d_sse2, -1),
+  make_tuple(64, 32, &vpx_sad64x32x4d_sse2, -1),
+  make_tuple(32, 64, &vpx_sad32x64x4d_sse2, -1),
+  make_tuple(32, 32, &vpx_sad32x32x4d_sse2, -1),
+  make_tuple(32, 16, &vpx_sad32x16x4d_sse2, -1),
+  make_tuple(16, 32, &vpx_sad16x32x4d_sse2, -1),
+  make_tuple(16, 16, &vpx_sad16x16x4d_sse2, -1),
+  make_tuple(16, 8, &vpx_sad16x8x4d_sse2, -1),
+  make_tuple(8, 16, &vpx_sad8x16x4d_sse2, -1),
+  make_tuple(8, 8, &vpx_sad8x8x4d_sse2, -1),
+  make_tuple(8, 4, &vpx_sad8x4x4d_sse2, -1),
 #if CONFIG_VP9_HIGHBITDEPTH
-  make_tuple(64, 64, highbd_sad64x64x4d_sse2, 8),
-  make_tuple(64, 32, highbd_sad64x32x4d_sse2, 8),
-  make_tuple(32, 64, highbd_sad32x64x4d_sse2, 8),
-  make_tuple(32, 32, highbd_sad32x32x4d_sse2, 8),
-  make_tuple(32, 16, highbd_sad32x16x4d_sse2, 8),
-  make_tuple(16, 32, highbd_sad16x32x4d_sse2, 8),
-  make_tuple(16, 16, highbd_sad16x16x4d_sse2, 8),
-  make_tuple(16, 8, highbd_sad16x8x4d_sse2, 8),
-  make_tuple(8, 16, highbd_sad8x16x4d_sse2, 8),
-  make_tuple(8, 8, highbd_sad8x8x4d_sse2, 8),
-  make_tuple(8, 4, highbd_sad8x4x4d_sse2, 8),
-  make_tuple(4, 8, highbd_sad4x8x4d_sse2, 8),
-  make_tuple(4, 4, highbd_sad4x4x4d_sse2, 8),
-  make_tuple(64, 64, highbd_sad64x64x4d_sse2, 10),
-  make_tuple(64, 32, highbd_sad64x32x4d_sse2, 10),
-  make_tuple(32, 64, highbd_sad32x64x4d_sse2, 10),
-  make_tuple(32, 32, highbd_sad32x32x4d_sse2, 10),
-  make_tuple(32, 16, highbd_sad32x16x4d_sse2, 10),
-  make_tuple(16, 32, highbd_sad16x32x4d_sse2, 10),
-  make_tuple(16, 16, highbd_sad16x16x4d_sse2, 10),
-  make_tuple(16, 8, highbd_sad16x8x4d_sse2, 10),
-  make_tuple(8, 16, highbd_sad8x16x4d_sse2, 10),
-  make_tuple(8, 8, highbd_sad8x8x4d_sse2, 10),
-  make_tuple(8, 4, highbd_sad8x4x4d_sse2, 10),
-  make_tuple(4, 8, highbd_sad4x8x4d_sse2, 10),
-  make_tuple(4, 4, highbd_sad4x4x4d_sse2, 10),
-  make_tuple(64, 64, highbd_sad64x64x4d_sse2, 12),
-  make_tuple(64, 32, highbd_sad64x32x4d_sse2, 12),
-  make_tuple(32, 64, highbd_sad32x64x4d_sse2, 12),
-  make_tuple(32, 32, highbd_sad32x32x4d_sse2, 12),
-  make_tuple(32, 16, highbd_sad32x16x4d_sse2, 12),
-  make_tuple(16, 32, highbd_sad16x32x4d_sse2, 12),
-  make_tuple(16, 16, highbd_sad16x16x4d_sse2, 12),
-  make_tuple(16, 8, highbd_sad16x8x4d_sse2, 12),
-  make_tuple(8, 16, highbd_sad8x16x4d_sse2, 12),
-  make_tuple(8, 8, highbd_sad8x8x4d_sse2, 12),
-  make_tuple(8, 4, highbd_sad8x4x4d_sse2, 12),
-  make_tuple(4, 8, highbd_sad4x8x4d_sse2, 12),
-  make_tuple(4, 4, highbd_sad4x4x4d_sse2, 12),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 8),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 8),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 8),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 8),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 8),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 8),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 8),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 8),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 8),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 8),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 8),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 8),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 8),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 10),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 10),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 10),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 10),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 10),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 10),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 10),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 10),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 10),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 10),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 10),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 10),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 10),
+  make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 12),
+  make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 12),
+  make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 12),
+  make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 12),
+  make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 12),
+  make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 12),
+  make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 12),
+  make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 12),
+  make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 12),
+  make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 12),
+  make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 12),
+  make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 12),
+  make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 12),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
@@ -1076,39 +895,27 @@
 #endif  // HAVE_SSE4_1
 
 #if HAVE_AVX2
-const SadMxNFunc sad64x64_avx2 = vpx_sad64x64_avx2;
-const SadMxNFunc sad64x32_avx2 = vpx_sad64x32_avx2;
-const SadMxNFunc sad32x64_avx2 = vpx_sad32x64_avx2;
-const SadMxNFunc sad32x32_avx2 = vpx_sad32x32_avx2;
-const SadMxNFunc sad32x16_avx2 = vpx_sad32x16_avx2;
 const SadMxNParam avx2_tests[] = {
-  make_tuple(64, 64, sad64x64_avx2, -1),
-  make_tuple(64, 32, sad64x32_avx2, -1),
-  make_tuple(32, 64, sad32x64_avx2, -1),
-  make_tuple(32, 32, sad32x32_avx2, -1),
-  make_tuple(32, 16, sad32x16_avx2, -1),
+  make_tuple(64, 64, &vpx_sad64x64_avx2, -1),
+  make_tuple(64, 32, &vpx_sad64x32_avx2, -1),
+  make_tuple(32, 64, &vpx_sad32x64_avx2, -1),
+  make_tuple(32, 32, &vpx_sad32x32_avx2, -1),
+  make_tuple(32, 16, &vpx_sad32x16_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests));
 
-const SadMxNAvgFunc sad64x64_avg_avx2 = vpx_sad64x64_avg_avx2;
-const SadMxNAvgFunc sad64x32_avg_avx2 = vpx_sad64x32_avg_avx2;
-const SadMxNAvgFunc sad32x64_avg_avx2 = vpx_sad32x64_avg_avx2;
-const SadMxNAvgFunc sad32x32_avg_avx2 = vpx_sad32x32_avg_avx2;
-const SadMxNAvgFunc sad32x16_avg_avx2 = vpx_sad32x16_avg_avx2;
 const SadMxNAvgParam avg_avx2_tests[] = {
-  make_tuple(64, 64, sad64x64_avg_avx2, -1),
-  make_tuple(64, 32, sad64x32_avg_avx2, -1),
-  make_tuple(32, 64, sad32x64_avg_avx2, -1),
-  make_tuple(32, 32, sad32x32_avg_avx2, -1),
-  make_tuple(32, 16, sad32x16_avg_avx2, -1),
+  make_tuple(64, 64, &vpx_sad64x64_avg_avx2, -1),
+  make_tuple(64, 32, &vpx_sad64x32_avg_avx2, -1),
+  make_tuple(32, 64, &vpx_sad32x64_avg_avx2, -1),
+  make_tuple(32, 32, &vpx_sad32x32_avg_avx2, -1),
+  make_tuple(32, 16, &vpx_sad32x16_avg_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
 
-const SadMxNx4Func sad64x64x4d_avx2 = vpx_sad64x64x4d_avx2;
-const SadMxNx4Func sad32x32x4d_avx2 = vpx_sad32x32x4d_avx2;
 const SadMxNx4Param x4d_avx2_tests[] = {
-  make_tuple(64, 64, sad64x64x4d_avx2, -1),
-  make_tuple(32, 32, sad32x32x4d_avx2, -1),
+  make_tuple(64, 64, &vpx_sad64x64x4d_avx2, -1),
+  make_tuple(32, 32, &vpx_sad32x32x4d_avx2, -1),
 };
 INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
 #endif  // HAVE_AVX2
@@ -1116,93 +923,54 @@
 //------------------------------------------------------------------------------
 // MIPS functions
 #if HAVE_MSA
-const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
-const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
-const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
-const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
-const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
-const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
-const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
-const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
-const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
-const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
-const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
-const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
-const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
 const SadMxNParam msa_tests[] = {
-  make_tuple(64, 64, sad64x64_msa, -1),
-  make_tuple(64, 32, sad64x32_msa, -1),
-  make_tuple(32, 64, sad32x64_msa, -1),
-  make_tuple(32, 32, sad32x32_msa, -1),
-  make_tuple(32, 16, sad32x16_msa, -1),
-  make_tuple(16, 32, sad16x32_msa, -1),
-  make_tuple(16, 16, sad16x16_msa, -1),
-  make_tuple(16, 8, sad16x8_msa, -1),
-  make_tuple(8, 16, sad8x16_msa, -1),
-  make_tuple(8, 8, sad8x8_msa, -1),
-  make_tuple(8, 4, sad8x4_msa, -1),
-  make_tuple(4, 8, sad4x8_msa, -1),
-  make_tuple(4, 4, sad4x4_msa, -1),
+  make_tuple(64, 64, &vpx_sad64x64_msa, -1),
+  make_tuple(64, 32, &vpx_sad64x32_msa, -1),
+  make_tuple(32, 64, &vpx_sad32x64_msa, -1),
+  make_tuple(32, 32, &vpx_sad32x32_msa, -1),
+  make_tuple(32, 16, &vpx_sad32x16_msa, -1),
+  make_tuple(16, 32, &vpx_sad16x32_msa, -1),
+  make_tuple(16, 16, &vpx_sad16x16_msa, -1),
+  make_tuple(16, 8, &vpx_sad16x8_msa, -1),
+  make_tuple(8, 16, &vpx_sad8x16_msa, -1),
+  make_tuple(8, 8, &vpx_sad8x8_msa, -1),
+  make_tuple(8, 4, &vpx_sad8x4_msa, -1),
+  make_tuple(4, 8, &vpx_sad4x8_msa, -1),
+  make_tuple(4, 4, &vpx_sad4x4_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));
 
-const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
-const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
-const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
-const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
-const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
-const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
-const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
-const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
-const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
-const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
-const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
-const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
-const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
 const SadMxNAvgParam avg_msa_tests[] = {
-  make_tuple(64, 64, sad64x64_avg_msa, -1),
-  make_tuple(64, 32, sad64x32_avg_msa, -1),
-  make_tuple(32, 64, sad32x64_avg_msa, -1),
-  make_tuple(32, 32, sad32x32_avg_msa, -1),
-  make_tuple(32, 16, sad32x16_avg_msa, -1),
-  make_tuple(16, 32, sad16x32_avg_msa, -1),
-  make_tuple(16, 16, sad16x16_avg_msa, -1),
-  make_tuple(16, 8, sad16x8_avg_msa, -1),
-  make_tuple(8, 16, sad8x16_avg_msa, -1),
-  make_tuple(8, 8, sad8x8_avg_msa, -1),
-  make_tuple(8, 4, sad8x4_avg_msa, -1),
-  make_tuple(4, 8, sad4x8_avg_msa, -1),
-  make_tuple(4, 4, sad4x4_avg_msa, -1),
+  make_tuple(64, 64, &vpx_sad64x64_avg_msa, -1),
+  make_tuple(64, 32, &vpx_sad64x32_avg_msa, -1),
+  make_tuple(32, 64, &vpx_sad32x64_avg_msa, -1),
+  make_tuple(32, 32, &vpx_sad32x32_avg_msa, -1),
+  make_tuple(32, 16, &vpx_sad32x16_avg_msa, -1),
+  make_tuple(16, 32, &vpx_sad16x32_avg_msa, -1),
+  make_tuple(16, 16, &vpx_sad16x16_avg_msa, -1),
+  make_tuple(16, 8, &vpx_sad16x8_avg_msa, -1),
+  make_tuple(8, 16, &vpx_sad8x16_avg_msa, -1),
+  make_tuple(8, 8, &vpx_sad8x8_avg_msa, -1),
+  make_tuple(8, 4, &vpx_sad8x4_avg_msa, -1),
+  make_tuple(4, 8, &vpx_sad4x8_avg_msa, -1),
+  make_tuple(4, 4, &vpx_sad4x4_avg_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));
 
-const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
-const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
-const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
-const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
-const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
-const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
-const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
-const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
-const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
-const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
-const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
-const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
-const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
 const SadMxNx4Param x4d_msa_tests[] = {
-  make_tuple(64, 64, sad64x64x4d_msa, -1),
-  make_tuple(64, 32, sad64x32x4d_msa, -1),
-  make_tuple(32, 64, sad32x64x4d_msa, -1),
-  make_tuple(32, 32, sad32x32x4d_msa, -1),
-  make_tuple(32, 16, sad32x16x4d_msa, -1),
-  make_tuple(16, 32, sad16x32x4d_msa, -1),
-  make_tuple(16, 16, sad16x16x4d_msa, -1),
-  make_tuple(16, 8, sad16x8x4d_msa, -1),
-  make_tuple(8, 16, sad8x16x4d_msa, -1),
-  make_tuple(8, 8, sad8x8x4d_msa, -1),
-  make_tuple(8, 4, sad8x4x4d_msa, -1),
-  make_tuple(4, 8, sad4x8x4d_msa, -1),
-  make_tuple(4, 4, sad4x4x4d_msa, -1),
+  make_tuple(64, 64, &vpx_sad64x64x4d_msa, -1),
+  make_tuple(64, 32, &vpx_sad64x32x4d_msa, -1),
+  make_tuple(32, 64, &vpx_sad32x64x4d_msa, -1),
+  make_tuple(32, 32, &vpx_sad32x32x4d_msa, -1),
+  make_tuple(32, 16, &vpx_sad32x16x4d_msa, -1),
+  make_tuple(16, 32, &vpx_sad16x32x4d_msa, -1),
+  make_tuple(16, 16, &vpx_sad16x16x4d_msa, -1),
+  make_tuple(16, 8, &vpx_sad16x8x4d_msa, -1),
+  make_tuple(8, 16, &vpx_sad8x16x4d_msa, -1),
+  make_tuple(8, 8, &vpx_sad8x8x4d_msa, -1),
+  make_tuple(8, 4, &vpx_sad8x4x4d_msa, -1),
+  make_tuple(4, 8, &vpx_sad4x8x4d_msa, -1),
+  make_tuple(4, 4, &vpx_sad4x4x4d_msa, -1),
 };
 INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
 #endif  // HAVE_MSA
--- a/test/sixtap_predict_test.cc
+++ b/test/sixtap_predict_test.cc
@@ -186,70 +186,48 @@
 
 using std::tr1::make_tuple;
 
-const SixtapPredictFunc sixtap_16x16_c = vp8_sixtap_predict16x16_c;
-const SixtapPredictFunc sixtap_8x8_c = vp8_sixtap_predict8x8_c;
-const SixtapPredictFunc sixtap_8x4_c = vp8_sixtap_predict8x4_c;
-const SixtapPredictFunc sixtap_4x4_c = vp8_sixtap_predict4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_c),
-        make_tuple(8, 8, sixtap_8x8_c),
-        make_tuple(8, 4, sixtap_8x4_c),
-        make_tuple(4, 4, sixtap_4x4_c)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_c),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_c),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_c),
+        make_tuple(4, 4, &vp8_sixtap_predict4x4_c)));
 #if HAVE_NEON
-const SixtapPredictFunc sixtap_16x16_neon = vp8_sixtap_predict16x16_neon;
-const SixtapPredictFunc sixtap_8x8_neon = vp8_sixtap_predict8x8_neon;
-const SixtapPredictFunc sixtap_8x4_neon = vp8_sixtap_predict8x4_neon;
 INSTANTIATE_TEST_CASE_P(
     NEON, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_neon),
-        make_tuple(8, 8, sixtap_8x8_neon),
-        make_tuple(8, 4, sixtap_8x4_neon)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_neon),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_neon),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_neon)));
 #endif
 #if HAVE_MMX
-const SixtapPredictFunc sixtap_16x16_mmx = vp8_sixtap_predict16x16_mmx;
-const SixtapPredictFunc sixtap_8x8_mmx = vp8_sixtap_predict8x8_mmx;
-const SixtapPredictFunc sixtap_8x4_mmx = vp8_sixtap_predict8x4_mmx;
-const SixtapPredictFunc sixtap_4x4_mmx = vp8_sixtap_predict4x4_mmx;
 INSTANTIATE_TEST_CASE_P(
     MMX, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_mmx),
-        make_tuple(8, 8, sixtap_8x8_mmx),
-        make_tuple(8, 4, sixtap_8x4_mmx),
-        make_tuple(4, 4, sixtap_4x4_mmx)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_mmx),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_mmx),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_mmx),
+        make_tuple(4, 4, &vp8_sixtap_predict4x4_mmx)));
 #endif
 #if HAVE_SSE2
-const SixtapPredictFunc sixtap_16x16_sse2 = vp8_sixtap_predict16x16_sse2;
-const SixtapPredictFunc sixtap_8x8_sse2 = vp8_sixtap_predict8x8_sse2;
-const SixtapPredictFunc sixtap_8x4_sse2 = vp8_sixtap_predict8x4_sse2;
 INSTANTIATE_TEST_CASE_P(
     SSE2, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_sse2),
-        make_tuple(8, 8, sixtap_8x8_sse2),
-        make_tuple(8, 4, sixtap_8x4_sse2)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_sse2),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_sse2),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_sse2)));
 #endif
 #if HAVE_SSSE3
-const SixtapPredictFunc sixtap_16x16_ssse3 = vp8_sixtap_predict16x16_ssse3;
-const SixtapPredictFunc sixtap_8x8_ssse3 = vp8_sixtap_predict8x8_ssse3;
-const SixtapPredictFunc sixtap_8x4_ssse3 = vp8_sixtap_predict8x4_ssse3;
-const SixtapPredictFunc sixtap_4x4_ssse3 = vp8_sixtap_predict4x4_ssse3;
 INSTANTIATE_TEST_CASE_P(
     SSSE3, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_ssse3),
-        make_tuple(8, 8, sixtap_8x8_ssse3),
-        make_tuple(8, 4, sixtap_8x4_ssse3),
-        make_tuple(4, 4, sixtap_4x4_ssse3)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_ssse3),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_ssse3),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_ssse3),
+        make_tuple(4, 4, &vp8_sixtap_predict4x4_ssse3)));
 #endif
 #if HAVE_MSA
-const SixtapPredictFunc sixtap_16x16_msa = vp8_sixtap_predict16x16_msa;
-const SixtapPredictFunc sixtap_8x8_msa = vp8_sixtap_predict8x8_msa;
-const SixtapPredictFunc sixtap_8x4_msa = vp8_sixtap_predict8x4_msa;
-const SixtapPredictFunc sixtap_4x4_msa = vp8_sixtap_predict4x4_msa;
 INSTANTIATE_TEST_CASE_P(
     MSA, SixtapPredictTest, ::testing::Values(
-        make_tuple(16, 16, sixtap_16x16_msa),
-        make_tuple(8, 8, sixtap_8x8_msa),
-        make_tuple(8, 4, sixtap_8x4_msa),
-        make_tuple(4, 4, sixtap_4x4_msa)));
+        make_tuple(16, 16, &vp8_sixtap_predict16x16_msa),
+        make_tuple(8, 8, &vp8_sixtap_predict8x8_msa),
+        make_tuple(8, 4, &vp8_sixtap_predict8x4_msa),
+        make_tuple(4, 4, &vp8_sixtap_predict4x4_msa)));
 #endif
 }  // namespace
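
Every hunk in this patch applies the same mechanical change: the intermediate const function-pointer globals (for example the SixtapPredictFunc constants above) are deleted and the kernel's address is taken inline with '&' inside make_tuple. The following is a minimal sketch, outside the patch itself, of why the two spellings are interchangeable; FilterFunc and filter_4x4_c are hypothetical stand-ins invented for illustration, not libvpx declarations.

#include <tuple>
#include <type_traits>

// Hypothetical stand-ins for a libvpx-style C kernel and its pointer typedef.
typedef void (*FilterFunc)(const unsigned char *src, unsigned char *dst);

void filter_4x4_c(const unsigned char *src, unsigned char *dst) {
  for (int i = 0; i < 16; ++i) dst[i] = src[i];  // trivial body for the sketch
}

// Old style (the lines removed by this patch): bind the kernel to a named
// const function pointer first, then hand that variable to make_tuple().
const FilterFunc filter_4x4_ptr = filter_4x4_c;
const auto old_style = std::make_tuple(4, 4, filter_4x4_ptr);

// New style: take the address inline with '&'. Template argument deduction
// yields the same function-pointer element type either way, so only the
// intermediate global goes away.
const auto new_style = std::make_tuple(4, 4, &filter_4x4_c);

static_assert(std::is_same<decltype(old_style), decltype(new_style)>::value,
              "both spellings deduce tuple<int, int, void (*)(...)>");
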
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -747,115 +747,63 @@
 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
                         ::testing::Values(vpx_get_mb_ss_c));
 
-const Get4x4SseFunc get4x4sse_cs_c = vpx_get4x4sse_cs_c;
 INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
-                        ::testing::Values(make_tuple(2, 2, get4x4sse_cs_c)));
+                        ::testing::Values(make_tuple(2, 2,
+                                                     &vpx_get4x4sse_cs_c)));
 
-const VarianceMxNFunc mse16x16_c = vpx_mse16x16_c;
-const VarianceMxNFunc mse16x8_c = vpx_mse16x8_c;
-const VarianceMxNFunc mse8x16_c = vpx_mse8x16_c;
-const VarianceMxNFunc mse8x8_c = vpx_mse8x8_c;
 INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_c),
-                                          make_tuple(4, 3, mse16x8_c),
-                                          make_tuple(3, 4, mse8x16_c),
-                                          make_tuple(3, 3, mse8x8_c)));
+                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_c),
+                                          make_tuple(4, 3, &vpx_mse16x8_c),
+                                          make_tuple(3, 4, &vpx_mse8x16_c),
+                                          make_tuple(3, 3, &vpx_mse8x8_c)));
 
-const VarianceMxNFunc variance64x64_c = vpx_variance64x64_c;
-const VarianceMxNFunc variance64x32_c = vpx_variance64x32_c;
-const VarianceMxNFunc variance32x64_c = vpx_variance32x64_c;
-const VarianceMxNFunc variance32x32_c = vpx_variance32x32_c;
-const VarianceMxNFunc variance32x16_c = vpx_variance32x16_c;
-const VarianceMxNFunc variance16x32_c = vpx_variance16x32_c;
-const VarianceMxNFunc variance16x16_c = vpx_variance16x16_c;
-const VarianceMxNFunc variance16x8_c = vpx_variance16x8_c;
-const VarianceMxNFunc variance8x16_c = vpx_variance8x16_c;
-const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c;
-const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c;
-const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c;
-const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxVarianceTest,
-    ::testing::Values(make_tuple(6, 6, variance64x64_c, 0),
-                      make_tuple(6, 5, variance64x32_c, 0),
-                      make_tuple(5, 6, variance32x64_c, 0),
-                      make_tuple(5, 5, variance32x32_c, 0),
-                      make_tuple(5, 4, variance32x16_c, 0),
-                      make_tuple(4, 5, variance16x32_c, 0),
-                      make_tuple(4, 4, variance16x16_c, 0),
-                      make_tuple(4, 3, variance16x8_c, 0),
-                      make_tuple(3, 4, variance8x16_c, 0),
-                      make_tuple(3, 3, variance8x8_c, 0),
-                      make_tuple(3, 2, variance8x4_c, 0),
-                      make_tuple(2, 3, variance4x8_c, 0),
-                      make_tuple(2, 2, variance4x4_c, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_c, 0),
+                      make_tuple(6, 5, &vpx_variance64x32_c, 0),
+                      make_tuple(5, 6, &vpx_variance32x64_c, 0),
+                      make_tuple(5, 5, &vpx_variance32x32_c, 0),
+                      make_tuple(5, 4, &vpx_variance32x16_c, 0),
+                      make_tuple(4, 5, &vpx_variance16x32_c, 0),
+                      make_tuple(4, 4, &vpx_variance16x16_c, 0),
+                      make_tuple(4, 3, &vpx_variance16x8_c, 0),
+                      make_tuple(3, 4, &vpx_variance8x16_c, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_c, 0),
+                      make_tuple(3, 2, &vpx_variance8x4_c, 0),
+                      make_tuple(2, 3, &vpx_variance4x8_c, 0),
+                      make_tuple(2, 2, &vpx_variance4x4_c, 0)));
 
-const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c;
-const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c;
-const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c;
-const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c;
-const SubpixVarMxNFunc subpel_var32x16_c = vpx_sub_pixel_variance32x16_c;
-const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c;
-const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c;
-const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c;
-const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c;
-const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c;
-const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c;
-const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c;
-const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0),
-                      make_tuple(6, 5, subpel_var64x32_c, 0),
-                      make_tuple(5, 6, subpel_var32x64_c, 0),
-                      make_tuple(5, 5, subpel_var32x32_c, 0),
-                      make_tuple(5, 4, subpel_var32x16_c, 0),
-                      make_tuple(4, 5, subpel_var16x32_c, 0),
-                      make_tuple(4, 4, subpel_var16x16_c, 0),
-                      make_tuple(4, 3, subpel_var16x8_c, 0),
-                      make_tuple(3, 4, subpel_var8x16_c, 0),
-                      make_tuple(3, 3, subpel_var8x8_c, 0),
-                      make_tuple(3, 2, subpel_var8x4_c, 0),
-                      make_tuple(2, 3, subpel_var4x8_c, 0),
-                      make_tuple(2, 2, subpel_var4x4_c, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_c, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_c, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_c, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_c, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_c, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_c, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_c, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_c, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_c, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_c, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_c, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_c, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_c, 0)));
 
-const SubpixAvgVarMxNFunc subpel_avg_var64x64_c =
-    vpx_sub_pixel_avg_variance64x64_c;
-const SubpixAvgVarMxNFunc subpel_avg_var64x32_c =
-    vpx_sub_pixel_avg_variance64x32_c;
-const SubpixAvgVarMxNFunc subpel_avg_var32x64_c =
-    vpx_sub_pixel_avg_variance32x64_c;
-const SubpixAvgVarMxNFunc subpel_avg_var32x32_c =
-    vpx_sub_pixel_avg_variance32x32_c;
-const SubpixAvgVarMxNFunc subpel_avg_var32x16_c =
-    vpx_sub_pixel_avg_variance32x16_c;
-const SubpixAvgVarMxNFunc subpel_avg_var16x32_c =
-    vpx_sub_pixel_avg_variance16x32_c;
-const SubpixAvgVarMxNFunc subpel_avg_var16x16_c =
-    vpx_sub_pixel_avg_variance16x16_c;
-const SubpixAvgVarMxNFunc subpel_avg_var16x8_c =
-    vpx_sub_pixel_avg_variance16x8_c;
-const SubpixAvgVarMxNFunc subpel_avg_var8x16_c =
-    vpx_sub_pixel_avg_variance8x16_c;
-const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c;
-const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c;
-const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c;
-const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0),
-                      make_tuple(6, 5, subpel_avg_var64x32_c, 0),
-                      make_tuple(5, 6, subpel_avg_var32x64_c, 0),
-                      make_tuple(5, 5, subpel_avg_var32x32_c, 0),
-                      make_tuple(5, 4, subpel_avg_var32x16_c, 0),
-                      make_tuple(4, 5, subpel_avg_var16x32_c, 0),
-                      make_tuple(4, 4, subpel_avg_var16x16_c, 0),
-                      make_tuple(4, 3, subpel_avg_var16x8_c, 0),
-                      make_tuple(3, 4, subpel_avg_var8x16_c, 0),
-                      make_tuple(3, 3, subpel_avg_var8x8_c, 0),
-                      make_tuple(3, 2, subpel_avg_var8x4_c, 0),
-                      make_tuple(2, 3, subpel_avg_var4x8_c, 0),
-                      make_tuple(2, 2, subpel_avg_var4x4_c, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_c, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_c, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_c, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_c, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_c, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_c, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_c, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_c, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_c, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_c, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_c, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_c, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_c, 0)));
 
 #if CONFIG_VP9_HIGHBITDEPTH
 typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
@@ -875,391 +823,173 @@
 TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }
 
 /* TODO(debargha): This test does not support the highbd version
-const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c;
-const VarianceMxNFunc highbd_12_mse16x8_c = vpx_highbd_12_mse16x8_c;
-const VarianceMxNFunc highbd_12_mse8x16_c = vpx_highbd_12_mse8x16_c;
-const VarianceMxNFunc highbd_12_mse8x8_c = vpx_highbd_12_mse8x8_c;
-
-const VarianceMxNFunc highbd_10_mse16x16_c = vpx_highbd_10_mse16x16_c;
-const VarianceMxNFunc highbd_10_mse16x8_c = vpx_highbd_10_mse16x8_c;
-const VarianceMxNFunc highbd_10_mse8x16_c = vpx_highbd_10_mse8x16_c;
-const VarianceMxNFunc highbd_10_mse8x8_c = vpx_highbd_10_mse8x8_c;
-
-const VarianceMxNFunc highbd_8_mse16x16_c = vpx_highbd_8_mse16x16_c;
-const VarianceMxNFunc highbd_8_mse16x8_c = vpx_highbd_8_mse16x8_c;
-const VarianceMxNFunc highbd_8_mse8x16_c = vpx_highbd_8_mse8x16_c;
-const VarianceMxNFunc highbd_8_mse8x8_c = vpx_highbd_8_mse8x8_c;
 INSTANTIATE_TEST_CASE_P(
-    C, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_c),
-                                        make_tuple(4, 4, highbd_12_mse16x8_c),
-                                        make_tuple(4, 4, highbd_12_mse8x16_c),
-                                        make_tuple(4, 4, highbd_12_mse8x8_c),
-                                        make_tuple(4, 4, highbd_10_mse16x16_c),
-                                        make_tuple(4, 4, highbd_10_mse16x8_c),
-                                        make_tuple(4, 4, highbd_10_mse8x16_c),
-                                        make_tuple(4, 4, highbd_10_mse8x8_c),
-                                        make_tuple(4, 4, highbd_8_mse16x16_c),
-                                        make_tuple(4, 4, highbd_8_mse16x8_c),
-                                        make_tuple(4, 4, highbd_8_mse8x16_c),
-                                        make_tuple(4, 4, highbd_8_mse8x8_c)));
+    C, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_c),
+                      make_tuple(4, 4, &vpx_highbd_12_mse16x8_c),
+                      make_tuple(4, 4, &vpx_highbd_12_mse8x16_c),
+                      make_tuple(4, 4, &vpx_highbd_12_mse8x8_c),
+                      make_tuple(4, 4, &vpx_highbd_10_mse16x16_c),
+                      make_tuple(4, 4, &vpx_highbd_10_mse16x8_c),
+                      make_tuple(4, 4, &vpx_highbd_10_mse8x16_c),
+                      make_tuple(4, 4, &vpx_highbd_10_mse8x8_c),
+                      make_tuple(4, 4, &vpx_highbd_8_mse16x16_c),
+                      make_tuple(4, 4, &vpx_highbd_8_mse16x8_c),
+                      make_tuple(4, 4, &vpx_highbd_8_mse8x16_c),
+                      make_tuple(4, 4, &vpx_highbd_8_mse8x8_c)));
 */
 
-const VarianceMxNFunc highbd_12_variance64x64_c = vpx_highbd_12_variance64x64_c;
-const VarianceMxNFunc highbd_12_variance64x32_c = vpx_highbd_12_variance64x32_c;
-const VarianceMxNFunc highbd_12_variance32x64_c = vpx_highbd_12_variance32x64_c;
-const VarianceMxNFunc highbd_12_variance32x32_c = vpx_highbd_12_variance32x32_c;
-const VarianceMxNFunc highbd_12_variance32x16_c = vpx_highbd_12_variance32x16_c;
-const VarianceMxNFunc highbd_12_variance16x32_c = vpx_highbd_12_variance16x32_c;
-const VarianceMxNFunc highbd_12_variance16x16_c = vpx_highbd_12_variance16x16_c;
-const VarianceMxNFunc highbd_12_variance16x8_c = vpx_highbd_12_variance16x8_c;
-const VarianceMxNFunc highbd_12_variance8x16_c = vpx_highbd_12_variance8x16_c;
-const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c;
-const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c;
-const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c;
-const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c;
-const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c;
-const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c;
-const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c;
-const VarianceMxNFunc highbd_10_variance32x32_c = vpx_highbd_10_variance32x32_c;
-const VarianceMxNFunc highbd_10_variance32x16_c = vpx_highbd_10_variance32x16_c;
-const VarianceMxNFunc highbd_10_variance16x32_c = vpx_highbd_10_variance16x32_c;
-const VarianceMxNFunc highbd_10_variance16x16_c = vpx_highbd_10_variance16x16_c;
-const VarianceMxNFunc highbd_10_variance16x8_c = vpx_highbd_10_variance16x8_c;
-const VarianceMxNFunc highbd_10_variance8x16_c = vpx_highbd_10_variance8x16_c;
-const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c;
-const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c;
-const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c;
-const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c;
-const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c;
-const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c;
-const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c;
-const VarianceMxNFunc highbd_8_variance32x32_c = vpx_highbd_8_variance32x32_c;
-const VarianceMxNFunc highbd_8_variance32x16_c = vpx_highbd_8_variance32x16_c;
-const VarianceMxNFunc highbd_8_variance16x32_c = vpx_highbd_8_variance16x32_c;
-const VarianceMxNFunc highbd_8_variance16x16_c = vpx_highbd_8_variance16x16_c;
-const VarianceMxNFunc highbd_8_variance16x8_c = vpx_highbd_8_variance16x8_c;
-const VarianceMxNFunc highbd_8_variance8x16_c = vpx_highbd_8_variance8x16_c;
-const VarianceMxNFunc highbd_8_variance8x8_c = vpx_highbd_8_variance8x8_c;
-const VarianceMxNFunc highbd_8_variance8x4_c = vpx_highbd_8_variance8x4_c;
-const VarianceMxNFunc highbd_8_variance4x8_c = vpx_highbd_8_variance4x8_c;
-const VarianceMxNFunc highbd_8_variance4x4_c = vpx_highbd_8_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxHBDVarianceTest,
-    ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_c, 12),
-                      make_tuple(6, 5, highbd_12_variance64x32_c, 12),
-                      make_tuple(5, 6, highbd_12_variance32x64_c, 12),
-                      make_tuple(5, 5, highbd_12_variance32x32_c, 12),
-                      make_tuple(5, 4, highbd_12_variance32x16_c, 12),
-                      make_tuple(4, 5, highbd_12_variance16x32_c, 12),
-                      make_tuple(4, 4, highbd_12_variance16x16_c, 12),
-                      make_tuple(4, 3, highbd_12_variance16x8_c, 12),
-                      make_tuple(3, 4, highbd_12_variance8x16_c, 12),
-                      make_tuple(3, 3, highbd_12_variance8x8_c, 12),
-                      make_tuple(3, 2, highbd_12_variance8x4_c, 12),
-                      make_tuple(2, 3, highbd_12_variance4x8_c, 12),
-                      make_tuple(2, 2, highbd_12_variance4x4_c, 12),
-                      make_tuple(6, 6, highbd_10_variance64x64_c, 10),
-                      make_tuple(6, 5, highbd_10_variance64x32_c, 10),
-                      make_tuple(5, 6, highbd_10_variance32x64_c, 10),
-                      make_tuple(5, 5, highbd_10_variance32x32_c, 10),
-                      make_tuple(5, 4, highbd_10_variance32x16_c, 10),
-                      make_tuple(4, 5, highbd_10_variance16x32_c, 10),
-                      make_tuple(4, 4, highbd_10_variance16x16_c, 10),
-                      make_tuple(4, 3, highbd_10_variance16x8_c, 10),
-                      make_tuple(3, 4, highbd_10_variance8x16_c, 10),
-                      make_tuple(3, 3, highbd_10_variance8x8_c, 10),
-                      make_tuple(3, 2, highbd_10_variance8x4_c, 10),
-                      make_tuple(2, 3, highbd_10_variance4x8_c, 10),
-                      make_tuple(2, 2, highbd_10_variance4x4_c, 10),
-                      make_tuple(6, 6, highbd_8_variance64x64_c, 8),
-                      make_tuple(6, 5, highbd_8_variance64x32_c, 8),
-                      make_tuple(5, 6, highbd_8_variance32x64_c, 8),
-                      make_tuple(5, 5, highbd_8_variance32x32_c, 8),
-                      make_tuple(5, 4, highbd_8_variance32x16_c, 8),
-                      make_tuple(4, 5, highbd_8_variance16x32_c, 8),
-                      make_tuple(4, 4, highbd_8_variance16x16_c, 8),
-                      make_tuple(4, 3, highbd_8_variance16x8_c, 8),
-                      make_tuple(3, 4, highbd_8_variance8x16_c, 8),
-                      make_tuple(3, 3, highbd_8_variance8x8_c, 8),
-                      make_tuple(3, 2, highbd_8_variance8x4_c, 8),
-                      make_tuple(2, 3, highbd_8_variance4x8_c, 8),
-                      make_tuple(2, 2, highbd_8_variance4x4_c, 8)));
+    ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_c, 12),
+                      make_tuple(6, 5, &vpx_highbd_12_variance64x32_c, 12),
+                      make_tuple(5, 6, &vpx_highbd_12_variance32x64_c, 12),
+                      make_tuple(5, 5, &vpx_highbd_12_variance32x32_c, 12),
+                      make_tuple(5, 4, &vpx_highbd_12_variance32x16_c, 12),
+                      make_tuple(4, 5, &vpx_highbd_12_variance16x32_c, 12),
+                      make_tuple(4, 4, &vpx_highbd_12_variance16x16_c, 12),
+                      make_tuple(4, 3, &vpx_highbd_12_variance16x8_c, 12),
+                      make_tuple(3, 4, &vpx_highbd_12_variance8x16_c, 12),
+                      make_tuple(3, 3, &vpx_highbd_12_variance8x8_c, 12),
+                      make_tuple(3, 2, &vpx_highbd_12_variance8x4_c, 12),
+                      make_tuple(2, 3, &vpx_highbd_12_variance4x8_c, 12),
+                      make_tuple(2, 2, &vpx_highbd_12_variance4x4_c, 12),
+                      make_tuple(6, 6, &vpx_highbd_10_variance64x64_c, 10),
+                      make_tuple(6, 5, &vpx_highbd_10_variance64x32_c, 10),
+                      make_tuple(5, 6, &vpx_highbd_10_variance32x64_c, 10),
+                      make_tuple(5, 5, &vpx_highbd_10_variance32x32_c, 10),
+                      make_tuple(5, 4, &vpx_highbd_10_variance32x16_c, 10),
+                      make_tuple(4, 5, &vpx_highbd_10_variance16x32_c, 10),
+                      make_tuple(4, 4, &vpx_highbd_10_variance16x16_c, 10),
+                      make_tuple(4, 3, &vpx_highbd_10_variance16x8_c, 10),
+                      make_tuple(3, 4, &vpx_highbd_10_variance8x16_c, 10),
+                      make_tuple(3, 3, &vpx_highbd_10_variance8x8_c, 10),
+                      make_tuple(3, 2, &vpx_highbd_10_variance8x4_c, 10),
+                      make_tuple(2, 3, &vpx_highbd_10_variance4x8_c, 10),
+                      make_tuple(2, 2, &vpx_highbd_10_variance4x4_c, 10),
+                      make_tuple(6, 6, &vpx_highbd_8_variance64x64_c, 8),
+                      make_tuple(6, 5, &vpx_highbd_8_variance64x32_c, 8),
+                      make_tuple(5, 6, &vpx_highbd_8_variance32x64_c, 8),
+                      make_tuple(5, 5, &vpx_highbd_8_variance32x32_c, 8),
+                      make_tuple(5, 4, &vpx_highbd_8_variance32x16_c, 8),
+                      make_tuple(4, 5, &vpx_highbd_8_variance16x32_c, 8),
+                      make_tuple(4, 4, &vpx_highbd_8_variance16x16_c, 8),
+                      make_tuple(4, 3, &vpx_highbd_8_variance16x8_c, 8),
+                      make_tuple(3, 4, &vpx_highbd_8_variance8x16_c, 8),
+                      make_tuple(3, 3, &vpx_highbd_8_variance8x8_c, 8),
+                      make_tuple(3, 2, &vpx_highbd_8_variance8x4_c, 8),
+                      make_tuple(2, 3, &vpx_highbd_8_variance4x8_c, 8),
+                      make_tuple(2, 2, &vpx_highbd_8_variance4x4_c, 8)));
 
-const SubpixVarMxNFunc highbd_8_subpel_var64x64_c =
-    vpx_highbd_8_sub_pixel_variance64x64_c;
-const SubpixVarMxNFunc highbd_8_subpel_var64x32_c =
-    vpx_highbd_8_sub_pixel_variance64x32_c;
-const SubpixVarMxNFunc highbd_8_subpel_var32x64_c =
-    vpx_highbd_8_sub_pixel_variance32x64_c;
-const SubpixVarMxNFunc highbd_8_subpel_var32x32_c =
-    vpx_highbd_8_sub_pixel_variance32x32_c;
-const SubpixVarMxNFunc highbd_8_subpel_var32x16_c =
-    vpx_highbd_8_sub_pixel_variance32x16_c;
-const SubpixVarMxNFunc highbd_8_subpel_var16x32_c =
-    vpx_highbd_8_sub_pixel_variance16x32_c;
-const SubpixVarMxNFunc highbd_8_subpel_var16x16_c =
-    vpx_highbd_8_sub_pixel_variance16x16_c;
-const SubpixVarMxNFunc highbd_8_subpel_var16x8_c =
-    vpx_highbd_8_sub_pixel_variance16x8_c;
-const SubpixVarMxNFunc highbd_8_subpel_var8x16_c =
-    vpx_highbd_8_sub_pixel_variance8x16_c;
-const SubpixVarMxNFunc highbd_8_subpel_var8x8_c =
-    vpx_highbd_8_sub_pixel_variance8x8_c;
-const SubpixVarMxNFunc highbd_8_subpel_var8x4_c =
-    vpx_highbd_8_sub_pixel_variance8x4_c;
-const SubpixVarMxNFunc highbd_8_subpel_var4x8_c =
-    vpx_highbd_8_sub_pixel_variance4x8_c;
-const SubpixVarMxNFunc highbd_8_subpel_var4x4_c =
-    vpx_highbd_8_sub_pixel_variance4x4_c;
-const SubpixVarMxNFunc highbd_10_subpel_var64x64_c =
-    vpx_highbd_10_sub_pixel_variance64x64_c;
-const SubpixVarMxNFunc highbd_10_subpel_var64x32_c =
-    vpx_highbd_10_sub_pixel_variance64x32_c;
-const SubpixVarMxNFunc highbd_10_subpel_var32x64_c =
-    vpx_highbd_10_sub_pixel_variance32x64_c;
-const SubpixVarMxNFunc highbd_10_subpel_var32x32_c =
-    vpx_highbd_10_sub_pixel_variance32x32_c;
-const SubpixVarMxNFunc highbd_10_subpel_var32x16_c =
-    vpx_highbd_10_sub_pixel_variance32x16_c;
-const SubpixVarMxNFunc highbd_10_subpel_var16x32_c =
-    vpx_highbd_10_sub_pixel_variance16x32_c;
-const SubpixVarMxNFunc highbd_10_subpel_var16x16_c =
-    vpx_highbd_10_sub_pixel_variance16x16_c;
-const SubpixVarMxNFunc highbd_10_subpel_var16x8_c =
-    vpx_highbd_10_sub_pixel_variance16x8_c;
-const SubpixVarMxNFunc highbd_10_subpel_var8x16_c =
-    vpx_highbd_10_sub_pixel_variance8x16_c;
-const SubpixVarMxNFunc highbd_10_subpel_var8x8_c =
-    vpx_highbd_10_sub_pixel_variance8x8_c;
-const SubpixVarMxNFunc highbd_10_subpel_var8x4_c =
-    vpx_highbd_10_sub_pixel_variance8x4_c;
-const SubpixVarMxNFunc highbd_10_subpel_var4x8_c =
-    vpx_highbd_10_sub_pixel_variance4x8_c;
-const SubpixVarMxNFunc highbd_10_subpel_var4x4_c =
-    vpx_highbd_10_sub_pixel_variance4x4_c;
-const SubpixVarMxNFunc highbd_12_subpel_var64x64_c =
-    vpx_highbd_12_sub_pixel_variance64x64_c;
-const SubpixVarMxNFunc highbd_12_subpel_var64x32_c =
-    vpx_highbd_12_sub_pixel_variance64x32_c;
-const SubpixVarMxNFunc highbd_12_subpel_var32x64_c =
-    vpx_highbd_12_sub_pixel_variance32x64_c;
-const SubpixVarMxNFunc highbd_12_subpel_var32x32_c =
-    vpx_highbd_12_sub_pixel_variance32x32_c;
-const SubpixVarMxNFunc highbd_12_subpel_var32x16_c =
-    vpx_highbd_12_sub_pixel_variance32x16_c;
-const SubpixVarMxNFunc highbd_12_subpel_var16x32_c =
-    vpx_highbd_12_sub_pixel_variance16x32_c;
-const SubpixVarMxNFunc highbd_12_subpel_var16x16_c =
-    vpx_highbd_12_sub_pixel_variance16x16_c;
-const SubpixVarMxNFunc highbd_12_subpel_var16x8_c =
-    vpx_highbd_12_sub_pixel_variance16x8_c;
-const SubpixVarMxNFunc highbd_12_subpel_var8x16_c =
-    vpx_highbd_12_sub_pixel_variance8x16_c;
-const SubpixVarMxNFunc highbd_12_subpel_var8x8_c =
-    vpx_highbd_12_sub_pixel_variance8x8_c;
-const SubpixVarMxNFunc highbd_12_subpel_var8x4_c =
-    vpx_highbd_12_sub_pixel_variance8x4_c;
-const SubpixVarMxNFunc highbd_12_subpel_var4x8_c =
-    vpx_highbd_12_sub_pixel_variance4x8_c;
-const SubpixVarMxNFunc highbd_12_subpel_var4x4_c =
-    vpx_highbd_12_sub_pixel_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxHBDSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8),
-                      make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8),
-                      make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8),
-                      make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8),
-                      make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8),
-                      make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8),
-                      make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8),
-                      make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8),
-                      make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8),
-                      make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8),
-                      make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8),
-                      make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8),
-                      make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8),
-                      make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10),
-                      make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10),
-                      make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10),
-                      make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10),
-                      make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10),
-                      make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10),
-                      make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10),
-                      make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10),
-                      make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10),
-                      make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10),
-                      make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10),
-                      make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10),
-                      make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10),
-                      make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12),
-                      make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12),
-                      make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12),
-                      make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12),
-                      make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12),
-                      make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12),
-                      make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12),
-                      make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12),
-                      make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12),
-                      make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12),
-                      make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12),
-                      make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12),
-                      make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12)));
+    ::testing::Values(
+        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_c, 8),
+        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_c, 8),
+        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_c, 8),
+        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_c, 8),
+        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_c, 8),
+        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_c, 8),
+        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_c, 8),
+        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_c, 8),
+        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_c, 8),
+        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_c, 8),
+        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_c, 8),
+        make_tuple(2, 3, &vpx_highbd_8_sub_pixel_variance4x8_c, 8),
+        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_variance4x4_c, 8),
+        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_c, 10),
+        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_c, 10),
+        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_c, 10),
+        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_c, 10),
+        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_c, 10),
+        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_c, 10),
+        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_c, 10),
+        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_c, 10),
+        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_c, 10),
+        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_c, 10),
+        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_c, 10),
+        make_tuple(2, 3, &vpx_highbd_10_sub_pixel_variance4x8_c, 10),
+        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_variance4x4_c, 10),
+        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_c, 12),
+        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_c, 12),
+        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_c, 12),
+        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_c, 12),
+        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_c, 12),
+        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_c, 12),
+        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_c, 12),
+        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_c, 12),
+        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_c, 12),
+        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_c, 12),
+        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_c, 12),
+        make_tuple(2, 3, &vpx_highbd_12_sub_pixel_variance4x8_c, 12),
+        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_variance4x4_c, 12)));
 
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c =
-    vpx_highbd_8_sub_pixel_avg_variance64x64_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c =
-    vpx_highbd_8_sub_pixel_avg_variance64x32_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c =
-    vpx_highbd_8_sub_pixel_avg_variance32x64_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c =
-    vpx_highbd_8_sub_pixel_avg_variance32x32_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c =
-    vpx_highbd_8_sub_pixel_avg_variance32x16_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c =
-    vpx_highbd_8_sub_pixel_avg_variance16x32_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c =
-    vpx_highbd_8_sub_pixel_avg_variance16x16_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c =
-    vpx_highbd_8_sub_pixel_avg_variance16x8_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c =
-    vpx_highbd_8_sub_pixel_avg_variance8x16_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c =
-    vpx_highbd_8_sub_pixel_avg_variance8x8_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c =
-    vpx_highbd_8_sub_pixel_avg_variance8x4_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c =
-    vpx_highbd_8_sub_pixel_avg_variance4x8_c;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c =
-    vpx_highbd_8_sub_pixel_avg_variance4x4_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c =
-    vpx_highbd_10_sub_pixel_avg_variance64x64_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c =
-    vpx_highbd_10_sub_pixel_avg_variance64x32_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c =
-    vpx_highbd_10_sub_pixel_avg_variance32x64_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c =
-    vpx_highbd_10_sub_pixel_avg_variance32x32_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c =
-    vpx_highbd_10_sub_pixel_avg_variance32x16_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c =
-    vpx_highbd_10_sub_pixel_avg_variance16x32_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c =
-    vpx_highbd_10_sub_pixel_avg_variance16x16_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c =
-    vpx_highbd_10_sub_pixel_avg_variance16x8_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c =
-    vpx_highbd_10_sub_pixel_avg_variance8x16_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c =
-    vpx_highbd_10_sub_pixel_avg_variance8x8_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c =
-    vpx_highbd_10_sub_pixel_avg_variance8x4_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c =
-    vpx_highbd_10_sub_pixel_avg_variance4x8_c;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c =
-    vpx_highbd_10_sub_pixel_avg_variance4x4_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c =
-    vpx_highbd_12_sub_pixel_avg_variance64x64_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c =
-    vpx_highbd_12_sub_pixel_avg_variance64x32_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c =
-    vpx_highbd_12_sub_pixel_avg_variance32x64_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c =
-    vpx_highbd_12_sub_pixel_avg_variance32x32_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c =
-    vpx_highbd_12_sub_pixel_avg_variance32x16_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c =
-    vpx_highbd_12_sub_pixel_avg_variance16x32_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c =
-    vpx_highbd_12_sub_pixel_avg_variance16x16_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c =
-    vpx_highbd_12_sub_pixel_avg_variance16x8_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c =
-    vpx_highbd_12_sub_pixel_avg_variance8x16_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c =
-    vpx_highbd_12_sub_pixel_avg_variance8x8_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c =
-    vpx_highbd_12_sub_pixel_avg_variance8x4_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c =
-    vpx_highbd_12_sub_pixel_avg_variance4x8_c;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c =
-    vpx_highbd_12_sub_pixel_avg_variance4x4_c;
 INSTANTIATE_TEST_CASE_P(
     C, VpxHBDSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8),
-        make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8),
-        make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8),
-        make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8),
-        make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8),
-        make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8),
-        make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8),
-        make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8),
-        make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8),
-        make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8),
-        make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8),
-        make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8),
-        make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8),
-        make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10),
-        make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10),
-        make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10),
-        make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10),
-        make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10),
-        make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10),
-        make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10),
-        make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10),
-        make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10),
-        make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10),
-        make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10),
-        make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10),
-        make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10),
-        make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12),
-        make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12),
-        make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12),
-        make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12),
-        make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12),
-        make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12),
-        make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12),
-        make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12),
-        make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12),
-        make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12),
-        make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12),
-        make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12),
-        make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12)));
+        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_c, 8),
+        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_c, 8),
+        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_c, 8),
+        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_c, 8),
+        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_c, 8),
+        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_c, 8),
+        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_c, 8),
+        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_c, 8),
+        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_c, 8),
+        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_c, 8),
+        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_c, 8),
+        make_tuple(2, 3, &vpx_highbd_8_sub_pixel_avg_variance4x8_c, 8),
+        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_avg_variance4x4_c, 8),
+        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_c, 10),
+        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_c, 10),
+        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_c, 10),
+        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_c, 10),
+        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_c, 10),
+        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_c, 10),
+        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_c, 10),
+        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_c, 10),
+        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_c, 10),
+        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_c, 10),
+        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_c, 10),
+        make_tuple(2, 3, &vpx_highbd_10_sub_pixel_avg_variance4x8_c, 10),
+        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_avg_variance4x4_c, 10),
+        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_c, 12),
+        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_c, 12),
+        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_c, 12),
+        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_c, 12),
+        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_c, 12),
+        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_c, 12),
+        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_c, 12),
+        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_c, 12),
+        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_c, 12),
+        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_c, 12),
+        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_c, 12),
+        make_tuple(2, 3, &vpx_highbd_12_sub_pixel_avg_variance4x8_c, 12),
+        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
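
In the highbitdepth instantiations above, the fourth make_tuple argument tracks the bit depth encoded in the function name (8, 10 or 12, with 0 or -1 used on the standard 8-bit paths). The sketch below shows, under stated assumptions, how a value-parameterized googletest fixture can consume such a tuple; the HbdVarianceSketchTest fixture, FakeVarianceFunc typedef and fake_variance_c kernel are invented for illustration, the include path depends on how googletest is vendored, and only TestWithParam, TEST_P, GetParam and INSTANTIATE_TEST_CASE_P are real gtest API.

#include <tuple>
#include "gtest/gtest.h"

// Hypothetical kernel type and kernel; not libvpx declarations.
typedef unsigned int (*FakeVarianceFunc)(const unsigned char *src,
                                         const unsigned char *ref);
unsigned int fake_variance_c(const unsigned char *src,
                             const unsigned char *ref) {
  (void)src;
  (void)ref;
  return 0;
}

typedef std::tuple<int, int, FakeVarianceFunc, int> SketchParam;

class HbdVarianceSketchTest : public ::testing::TestWithParam<SketchParam> {
 protected:
  virtual void SetUp() {
    log2_width_ = std::get<0>(GetParam());
    log2_height_ = std::get<1>(GetParam());
    func_ = std::get<2>(GetParam());
    bit_depth_ = std::get<3>(GetParam());  // 8, 10 or 12 in the lists above
  }
  int log2_width_, log2_height_, bit_depth_;
  FakeVarianceFunc func_;
};

TEST_P(HbdVarianceSketchTest, Runs) {
  unsigned char src[64 * 64] = { 0 };
  unsigned char ref[64 * 64] = { 0 };
  EXPECT_EQ(0u, func_(src, ref));
}

INSTANTIATE_TEST_CASE_P(
    Sketch, HbdVarianceSketchTest,
    ::testing::Values(std::make_tuple(6, 6, &fake_variance_c, 12)));
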
 
 #if HAVE_MMX
-const VarianceMxNFunc mse16x16_mmx = vpx_mse16x16_mmx;
 INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_mmx)));
+                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_mmx)));
 
 INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest,
                         ::testing::Values(vpx_get_mb_ss_mmx));
 
-const VarianceMxNFunc variance16x16_mmx = vpx_variance16x16_mmx;
-const VarianceMxNFunc variance16x8_mmx = vpx_variance16x8_mmx;
-const VarianceMxNFunc variance8x16_mmx = vpx_variance8x16_mmx;
-const VarianceMxNFunc variance8x8_mmx = vpx_variance8x8_mmx;
-const VarianceMxNFunc variance4x4_mmx = vpx_variance4x4_mmx;
 INSTANTIATE_TEST_CASE_P(
     MMX, VpxVarianceTest,
-    ::testing::Values(make_tuple(4, 4, variance16x16_mmx, 0),
-                      make_tuple(4, 3, variance16x8_mmx, 0),
-                      make_tuple(3, 4, variance8x16_mmx, 0),
-                      make_tuple(3, 3, variance8x8_mmx, 0),
-                      make_tuple(2, 2, variance4x4_mmx, 0)));
+    ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_mmx, 0),
+                      make_tuple(4, 3, &vpx_variance16x8_mmx, 0),
+                      make_tuple(3, 4, &vpx_variance8x16_mmx, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_mmx, 0),
+                      make_tuple(2, 2, &vpx_variance4x4_mmx, 0)));
 
-const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx;
-const SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx;
-const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx;
-const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx;
-const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx;
 INSTANTIATE_TEST_CASE_P(
     MMX, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0),
-                      make_tuple(4, 3, subpel_var16x8_mmx, 0),
-                      make_tuple(3, 4, subpel_var8x16_mmx, 0),
-                      make_tuple(3, 3, subpel_var8x8_mmx, 0),
-                      make_tuple(2, 2, subpel_var4x4_mmx, 0)));
+    ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_mmx, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_mmx, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_mmx, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_mmx, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_mmx, 0)));
 #endif  // HAVE_MMX
 
 #if HAVE_SSE2
@@ -1266,458 +996,188 @@
 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
                         ::testing::Values(vpx_get_mb_ss_sse2));
 
-const VarianceMxNFunc mse16x16_sse2 = vpx_mse16x16_sse2;
-const VarianceMxNFunc mse16x8_sse2 = vpx_mse16x8_sse2;
-const VarianceMxNFunc mse8x16_sse2 = vpx_mse8x16_sse2;
-const VarianceMxNFunc mse8x8_sse2 = vpx_mse8x8_sse2;
 INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_sse2),
-                                          make_tuple(4, 3, mse16x8_sse2),
-                                          make_tuple(3, 4, mse8x16_sse2),
-                                          make_tuple(3, 3, mse8x8_sse2)));
+                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_sse2),
+                                          make_tuple(4, 3, &vpx_mse16x8_sse2),
+                                          make_tuple(3, 4, &vpx_mse8x16_sse2),
+                                          make_tuple(3, 3, &vpx_mse8x8_sse2)));
 
-const VarianceMxNFunc variance64x64_sse2 = vpx_variance64x64_sse2;
-const VarianceMxNFunc variance64x32_sse2 = vpx_variance64x32_sse2;
-const VarianceMxNFunc variance32x64_sse2 = vpx_variance32x64_sse2;
-const VarianceMxNFunc variance32x32_sse2 = vpx_variance32x32_sse2;
-const VarianceMxNFunc variance32x16_sse2 = vpx_variance32x16_sse2;
-const VarianceMxNFunc variance16x32_sse2 = vpx_variance16x32_sse2;
-const VarianceMxNFunc variance16x16_sse2 = vpx_variance16x16_sse2;
-const VarianceMxNFunc variance16x8_sse2 = vpx_variance16x8_sse2;
-const VarianceMxNFunc variance8x16_sse2 = vpx_variance8x16_sse2;
-const VarianceMxNFunc variance8x8_sse2 = vpx_variance8x8_sse2;
-const VarianceMxNFunc variance8x4_sse2 = vpx_variance8x4_sse2;
-const VarianceMxNFunc variance4x8_sse2 = vpx_variance4x8_sse2;
-const VarianceMxNFunc variance4x4_sse2 = vpx_variance4x4_sse2;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxVarianceTest,
-    ::testing::Values(make_tuple(6, 6, variance64x64_sse2, 0),
-                      make_tuple(6, 5, variance64x32_sse2, 0),
-                      make_tuple(5, 6, variance32x64_sse2, 0),
-                      make_tuple(5, 5, variance32x32_sse2, 0),
-                      make_tuple(5, 4, variance32x16_sse2, 0),
-                      make_tuple(4, 5, variance16x32_sse2, 0),
-                      make_tuple(4, 4, variance16x16_sse2, 0),
-                      make_tuple(4, 3, variance16x8_sse2, 0),
-                      make_tuple(3, 4, variance8x16_sse2, 0),
-                      make_tuple(3, 3, variance8x8_sse2, 0),
-                      make_tuple(3, 2, variance8x4_sse2, 0),
-                      make_tuple(2, 3, variance4x8_sse2, 0),
-                      make_tuple(2, 2, variance4x4_sse2, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_sse2, 0),
+                      make_tuple(6, 5, &vpx_variance64x32_sse2, 0),
+                      make_tuple(5, 6, &vpx_variance32x64_sse2, 0),
+                      make_tuple(5, 5, &vpx_variance32x32_sse2, 0),
+                      make_tuple(5, 4, &vpx_variance32x16_sse2, 0),
+                      make_tuple(4, 5, &vpx_variance16x32_sse2, 0),
+                      make_tuple(4, 4, &vpx_variance16x16_sse2, 0),
+                      make_tuple(4, 3, &vpx_variance16x8_sse2, 0),
+                      make_tuple(3, 4, &vpx_variance8x16_sse2, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_sse2, 0),
+                      make_tuple(3, 2, &vpx_variance8x4_sse2, 0),
+                      make_tuple(2, 3, &vpx_variance4x8_sse2, 0),
+                      make_tuple(2, 2, &vpx_variance4x4_sse2, 0)));
 
 #if CONFIG_USE_X86INC
-const SubpixVarMxNFunc subpel_variance64x64_sse2 =
-    vpx_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc subpel_variance64x32_sse2 =
-    vpx_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc subpel_variance32x64_sse2 =
-    vpx_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc subpel_variance32x32_sse2 =
-    vpx_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc subpel_variance32x16_sse2 =
-    vpx_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc subpel_variance16x32_sse2 =
-    vpx_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc subpel_variance16x16_sse2 =
-    vpx_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc subpel_variance16x8_sse2 =
-    vpx_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc subpel_variance8x16_sse2 =
-    vpx_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse;
-const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0),
-                      make_tuple(6, 5, subpel_variance64x32_sse2, 0),
-                      make_tuple(5, 6, subpel_variance32x64_sse2, 0),
-                      make_tuple(5, 5, subpel_variance32x32_sse2, 0),
-                      make_tuple(5, 4, subpel_variance32x16_sse2, 0),
-                      make_tuple(4, 5, subpel_variance16x32_sse2, 0),
-                      make_tuple(4, 4, subpel_variance16x16_sse2, 0),
-                      make_tuple(4, 3, subpel_variance16x8_sse2, 0),
-                      make_tuple(3, 4, subpel_variance8x16_sse2, 0),
-                      make_tuple(3, 3, subpel_variance8x8_sse2, 0),
-                      make_tuple(3, 2, subpel_variance8x4_sse2, 0),
-                      make_tuple(2, 3, subpel_variance4x8_sse, 0),
-                      make_tuple(2, 2, subpel_variance4x4_sse, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_sse2, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_sse2, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_sse2, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_sse2, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_sse2, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_sse2, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_sse2, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_sse2, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_sse2, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_sse2, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_sse2, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse, 0)));
 
-const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 =
-    vpx_sub_pixel_avg_variance64x64_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 =
-    vpx_sub_pixel_avg_variance64x32_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 =
-    vpx_sub_pixel_avg_variance32x64_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 =
-    vpx_sub_pixel_avg_variance32x32_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 =
-    vpx_sub_pixel_avg_variance32x16_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 =
-    vpx_sub_pixel_avg_variance16x32_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 =
-    vpx_sub_pixel_avg_variance16x16_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 =
-    vpx_sub_pixel_avg_variance16x8_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 =
-    vpx_sub_pixel_avg_variance8x16_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x8_sse2 =
-    vpx_sub_pixel_avg_variance8x8_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 =
-    vpx_sub_pixel_avg_variance8x4_sse2;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse =
-    vpx_sub_pixel_avg_variance4x8_sse;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse =
-    vpx_sub_pixel_avg_variance4x4_sse;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxSubpelAvgVarianceTest,
     ::testing::Values(
-                      make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0),
-                      make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
-                      make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
-                      make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
-                      make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
-                      make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
-                      make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
-                      make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
-                      make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
-                      make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
-                      make_tuple(2, 2, subpel_avg_variance4x4_sse, 0)));
+        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_sse2, 0),
+        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_sse2, 0),
+        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_sse2, 0),
+        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_sse2, 0),
+        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_sse2, 0),
+        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_sse2, 0),
+        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_sse2, 0),
+        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_sse2, 0),
+        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_sse2, 0),
+        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_sse2, 0),
+        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_sse2, 0),
+        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse, 0),
+        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse, 0)));
 #endif  // CONFIG_USE_X86INC
 
 #if CONFIG_VP9_HIGHBITDEPTH
 /* TODO(debargha): This test does not support the highbd version
-const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2;
-const VarianceMxNFunc highbd_12_mse16x8_sse2 = vpx_highbd_12_mse16x8_sse2;
-const VarianceMxNFunc highbd_12_mse8x16_sse2 = vpx_highbd_12_mse8x16_sse2;
-const VarianceMxNFunc highbd_12_mse8x8_sse2 = vpx_highbd_12_mse8x8_sse2;
-
-const VarianceMxNFunc highbd_10_mse16x16_sse2 = vpx_highbd_10_mse16x16_sse2;
-const VarianceMxNFunc highbd_10_mse16x8_sse2 = vpx_highbd_10_mse16x8_sse2;
-const VarianceMxNFunc highbd_10_mse8x16_sse2 = vpx_highbd_10_mse8x16_sse2;
-const VarianceMxNFunc highbd_10_mse8x8_sse2 = vpx_highbd_10_mse8x8_sse2;
-
-const VarianceMxNFunc highbd_8_mse16x16_sse2 = vpx_highbd_8_mse16x16_sse2;
-const VarianceMxNFunc highbd_8_mse16x8_sse2 = vpx_highbd_8_mse16x8_sse2;
-const VarianceMxNFunc highbd_8_mse8x16_sse2 = vpx_highbd_8_mse8x16_sse2;
-const VarianceMxNFunc highbd_8_mse8x8_sse2 = vpx_highbd_8_mse8x8_sse2;
 INSTANTIATE_TEST_CASE_P(
-    SSE2, VpxHBDMseTest, ::testing::Values(make_tuple(4, 4, highbd_12_mse16x16_sse2),
-                                           make_tuple(4, 3, highbd_12_mse16x8_sse2),
-                                           make_tuple(3, 4, highbd_12_mse8x16_sse2),
-                                           make_tuple(3, 3, highbd_12_mse8x8_sse2),
-                                           make_tuple(4, 4, highbd_10_mse16x16_sse2),
-                                           make_tuple(4, 3, highbd_10_mse16x8_sse2),
-                                           make_tuple(3, 4, highbd_10_mse8x16_sse2),
-                                           make_tuple(3, 3, highbd_10_mse8x8_sse2),
-                                           make_tuple(4, 4, highbd_8_mse16x16_sse2),
-                                           make_tuple(4, 3, highbd_8_mse16x8_sse2),
-                                           make_tuple(3, 4, highbd_8_mse8x16_sse2),
-                                           make_tuple(3, 3, highbd_8_mse8x8_sse2)));
+    SSE2, VpxHBDMseTest,
+    ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_sse2),
+                      make_tuple(4, 3, &vpx_highbd_12_mse16x8_sse2),
+                      make_tuple(3, 4, &vpx_highbd_12_mse8x16_sse2),
+                      make_tuple(3, 3, &vpx_highbd_12_mse8x8_sse2),
+                      make_tuple(4, 4, &vpx_highbd_10_mse16x16_sse2),
+                      make_tuple(4, 3, &vpx_highbd_10_mse16x8_sse2),
+                      make_tuple(3, 4, &vpx_highbd_10_mse8x16_sse2),
+                      make_tuple(3, 3, &vpx_highbd_10_mse8x8_sse2),
+                      make_tuple(4, 4, &vpx_highbd_8_mse16x16_sse2),
+                      make_tuple(4, 3, &vpx_highbd_8_mse16x8_sse2),
+                      make_tuple(3, 4, &vpx_highbd_8_mse8x16_sse2),
+                      make_tuple(3, 3, &vpx_highbd_8_mse8x8_sse2)));
 */
 
-const VarianceMxNFunc highbd_12_variance64x64_sse2 =
-    vpx_highbd_12_variance64x64_sse2;
-const VarianceMxNFunc highbd_12_variance64x32_sse2 =
-    vpx_highbd_12_variance64x32_sse2;
-const VarianceMxNFunc highbd_12_variance32x64_sse2 =
-    vpx_highbd_12_variance32x64_sse2;
-const VarianceMxNFunc highbd_12_variance32x32_sse2 =
-    vpx_highbd_12_variance32x32_sse2;
-const VarianceMxNFunc highbd_12_variance32x16_sse2 =
-    vpx_highbd_12_variance32x16_sse2;
-const VarianceMxNFunc highbd_12_variance16x32_sse2 =
-    vpx_highbd_12_variance16x32_sse2;
-const VarianceMxNFunc highbd_12_variance16x16_sse2 =
-    vpx_highbd_12_variance16x16_sse2;
-const VarianceMxNFunc highbd_12_variance16x8_sse2 =
-    vpx_highbd_12_variance16x8_sse2;
-const VarianceMxNFunc highbd_12_variance8x16_sse2 =
-    vpx_highbd_12_variance8x16_sse2;
-const VarianceMxNFunc highbd_12_variance8x8_sse2 =
-    vpx_highbd_12_variance8x8_sse2;
-const VarianceMxNFunc highbd_10_variance64x64_sse2 =
-    vpx_highbd_10_variance64x64_sse2;
-const VarianceMxNFunc highbd_10_variance64x32_sse2 =
-    vpx_highbd_10_variance64x32_sse2;
-const VarianceMxNFunc highbd_10_variance32x64_sse2 =
-    vpx_highbd_10_variance32x64_sse2;
-const VarianceMxNFunc highbd_10_variance32x32_sse2 =
-    vpx_highbd_10_variance32x32_sse2;
-const VarianceMxNFunc highbd_10_variance32x16_sse2 =
-    vpx_highbd_10_variance32x16_sse2;
-const VarianceMxNFunc highbd_10_variance16x32_sse2 =
-    vpx_highbd_10_variance16x32_sse2;
-const VarianceMxNFunc highbd_10_variance16x16_sse2 =
-    vpx_highbd_10_variance16x16_sse2;
-const VarianceMxNFunc highbd_10_variance16x8_sse2 =
-    vpx_highbd_10_variance16x8_sse2;
-const VarianceMxNFunc highbd_10_variance8x16_sse2 =
-    vpx_highbd_10_variance8x16_sse2;
-const VarianceMxNFunc highbd_10_variance8x8_sse2 =
-    vpx_highbd_10_variance8x8_sse2;
-const VarianceMxNFunc highbd_8_variance64x64_sse2 =
-    vpx_highbd_8_variance64x64_sse2;
-const VarianceMxNFunc highbd_8_variance64x32_sse2 =
-    vpx_highbd_8_variance64x32_sse2;
-const VarianceMxNFunc highbd_8_variance32x64_sse2 =
-    vpx_highbd_8_variance32x64_sse2;
-const VarianceMxNFunc highbd_8_variance32x32_sse2 =
-    vpx_highbd_8_variance32x32_sse2;
-const VarianceMxNFunc highbd_8_variance32x16_sse2 =
-    vpx_highbd_8_variance32x16_sse2;
-const VarianceMxNFunc highbd_8_variance16x32_sse2 =
-    vpx_highbd_8_variance16x32_sse2;
-const VarianceMxNFunc highbd_8_variance16x16_sse2 =
-    vpx_highbd_8_variance16x16_sse2;
-const VarianceMxNFunc highbd_8_variance16x8_sse2 =
-    vpx_highbd_8_variance16x8_sse2;
-const VarianceMxNFunc highbd_8_variance8x16_sse2 =
-    vpx_highbd_8_variance8x16_sse2;
-const VarianceMxNFunc highbd_8_variance8x8_sse2 =
-    vpx_highbd_8_variance8x8_sse2;
-
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxHBDVarianceTest,
-    ::testing::Values(make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
-                      make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
-                      make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
-                      make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
-                      make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
-                      make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
-                      make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
-                      make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
-                      make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
-                      make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
-                      make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
-                      make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
-                      make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
-                      make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
-                      make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
-                      make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
-                      make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
-                      make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
-                      make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
-                      make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
-                      make_tuple(6, 6, highbd_8_variance64x64_sse2, 8),
-                      make_tuple(6, 5, highbd_8_variance64x32_sse2, 8),
-                      make_tuple(5, 6, highbd_8_variance32x64_sse2, 8),
-                      make_tuple(5, 5, highbd_8_variance32x32_sse2, 8),
-                      make_tuple(5, 4, highbd_8_variance32x16_sse2, 8),
-                      make_tuple(4, 5, highbd_8_variance16x32_sse2, 8),
-                      make_tuple(4, 4, highbd_8_variance16x16_sse2, 8),
-                      make_tuple(4, 3, highbd_8_variance16x8_sse2, 8),
-                      make_tuple(3, 4, highbd_8_variance8x16_sse2, 8),
-                      make_tuple(3, 3, highbd_8_variance8x8_sse2, 8)));
+    ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_sse2, 12),
+                      make_tuple(6, 5, &vpx_highbd_12_variance64x32_sse2, 12),
+                      make_tuple(5, 6, &vpx_highbd_12_variance32x64_sse2, 12),
+                      make_tuple(5, 5, &vpx_highbd_12_variance32x32_sse2, 12),
+                      make_tuple(5, 4, &vpx_highbd_12_variance32x16_sse2, 12),
+                      make_tuple(4, 5, &vpx_highbd_12_variance16x32_sse2, 12),
+                      make_tuple(4, 4, &vpx_highbd_12_variance16x16_sse2, 12),
+                      make_tuple(4, 3, &vpx_highbd_12_variance16x8_sse2, 12),
+                      make_tuple(3, 4, &vpx_highbd_12_variance8x16_sse2, 12),
+                      make_tuple(3, 3, &vpx_highbd_12_variance8x8_sse2, 12),
+                      make_tuple(6, 6, &vpx_highbd_10_variance64x64_sse2, 10),
+                      make_tuple(6, 5, &vpx_highbd_10_variance64x32_sse2, 10),
+                      make_tuple(5, 6, &vpx_highbd_10_variance32x64_sse2, 10),
+                      make_tuple(5, 5, &vpx_highbd_10_variance32x32_sse2, 10),
+                      make_tuple(5, 4, &vpx_highbd_10_variance32x16_sse2, 10),
+                      make_tuple(4, 5, &vpx_highbd_10_variance16x32_sse2, 10),
+                      make_tuple(4, 4, &vpx_highbd_10_variance16x16_sse2, 10),
+                      make_tuple(4, 3, &vpx_highbd_10_variance16x8_sse2, 10),
+                      make_tuple(3, 4, &vpx_highbd_10_variance8x16_sse2, 10),
+                      make_tuple(3, 3, &vpx_highbd_10_variance8x8_sse2, 10),
+                      make_tuple(6, 6, &vpx_highbd_8_variance64x64_sse2, 8),
+                      make_tuple(6, 5, &vpx_highbd_8_variance64x32_sse2, 8),
+                      make_tuple(5, 6, &vpx_highbd_8_variance32x64_sse2, 8),
+                      make_tuple(5, 5, &vpx_highbd_8_variance32x32_sse2, 8),
+                      make_tuple(5, 4, &vpx_highbd_8_variance32x16_sse2, 8),
+                      make_tuple(4, 5, &vpx_highbd_8_variance16x32_sse2, 8),
+                      make_tuple(4, 4, &vpx_highbd_8_variance16x16_sse2, 8),
+                      make_tuple(4, 3, &vpx_highbd_8_variance16x8_sse2, 8),
+                      make_tuple(3, 4, &vpx_highbd_8_variance8x16_sse2, 8),
+                      make_tuple(3, 3, &vpx_highbd_8_variance8x8_sse2, 8)));
 
 #if CONFIG_USE_X86INC
-const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
-    vpx_highbd_12_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
-    vpx_highbd_12_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
-    vpx_highbd_12_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
-    vpx_highbd_12_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
-    vpx_highbd_12_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
-    vpx_highbd_12_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
-    vpx_highbd_12_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
-    vpx_highbd_12_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
-    vpx_highbd_12_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
-    vpx_highbd_12_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
-    vpx_highbd_12_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
-    vpx_highbd_10_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
-    vpx_highbd_10_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
-    vpx_highbd_10_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
-    vpx_highbd_10_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
-    vpx_highbd_10_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
-    vpx_highbd_10_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
-    vpx_highbd_10_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
-    vpx_highbd_10_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
-    vpx_highbd_10_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
-    vpx_highbd_10_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
-    vpx_highbd_10_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
-    vpx_highbd_8_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
-    vpx_highbd_8_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
-    vpx_highbd_8_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
-    vpx_highbd_8_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
-    vpx_highbd_8_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
-    vpx_highbd_8_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
-    vpx_highbd_8_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
-    vpx_highbd_8_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
-    vpx_highbd_8_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
-    vpx_highbd_8_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
-    vpx_highbd_8_sub_pixel_variance8x4_sse2;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxHBDSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
-                      make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
-                      make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
-                      make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
-                      make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
-                      make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
-                      make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
-                      make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
-                      make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
-                      make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
-                      make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
-                      make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
-                      make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
-                      make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
-                      make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
-                      make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
-                      make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
-                      make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
-                      make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
-                      make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
-                      make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
-                      make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
-                      make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
-                      make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
-                      make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
-                      make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
-                      make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
-                      make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
-                      make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
-                      make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
-                      make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
-                      make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
-                      make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+    ::testing::Values(
+        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_sse2, 12),
+        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_sse2, 12),
+        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_sse2, 12),
+        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_sse2, 12),
+        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_sse2, 12),
+        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_sse2, 12),
+        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_sse2, 12),
+        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_sse2, 12),
+        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_sse2, 12),
+        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_sse2, 12),
+        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_sse2, 12),
+        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_sse2, 10),
+        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_sse2, 10),
+        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_sse2, 10),
+        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_sse2, 10),
+        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_sse2, 10),
+        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_sse2, 10),
+        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_sse2, 10),
+        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_sse2, 10),
+        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_sse2, 10),
+        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_sse2, 10),
+        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_sse2, 10),
+        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_sse2, 8),
+        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_sse2, 8),
+        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_sse2, 8),
+        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_sse2, 8),
+        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_sse2, 8),
+        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_sse2, 8),
+        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_sse2, 8),
+        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_sse2, 8),
+        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_sse2, 8),
+        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_sse2, 8),
+        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_sse2, 8)));
 
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
-const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
-    vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
-const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
-    vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
-const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
-    vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxHBDSubpelAvgVarianceTest,
     ::testing::Values(
-        make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
-        make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
-        make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
-        make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
-        make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
-        make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
-        make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
-        make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
-        make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
-        make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
-        make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
-        make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
-        make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
-        make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
-        make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
-        make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
-        make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
-        make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
-        make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
-        make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
-        make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
-        make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
-        make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
-        make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
-        make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
-        make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
-        make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
-        make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
-        make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
-        make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
-        make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
-        make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
-        make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
+        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_sse2, 12),
+        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_sse2, 12),
+        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_sse2, 12),
+        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_sse2, 12),
+        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_sse2, 12),
+        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_sse2, 12),
+        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_sse2, 12),
+        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_sse2, 12),
+        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_sse2, 12),
+        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_sse2, 12),
+        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_sse2, 12),
+        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_sse2, 10),
+        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_sse2, 10),
+        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_sse2, 10),
+        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_sse2, 10),
+        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_sse2, 10),
+        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_sse2, 10),
+        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_sse2, 10),
+        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_sse2, 10),
+        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_sse2, 10),
+        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_sse2, 10),
+        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_sse2, 10),
+        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_sse2, 8),
+        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_sse2, 8),
+        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_sse2, 8),
+        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_sse2, 8),
+        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_sse2, 8),
+        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_sse2, 8),
+        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_sse2, 8),
+        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_sse2, 8),
+        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_sse2, 8),
+        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
+        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
 #endif  // CONFIG_USE_X86INC
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // HAVE_SSE2
@@ -1724,192 +1184,108 @@
 
 #if HAVE_SSSE3
 #if CONFIG_USE_X86INC
-const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
-    vpx_sub_pixel_variance64x64_ssse3;
-const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
-    vpx_sub_pixel_variance64x32_ssse3;
-const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
-    vpx_sub_pixel_variance32x64_ssse3;
-const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
-    vpx_sub_pixel_variance32x32_ssse3;
-const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
-    vpx_sub_pixel_variance32x16_ssse3;
-const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
-    vpx_sub_pixel_variance16x32_ssse3;
-const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
-    vpx_sub_pixel_variance16x16_ssse3;
-const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
-    vpx_sub_pixel_variance16x8_ssse3;
-const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
-    vpx_sub_pixel_variance8x16_ssse3;
-const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
-    vpx_sub_pixel_variance8x8_ssse3;
-const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
-    vpx_sub_pixel_variance8x4_ssse3;
-const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
-    vpx_sub_pixel_variance4x8_ssse3;
-const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
-    vpx_sub_pixel_variance4x4_ssse3;
 INSTANTIATE_TEST_CASE_P(
     SSSE3, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
-                      make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
-                      make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
-                      make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
-                      make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
-                      make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
-                      make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
-                      make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
-                      make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
-                      make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
-                      make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
-                      make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
-                      make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_ssse3, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_ssse3, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_ssse3, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_ssse3, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_ssse3, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_ssse3, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_ssse3, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_ssse3, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_ssse3, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_ssse3, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_ssse3, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_ssse3, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_ssse3, 0)));
 
-const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
-    vpx_sub_pixel_avg_variance64x64_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
-    vpx_sub_pixel_avg_variance64x32_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
-    vpx_sub_pixel_avg_variance32x64_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
-    vpx_sub_pixel_avg_variance32x32_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
-    vpx_sub_pixel_avg_variance32x16_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
-    vpx_sub_pixel_avg_variance16x32_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
-    vpx_sub_pixel_avg_variance16x16_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
-    vpx_sub_pixel_avg_variance16x8_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
-    vpx_sub_pixel_avg_variance8x16_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
-    vpx_sub_pixel_avg_variance8x8_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
-    vpx_sub_pixel_avg_variance8x4_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
-    vpx_sub_pixel_avg_variance4x8_ssse3;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
-    vpx_sub_pixel_avg_variance4x4_ssse3;
 INSTANTIATE_TEST_CASE_P(
     SSSE3, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
-                      make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
-                      make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
-                      make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
-                      make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
-                      make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
-                      make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
-                      make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
-                      make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
-                      make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
-                      make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
+    ::testing::Values(
+        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_ssse3, 0),
+        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_ssse3, 0),
+        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_ssse3, 0),
+        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_ssse3, 0),
+        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_ssse3, 0),
+        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_ssse3, 0),
+        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_ssse3, 0),
+        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_ssse3, 0),
+        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_ssse3, 0),
+        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_ssse3, 0),
+        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_ssse3, 0),
+        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_ssse3, 0),
+        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_ssse3, 0)));
 #endif  // CONFIG_USE_X86INC
 #endif  // HAVE_SSSE3
 
 #if HAVE_AVX2
-const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
 INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_avx2)));
+                        ::testing::Values(make_tuple(4, 4,
+                                                     &vpx_mse16x16_avx2)));
 
-const VarianceMxNFunc variance64x64_avx2 = vpx_variance64x64_avx2;
-const VarianceMxNFunc variance64x32_avx2 = vpx_variance64x32_avx2;
-const VarianceMxNFunc variance32x32_avx2 = vpx_variance32x32_avx2;
-const VarianceMxNFunc variance32x16_avx2 = vpx_variance32x16_avx2;
-const VarianceMxNFunc variance16x16_avx2 = vpx_variance16x16_avx2;
 INSTANTIATE_TEST_CASE_P(
     AVX2, VpxVarianceTest,
-    ::testing::Values(make_tuple(6, 6, variance64x64_avx2, 0),
-                      make_tuple(6, 5, variance64x32_avx2, 0),
-                      make_tuple(5, 5, variance32x32_avx2, 0),
-                      make_tuple(5, 4, variance32x16_avx2, 0),
-                      make_tuple(4, 4, variance16x16_avx2, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_avx2, 0),
+                      make_tuple(6, 5, &vpx_variance64x32_avx2, 0),
+                      make_tuple(5, 5, &vpx_variance32x32_avx2, 0),
+                      make_tuple(5, 4, &vpx_variance32x16_avx2, 0),
+                      make_tuple(4, 4, &vpx_variance16x16_avx2, 0)));
 
-const SubpixVarMxNFunc subpel_variance64x64_avx2 =
-    vpx_sub_pixel_variance64x64_avx2;
-const SubpixVarMxNFunc subpel_variance32x32_avx2 =
-    vpx_sub_pixel_variance32x32_avx2;
 INSTANTIATE_TEST_CASE_P(
     AVX2, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
-                      make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_avx2, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_avx2, 0)));
 
-const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
-    vpx_sub_pixel_avg_variance64x64_avx2;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
-    vpx_sub_pixel_avg_variance32x32_avx2;
 INSTANTIATE_TEST_CASE_P(
     AVX2, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
+    ::testing::Values(
+        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_avx2, 0),
+        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_avx2, 0)));
 #endif  // HAVE_AVX2
 
 #if HAVE_MEDIA
-const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
 INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+                        ::testing::Values(make_tuple(4, 4,
+                                                     &vpx_mse16x16_media)));
 
-const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
-const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
 INSTANTIATE_TEST_CASE_P(
     MEDIA, VpxVarianceTest,
-    ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
-                      make_tuple(3, 3, variance8x8_media, 0)));
+    ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_media, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_media, 0)));
 
-const SubpixVarMxNFunc subpel_variance16x16_media =
-    vpx_sub_pixel_variance16x16_media;
-const SubpixVarMxNFunc subpel_variance8x8_media =
-    vpx_sub_pixel_variance8x8_media;
 INSTANTIATE_TEST_CASE_P(
     MEDIA, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
-                      make_tuple(3, 3, subpel_variance8x8_media, 0)));
+    ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_media, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_media, 0)));
 #endif  // HAVE_MEDIA
 
 #if HAVE_NEON
-const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
 INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
-                        ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
+                        ::testing::Values(make_tuple(2, 2,
+                                                     &vpx_get4x4sse_cs_neon)));
 
-const VarianceMxNFunc mse16x16_neon = vpx_mse16x16_neon;
 INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
+                        ::testing::Values(make_tuple(4, 4,
+                                                     &vpx_mse16x16_neon)));
 
-const VarianceMxNFunc variance64x64_neon = vpx_variance64x64_neon;
-const VarianceMxNFunc variance64x32_neon = vpx_variance64x32_neon;
-const VarianceMxNFunc variance32x64_neon = vpx_variance32x64_neon;
-const VarianceMxNFunc variance32x32_neon = vpx_variance32x32_neon;
-const VarianceMxNFunc variance16x16_neon = vpx_variance16x16_neon;
-const VarianceMxNFunc variance16x8_neon = vpx_variance16x8_neon;
-const VarianceMxNFunc variance8x16_neon = vpx_variance8x16_neon;
-const VarianceMxNFunc variance8x8_neon = vpx_variance8x8_neon;
 INSTANTIATE_TEST_CASE_P(
     NEON, VpxVarianceTest,
-    ::testing::Values(make_tuple(6, 6, variance64x64_neon, 0),
-                      make_tuple(6, 5, variance64x32_neon, 0),
-                      make_tuple(5, 6, variance32x64_neon, 0),
-                      make_tuple(5, 5, variance32x32_neon, 0),
-                      make_tuple(4, 4, variance16x16_neon, 0),
-                      make_tuple(4, 3, variance16x8_neon, 0),
-                      make_tuple(3, 4, variance8x16_neon, 0),
-                      make_tuple(3, 3, variance8x8_neon, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_neon, 0),
+                      make_tuple(6, 5, &vpx_variance64x32_neon, 0),
+                      make_tuple(5, 6, &vpx_variance32x64_neon, 0),
+                      make_tuple(5, 5, &vpx_variance32x32_neon, 0),
+                      make_tuple(4, 4, &vpx_variance16x16_neon, 0),
+                      make_tuple(4, 3, &vpx_variance16x8_neon, 0),
+                      make_tuple(3, 4, &vpx_variance8x16_neon, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_neon, 0)));
 
-const SubpixVarMxNFunc subpel_variance64x64_neon =
-    vpx_sub_pixel_variance64x64_neon;
-const SubpixVarMxNFunc subpel_variance32x32_neon =
-    vpx_sub_pixel_variance32x32_neon;
-const SubpixVarMxNFunc subpel_variance16x16_neon =
-    vpx_sub_pixel_variance16x16_neon;
-const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
 INSTANTIATE_TEST_CASE_P(
     NEON, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
-                      make_tuple(5, 5, subpel_variance32x32_neon, 0),
-                      make_tuple(4, 4, subpel_variance16x16_neon, 0),
-                      make_tuple(3, 3, subpel_variance8x8_neon, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_neon, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_neon, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_neon, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_neon, 0)));
 #endif  // HAVE_NEON
 
 #if HAVE_MSA
@@ -1916,125 +1292,62 @@
 INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
                         ::testing::Values(vpx_get_mb_ss_msa));
 
-const Get4x4SseFunc get4x4sse_cs_msa = vpx_get4x4sse_cs_msa;
 INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
-                        ::testing::Values(make_tuple(2, 2, get4x4sse_cs_msa)));
+                        ::testing::Values(make_tuple(2, 2,
+                                                     &vpx_get4x4sse_cs_msa)));
 
-const VarianceMxNFunc mse16x16_msa = vpx_mse16x16_msa;
-const VarianceMxNFunc mse16x8_msa = vpx_mse16x8_msa;
-const VarianceMxNFunc mse8x16_msa = vpx_mse8x16_msa;
-const VarianceMxNFunc mse8x8_msa = vpx_mse8x8_msa;
 INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_msa),
-                                          make_tuple(4, 3, mse16x8_msa),
-                                          make_tuple(3, 4, mse8x16_msa),
-                                          make_tuple(3, 3, mse8x8_msa)));
+                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_msa),
+                                          make_tuple(4, 3, &vpx_mse16x8_msa),
+                                          make_tuple(3, 4, &vpx_mse8x16_msa),
+                                          make_tuple(3, 3, &vpx_mse8x8_msa)));
 
-const VarianceMxNFunc variance64x64_msa = vpx_variance64x64_msa;
-const VarianceMxNFunc variance64x32_msa = vpx_variance64x32_msa;
-const VarianceMxNFunc variance32x64_msa = vpx_variance32x64_msa;
-const VarianceMxNFunc variance32x32_msa = vpx_variance32x32_msa;
-const VarianceMxNFunc variance32x16_msa = vpx_variance32x16_msa;
-const VarianceMxNFunc variance16x32_msa = vpx_variance16x32_msa;
-const VarianceMxNFunc variance16x16_msa = vpx_variance16x16_msa;
-const VarianceMxNFunc variance16x8_msa = vpx_variance16x8_msa;
-const VarianceMxNFunc variance8x16_msa = vpx_variance8x16_msa;
-const VarianceMxNFunc variance8x8_msa = vpx_variance8x8_msa;
-const VarianceMxNFunc variance8x4_msa = vpx_variance8x4_msa;
-const VarianceMxNFunc variance4x8_msa = vpx_variance4x8_msa;
-const VarianceMxNFunc variance4x4_msa = vpx_variance4x4_msa;
 INSTANTIATE_TEST_CASE_P(
     MSA, VpxVarianceTest,
-    ::testing::Values(make_tuple(6, 6, variance64x64_msa, 0),
-                      make_tuple(6, 5, variance64x32_msa, 0),
-                      make_tuple(5, 6, variance32x64_msa, 0),
-                      make_tuple(5, 5, variance32x32_msa, 0),
-                      make_tuple(5, 4, variance32x16_msa, 0),
-                      make_tuple(4, 5, variance16x32_msa, 0),
-                      make_tuple(4, 4, variance16x16_msa, 0),
-                      make_tuple(4, 3, variance16x8_msa, 0),
-                      make_tuple(3, 4, variance8x16_msa, 0),
-                      make_tuple(3, 3, variance8x8_msa, 0),
-                      make_tuple(3, 2, variance8x4_msa, 0),
-                      make_tuple(2, 3, variance4x8_msa, 0),
-                      make_tuple(2, 2, variance4x4_msa, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_msa, 0),
+                      make_tuple(6, 5, &vpx_variance64x32_msa, 0),
+                      make_tuple(5, 6, &vpx_variance32x64_msa, 0),
+                      make_tuple(5, 5, &vpx_variance32x32_msa, 0),
+                      make_tuple(5, 4, &vpx_variance32x16_msa, 0),
+                      make_tuple(4, 5, &vpx_variance16x32_msa, 0),
+                      make_tuple(4, 4, &vpx_variance16x16_msa, 0),
+                      make_tuple(4, 3, &vpx_variance16x8_msa, 0),
+                      make_tuple(3, 4, &vpx_variance8x16_msa, 0),
+                      make_tuple(3, 3, &vpx_variance8x8_msa, 0),
+                      make_tuple(3, 2, &vpx_variance8x4_msa, 0),
+                      make_tuple(2, 3, &vpx_variance4x8_msa, 0),
+                      make_tuple(2, 2, &vpx_variance4x4_msa, 0)));
 
-const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
-const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
-const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
-const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
-const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
-const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
-const SubpixVarMxNFunc subpel_variance16x16_msa =
-    vpx_sub_pixel_variance16x16_msa;
-const SubpixVarMxNFunc subpel_variance16x32_msa =
-    vpx_sub_pixel_variance16x32_msa;
-const SubpixVarMxNFunc subpel_variance32x16_msa =
-    vpx_sub_pixel_variance32x16_msa;
-const SubpixVarMxNFunc subpel_variance32x32_msa =
-    vpx_sub_pixel_variance32x32_msa;
-const SubpixVarMxNFunc subpel_variance32x64_msa =
-    vpx_sub_pixel_variance32x64_msa;
-const SubpixVarMxNFunc subpel_variance64x32_msa =
-    vpx_sub_pixel_variance64x32_msa;
-const SubpixVarMxNFunc subpel_variance64x64_msa =
-    vpx_sub_pixel_variance64x64_msa;
 INSTANTIATE_TEST_CASE_P(
     MSA, VpxSubpelVarianceTest,
-    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
-                      make_tuple(2, 3, subpel_variance4x8_msa, 0),
-                      make_tuple(3, 2, subpel_variance8x4_msa, 0),
-                      make_tuple(3, 3, subpel_variance8x8_msa, 0),
-                      make_tuple(3, 4, subpel_variance8x16_msa, 0),
-                      make_tuple(4, 3, subpel_variance16x8_msa, 0),
-                      make_tuple(4, 4, subpel_variance16x16_msa, 0),
-                      make_tuple(4, 5, subpel_variance16x32_msa, 0),
-                      make_tuple(5, 4, subpel_variance32x16_msa, 0),
-                      make_tuple(5, 5, subpel_variance32x32_msa, 0),
-                      make_tuple(5, 6, subpel_variance32x64_msa, 0),
-                      make_tuple(6, 5, subpel_variance64x32_msa, 0),
-                      make_tuple(6, 6, subpel_variance64x64_msa, 0)));
+    ::testing::Values(make_tuple(2, 2, &vpx_sub_pixel_variance4x4_msa, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_msa, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_msa, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_msa, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_msa, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_msa, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_msa, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_msa, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_msa, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_msa, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_msa, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_msa, 0),
+                      make_tuple(6, 6, &vpx_sub_pixel_variance64x64_msa, 0)));
 
-const SubpixAvgVarMxNFunc subpel_avg_variance64x64_msa =
-    vpx_sub_pixel_avg_variance64x64_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance64x32_msa =
-    vpx_sub_pixel_avg_variance64x32_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x64_msa =
-    vpx_sub_pixel_avg_variance32x64_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x32_msa =
-    vpx_sub_pixel_avg_variance32x32_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance32x16_msa =
-    vpx_sub_pixel_avg_variance32x16_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x32_msa =
-    vpx_sub_pixel_avg_variance16x32_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x16_msa =
-    vpx_sub_pixel_avg_variance16x16_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance16x8_msa =
-    vpx_sub_pixel_avg_variance16x8_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x16_msa =
-    vpx_sub_pixel_avg_variance8x16_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x8_msa =
-    vpx_sub_pixel_avg_variance8x8_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance8x4_msa =
-    vpx_sub_pixel_avg_variance8x4_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x8_msa =
-    vpx_sub_pixel_avg_variance4x8_msa;
-const SubpixAvgVarMxNFunc subpel_avg_variance4x4_msa =
-    vpx_sub_pixel_avg_variance4x4_msa;
 INSTANTIATE_TEST_CASE_P(
     MSA, VpxSubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_msa, 0),
-                      make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
-                      make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
-                      make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
-                      make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
-                      make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
-                      make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
-                      make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
-                      make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
-                      make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
-                      make_tuple(2, 2, subpel_avg_variance4x4_msa, 0)));
+    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_msa, 0),
+                      make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_msa, 0),
+                      make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_msa, 0),
+                      make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_msa, 0),
+                      make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_msa, 0),
+                      make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_msa, 0),
+                      make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_msa, 0),
+                      make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_msa, 0),
+                      make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_msa, 0),
+                      make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_msa, 0),
+                      make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_msa, 0),
+                      make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_msa, 0),
+                      make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_msa, 0)));
 #endif  // HAVE_MSA
 }  // namespace
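
Note (editor's illustration, not part of the patch): every hunk above follows the same pattern — the intermediate `const SadMxNFunc` / `VarianceMxNFunc` / `SubpixVarMxNFunc` wrapper variables are dropped and the kernel's address is taken directly with `&` inside `make_tuple`. The following is a minimal, self-contained sketch of that pattern under assumed names (`FakeSadFn`, `fake_sad4x4_c` are hypothetical stand-ins for the `vpx_*` kernels and are not libvpx symbols); it is a sketch of the idiom, not the project's test code.

    // Sketch: passing a function's address with '&' instead of binding it
    // to a named const function pointer first.  All names here are
    // placeholders, not libvpx identifiers.
    #include <stdint.h>
    #include <iostream>
    #include <tuple>

    typedef unsigned int (*FakeSadFn)(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride);

    // Stand-in for a C kernel such as a 4x4 SAD implementation.
    unsigned int fake_sad4x4_c(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride) {
      unsigned int sad = 0;
      for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 4; ++c) {
          const int a = src[r * src_stride + c];
          const int b = ref[r * ref_stride + c];
          sad += static_cast<unsigned int>(a > b ? a - b : b - a);
        }
      }
      return sad;
    }

    int main() {
      // Old style: bind the kernel to a named const wrapper, then pass the
      // wrapper's name into make_tuple.
      const FakeSadFn sad4x4_c = fake_sad4x4_c;
      std::tuple<int, int, FakeSadFn, int> old_style =
          std::make_tuple(2, 2, sad4x4_c, -1);

      // New style: take the kernel's address directly at the call site.
      std::tuple<int, int, FakeSadFn, int> new_style =
          std::make_tuple(2, 2, &fake_sad4x4_c, -1);

      // Both tuples hold the same function pointer and call the same code.
      const uint8_t src[16] = {0};
      const uint8_t ref[16] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
      std::cout << std::get<2>(old_style)(src, 4, ref, 4) << " "
                << std::get<2>(new_style)(src, 4, ref, 4) << "\n";
      return 0;
    }

Either form yields the same function pointer; writing `&vpx_...` inline simply removes dozens of one-use wrapper constants from the instantiation tables, as the deleted lines above show.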