libvpx

ref: 7d0656537b5739f06b923d471c39c3a0b249e76c
parent: b04e87c6abd33faf688d68611429dc155c4bff0f
author: Deb Mukherjee <debargha@google.com>
date: Wed Aug 8 12:49:15 EDT 2012

Merging in the sixteenth subpel uv experiment

Merges this experiment in to make it easier to run tests on
filter precision, vectorized implementations, etc.

Also removes an experimental filter.

Change-Id: I1e8706bb6d4fc469815123939e9c6e0b5ae945cd

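Background: the experiment made chroma sub-pixel interpolation use sixteen
filter phases instead of eight. Chroma is subsampled 2:1 relative to luma, so
an eighth-pel luma motion vector lands on sixteenth-pel positions in U and V;
with the merge, the filter tables always carry 16 phases (SUBPEL_SHIFTS == 16),
luma call sites double their eighth-pel fraction to index them, and chroma call
sites keep the full sixteenth-pel fraction. A minimal sketch of the two
mappings, assuming motion vector components in eighth-pel luma units (helper
names are illustrative):

    /* Luma: the low 3 bits are the eighth-pel fraction; doubling them
     * selects the even entries of the 16-phase filter table. */
    static int luma_phase(int mv) {
      return (mv & 7) << 1;            /* 0, 2, 4, ..., 14 */
    }

    /* Chroma: the same displacement measured in chroma pels is half as
     * large, so the eighth-pel luma value is already a sixteenth-pel
     * chroma value; the low 4 bits are the phase. */
    static int chroma_phase(int omv) {
      return omv & 15;                 /* 0, 1, 2, ..., 15 */
    }

This is why the call sites below pass (mv.as_mv.col & 7) << 1 for luma and
omv_col & 15 for chroma once the 8-phase code paths are gone.
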
--- a/configure
+++ b/configure
@@ -217,7 +217,6 @@
 EXPERIMENT_LIST="
     csm
     featureupdates
-    sixteenth_subpel_uv
     comp_intra_pred
     superblocks
     pred_filter
--- a/vp8/common/filter.c
+++ b/vp8/common/filter.c
@@ -14,7 +14,6 @@
 #include "vpx_ports/mem.h"
 
 DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = {
-#if SUBPEL_SHIFTS==16
   { 128,   0 },
   { 120,   8 },
   { 112,  16 },
@@ -31,22 +30,11 @@
   {  24, 104 },
   {  16, 112 },
   {   8, 120 }
-#else
-  { 128,   0 },
-  { 112,  16 },
-  {  96,  32 },
-  {  80,  48 },
-  {  64,  64 },
-  {  48,  80 },
-  {  32,  96 },
-  {  16, 112 }
-#endif  /* SUBPEL_SHIFTS==16 */
 };
 
 #define FILTER_ALPHA       0
 #define FILTER_ALPHA_SHARP 1
 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
-#if SUBPEL_SHIFTS==16
 #if FILTER_ALPHA == 0
   /* Lagrangian interpolation filter */
   { 0,   0,   0, 128,   0,   0,   0,  0},
@@ -90,32 +78,9 @@
   { 0,   2,  -6,  18, 122, -10,   2,  0},
   { 0,   1,  -3,   8, 126,  -5,   1,  0}
 #endif  /* FILTER_ALPHA */
-#else   /* SUBPEL_SHIFTS==16 */
-#if FILTER_ALPHA == 0
-  { 0,   0,   0, 128,   0,   0,   0,   0},
-  { -1,   3, -10, 122,  18,  -6,   2,   0},
-  { -1,   4, -16, 112,  37, -11,   4,  -1},
-  { -1,   5, -19,  97,  58, -16,   5,  -1},
-  { -1,   6, -19,  78,  78, -19,   6,  -1},
-  { -1,   5, -16,  58,  97, -19,   5,  -1},
-  { -1,   4, -11,  37, 112, -16,   4,  -1},
-  { 0,   2,  -6,  18, 122, -10,   3,  -1},
-#elif FILTER_ALPHA == 50
-  /* alpha = 0.50 */
-  { 0,   0,   0, 128,   0,   0,   0,  0},
-  { 0,   2, -10, 122,  18,  -6,   2,  0},
-  { -1,   4, -16, 112,  37, -11,   3,  0},
-  { -1,   5, -18,  96,  58, -16,   5, -1},
-  { -1,   5, -18,  78,  78, -18,   5, -1},
-  { -1,   5, -16,  58,  96, -18,   5, -1},
-  { 0,   3, -11,  37, 112, -16,   4, -1},
-  { 0,   2,  -6,  18, 122, -10,   2,  0}
-#endif  /* FILTER_ALPHA */
-#endif  /* SUBPEL_SHIFTS==16 */
 };
 
 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
-#if SUBPEL_SHIFTS==16
 #if FILTER_ALPHA_SHARP == 1
   /* dct based filter */
   {0,   0,   0, 128,   0,   0,   0, 0},
@@ -152,62 +117,10 @@
   {-2,   5, -10,  28, 119, -16,   6, -2},
   {-1,   3,  -7,  18, 123, -11,   4, -1},
   {-1,   2,  -3,   9, 126,  -6,   2, -1}
-#elif FILTER_ALPHA_SHARP == 65
-  /* alpha = 0.65 */
-  { 0,   0,   0, 128,   0,   0,   0,  0},
-  { 0,   2,  -6, 126,   8,  -3,   1,  0},
-  { -1,   3, -10, 123,  18,  -6,   2, -1},
-  { -1,   5, -14, 118,  27, -10,   4, -1},
-  { -1,   5, -17, 112,  38, -13,   5, -1},
-  { -2,   6, -19, 106,  48, -15,   5, -1},
-  { -2,   7, -21,  98,  59, -17,   6, -2},
-  { -2,   7, -21,  89,  69, -19,   7, -2},
-  { -2,   7, -20,  79,  79, -20,   7, -2},
-  { -2,   7, -19,  69,  89, -21,   7, -2},
-  { -2,   6, -17,  59,  98, -21,   7, -2},
-  { -1,   5, -15,  48, 106, -19,   6, -2},
-  { -1,   5, -13,  38, 112, -17,   5, -1},
-  { -1,   4, -10,  27, 118, -14,   5, -1},
-  { -1,   2,  -6,  18, 123, -10,   3, -1},
-  { 0,   1,  -3,   8, 126,  -6,   2,  0}
 #endif  /* FILTER_ALPHA_SHARP */
-#else   /* SUBPEL_SHIFTS==16 */
-#if FILTER_ALPHA_SHARP == 1
-  /* dct based filter */
-  {0,   0,   0, 128,   0,   0,   0, 0},
-  {-2,   5, -13, 125,  17,  -6,   3, -1},
-  {-4,   9, -20, 115,  37, -13,   6, -2},
-  {-4,  10, -24, 100,  59, -19,   9, -3},
-  {-4,  10, -23,  81,  81, -23,  10, -4},
-  {-3,   9, -19,  59, 100, -24,  10, -4},
-  {-2,   6, -13,  37, 115, -20,   9, -4},
-  {-1,   3,  -6,  17, 125, -13,   5, -2}
-#elif FILTER_ALPHA_SHARP == 75
-  /* alpha = 0.75 */
-  {0,   0,   0, 128,   0,   0,   0, 0},
-  {-1,   4, -11, 123,  18,  -7,   3, -1},
-  {-2,   7, -19, 113,  38, -13,   6, -2},
-  {-3,   9, -22,  99,  59, -19,   8, -3},
-  {-3,   9, -22,  80,  80, -22,   9, -3},
-  {-3,   8, -19,  59,  99, -22,   9, -3},
-  {-2,   6, -13,  38, 113, -19,   7, -2},
-  {-1,   3,  -7,  18, 123, -11,   4, -1}
-#elif FILTER_ALPHA_SHARP == 65
-  /* alpha = 0.65 */
-  { 0,   0,   0, 128,   0,   0,   0, 0},
-  { -1,   3, -10, 123,  18,  -6,   2, -1},
-  { -1,   5, -17, 112,  38, -13,   5, -1},
-  { -2,   7, -21,  98,  59, -17,   6, -2},
-  { -2,   7, -20,  79,  79, -20,   7, -2},
-  { -2,   6, -17,  59,  98, -21,   7, -2},
-  { -1,   5, -13,  38, 112, -17,   5, -1},
-  { -1,   2,  -6,  18, 123, -10,   3, -1}
-#endif  /* FILTER_ALPHA_SHARP */
-#endif  /* SUBPEL_SHIFTS==16 */
 };
 
 DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = {
-#if SUBPEL_SHIFTS==16
   {0,   0, 128,   0,   0, 0},
   {1,  -5, 125,   8,  -2, 1},
   {1,  -8, 122,  17,  -5, 1},
@@ -224,16 +137,6 @@
   {2,  -8,  27, 116, -11, 2},
   {1,  -5,  17, 122,  -8, 1},
   {1,  -2,   8, 125,  -5, 1}
-#else
-  { 0,  0,  128,    0,   0,  0 },         /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */
-  { 0, -6,  123,   12,  -1,  0 },
-  { 2, -11, 108,   36,  -8,  1 },         /* New 1/4 pel 6 tap filter */
-  { 0, -9,   93,   50,  -6,  0 },
-  { 3, -16,  77,   77, -16,  3 },         /* New 1/2 pel 6 tap filter */
-  { 0, -6,   50,   93,  -9,  0 },
-  { 1, -8,   36,  108, -11,  2 },         /* New 1/4 pel 6 tap filter */
-  { 0, -1,   12,  123,  -6,  0 },
-#endif  /* SUBPEL_SHIFTS==16 */
 };
 
 static void filter_block2d_first_pass_6
@@ -255,8 +158,8 @@
              ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
              ((int)src_ptr[0]                    * vp8_filter[2]) +
              ((int)src_ptr[pixel_step]           * vp8_filter[3]) +
-             ((int)src_ptr[2 * pixel_step]         * vp8_filter[4]) +
-             ((int)src_ptr[3 * pixel_step]         * vp8_filter[5]) +
+             ((int)src_ptr[2 * pixel_step]       * vp8_filter[4]) +
+             ((int)src_ptr[3 * pixel_step]       * vp8_filter[5]) +
              (VP8_FILTER_WEIGHT >> 1);      /* Rounding */
 
       /* Normalize back to 0-255 */
--- a/vp8/common/filter.h
+++ b/vp8/common/filter.h
@@ -18,11 +18,7 @@
 #define VP8_FILTER_WEIGHT 128
 #define VP8_FILTER_SHIFT  7
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SUBPEL_SHIFTS 16
-#else
-#define SUBPEL_SHIFTS 8
-#endif
 
 extern const short vp8_bilinear_filters[SUBPEL_SHIFTS][2];
 extern const short vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6];
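
With SUBPEL_SHIFTS fixed at 16, each row of vp8_bilinear_filters is a pair of
weights that sums to VP8_FILTER_WEIGHT (128) and steps by 8 per phase. A
minimal sketch of how one pixel is interpolated with such a pair (assumes the
declarations above; this is not the library's actual helper):

    /* Bilinear interpolation between neighbors a and b at a given
     * sixteenth-pel phase (0..15).  The weights sum to 128, so adding
     * half (64) and shifting by VP8_FILTER_SHIFT (7) rounds and
     * normalizes back to 0..255. */
    static unsigned char interp(unsigned char a, unsigned char b, int phase) {
      const short *f = vp8_bilinear_filters[phase];
      return (unsigned char)((a * f[0] + b * f[1] +
                              (VP8_FILTER_WEIGHT >> 1)) >> VP8_FILTER_SHIFT);
    }
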
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -241,11 +241,7 @@
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
     ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
     ptr = ptr_base;
@@ -283,11 +279,7 @@
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
     ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
     ptr = ptr_base;
@@ -314,11 +306,7 @@
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -341,11 +329,7 @@
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict_avg8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, avg8x8)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -362,11 +346,7 @@
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
-#else
-    x->subpixel_predict8x4(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
   }
@@ -542,7 +522,6 @@
 
     // U & V
     for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       if ((omv_row | omv_col) & 15) {
         // Copy extended MB into Temp array, applying the spatial filter
         filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -551,19 +530,7 @@
         // Sub-pel interpolation
         xd->subpixel_predict8x8(pTemp, len, omv_col & 15,
                                 omv_row & 15, pDst, 8);
-      }
-#else   /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      if ((mv_row | mv_col) & 7) {
-        // Copy extended MB into Temp array, applying the spatial filter
-        filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                  Temp, len, len, len);
-
-        // Sub-pel interpolation
-        xd->subpixel_predict8x8(pTemp, len, mv_col & 7,
-                                mv_row & 7, pDst, 8);
-      }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      else {
+      } else {
         // Apply prediction filter as we copy from source to destination
         filter_mb(pSrc, pre_stride, pDst, 8, 8, 8);
       }
@@ -574,18 +541,10 @@
     }
   } else
 #endif
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if ((omv_row | omv_col) & 15) {
       xd->subpixel_predict8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, upred_ptr, 8);
       xd->subpixel_predict8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, vpred_ptr, 8);
-    }
-#else   /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if ((mv_row | mv_col) & 7) {
-      xd->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
-      xd->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
-    }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
       RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
     }
@@ -708,13 +667,8 @@
                 Temp, len, len, len);
 
       // Sub-pel interpolation
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       xd->subpixel_predict16x16(pTemp, len, (mv_col & 7) << 1,
                                 (mv_row & 7) << 1, pred_ptr, 16);
-#else
-      xd->subpixel_predict16x16(pTemp, len, mv_col & 7,
-                                mv_row & 7, pred_ptr, 16);
-#endif
     } else {
       // Apply spatial filter to create the prediction directly
       filter_mb(ptr, pre_stride, pred_ptr, 16, 16, 16);
@@ -722,13 +676,8 @@
   } else
 #endif
     if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       xd->subpixel_predict16x16(ptr, pre_stride, (mv_col & 7) << 1,
                                 (mv_row & 7) << 1, pred_ptr, 16);
-#else
-      xd->subpixel_predict16x16(ptr, pre_stride, mv_col & 7,
-                                mv_row & 7, pred_ptr, 16);
-#endif
     } else {
       RECON_INVOKE(&xd->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
     }
@@ -808,17 +757,10 @@
                 Temp, len, len, len);
 
       // Sub-pel filter
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       x->subpixel_predict16x16(pTemp, len,
                                (_16x16mv.as_mv.col & 7) << 1,
                                (_16x16mv.as_mv.row & 7) << 1,
                                dst_y, dst_ystride);
-#else
-      x->subpixel_predict16x16(pTemp, len,
-                               _16x16mv.as_mv.col & 7,
-                               _16x16mv.as_mv.row & 7,
-                               dst_y, dst_ystride);
-#endif
     } else {
       // Apply spatial filter to create the prediction directly
       filter_mb(ptr, pre_stride, dst_y, dst_ystride, 16, 16);
@@ -826,14 +768,9 @@
   } else
 #endif
     if (_16x16mv.as_int & 0x00070007) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       x->subpixel_predict16x16(ptr, pre_stride, (_16x16mv.as_mv.col & 7) << 1,
                                (_16x16mv.as_mv.row & 7) << 1,
                                dst_y, dst_ystride);
-#else
-      x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
-                               _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
-#endif
     } else {
       RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y,
                                                dst_ystride);
@@ -873,7 +810,6 @@
 
     // U & V
     for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       if (_o16x16mv.as_int & 0x000f000f) {
         // Copy extended MB into Temp array, applying the spatial filter
         filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -884,21 +820,7 @@
                                _o16x16mv.as_mv.col & 15,
                                _o16x16mv.as_mv.row & 15,
                                pDst, dst_uvstride);
-      }
-#else  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      if (_16x16mv.as_int & 0x00070007) {
-        // Copy extended MB into Temp array, applying the spatial filter
-        filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                  Temp, len, len, len);
-
-        // Sub-pel filter
-        x->subpixel_predict8x8(pTemp, len,
-                               _16x16mv.as_mv.col & 7,
-                               _16x16mv.as_mv.row & 7,
-                               pDst, dst_uvstride);
-      }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      else {
+      } else {
         filter_mb(pSrc, pre_stride, pDst, dst_uvstride, 8, 8);
       }
 
@@ -908,18 +830,10 @@
     }
   } else
 #endif
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if (_o16x16mv.as_int & 0x000f000f) {
       x->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15,  _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride);
       x->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,  _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
-    }
-#else  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if (_16x16mv.as_int & 0x00070007) {
-      x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7,  _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
-      x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7,  _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
-    }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
       RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
     }
@@ -983,13 +897,8 @@
                 Temp, len, len, len);
 
       // Sub-pel filter
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       x->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1,
                                    (mv_row & 7) << 1, dst_y, dst_ystride);
-#else
-      x->subpixel_predict_avg16x16(pTemp, len, mv_col & 7,
-                                   mv_row & 7, dst_y, dst_ystride);
-#endif
     } else {
       // TODO Needs to AVERAGE with the dst_y
       // For now, do not apply the prediction filter in these cases!
@@ -1000,13 +909,8 @@
 #endif  // CONFIG_PRED_FILTER
   {
     if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       x->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
                                    (mv_row & 7) << 1, dst_y, dst_ystride);
-#else
-      x->subpixel_predict_avg16x16(ptr, pre_stride, mv_col & 7,
-                                   mv_row & 7, dst_y, dst_ystride);
-#endif
     } else {
       RECON_INVOKE(&x->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
                                               dst_ystride);
@@ -1038,7 +942,6 @@
 
     // U & V
     for (i = 0; i < 2; i++) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
       if ((omv_row | omv_col) & 15) {
         // Copy extended MB into Temp array, applying the spatial filter
         filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
@@ -1047,19 +950,7 @@
         // Sub-pel filter
         x->subpixel_predict_avg8x8(pTemp, len, omv_col & 15,
                                    omv_row & 15, pDst, dst_uvstride);
-      }
-#else  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      if ((mv_row | mv_col) & 7) {
-        // Copy extended MB into Temp array, applying the spatial filter
-        filter_mb(pSrc - (INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
-                  Temp, len, len, len);
-
-        // Sub-pel filter
-        x->subpixel_predict_avg8x8(pTemp, len, mv_col & 7, mv_row & 7,
-                                   pDst, dst_uvstride);
-      }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-      else {
+      } else {
         // TODO Needs to AVERAGE with the dst_[u|v]
         // For now, do not apply the prediction filter here!
         RECON_INVOKE(&x->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst,
@@ -1072,18 +963,10 @@
     }
   } else
 #endif  // CONFIG_PRED_FILTER
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     if ((omv_row | omv_col) & 15) {
       x->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, dst_u, dst_uvstride);
       x->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, dst_v, dst_uvstride);
-    }
-#else  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    if ((mv_row | mv_col) & 7) {
-      x->subpixel_predict_avg8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
-      x->subpixel_predict_avg8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
-    }
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
-    else {
+    } else {
       RECON_INVOKE(&x->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
       RECON_INVOKE(&x->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
     }
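
All of the reconinter.c call sites above follow one shape: the integer part of
the motion vector moves the source pointer, the fractional part picks the
filter phase, and exact integer-pel vectors take a plain copy path. A condensed
sketch of that dispatch (the function-pointer types are illustrative; field
arithmetic follows the code above):

    typedef void (*subpix_fn)(unsigned char *src, int stride, int xoff,
                              int yoff, unsigned char *dst, int pitch);
    typedef void (*copy_fn)(unsigned char *src, int stride,
                            unsigned char *dst, int pitch);

    static void predict(unsigned char *base, int stride,
                        int mv_row, int mv_col,          /* eighth-pel */
                        unsigned char *dst, int pitch,
                        subpix_fn subpix, copy_fn copy) {
      unsigned char *ptr = base + (mv_row >> 3) * stride + (mv_col >> 3);
      if ((mv_row | mv_col) & 7)
        subpix(ptr, stride, (mv_col & 7) << 1, (mv_row & 7) << 1, dst, pitch);
      else
        copy(ptr, stride, dst, pitch);
    }
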
--- a/vp8/common/x86/subpixel_ssse3.asm
+++ b/vp8/common/x86/subpixel_ssse3.asm
@@ -1495,7 +1495,6 @@
     times 8 db  36,  -11
     times 8 db  12,   -6
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_ssse3:
     times 8 db 128, 0
     times 8 db 120, 8
@@ -1513,15 +1512,4 @@
     times 8 db 24,  104
     times 8 db 16,  112
     times 8 db 8,   120
-%else
-vp8_bilinear_filters_ssse3:
-    times 8 db 128, 0
-    times 8 db 112, 16
-    times 8 db 96,  32
-    times 8 db 80,  48
-    times 8 db 64,  64
-    times 8 db 48,  80
-    times 8 db 32,  96
-    times 8 db 16,  112
-%endif
 
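The SSSE3 table stores each phase's weight pair replicated eight times
("times 8 db w0, w1"), i.e. the pair interleaved across all sixteen byte lanes
of one XMM load, which suits pmaddubsw-style byte-pairwise multiply-accumulate.
The weights themselves are simply (128 - 8*phase, 8*phase). A C model of one
table row (illustrative, not part of the build):

    /* One row of vp8_bilinear_filters_ssse3 for a given phase 0..15:
     * the pair (128 - 8*phase, 8*phase) repeated eight times. */
    static void ssse3_row(unsigned char row[16], int phase) {
      int i;
      for (i = 0; i < 8; i++) {
        row[2 * i]     = (unsigned char)(128 - 8 * phase);
        row[2 * i + 1] = (unsigned char)(8 * phase);
      }
    }
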
--- a/vp8/common/x86/vp8_asm_stubs.c
+++ b/vp8/common/x86/vp8_asm_stubs.c
@@ -13,15 +13,8 @@
 #include "vpx_ports/mem.h"
 #include "vp8/common/subpixel.h"
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 extern const short vp8_six_tap_mmx[16][6 * 8];
 extern const short vp8_bilinear_filters_mmx[16][2 * 8];
-#else
-extern const short vp8_six_tap_mmx[8][6 * 8];
-extern const short vp8_bilinear_filters_mmx[8][2 * 8];
-#endif
-
-// #define ANNOUNCE_FUNCTION
 
 extern void vp8_filter_block1d_h6_mmx
 (
--- a/vp8/encoder/arm/variance_arm.c
+++ b/vp8/encoder/arm/variance_arm.c
@@ -13,11 +13,7 @@
 #include "vp8/common/filter.h"
 #include "vp8/common/arm/bilinearfilter_arm.h"
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif
 
 #if HAVE_ARMV6
 
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -205,11 +205,7 @@
  */
 
 #define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SP(x) (((x)&3)<<2) // convert motion vector component to offset for svf calc
-#else
-#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
 #define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128 )>>8 : 0) // estimated cost of a motion vector (r,c)
 #define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
@@ -216,11 +212,7 @@
 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
 
 #define PREHP(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset))) // pointer to predictor base of a motionvector
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SPHP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
-#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
-#define SPHP(x) ((x)&7) // convert motion vector component to offset for svf calc
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
 #define DISTHP(r,c) vfp->svf( PREHP(r,c), y_stride, SPHP(c),SPHP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
 #define ERRHP(r,c) (MVC(r,c)+DISTHP(r,c)) // returns distortion + motion vector cost
 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = ((xd->allow_high_precision_mv)?DISTHP(r,c):DIST(r,c)); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
@@ -444,11 +436,7 @@
 #undef DISTHP
 #undef ERRHP
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
-#else
-#define SP(x) ((x)&7) // convert motion vector component to offset for svf calc
-#endif  /* CONFIG_SIXTEENTH_SUBPEL_UV */
 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                  int_mv *bestmv, int_mv *ref_mv,
                                  int error_per_bit,
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -518,17 +518,10 @@
   vptr = x->e_mbd.pre.v_buffer + offset;
 
   if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
                                         (mv_col & 7) << 1, (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
     VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
                                         (mv_col & 7) << 1, (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
-#else
-    VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
-                                        mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
-    VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
-                                        mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
-#endif
     sse2 += sse1;
   } else {
     VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -56,13 +56,8 @@
   yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
 
   if ((mv_row | mv_col) & 7) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
     x->subpixel_predict16x16(yptr, stride,
                              (mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
-#else
-    x->subpixel_predict16x16(yptr, stride,
-                             mv_col & 7, mv_row & 7, &pred[0], 16);
-#endif
   } else {
     RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
   }
@@ -77,7 +72,6 @@
   uptr = u_mb_ptr + offset;
   vptr = v_mb_ptr + offset;
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   if ((omv_row | omv_col) & 15) {
     x->subpixel_predict8x8(uptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[256], 8);
@@ -84,14 +78,6 @@
     x->subpixel_predict8x8(vptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[320], 8);
   }
-#else
-  if ((mv_row | mv_col) & 7) {
-    x->subpixel_predict8x8(uptr, stride,
-                           mv_col & 7, mv_row & 7, &pred[256], 8);
-    x->subpixel_predict8x8(vptr, stride,
-                           mv_col & 7, mv_row & 7, &pred[320], 8);
-  }
-#endif
   else {
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
     RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
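
For context, the temporal filter packs its motion-compensated prediction into
a single buffer, which is why the chroma calls above write at fixed offsets:
Y occupies pred[0..255] with pitch 16, U pred[256..319] and V pred[320..383]
with pitch 8. A sketch of that layout (sizes inferred from the offsets and
pitches in the calls above):

    /* Predictor buffer layout used by the calls above. */
    unsigned char pred[384];
    unsigned char *ypred = &pred[0];     /* 16x16 Y, pitch 16 */
    unsigned char *upred = &pred[256];   /*  8x8  U, pitch 8  */
    unsigned char *vpred = &pred[320];   /*  8x8  V, pitch 8  */
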
--- a/vp8/encoder/variance_c.c
+++ b/vp8/encoder/variance_c.c
@@ -341,13 +341,8 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 0,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }
 
 
@@ -357,13 +352,8 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 4,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }
 
 
@@ -373,13 +363,8 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
                                        ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 4,
-                                       ref_ptr, recon_stride, sse);
-#endif
 }
 
 
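The half-pel variance wrappers now pass phase 8 of 16 instead of 4 of 8.
Phase 8 is the symmetric pair (64, 64), i.e. a plain rounded average of two
pixels, which is presumably also why the SIMD dispatch files below keep
HALFNDX == 8: exact half-pel offsets can be routed to cheaper averaging code.
A minimal check of that identity:

    /* Phase 8 of the 16-phase bilinear table is (64, 64); filtering with
     * it equals the rounded average for all byte inputs:
     * (64*a + 64*b + 64) >> 7 == (a + b + 1) >> 1. */
    static unsigned char half_phase(unsigned char a, unsigned char b) {
      return (unsigned char)((a * 64 + b * 64 + 64) >> 7);
    }
    static unsigned char rounded_avg(unsigned char a, unsigned char b) {
      return (unsigned char)((a + b + 1) >> 1);
    }
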
--- a/vp8/encoder/x86/variance_impl_sse2.asm
+++ b/vp8/encoder/x86/variance_impl_sse2.asm
@@ -1348,7 +1348,6 @@
 xmm_bi_rd:
     times 8 dw 64
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_sse2:
     dw 128, 128, 128, 128, 128, 128, 128, 128,  0,  0,  0,  0,  0,  0,  0,  0
     dw 120, 120, 120, 120, 120, 120, 120, 120,  8,  8,  8,  8,  8,  8,  8,  8
@@ -1366,14 +1365,3 @@
     dw 24, 24, 24, 24, 24, 24, 24, 24, 104, 104, 104, 104, 104, 104, 104, 104
     dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
     dw 8, 8, 8, 8, 8, 8, 8, 8, 120, 120, 120, 120, 120, 120, 120, 120
-%else
-vp8_bilinear_filters_sse2:
-    dw 128, 128, 128, 128, 128, 128, 128, 128,  0,  0,  0,  0,  0,  0,  0,  0
-    dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
-    dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
-    dw 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48
-    dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
-    dw 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80
-    dw 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96
-    dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
-%endif
--- a/vp8/encoder/x86/variance_impl_ssse3.asm
+++ b/vp8/encoder/x86/variance_impl_ssse3.asm
@@ -353,7 +353,6 @@
 xmm_bi_rd:
     times 8 dw 64
 align 16
-%if CONFIG_SIXTEENTH_SUBPEL_UV
 vp8_bilinear_filters_ssse3:
     times 8 db 128, 0
     times 8 db 120, 8
@@ -371,14 +370,3 @@
     times 8 db  24, 104
     times 8 db  16, 112
     times 8 db   8, 120
-%else
-vp8_bilinear_filters_ssse3:
-    times 8 db 128, 0
-    times 8 db 112, 16
-    times 8 db 96,  32
-    times 8 db 80,  48
-    times 8 db 64,  64
-    times 8 db 48,  80
-    times 8 db 32,  96
-    times 8 db 16,  112
-%endif
--- a/vp8/encoder/x86/variance_mmx.c
+++ b/vp8/encoder/x86/variance_mmx.c
@@ -198,7 +198,6 @@
 // the mmx function that does the bilinear filtering and var calculation //
 // int one pass                                                          //
 ///////////////////////////////////////////////////////////////////////////
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
   { 128, 128, 128, 128,  0,  0,  0,  0 },
   { 120, 120, 120, 120,  8,  8,  8,  8 },
@@ -217,18 +216,6 @@
   {  16, 16, 16, 16, 112, 112, 112, 112 },
   {   8,  8,  8,  8, 120, 120, 120, 120 }
 };
-#else
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) = {
-  { 128, 128, 128, 128,  0,  0,  0,  0 },
-  { 112, 112, 112, 112, 16, 16, 16, 16 },
-  {  96, 96, 96, 96, 32, 32, 32, 32 },
-  {  80, 80, 80, 80, 48, 48, 48, 48 },
-  {  64, 64, 64, 64, 64, 64, 64, 64 },
-  {  48, 48, 48, 48, 80, 80, 80, 80 },
-  {  32, 32, 32, 32, 96, 96, 96, 96 },
-  {  16, 16, 16, 16, 112, 112, 112, 112 }
-};
-#endif
 
 unsigned int vp8_sub_pixel_variance4x4_mmx
 (
@@ -392,13 +379,8 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }
 
 
@@ -408,13 +390,8 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }
 
 
@@ -424,11 +401,6 @@
   const unsigned char *ref_ptr,
   int  recon_stride,
   unsigned int *sse) {
-#if CONFIG_SIXTEENTH_SUBPEL_UV
   return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
                                          ref_ptr, recon_stride, sse);
-#else
-  return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
-                                         ref_ptr, recon_stride, sse);
-#endif
 }
--- a/vp8/encoder/x86/variance_sse2.c
+++ b/vp8/encoder/x86/variance_sse2.c
@@ -13,11 +13,7 @@
 #include "vp8/common/pragmas.h"
 #include "vpx_ports/mem.h"
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif
 
 extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
 extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
@@ -141,11 +137,7 @@
   unsigned int *sumsquared
 );
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
-#else
-DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[8][8]);
-#endif
 
 unsigned int vp8_variance4x4_wmt(
   const unsigned char *src_ptr,
--- a/vp8/encoder/x86/variance_ssse3.c
+++ b/vp8/encoder/x86/variance_ssse3.c
@@ -13,11 +13,7 @@
 #include "vp8/common/pragmas.h"
 #include "vpx_ports/mem.h"
 
-#if CONFIG_SIXTEENTH_SUBPEL_UV
 #define HALFNDX 8
-#else
-#define HALFNDX 4
-#endif
 
 extern unsigned int vp8_get16x16var_sse2
 (