shithub: libvpx

Download patch

ref: 4455036cfc3c6b7fb9d7b85af1982e7df3711a05
parent: 879cb7d96259a71eea0038452a00241650589084
author: Ronald S. Bultje <rbultje@google.com>
date: Tue Jan 8 05:29:22 EST 2013

Merge superblocks (32x32) experiment.

Change-Id: I0df99742029834a85c4933652b0587cf5b6b2587

--- a/vp9/common/vp9_blockd.c
+++ b/vp9/common/vp9_blockd.c
@@ -16,7 +16,7 @@
   {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8},
   {0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8},
   {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8},
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8}
 #endif
 };
@@ -24,7 +24,7 @@
   {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8},
   {0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8},
   {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8},
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8}
 #endif
 };
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -122,7 +122,7 @@
   TX_8X8 = 1,                      // 8x8 dct transform
   TX_16X16 = 2,                    // 16x16 dct transform
   TX_SIZE_MAX_MB = 3,              // Number of different transforms available
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   TX_32X32 = TX_SIZE_MAX_MB,       // 32x32 dct transform
   TX_SIZE_MAX_SB,                  // Number of transforms available to SBs
 #else
@@ -226,7 +226,6 @@
   MAX_REF_FRAMES = 4
 } MV_REFERENCE_FRAME;
 
-#if CONFIG_SUPERBLOCKS
 typedef enum {
   BLOCK_SIZE_MB16X16 = 0,
   BLOCK_SIZE_SB32X32 = 1,
@@ -234,7 +233,6 @@
   BLOCK_SIZE_SB64X64 = 2,
 #endif
 } BLOCK_SIZE_TYPE;
-#endif
 
 typedef struct {
   MB_PREDICTION_MODE mode, uv_mode;
@@ -274,13 +272,9 @@
   // Flag to turn prediction signal filter on(1)/off(0 ) at the MB level
   unsigned int pred_filter_enabled;
 #endif
-    INTERPOLATIONFILTERTYPE interp_filter;
+  INTERPOLATIONFILTERTYPE interp_filter;
 
-#if CONFIG_SUPERBLOCKS
-  // FIXME need a SB array of 4 MB_MODE_INFOs that
-  // only needs one sb_type.
   BLOCK_SIZE_TYPE sb_type;
-#endif
 } MB_MODE_INFO;
 
 typedef struct {
@@ -310,7 +304,7 @@
   union b_mode_info bmi;
 } BLOCKD;
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 typedef struct superblockd {
   /* 32x32 Y and 16x16 U/V. No 2nd order transform yet. */
   DECLARE_ALIGNED(16, int16_t, diff[32*32+16*16*2]);
@@ -326,7 +320,7 @@
   DECLARE_ALIGNED(16, int16_t,  dqcoeff[400]);
   DECLARE_ALIGNED(16, uint16_t, eobs[25]);
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   SUPERBLOCKD sb_coeff_data;
 #endif
 
@@ -417,14 +411,6 @@
 
   int corrupted;
 
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
-  /* This is an intermediate buffer currently used in sub-pixel motion search
-   * to keep a copy of the reference area. This buffer can be used for other
-   * purpose.
-   */
-  DECLARE_ALIGNED(32, uint8_t, y_buf[22 * 32]);
-#endif
-
   int sb_index;
   int mb_index;   // Index of the MB in the SB (0..3)
   int q_index;
@@ -528,11 +514,9 @@
   int ib = (int)(b - xd->block);
   if (ib >= 16)
     return tx_type;
-#if CONFIG_SUPERBLOCKS
   // TODO(rbultje, debargha): Explore ADST usage for superblocks
   if (xd->mode_info_context->mbmi.sb_type)
     return tx_type;
-#endif
   if (xd->mode_info_context->mbmi.mode == B_PRED &&
       xd->q_index < ACTIVE_HT) {
     tx_type = txfm_map(
@@ -585,11 +569,9 @@
   int ib = (int)(b - xd->block);
   if (ib >= 16)
     return tx_type;
-#if CONFIG_SUPERBLOCKS
   // TODO(rbultje, debargha): Explore ADST usage for superblocks
   if (xd->mode_info_context->mbmi.sb_type)
     return tx_type;
-#endif
   if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
       xd->q_index < ACTIVE_HT8) {
     // TODO(rbultje): MB_PREDICTION_MODE / B_PREDICTION_MODE should be merged
@@ -620,11 +602,9 @@
   int ib = (int)(b - xd->block);
   if (ib >= 16)
     return tx_type;
-#if CONFIG_SUPERBLOCKS
   // TODO(rbultje, debargha): Explore ADST usage for superblocks
   if (xd->mode_info_context->mbmi.sb_type)
     return tx_type;
-#endif
   if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
       xd->q_index < ACTIVE_HT16) {
     tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
--- a/vp9/common/vp9_default_coef_probs.h
+++ b/vp9/common/vp9_default_coef_probs.h
@@ -1038,7 +1038,7 @@
     }
   }
 };
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 static const vp9_coeff_probs default_coef_probs_32x32[BLOCK_TYPES_32X32] = {
   { /* block Type 0 */
     { /* Coeff Band 0 */
@@ -1210,4 +1210,4 @@
     }
   }
 };
-#endif  // CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#endif  // CONFIG_TX32X32
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -142,7 +142,7 @@
   237, 252, 253, 238, 223, 239, 254, 255,
 };
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 #if CONFIG_DWT32X32HYBRID
 DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]) = {
   0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6,
@@ -547,7 +547,7 @@
                 vp9_default_zig_zag1d_8x8_neighbors[64 * MAX_NEIGHBORS]);
 DECLARE_ALIGNED(16, int,
                 vp9_default_zig_zag1d_16x16_neighbors[256 * MAX_NEIGHBORS]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 DECLARE_ALIGNED(16, int,
                 vp9_default_zig_zag1d_32x32_neighbors[1024 * MAX_NEIGHBORS]);
 #endif
@@ -628,7 +628,7 @@
                       vp9_default_zig_zag1d_8x8_neighbors);
   init_scan_neighbors(vp9_default_zig_zag1d_16x16, 16,
                       vp9_default_zig_zag1d_16x16_neighbors);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   init_scan_neighbors(vp9_default_zig_zag1d_32x32, 32,
                       vp9_default_zig_zag1d_32x32_neighbors);
 #endif
@@ -645,7 +645,7 @@
     return vp9_default_zig_zag1d_8x8_neighbors;
   } else if (scan == vp9_default_zig_zag1d_16x16) {
     return vp9_default_zig_zag1d_16x16_neighbors;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   } else if (scan == vp9_default_zig_zag1d_32x32) {
     return vp9_default_zig_zag1d_32x32_neighbors;
 #endif
@@ -693,7 +693,7 @@
   vpx_memcpy(pc->fc.hybrid_coef_probs_16x16,
              default_hybrid_coef_probs_16x16,
              sizeof(pc->fc.hybrid_coef_probs_16x16));
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vpx_memcpy(pc->fc.coef_probs_32x32, default_coef_probs_32x32,
              sizeof(pc->fc.coef_probs_32x32));
 #endif
@@ -840,7 +840,7 @@
                     cm->fc.pre_hybrid_coef_probs_16x16,
                     BLOCK_TYPES_16X16, cm->fc.hybrid_coef_counts_16x16,
                     count_sat, update_factor);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   update_coef_probs(cm->fc.coef_probs_32x32, cm->fc.pre_coef_probs_32x32,
                     BLOCK_TYPES_32X32, cm->fc.coef_counts_32x32,
                     count_sat, update_factor);
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -66,7 +66,7 @@
 
 #define BLOCK_TYPES_16X16 4
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 #define BLOCK_TYPES_32X32 4
 #endif
 
@@ -77,7 +77,7 @@
 extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_4x4[16]);
 extern DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]);
 extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]);
 #endif
 
@@ -122,7 +122,7 @@
 
 extern DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]);
 extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_32x32[1024]);
 #endif
 
@@ -154,7 +154,7 @@
                        64 * MAX_NEIGHBORS]);
 extern DECLARE_ALIGNED(16, int, vp9_default_zig_zag1d_16x16_neighbors[
                        256 * MAX_NEIGHBORS]);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 extern DECLARE_ALIGNED(16, int, vp9_default_zig_zag1d_32x32_neighbors[
                        1024 * MAX_NEIGHBORS]);
 #endif
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -272,13 +272,11 @@
   -NEWMV, -SPLITMV
 };
 
-#if CONFIG_SUPERBLOCKS
 const vp9_tree_index vp9_sb_mv_ref_tree[6] = {
   -ZEROMV, 2,
   -NEARESTMV, 4,
   -NEARMV, -NEWMV
 };
-#endif
 
 const vp9_tree_index vp9_sub_mv_ref_tree[6] = {
   -LEFT4X4, 2,
@@ -289,10 +287,8 @@
 struct vp9_token_struct vp9_bmode_encodings[VP9_NKF_BINTRAMODES];
 struct vp9_token_struct vp9_kf_bmode_encodings[VP9_KF_BINTRAMODES];
 struct vp9_token_struct vp9_ymode_encodings[VP9_YMODES];
-#if CONFIG_SUPERBLOCKS
 struct vp9_token_struct vp9_sb_ymode_encodings[VP9_I32X32_MODES];
 struct vp9_token_struct vp9_sb_kf_ymode_encodings[VP9_I32X32_MODES];
-#endif
 struct vp9_token_struct vp9_kf_ymode_encodings[VP9_YMODES];
 struct vp9_token_struct vp9_uv_mode_encodings[VP9_UV_MODES];
 struct vp9_token_struct vp9_i8x8_mode_encodings[VP9_I8X8_MODES];
@@ -299,9 +295,7 @@
 struct vp9_token_struct vp9_mbsplit_encodings[VP9_NUMMBSPLITS];
 
 struct vp9_token_struct vp9_mv_ref_encoding_array[VP9_MVREFS];
-#if CONFIG_SUPERBLOCKS
 struct vp9_token_struct vp9_sb_mv_ref_encoding_array[VP9_MVREFS];
-#endif
 struct vp9_token_struct vp9_sub_mv_ref_encoding_array[VP9_SUBMVREFS];
 
 void vp9_init_mbmode_probs(VP9_COMMON *x) {
@@ -310,11 +304,9 @@
   vp9_tree_probs_from_distribution(VP9_YMODES, vp9_ymode_encodings,
                                    vp9_ymode_tree, x->fc.ymode_prob,
                                    bct, y_mode_cts);
-#if CONFIG_SUPERBLOCKS
   vp9_tree_probs_from_distribution(VP9_I32X32_MODES, vp9_sb_ymode_encodings,
                                    vp9_sb_ymode_tree, x->fc.sb_ymode_prob,
                                    bct, y_mode_cts);
-#endif
   {
     int i;
     for (i = 0; i < 8; i++) {
@@ -321,13 +313,11 @@
       vp9_tree_probs_from_distribution(VP9_YMODES, vp9_kf_ymode_encodings,
                                        vp9_kf_ymode_tree, x->kf_ymode_prob[i],
                                        bct, kf_y_mode_cts[i]);
-#if CONFIG_SUPERBLOCKS
       vp9_tree_probs_from_distribution(VP9_I32X32_MODES,
                                        vp9_sb_kf_ymode_encodings,
                                        vp9_sb_kf_ymode_tree,
                                        x->sb_kf_ymode_prob[i], bct,
                                        kf_y_mode_cts[i]);
-#endif
     }
   }
   {
@@ -426,10 +416,8 @@
   vp9_tokens_from_tree(vp9_bmode_encodings,   vp9_bmode_tree);
   vp9_tokens_from_tree(vp9_ymode_encodings,   vp9_ymode_tree);
   vp9_tokens_from_tree(vp9_kf_ymode_encodings, vp9_kf_ymode_tree);
-#if CONFIG_SUPERBLOCKS
   vp9_tokens_from_tree(vp9_sb_ymode_encodings, vp9_sb_ymode_tree);
   vp9_tokens_from_tree(vp9_sb_kf_ymode_encodings, vp9_sb_kf_ymode_tree);
-#endif
   vp9_tokens_from_tree(vp9_uv_mode_encodings,  vp9_uv_mode_tree);
   vp9_tokens_from_tree(vp9_i8x8_mode_encodings,  vp9_i8x8_mode_tree);
   vp9_tokens_from_tree(vp9_mbsplit_encodings, vp9_mbsplit_tree);
@@ -438,10 +426,8 @@
 
   vp9_tokens_from_tree_offset(vp9_mv_ref_encoding_array,
                               vp9_mv_ref_tree, NEARESTMV);
-#if CONFIG_SUPERBLOCKS
   vp9_tokens_from_tree_offset(vp9_sb_mv_ref_encoding_array,
                               vp9_sb_mv_ref_tree, NEARESTMV);
-#endif
   vp9_tokens_from_tree_offset(vp9_sub_mv_ref_encoding_array,
                               vp9_sub_mv_ref_tree, LEFT4X4);
 }
@@ -599,11 +585,9 @@
   update_mode_probs(VP9_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
                     cm->fc.ymode_counts, cm->fc.pre_ymode_prob,
                     cm->fc.ymode_prob);
-#if CONFIG_SUPERBLOCKS
   update_mode_probs(VP9_I32X32_MODES, vp9_sb_ymode_encodings, vp9_sb_ymode_tree,
                     cm->fc.sb_ymode_counts, cm->fc.pre_sb_ymode_prob,
                     cm->fc.sb_ymode_prob);
-#endif
   for (i = 0; i < VP9_YMODES; ++i) {
     update_mode_probs(VP9_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree,
                       cm->fc.uv_mode_counts[i], cm->fc.pre_uv_mode_prob[i],
--- a/vp9/common/vp9_findnearmv.c
+++ b/vp9/common/vp9_findnearmv.c
@@ -190,7 +190,6 @@
                                    SP(this_mv.as_mv.row),
                                    above_src, xd->dst.y_stride, &sse);
       score += sse;
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
         vp9_sub_pixel_variance16x2_c(above_ref + offset + 16,
                                      ref_y_stride,
@@ -215,7 +214,6 @@
         score += sse;
       }
 #endif
-#endif
     }
     if (xd->left_available) {
       vp9_sub_pixel_variance2x16_c(left_ref + offset, ref_y_stride,
@@ -223,7 +221,6 @@
                                    SP(this_mv.as_mv.row),
                                    left_src, xd->dst.y_stride, &sse);
       score += sse;
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
         vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 16,
                                      ref_y_stride,
@@ -251,7 +248,6 @@
         score += sse;
       }
 #endif
-#endif
     }
 #else
     row_offset = (this_mv.as_mv.row > 0) ?
@@ -263,7 +259,6 @@
     if (xd->up_available) {
       score += vp9_sad16x3(above_src, xd->dst.y_stride,
                            above_ref + offset, ref_y_stride);
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
         score += vp9_sad16x3(above_src + 16, xd->dst.y_stride,
                              above_ref + offset + 16, ref_y_stride);
@@ -276,12 +271,10 @@
                              above_ref + offset + 48, ref_y_stride);
       }
 #endif
-#endif
     }
     if (xd->left_available) {
       score += vp9_sad3x16(left_src, xd->dst.y_stride,
                            left_ref + offset, ref_y_stride);
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
         score += vp9_sad3x16(left_src + xd->dst.y_stride * 16,
                              xd->dst.y_stride,
@@ -299,7 +292,6 @@
                              left_ref + offset + ref_y_stride * 48,
                              ref_y_stride);
       }
-#endif
 #endif
     }
 #endif
--- a/vp9/common/vp9_invtrans.c
+++ b/vp9/common/vp9_invtrans.c
@@ -146,7 +146,7 @@
   vp9_inverse_transform_mbuv_8x8(xd);
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb) {
   vp9_short_idct32x32(xd_sb->dqcoeff, xd_sb->diff, 64);
 }
--- a/vp9/common/vp9_invtrans.h
+++ b/vp9/common/vp9_invtrans.h
@@ -39,7 +39,7 @@
 
 extern void vp9_inverse_transform_mby_16x16(MACROBLOCKD *xd);
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 extern void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb);
 extern void vp9_inverse_transform_sbuv_16x16(SUPERBLOCKD *xd_sb);
 #endif
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -229,16 +229,13 @@
           lfi.lim = lfi_n->lim[filter_level];
           lfi.hev_thr = lfi_n->hev_thr[hev_index];
 
-          if (mb_col > 0
-#if CONFIG_SUPERBLOCKS
-              && !((mb_col & 1) && mode_info_context->mbmi.sb_type &&
-                   ((skip_lf && mb_lf_skip(&mode_info_context[-1].mbmi))
+          if (mb_col > 0 &&
+              !((mb_col & 1) && mode_info_context->mbmi.sb_type &&
+                ((skip_lf && mb_lf_skip(&mode_info_context[-1].mbmi))
 #if CONFIG_TX32X32
-                   || tx_size == TX_32X32
+                || tx_size == TX_32X32
 #endif
-                   ))
-#endif
-              )
+                )))
             vp9_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post->y_stride,
                                 post->uv_stride, &lfi);
           if (!skip_lf) {
@@ -251,16 +248,13 @@
 
           }
           /* don't apply across umv border */
-          if (mb_row > 0
-#if CONFIG_SUPERBLOCKS
-              && !((mb_row & 1) && mode_info_context->mbmi.sb_type &&
-                   ((skip_lf && mb_lf_skip(&mode_info_context[-mis].mbmi))
+          if (mb_row > 0 &&
+              !((mb_row & 1) && mode_info_context->mbmi.sb_type &&
+                ((skip_lf && mb_lf_skip(&mode_info_context[-mis].mbmi))
 #if CONFIG_TX32X32
-                    || tx_size == TX_32X32
+                 || tx_size == TX_32X32
 #endif
-                    ))
-#endif
-              )
+                 )))
             vp9_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post->y_stride,
                                 post->uv_stride, &lfi);
           if (!skip_lf) {
@@ -273,12 +267,9 @@
           }
         } else {
           // FIXME: Not 8x8 aware
-          if (mb_col > 0  &&
-              !(skip_lf && mb_lf_skip(&mode_info_context[-1].mbmi))
-#if CONFIG_SUPERBLOCKS
-              && !((mb_col & 1) && mode_info_context->mbmi.sb_type)
-#endif
-              )
+          if (mb_col > 0 &&
+              !(skip_lf && mb_lf_skip(&mode_info_context[-1].mbmi)) &&
+              !((mb_col & 1) && mode_info_context->mbmi.sb_type))
             vp9_loop_filter_simple_mbv(y_ptr, post->y_stride,
                                        lfi_n->mblim[filter_level]);
           if (!skip_lf)
@@ -287,11 +278,8 @@
 
           /* don't apply across umv border */
           if (mb_row > 0 &&
-              !(skip_lf && mb_lf_skip(&mode_info_context[-mis].mbmi))
-#if CONFIG_SUPERBLOCKS
-              && !((mb_row & 1) && mode_info_context->mbmi.sb_type)
-#endif
-              )
+              !(skip_lf && mb_lf_skip(&mode_info_context[-mis].mbmi)) &&
+              !((mb_row & 1) && mode_info_context->mbmi.sb_type))
             vp9_loop_filter_simple_mbh(y_ptr, post->y_stride,
                                        lfi_n->mblim[filter_level]);
           if (!skip_lf)
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -17,7 +17,6 @@
 };
 static int mb_ref_distance_weight[MVREF_NEIGHBOURS] =
   { 3, 3, 2, 1, 1, 1, 1, 1 };
-#if CONFIG_SUPERBLOCKS
 static int sb_mv_ref_search[MVREF_NEIGHBOURS][2] = {
     {0, -1}, {-1, 0}, {1, -1}, {-1, 1},
     {-1, -1}, {0, -2}, {-2, 0}, {-1, -2}
@@ -24,7 +23,6 @@
 };
 static int sb_ref_distance_weight[MVREF_NEIGHBOURS] =
   { 3, 3, 2, 2, 2, 1, 1, 1 };
-#endif
 
 // clamp_mv
 #define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
@@ -236,7 +234,6 @@
   vpx_memset(candidate_mvs, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
   vpx_memset(candidate_scores, 0, sizeof(candidate_scores));
 
-#if CONFIG_SUPERBLOCKS
   if (mbmi->sb_type) {
     mv_ref_search = sb_mv_ref_search;
     ref_distance_weight = sb_ref_distance_weight;
@@ -244,10 +241,6 @@
     mv_ref_search = mb_mv_ref_search;
     ref_distance_weight = mb_ref_distance_weight;
   }
-#else
-  mv_ref_search = mb_mv_ref_search;
-  ref_distance_weight = mb_ref_distance_weight;
-#endif
 
   // We first scan for candidate vectors that match the current reference frame
   // Look at nearest neigbours
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -44,9 +44,7 @@
 typedef struct frame_contexts {
   vp9_prob bmode_prob[VP9_NKF_BINTRAMODES - 1];
   vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
-#if CONFIG_SUPERBLOCKS
   vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
-#endif
   vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
   vp9_prob i8x8_mode_prob[VP9_I8X8_MODES - 1];
   vp9_prob sub_mv_ref_prob[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
@@ -57,7 +55,7 @@
   vp9_coeff_probs hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
   vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES_16X16];
   vp9_coeff_probs hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES_32X32];
 #endif
 
@@ -65,9 +63,7 @@
   nmv_context pre_nmvc;
   vp9_prob pre_bmode_prob[VP9_NKF_BINTRAMODES - 1];
   vp9_prob pre_ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
-#if CONFIG_SUPERBLOCKS
   vp9_prob pre_sb_ymode_prob[VP9_I32X32_MODES - 1];
-#endif
   vp9_prob pre_uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
   vp9_prob pre_i8x8_mode_prob[VP9_I8X8_MODES - 1];
   vp9_prob pre_sub_mv_ref_prob[SUBMVREF_COUNT][VP9_SUBMVREFS - 1];
@@ -74,9 +70,7 @@
   vp9_prob pre_mbsplit_prob[VP9_NUMMBSPLITS - 1];
   unsigned int bmode_counts[VP9_NKF_BINTRAMODES];
   unsigned int ymode_counts[VP9_YMODES];   /* interframe intra mode probs */
-#if CONFIG_SUPERBLOCKS
   unsigned int sb_ymode_counts[VP9_I32X32_MODES];
-#endif
   unsigned int uv_mode_counts[VP9_YMODES][VP9_UV_MODES];
   unsigned int i8x8_mode_counts[VP9_I8X8_MODES];   /* interframe intra probs */
   unsigned int sub_mv_ref_counts[SUBMVREF_COUNT][VP9_SUBMVREFS];
@@ -88,7 +82,7 @@
   vp9_coeff_probs pre_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
   vp9_coeff_probs pre_coef_probs_16x16[BLOCK_TYPES_16X16];
   vp9_coeff_probs pre_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_coeff_probs pre_coef_probs_32x32[BLOCK_TYPES_32X32];
 #endif
 
@@ -98,7 +92,7 @@
   vp9_coeff_count hybrid_coef_counts_8x8[BLOCK_TYPES_8X8];
   vp9_coeff_count coef_counts_16x16[BLOCK_TYPES_16X16];
   vp9_coeff_count hybrid_coef_counts_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_coeff_count coef_counts_32x32[BLOCK_TYPES_32X32];
 #endif
 
@@ -131,11 +125,11 @@
   ONLY_4X4            = 0,
   ALLOW_8X8           = 1,
   ALLOW_16X16         = 2,
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   ALLOW_32X32         = 3,
 #endif
-  TX_MODE_SELECT      = 3 + (CONFIG_TX32X32 && CONFIG_SUPERBLOCKS),
-  NB_TXFM_MODES       = 4 + (CONFIG_TX32X32 && CONFIG_SUPERBLOCKS),
+  TX_MODE_SELECT      = 3 + CONFIG_TX32X32,
+  NB_TXFM_MODES       = 4 + CONFIG_TX32X32,
 } TXFM_MODE;
 
 typedef struct VP9Common {
@@ -237,9 +231,7 @@
                         [VP9_KF_BINTRAMODES]
                         [VP9_KF_BINTRAMODES - 1];
   vp9_prob kf_ymode_prob[8][VP9_YMODES - 1]; /* keyframe "" */
-#if CONFIG_SUPERBLOCKS
   vp9_prob sb_kf_ymode_prob[8][VP9_I32X32_MODES - 1];
-#endif
   int kf_ymode_probs_index;
   int kf_ymode_probs_update;
   vp9_prob kf_uv_mode_prob[VP9_YMODES] [VP9_UV_MODES - 1];
@@ -247,12 +239,10 @@
   vp9_prob prob_intra_coded;
   vp9_prob prob_last_coded;
   vp9_prob prob_gf_coded;
-#if CONFIG_SUPERBLOCKS
   vp9_prob sb32_coded;
 #if CONFIG_SUPERBLOCKS64
   vp9_prob sb64_coded;
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
 
   // Context probabilities when using predictive coding of segment id
   vp9_prob segment_pred_probs[PREDICTION_PROBS];
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -223,14 +223,11 @@
 void vp9_set_pred_flag(MACROBLOCKD *const xd,
                        PRED_ID pred_id,
                        unsigned char pred_flag) {
-#if CONFIG_SUPERBLOCKS
   const int mis = xd->mode_info_stride;
-#endif
 
   switch (pred_id) {
     case PRED_SEG_ID:
       xd->mode_info_context->mbmi.seg_id_predicted = pred_flag;
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type) {
 #define sub(a, b) (b) < 0 ? (a) + (b) : (a)
         const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
@@ -245,12 +242,10 @@
           }
         }
       }
-#endif
       break;
 
     case PRED_REF:
       xd->mode_info_context->mbmi.ref_predicted = pred_flag;
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type) {
         const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
         const int x_mbs = sub(n_mbs, xd->mb_to_right_edge >> 7);
@@ -263,12 +258,10 @@
           }
         }
       }
-#endif
       break;
 
     case PRED_MBSKIP:
       xd->mode_info_context->mbmi.mb_skip_coeff = pred_flag;
-#if CONFIG_SUPERBLOCKS
       if (xd->mode_info_context->mbmi.sb_type) {
         const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
         const int x_mbs = sub(n_mbs, xd->mb_to_right_edge >> 7);
@@ -281,7 +274,6 @@
           }
         }
       }
-#endif
       break;
 
     default:
@@ -299,11 +291,8 @@
                                     const MACROBLOCKD *const xd, int MbIndex) {
   // Currently the prediction for the macroblock segment ID is
   // the value stored for this macroblock in the previous frame.
-#if CONFIG_SUPERBLOCKS
   if (!xd->mode_info_context->mbmi.sb_type) {
-#endif
     return cm->last_frame_seg_map[MbIndex];
-#if CONFIG_SUPERBLOCKS
   } else {
     const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
     const int mb_col = MbIndex % cm->mb_cols;
@@ -321,7 +310,6 @@
 
     return seg_id;
   }
-#endif
 }
 
 MV_REFERENCE_FRAME vp9_get_pred_ref(const VP9_COMMON *const cm,
--- a/vp9/common/vp9_recon.c
+++ b/vp9/common/vp9_recon.c
@@ -81,7 +81,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
   int x, y;
   BLOCKD *b = &xd->block[0];
@@ -146,8 +145,7 @@
     vdiff += 16;
   }
 }
-#endif
-#endif
+#endif  // CONFIG_TX32X32
 
 void vp9_recon_mby_c(MACROBLOCKD *xd) {
   int i;
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -711,7 +711,6 @@
   vp9_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
                                         uint8_t *dst_y,
                                         uint8_t *dst_u,
@@ -781,6 +780,7 @@
 #endif
 }
 
+#if CONFIG_SUPERBLOCKS64
 void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
                                         uint8_t *dst_y,
                                         uint8_t *dst_u,
@@ -844,7 +844,7 @@
   }
 #endif
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 /*
  * The following functions should be called after an initial
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -47,7 +47,6 @@
                                                    int dst_ystride,
                                                    int dst_uvstride);
 
-#if CONFIG_SUPERBLOCKS
 extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
                                                uint8_t *dst_y,
                                                uint8_t *dst_u,
@@ -55,6 +54,7 @@
                                                int dst_ystride,
                                                int dst_uvstride);
 
+#if CONFIG_SUPERBLOCKS64
 extern void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
                                                uint8_t *dst_y,
                                                uint8_t *dst_u,
@@ -61,7 +61,7 @@
                                                uint8_t *dst_v,
                                                int dst_ystride,
                                                int dst_uvstride);
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);
 
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -658,7 +658,6 @@
                      vpred, uvstride, vintrapredictor, 8, 8);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd,
                                                uint8_t *ypred,
                                                int ystride) {
@@ -704,6 +703,7 @@
   vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride);
 }
 
+#if CONFIG_SUPERBLOCKS64
 void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd,
                                                uint8_t *ypred,
                                                int ystride) {
@@ -744,8 +744,8 @@
   vp9_build_interintra_64x64_predictors_sby(xd, ypred, ystride);
   vp9_build_interintra_64x64_predictors_sbuv(xd, upred, vpred, uvstride);
 }
-#endif
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
+#endif  // CONFIG_COMP_INTERINTRA_PRED
 
 void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) {
   vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
@@ -761,7 +761,6 @@
                                       xd->up_available, xd->left_available);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) {
   vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
                                       xd->dst.y_buffer, xd->dst.y_stride,
@@ -769,6 +768,7 @@
                                       xd->up_available, xd->left_available);
 }
 
+#if CONFIG_SUPERBLOCKS64
 void vp9_build_intra_predictors_sb64y_s(MACROBLOCKD *xd) {
   vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
                                       xd->dst.y_buffer, xd->dst.y_stride,
@@ -775,7 +775,7 @@
                                       xd->mode_info_context->mbmi.mode, 64,
                                       xd->up_available, xd->left_available);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 #if CONFIG_COMP_INTRA_PRED
 void vp9_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
@@ -827,7 +827,6 @@
                                            8);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
   vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
                                            xd->dst.v_buffer, xd->dst.uv_stride,
@@ -835,6 +834,7 @@
                                            16);
 }
 
+#if CONFIG_SUPERBLOCKS64
 void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) {
   vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
                                            xd->dst.v_buffer, xd->dst.uv_stride,
@@ -841,7 +841,7 @@
                                            xd->mode_info_context->mbmi.uv_mode,
                                            32);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 #if CONFIG_COMP_INTRA_PRED
 void vp9_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
--- a/vp9/common/vp9_reconintra.h
+++ b/vp9/common/vp9_reconintra.h
@@ -34,7 +34,6 @@
                                                        int uvstride);
 #endif  // CONFIG_COMP_INTERINTRA_PRED
 
-#if CONFIG_SUPERBLOCKS
 extern void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
                                                      uint8_t *ypred,
                                                      uint8_t *upred,
@@ -47,6 +46,5 @@
                                                      uint8_t *vpred,
                                                      int ystride,
                                                      int uvstride);
-#endif  // CONFIG_SUPERBLOCKS
 
 #endif  // VP9_COMMON_VP9_RECONINTRA_H_
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -77,6 +77,16 @@
 prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dstu, uint8_t *dstv, int stride, uint16_t *eobs"
 specialize vp9_dequant_idct_add_uv_block
 
+if [ "$CONFIG_TX32X32" = "yes" ]; then
+
+prototype void vp9_dequant_idct_add_32x32 "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int pitch, int stride, int eob"
+specialize vp9_dequant_idct_add_32x32
+
+prototype void vp9_dequant_idct_add_uv_block_16x16 "int16_t *q, const int16_t *dq, uint8_t *dstu, uint8_t *dstv, int stride, uint16_t *eobs"
+specialize vp9_dequant_idct_add_uv_block_16x16
+
+fi
+
 #
 # RECON
 #
@@ -125,6 +135,16 @@
 prototype void vp9_recon_mbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
 specialize void vp9_recon_mbuv_s
 
+if [ "$CONFIG_TX32X32" = "yes" ]; then
+
+prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst"
+specialize vp9_recon_sby_s
+
+prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
+specialize void vp9_recon_sbuv_s
+
+fi
+
 prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x"
 specialize vp9_build_intra_predictors_mby_s
 
@@ -152,6 +172,16 @@
 prototype void vp9_build_comp_intra_predictors_mbuv "struct macroblockd *x"
 specialize vp9_build_comp_intra_predictors_mbuv;
 
+if [ "$CONFIG_SUPERBLOCKS64" = "yes" ]; then
+
+prototype void vp9_build_intra_predictors_sb64y_s "struct macroblockd *x"
+specialize vp9_build_intra_predictors_sb64y_s;
+
+prototype void vp9_build_intra_predictors_sb64uv_s "struct macroblockd *x"
+specialize vp9_build_intra_predictors_sb64uv_s;
+
+fi
+
 prototype void vp9_intra4x4_predict "struct blockd *x, int b_mode, uint8_t *predictor"
 specialize vp9_intra4x4_predict;
 
@@ -396,17 +426,11 @@
 prototype void vp9_short_inv_walsh4x4_lossless "int16_t *in, int16_t *out"
 fi
 
-
-
-if [ "$CONFIG_SUPERBLOCKS" = "yes" ]; then
-
 prototype unsigned int vp9_sad32x3 "const uint8_t *src_ptr, int  src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad"
 specialize vp9_sad32x3
 
 prototype unsigned int vp9_sad3x32 "const uint8_t *src_ptr, int  src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad"
 specialize vp9_sad3x32
-
-fi
 
 #
 # Encoder functions below this point.
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -51,7 +51,6 @@
   return treed_read(bc, vp9_ymode_tree, p);
 }
 
-#if CONFIG_SUPERBLOCKS
 static int read_sb_ymode(vp9_reader *bc, const vp9_prob *p) {
   return treed_read(bc, vp9_sb_ymode_tree, p);
 }
@@ -59,7 +58,6 @@
 static int read_kf_sb_ymode(vp9_reader *bc, const vp9_prob *p) {
   return treed_read(bc, vp9_uv_mode_tree, p);
 }
-#endif
 
 static int read_kf_mb_ymode(vp9_reader *bc, const vp9_prob *p) {
   return treed_read(bc, vp9_kf_ymode_tree, p);
@@ -122,7 +120,6 @@
   m->mbmi.segment_id = 0;
   if (pbi->mb.update_mb_segmentation_map) {
     read_mb_segid(bc, &m->mbmi, &pbi->mb);
-#if CONFIG_SUPERBLOCKS
     if (m->mbmi.sb_type) {
       const int nmbs = 1 << m->mbmi.sb_type;
       const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
@@ -135,9 +132,7 @@
               m->mbmi.segment_id;
         }
       }
-    } else
-#endif
-    {
+    } else {
       cm->last_frame_seg_map[map_index] = m->mbmi.segment_id;
     }
   }
@@ -161,14 +156,13 @@
       m->mbmi.mb_skip_coeff = 0;
   }
 
-#if CONFIG_SUPERBLOCKS
   if (m->mbmi.sb_type) {
     y_mode = (MB_PREDICTION_MODE) read_kf_sb_ymode(bc,
       pbi->common.sb_kf_ymode_prob[pbi->common.kf_ymode_probs_index]);
-  } else
-#endif
-  y_mode = (MB_PREDICTION_MODE) read_kf_mb_ymode(bc,
-    pbi->common.kf_ymode_prob[pbi->common.kf_ymode_probs_index]);
+  } else {
+    y_mode = (MB_PREDICTION_MODE) read_kf_mb_ymode(bc,
+      pbi->common.kf_ymode_prob[pbi->common.kf_ymode_probs_index]);
+  }
 #if CONFIG_COMP_INTRA_PRED
   m->mbmi.second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
 #endif
@@ -228,12 +222,12 @@
     m->mbmi.txfm_size = vp9_read(bc, cm->prob_tx[0]);
     if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED) {
       m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       if (m->mbmi.txfm_size != TX_8X8 && m->mbmi.sb_type)
         m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[2]);
 #endif
     }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   } else if (cm->txfm_mode >= ALLOW_32X32 && m->mbmi.sb_type) {
     m->mbmi.txfm_size = TX_32X32;
 #endif
@@ -504,11 +498,9 @@
   return (MV_REFERENCE_FRAME)ref_frame;
 }
 
-#if CONFIG_SUPERBLOCKS
 static MB_PREDICTION_MODE read_sb_mv_ref(vp9_reader *bc, const vp9_prob *p) {
   return (MB_PREDICTION_MODE) treed_read(bc, vp9_sb_mv_ref_tree, p);
 }
-#endif
 
 static MB_PREDICTION_MODE read_mv_ref(vp9_reader *bc, const vp9_prob *p) {
   return (MB_PREDICTION_MODE) treed_read(bc, vp9_mv_ref_tree, p);
@@ -598,7 +590,6 @@
       } while (++i < VP9_YMODES - 1);
     }
 
-#if CONFIG_SUPERBLOCKS
     if (vp9_read_bit(bc)) {
       int i = 0;
 
@@ -606,7 +597,6 @@
         cm->fc.sb_ymode_prob[i] = (vp9_prob) vp9_read_literal(bc, 8);
       } while (++i < VP9_I32X32_MODES - 1);
     }
-#endif
 
     read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
   }
@@ -654,7 +644,6 @@
       else {
         read_mb_segid(bc, mbmi, xd);
       }
-#if CONFIG_SUPERBLOCKS
       if (mbmi->sb_type) {
         const int nmbs = 1 << mbmi->sb_type;
         const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
@@ -667,13 +656,10 @@
                 mbmi->segment_id;
           }
         }
-      } else
-#endif
-      {
+      } else {
         cm->last_frame_seg_map[index] = mbmi->segment_id;
       }
     } else {
-#if CONFIG_SUPERBLOCKS
       if (mbmi->sb_type) {
         const int nmbs = 1 << mbmi->sb_type;
         const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
@@ -689,9 +675,7 @@
           }
         }
         mbmi->segment_id = segment_id;
-      } else
-#endif
-      {
+      } else {
         mbmi->segment_id = cm->last_frame_seg_map[index];
       }
     }
@@ -716,11 +700,7 @@
   int mb_to_right_edge;
   int mb_to_top_edge;
   int mb_to_bottom_edge;
-#if CONFIG_SUPERBLOCKS
   const int mb_size = 1 << mi->mbmi.sb_type;
-#else
-  const int mb_size = 1;
-#endif
 
   mb_to_top_edge = xd->mb_to_top_edge;
   mb_to_bottom_edge = xd->mb_to_bottom_edge;
@@ -818,12 +798,10 @@
         mbmi->mode =
           vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE);
       } else {
-#if CONFIG_SUPERBLOCKS
         if (mbmi->sb_type)
           mbmi->mode = read_sb_mv_ref(bc, mv_ref_p);
         else
-#endif
-        mbmi->mode = read_mv_ref(bc, mv_ref_p);
+          mbmi->mode = read_mv_ref(bc, mv_ref_p);
 
         vp9_accum_mv_refs(&pbi->common, mbmi->mode,
                           mbmi->mb_mode_context[ref_frame]);
@@ -1172,12 +1150,10 @@
     if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_MODE)) {
       mbmi->mode = (MB_PREDICTION_MODE)
                    vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE);
-#if CONFIG_SUPERBLOCKS
     } else if (mbmi->sb_type) {
       mbmi->mode = (MB_PREDICTION_MODE)
                    read_sb_ymode(bc, pbi->common.fc.sb_ymode_prob);
       pbi->common.fc.sb_ymode_counts[mbmi->mode]++;
-#endif
     } else {
       mbmi->mode = (MB_PREDICTION_MODE)
                    read_ymode(bc, pbi->common.fc.ymode_prob);
@@ -1249,12 +1225,12 @@
     if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED &&
         mbmi->mode != SPLITMV) {
       mbmi->txfm_size += vp9_read(bc, cm->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       if (mbmi->sb_type && mbmi->txfm_size != TX_8X8)
         mbmi->txfm_size += vp9_read(bc, cm->prob_tx[2]);
 #endif
     }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   } else if (mbmi->sb_type && cm->txfm_mode >= ALLOW_32X32) {
     mbmi->txfm_size = TX_32X32;
 #endif
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -172,7 +172,6 @@
  */
 static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
   if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
       vp9_build_intra_predictors_sb64uv_s(xd);
@@ -182,14 +181,11 @@
     if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
       vp9_build_intra_predictors_sbuv_s(xd);
       vp9_build_intra_predictors_sby_s(xd);
-    } else
-#endif  // CONFIG_SUPERBLOCKS
-    {
+    } else {
       vp9_build_intra_predictors_mbuv_s(xd);
       vp9_build_intra_predictors_mby_s(xd);
     }
   } else {
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
       vp9_build_inter64x64_predictors_sb(xd,
@@ -207,9 +203,7 @@
                                          xd->dst.v_buffer,
                                          xd->dst.y_stride,
                                          xd->dst.uv_stride);
-    } else
-#endif  // CONFIG_SUPERBLOCKS
-    {
+    } else {
       vp9_build_1st_inter16x16_predictors_mb(xd,
                                              xd->dst.y_buffer,
                                              xd->dst.u_buffer,
@@ -562,7 +556,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
                             BOOL_DECODER* const bc, int n,
                             int maska, int shiftb) {
@@ -910,46 +903,43 @@
                                             xd->dst.u_buffer, xd->dst.v_buffer,
                                             xd->dst.uv_stride, xd->eobs + 16);
     }
-  } else {
+  } else
 #endif
-  for (n = 0; n < 4; n++) {
-    int x_idx = n & 1, y_idx = n >> 1;
+  {
+    for (n = 0; n < 4; n++) {
+      int x_idx = n & 1, y_idx = n >> 1;
 
-    if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows)
-      continue;
+      if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows)
+        continue;
 
+      xd->above_context = pc->above_context + mb_col + x_idx;
+      xd->left_context = pc->left_context + y_idx + (mb_row & 2);
+      xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
+      for (i = 0; i < 25; i++) {
+        xd->block[i].eob = 0;
+        xd->eobs[i] = 0;
+      }
 
-    xd->above_context = pc->above_context + mb_col + x_idx;
-    xd->left_context = pc->left_context + y_idx + (mb_row & 2);
-    xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
-    for (i = 0; i < 25; i++) {
-      xd->block[i].eob = 0;
-      xd->eobs[i] = 0;
-    }
+      eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
+      if (eobtotal == 0) {  // skip loopfilter
+        xd->mode_info_context->mbmi.mb_skip_coeff = 1;
+        continue;
+      }
 
-    eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
-    if (eobtotal == 0) {  // skip loopfilter
-      xd->mode_info_context->mbmi.mb_skip_coeff = 1;
-      continue;
+      if (tx_size == TX_16X16) {
+        decode_16x16_sb(pbi, xd, bc, n, 1, 1);
+      } else if (tx_size == TX_8X8) {
+        decode_8x8_sb(pbi, xd, bc, n, 1, 1);
+      } else {
+        decode_4x4_sb(pbi, xd, bc, n, 1, 1);
+      }
     }
 
-    if (tx_size == TX_16X16) {
-      decode_16x16_sb(pbi, xd, bc, n, 1, 1);
-    } else if (tx_size == TX_8X8) {
-      decode_8x8_sb(pbi, xd, bc, n, 1, 1);
-    } else {
-      decode_4x4_sb(pbi, xd, bc, n, 1, 1);
-    }
+    xd->above_context = pc->above_context + mb_col;
+    xd->left_context = pc->left_context + (mb_row & 2);
+    xd->mode_info_context = orig_mi;
   }
-
-  xd->above_context = pc->above_context + mb_col;
-  xd->left_context = pc->left_context + (mb_row & 2);
-  xd->mode_info_context = orig_mi;
-#if CONFIG_TX32X32
-  }
-#endif
 }
-#endif
 
 static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
                               int mb_row, unsigned int mb_col,
@@ -959,9 +949,7 @@
   int i;
   int tx_size;
 
-#if CONFIG_SUPERBLOCKS
   assert(!xd->mode_info_context->mbmi.sb_type);
-#endif
 
   // re-initialize macroblock dequantizer before detokenization
   if (xd->segmentation_enabled)
@@ -1096,9 +1084,7 @@
   const int recon_uvoffset = mb_row * 8 * recon_uv_stride + 8 * mb_col;
 
   xd->mode_info_context = cm->mi + idx;
-#if CONFIG_SUPERBLOCKS
   xd->mode_info_context->mbmi.sb_type = block_size >> 5;
-#endif
   xd->prev_mode_info_context = cm->prev_mi + idx;
   xd->above_context = cm->above_context + mb_col;
   xd->left_context = cm->left_context + (mb_row & 3);
@@ -1173,7 +1159,6 @@
     }
   }
 
-#if CONFIG_SUPERBLOCKS
   if (mbmi->sb_type) {
     const int n_mbs = 1 << mbmi->sb_type;
     const int y_mbs = MIN(n_mbs, cm->mb_rows - mb_row);
@@ -1187,7 +1172,6 @@
       }
     }
   }
-#endif
 }
 
 /* Decode a row of Superblocks (2x2 region of MBs) */
@@ -1200,7 +1184,7 @@
   vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
 
   for (mb_col = 0; mb_col < pc->mb_cols; mb_col += 4) {
-#if CONFIG_SUPERBLOCKS64 && CONFIG_SUPERBLOCKS
+#if CONFIG_SUPERBLOCKS64
     if (vp9_read(bc, pc->sb64_coded)) {
       set_offsets(pbi, 64, mb_row, mb_col);
       vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc);
@@ -1223,7 +1207,6 @@
 
         xd->sb_index = j;
 
-#if CONFIG_SUPERBLOCKS
         if (vp9_read(bc, pc->sb32_coded)) {
           set_offsets(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb);
           vp9_decode_mb_mode_mv(pbi,
@@ -1232,9 +1215,7 @@
           decode_superblock32(pbi,
                               xd, mb_row + y_idx_sb, mb_col + x_idx_sb, bc);
           xd->corrupted |= bool_error(bc);
-        } else
-#endif  // CONFIG_SUPERBLOCKS
-        {
+        } else {
           int i;
 
           // Process the 4 MBs within the SB in the order:
@@ -1426,7 +1407,7 @@
     read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_16x16,
                            BLOCK_TYPES_16X16);
   }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   if (pbi->common.txfm_mode > ALLOW_16X16) {
     read_coef_probs_common(bc, pc->fc.coef_probs_32x32, BLOCK_TYPES_32X32);
   }
@@ -1615,16 +1596,14 @@
     }
   }
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
   pc->sb64_coded = vp9_read_literal(&header_bc, 8);
 #endif
   pc->sb32_coded = vp9_read_literal(&header_bc, 8);
-#endif
 
   /* Read the loop filter level and type */
   pc->txfm_mode = vp9_read_literal(&header_bc, 2);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   if (pc->txfm_mode == 3)
     pc->txfm_mode += vp9_read_bit(&header_bc);
 #endif
@@ -1631,7 +1610,7 @@
   if (pc->txfm_mode == TX_MODE_SELECT) {
     pc->prob_tx[0] = vp9_read_literal(&header_bc, 8);
     pc->prob_tx[1] = vp9_read_literal(&header_bc, 8);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     pc->prob_tx[2] = vp9_read_literal(&header_bc, 8);
 #endif
   }
@@ -1816,14 +1795,12 @@
            pbi->common.fc.coef_probs_16x16);
   vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
            pbi->common.fc.hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_copy(pbi->common.fc.pre_coef_probs_32x32,
            pbi->common.fc.coef_probs_32x32);
 #endif
   vp9_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
-#if CONFIG_SUPERBLOCKS
   vp9_copy(pbi->common.fc.pre_sb_ymode_prob, pbi->common.fc.sb_ymode_prob);
-#endif
   vp9_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
   vp9_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob);
   vp9_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob);
@@ -1839,13 +1816,11 @@
   vp9_zero(pbi->common.fc.hybrid_coef_counts_8x8);
   vp9_zero(pbi->common.fc.coef_counts_16x16);
   vp9_zero(pbi->common.fc.hybrid_coef_counts_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_zero(pbi->common.fc.coef_counts_32x32);
 #endif
   vp9_zero(pbi->common.fc.ymode_counts);
-#if CONFIG_SUPERBLOCKS
   vp9_zero(pbi->common.fc.sb_ymode_counts);
-#endif
   vp9_zero(pbi->common.fc.uv_mode_counts);
   vp9_zero(pbi->common.fc.bmode_counts);
   vp9_zero(pbi->common.fc.i8x8_mode_counts);
--- a/vp9/decoder/vp9_dequantize.c
+++ b/vp9/decoder/vp9_dequantize.c
@@ -348,10 +348,10 @@
   }
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
-void vp9_dequant_idct_add_32x32(int16_t *input, const int16_t *dq,
-                                uint8_t *pred, uint8_t *dest, int pitch,
-                                int stride, int eob) {
+#if CONFIG_TX32X32
+void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
+                                  uint8_t *pred, uint8_t *dest, int pitch,
+                                  int stride, int eob) {
   int16_t output[1024];
   int i;
 
--- a/vp9/decoder/vp9_dequantize.h
+++ b/vp9/decoder/vp9_dequantize.h
@@ -70,7 +70,6 @@
                                      unsigned char *dest,
                                      int pitch, int stride, uint16_t eobs);
 
-#if CONFIG_SUPERBLOCKS
 void vp9_dequant_dc_idct_add_y_block_8x8_inplace_c(int16_t *q, const int16_t *dq,
                                                    unsigned char *dst,
                                                    int stride,
@@ -98,6 +97,5 @@
                                                  int stride,
                                                  uint16_t *eobs,
                                                  MACROBLOCKD *xd);
-#endif
 
 #endif
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -144,7 +144,7 @@
         coef_counts = fc->hybrid_coef_counts_16x16;
       }
       break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     case TX_32X32:
       coef_probs = fc->coef_probs_32x32;
       coef_counts = fc->coef_counts_32x32;
@@ -249,7 +249,7 @@
   return eob;
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
                          MACROBLOCKD* const xd,
                          BOOL_DECODER* const bc) {
--- a/vp9/decoder/vp9_detokenize.h
+++ b/vp9/decoder/vp9_detokenize.h
@@ -23,7 +23,7 @@
 int vp9_decode_mb_tokens(VP9D_COMP* const, MACROBLOCKD* const,
                          BOOL_DECODER* const);
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
                          MACROBLOCKD* const xd,
                          BOOL_DECODER* const bc);
--- a/vp9/decoder/vp9_idct_blk.c
+++ b/vp9/decoder/vp9_idct_blk.c
@@ -39,7 +39,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_dequant_dc_idct_add_y_block_4x4_inplace_c(int16_t *q,
                                                    const int16_t *dq,
                                                    uint8_t *dst,
@@ -64,7 +63,6 @@
     dst += 4 * stride - 16;
   }
 }
-#endif
 
 void vp9_dequant_idct_add_y_block_c(int16_t *q, const int16_t *dq,
                                     uint8_t *pre,
@@ -134,7 +132,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_dequant_idct_add_uv_block_4x4_inplace_c(int16_t *q, const int16_t *dq,
                                                  uint8_t *dstu,
                                                  uint8_t *dstv,
@@ -175,7 +172,6 @@
     dstv += 4 * stride - 8;
   }
 }
-#endif
 
 void vp9_dequant_dc_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
                                            uint8_t *pre,
@@ -200,7 +196,6 @@
                                 xd->eobs[12]);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_dequant_dc_idct_add_y_block_8x8_inplace_c(int16_t *q,
                                                    const int16_t *dq,
                                                    uint8_t *dst,
@@ -225,7 +220,6 @@
                                 dst + 8 * stride + 8, stride, stride, 1,
                                 xd->eobs[12]);
 }
-#endif
 
 void vp9_dequant_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
                                         uint8_t *pre,
@@ -259,7 +253,6 @@
   vp9_dequant_idct_add_8x8_c(q, dq, pre, dstv, 8, stride, 0, xd->eobs[20]);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_dequant_idct_add_uv_block_8x8_inplace_c(int16_t *q, const int16_t *dq,
                                                  uint8_t *dstu,
                                                  uint8_t *dstv,
@@ -273,7 +266,6 @@
   vp9_dequant_idct_add_8x8_c(q, dq, dstv, dstv, stride, stride, 0,
                              xd->eobs[20]);
 }
-#endif
 
 #if CONFIG_LOSSLESS
 void vp9_dequant_dc_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -146,11 +146,9 @@
       bc, VP9_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
       Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
     );
-#if CONFIG_SUPERBLOCKS
     update_mode(bc, VP9_I32X32_MODES, vp9_sb_ymode_encodings,
                 vp9_sb_ymode_tree, Pnew, cm->fc.sb_ymode_prob, bct,
                 (unsigned int *)cpi->sb_ymode_count);
-#endif
   }
 }
 
@@ -318,7 +316,6 @@
   write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m);
 }
 
-#if CONFIG_SUPERBLOCKS
 static void write_sb_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
   write_token(bc, vp9_sb_ymode_tree, p, vp9_sb_ymode_encodings + m);
 }
@@ -326,7 +323,6 @@
 static void sb_kfwrite_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
   write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
 }
-#endif
 
 static void write_i8x8_mode(vp9_writer *bc, int m, const vp9_prob *p) {
   write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m);
@@ -492,7 +488,6 @@
               vp9_mv_ref_encoding_array - NEARESTMV + m);
 }
 
-#if CONFIG_SUPERBLOCKS
 static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
                             const vp9_prob *p) {
 #if CONFIG_DEBUG
@@ -501,7 +496,6 @@
   write_token(bc, vp9_sb_mv_ref_tree, p,
               vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
 }
-#endif
 
 static void write_sub_mv_ref
 (
@@ -703,11 +697,7 @@
   const MV_REFERENCE_FRAME rf = mi->ref_frame;
   const MB_PREDICTION_MODE mode = mi->mode;
   const int segment_id = mi->segment_id;
-#if CONFIG_SUPERBLOCKS
   const int mb_size = 1 << mi->sb_type;
-#else
-  const int mb_size = 1;
-#endif
   int skip_coeff;
 
   int mb_row = pc->mb_rows - mb_rows_left;
@@ -781,11 +771,9 @@
 #endif
 
     if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
-#if CONFIG_SUPERBLOCKS
       if (m->mbmi.sb_type)
         write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
       else
-#endif
         write_ymode(bc, mode, pc->fc.ymode_prob);
     }
     if (mode == B_PRED) {
@@ -835,12 +823,9 @@
 
     // Is the segment coding of mode enabled
     if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
-#if CONFIG_SUPERBLOCKS
       if (mi->sb_type) {
         write_sb_mv_ref(bc, mode, mv_ref_p);
-      } else
-#endif
-      {
+      } else {
         write_mv_ref(bc, mode, mv_ref_p);
       }
       vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
@@ -998,7 +983,7 @@
     vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
     if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV) {
       vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       if (mi->sb_type && sz != TX_8X8)
         vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
 #endif
@@ -1027,11 +1012,7 @@
              vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) {
     skip_coeff = 1;
   } else {
-#if CONFIG_SUPERBLOCKS
     const int nmbs = 1 << m->mbmi.sb_type;
-#else
-    const int nmbs = 1;
-#endif
     const int xmbs = MIN(nmbs, mb_cols_left);
     const int ymbs = MIN(nmbs, mb_rows_left);
     int x, y;
@@ -1047,13 +1028,10 @@
               vp9_get_pred_prob(c, xd, PRED_MBSKIP));
   }
 
-#if CONFIG_SUPERBLOCKS
   if (m->mbmi.sb_type) {
     sb_kfwrite_ymode(bc, ym,
                      c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
-  } else
-#endif
-  {
+  } else {
     kfwrite_ymode(bc, ym,
                   c->kf_ymode_prob[c->kf_ymode_probs_index]);
   }
@@ -1111,7 +1089,7 @@
     vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
     if (sz != TX_4X4 && ym <= TM_PRED) {
       vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       if (m->mbmi.sb_type && sz != TX_8X8)
         vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
 #endif
@@ -1155,7 +1133,7 @@
   for (mb_row = 0; mb_row < c->mb_rows; mb_row += 4, m_ptr += 4 * mis) {
     m = m_ptr;
     for (mb_col = 0; mb_col < c->mb_cols; mb_col += 4, m += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
       vp9_write(bc, m->mbmi.sb_type == BLOCK_SIZE_SB64X64, c->sb64_coded);
       if (m->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
         write_modes_b(cpi, m, bc, &tok, tok_end, mb_row, mb_col);
@@ -1166,23 +1144,18 @@
 
         for (j = 0; j < 4; j++) {
           const int x_idx_sb = (j & 1) << 1, y_idx_sb = j & 2;
-#if CONFIG_SUPERBLOCKS
           MODE_INFO *sb_m = m + y_idx_sb * mis + x_idx_sb;
-#endif
 
           if (mb_col + x_idx_sb >= c->mb_cols ||
               mb_row + y_idx_sb >= c->mb_rows)
             continue;
 
-#if CONFIG_SUPERBLOCKS
           vp9_write(bc, sb_m->mbmi.sb_type, c->sb32_coded);
           if (sb_m->mbmi.sb_type) {
             assert(sb_m->mbmi.sb_type == BLOCK_SIZE_SB32X32);
             write_modes_b(cpi, sb_m, bc, &tok, tok_end,
                           mb_row + y_idx_sb, mb_col + x_idx_sb);
-          } else
-#endif
-          {
+          } else {
             // Process the 4 MBs in the order:
             // top-left, top-right, bottom-left, bottom-right
             for (i = 0; i < 4; i++) {
@@ -1195,9 +1168,7 @@
                 continue;
               }
 
-#if CONFIG_SUPERBLOCKS
               assert(mb_m->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
               write_modes_b(cpi, mb_m, bc, &tok, tok_end,
                             mb_row + y_idx, mb_col + x_idx);
             }
@@ -1305,7 +1276,7 @@
                           cpi, hybrid_context_counters_16x16,
 #endif
                           cpi->frame_hybrid_branch_ct_16x16, BLOCK_TYPES_16X16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   build_tree_distribution(cpi->frame_coef_probs_32x32,
                           cpi->coef_counts_32x32,
 #ifdef ENTROPY_STATS
@@ -1489,7 +1460,7 @@
                              BLOCK_TYPES_16X16);
   }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   if (cpi->common.txfm_mode > ALLOW_16X16) {
     update_coef_probs_common(bc,
 #ifdef ENTROPY_STATS
@@ -1535,13 +1506,11 @@
     for (j = 0; j < VP9_YMODES; j++) {
       cost += mode_cost[j] * cpi->ymode_count[j];
     }
-#if CONFIG_SUPERBLOCKS
     vp9_cost_tokens(mode_cost, cpi->common.sb_kf_ymode_prob[i],
                     vp9_sb_ymode_tree);
     for (j = 0; j < VP9_I32X32_MODES; j++) {
       cost += mode_cost[j] * cpi->sb_ymode_count[j];
     }
-#endif
     if (cost < bestcost) {
       bestindex = i;
       bestcost = cost;
@@ -1731,7 +1700,6 @@
     }
   }
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
   pc->sb64_coded = get_binary_prob(cpi->sb64_count[0], cpi->sb64_count[1]);
   vp9_write_literal(&header_bc, pc->sb64_coded, 8);
@@ -1738,7 +1706,6 @@
 #endif
   pc->sb32_coded = get_binary_prob(cpi->sb32_count[0], cpi->sb32_count[1]);
   vp9_write_literal(&header_bc, pc->sb32_coded, 8);
-#endif
 
   {
     if (pc->txfm_mode == TX_MODE_SELECT) {
@@ -1748,7 +1715,7 @@
                                 cpi->txfm_count_32x32p[TX_4X4] +
                                 cpi->txfm_count_32x32p[TX_8X8] +
                                 cpi->txfm_count_32x32p[TX_16X16] +
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
                                 cpi->txfm_count_32x32p[TX_32X32] +
 #endif
                                 cpi->txfm_count_16x16p[TX_4X4] +
@@ -1760,12 +1727,12 @@
                                 cpi->txfm_count_16x16p[TX_8X8],
                                 cpi->txfm_count_32x32p[TX_8X8] +
                                 cpi->txfm_count_32x32p[TX_16X16] +
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
                                 cpi->txfm_count_32x32p[TX_32X32] +
 #endif
                                 cpi->txfm_count_16x16p[TX_8X8] +
                                 cpi->txfm_count_16x16p[TX_16X16]);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
                                 cpi->txfm_count_32x32p[TX_16X16] +
                                 cpi->txfm_count_32x32p[TX_32X32]);
@@ -1773,12 +1740,12 @@
     } else {
       pc->prob_tx[0] = 128;
       pc->prob_tx[1] = 128;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       pc->prob_tx[2] = 128;
 #endif
     }
     vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     if (pc->txfm_mode > ALLOW_16X16) {
       vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
     }
@@ -1786,7 +1753,7 @@
     if (pc->txfm_mode == TX_MODE_SELECT) {
       vp9_write_literal(&header_bc, pc->prob_tx[0], 8);
       vp9_write_literal(&header_bc, pc->prob_tx[1], 8);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       vp9_write_literal(&header_bc, pc->prob_tx[2], 8);
 #endif
     }
@@ -2009,13 +1976,11 @@
            cpi->common.fc.coef_probs_16x16);
   vp9_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16,
            cpi->common.fc.hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
            cpi->common.fc.coef_probs_32x32);
 #endif
-#if CONFIG_SUPERBLOCKS
   vp9_copy(cpi->common.fc.pre_sb_ymode_prob, cpi->common.fc.sb_ymode_prob);
-#endif
   vp9_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
   vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
   vp9_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -35,13 +35,13 @@
   int16_t *zbin;
   int16_t *zbin_8x8;
   int16_t *zbin_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   int16_t *zbin_32x32;
 #endif
   int16_t *zrun_zbin_boost;
   int16_t *zrun_zbin_boost_8x8;
   int16_t *zrun_zbin_boost_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   int16_t *zrun_zbin_boost_32x32;
 #endif
   int16_t *round;
@@ -57,7 +57,7 @@
   int eob_max_offset;
   int eob_max_offset_8x8;
   int eob_max_offset_16x16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   int eob_max_offset_32x32;
 #endif
 } BLOCK;
@@ -92,7 +92,7 @@
   int64_t txfm_rd_diff[NB_TXFM_MODES];
 } PICK_MODE_CONTEXT;
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 typedef struct superblock {
   DECLARE_ALIGNED(16, int16_t, src_diff[32*32+16*16*2]);
   DECLARE_ALIGNED(16, int16_t, coeff[32*32+16*16*2]);
@@ -102,16 +102,11 @@
 typedef struct macroblock {
   DECLARE_ALIGNED(16, int16_t, src_diff[400]);  // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
   DECLARE_ALIGNED(16, int16_t, coeff[400]);     // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
-#if !CONFIG_SUPERBLOCKS
-  DECLARE_ALIGNED(16, uint8_t, thismb[256]);    // 16x16 Y
-
-  unsigned char *thismb_ptr;
-#endif
   // 16 Y blocks, 4 U blocks, 4 V blocks,
   // 1 DC 2nd order block each with 16 entries
   BLOCK block[25];
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
   SUPERBLOCK sb_coeff_data;
 #endif
 
@@ -183,13 +178,11 @@
   // Structure to hold context for each of the 4 MBs within a SB:
   // when encoded as 4 independent MBs:
   PICK_MODE_CONTEXT mb_context[4][4];
-#if CONFIG_SUPERBLOCKS
   // when 4 MBs share coding parameters:
   PICK_MODE_CONTEXT sb32_context[4];
 #if CONFIG_SUPERBLOCKS64
   PICK_MODE_CONTEXT sb64_context;
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
 
   void (*vp9_short_fdct4x4)(int16_t *input, int16_t *output, int pitch);
   void (*vp9_short_fdct8x4)(int16_t *input, int16_t *output, int pitch);
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -285,10 +285,6 @@
       xd->left_available = (mb_col != 0);
       recon_yoffset += 16;
 #endif
-#if !CONFIG_SUPERBLOCKS
-      // Copy current mb to a buffer
-      vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
 
       // measure activity
       mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
@@ -444,11 +440,7 @@
   int mb_mode = mi->mbmi.mode;
   int mb_mode_index = ctx->best_mode_index;
   const int mis = cpi->common.mode_info_stride;
-#if CONFIG_SUPERBLOCKS
   int mb_block_size = 1 << mi->mbmi.sb_type;
-#else
-  int mb_block_size = 1;
-#endif
 
 #if CONFIG_DEBUG
   assert(mb_mode < MB_MODE_COUNT);
@@ -455,9 +447,7 @@
   assert(mb_mode_index < MAX_MODES);
   assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
 #endif
-#if CONFIG_SUPERBLOCKS
   assert(mi->mbmi.sb_type == (block_size >> 5));
-#endif
 
   // Restore the coding context of the MB to that that was in place
   // when the mode was picked for it
@@ -471,7 +461,7 @@
       }
     }
   }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   if (block_size == 16) {
     ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
   }
@@ -787,18 +777,11 @@
     set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
                 &recon_yoffset, &recon_uvoffset);
 
-#if !CONFIG_SUPERBLOCKS
-    // Copy current MB to a work buffer
-    vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
     if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
       vp9_activity_masking(cpi, x);
 
     mbmi = &xd->mode_info_context->mbmi;
-#if CONFIG_SUPERBLOCKS
     mbmi->sb_type = BLOCK_SIZE_MB16X16;
-#endif
 
     cpi->update_context = 0;    // TODO Do we need this now??
 
@@ -869,7 +852,6 @@
              sizeof(above_context));
 }
 
-#if CONFIG_SUPERBLOCKS
 static void pick_sb_modes(VP9_COMP *cpi,
                           int mb_row,
                           int mb_col,
@@ -943,7 +925,6 @@
   }
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif  // CONFIG_SUPERBLOCKS
 
 static void update_stats(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
@@ -1011,7 +992,6 @@
   MACROBLOCKD *const xd = &x->e_mbd;
   int recon_yoffset, recon_uvoffset;
 
-#if CONFIG_SUPERBLOCKS
   cpi->sb32_count[is_sb]++;
   if (is_sb) {
     set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
@@ -1028,9 +1008,7 @@
       if (mb_row < cm->mb_rows)
         cpi->tplist[mb_row].stop = *tp;
     }
-  } else
-#endif
-  {
+  } else {
     int i;
 
     for (i = 0; i < 4; i++) {
@@ -1046,11 +1024,6 @@
       xd->mb_index = i;
       update_state(cpi, &x->mb_context[xd->sb_index][i], 16, output_enabled);
 
-#if !CONFIG_SUPERBLOCKS
-      // Copy current MB to a work buffer
-      vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
       if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
         vp9_activity_masking(cpi, x);
 
@@ -1081,7 +1054,7 @@
 #endif
 }
 
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
 static void encode_sb64(VP9_COMP *cpi,
                         int mb_row,
                         int mb_col,
@@ -1121,7 +1094,7 @@
     }
   }
 }
-#endif // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif  // CONFIG_SUPERBLOCKS64
 
 static void encode_sb_row(VP9_COMP *cpi,
                           int mb_row,
@@ -1141,7 +1114,7 @@
     int i;
     int sb32_rate = 0, sb32_dist = 0;
     int is_sb[4];
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
     int sb64_rate = INT_MAX, sb64_dist;
     ENTROPY_CONTEXT_PLANES l[4], a[4];
     TOKENEXTRA *tp_orig = *tp;
@@ -1148,13 +1121,11 @@
 
     memcpy(&a, cm->above_context + mb_col, sizeof(a));
     memcpy(&l, cm->left_context, sizeof(l));
-#endif  // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif  // CONFIG_SUPERBLOCKS64
     for (i = 0; i < 4; i++) {
       const int x_idx = (i & 1) << 1, y_idx = i & 2;
       int mb_rate = 0, mb_dist = 0;
-#if CONFIG_SUPERBLOCKS
       int sb_rate = INT_MAX, sb_dist;
-#endif
 
       if (mb_row + y_idx >= cm->mb_rows || mb_col + x_idx >= cm->mb_cols)
         continue;
@@ -1163,11 +1134,8 @@
 
       pick_mb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
                     tp, &mb_rate, &mb_dist);
-#if CONFIG_SUPERBLOCKS
       mb_rate += vp9_cost_bit(cm->sb32_coded, 0);
-#endif
 
-#if CONFIG_SUPERBLOCKS
       if (!(((    mb_cols & 1) && mb_col + x_idx ==     mb_cols - 1) ||
             ((cm->mb_rows & 1) && mb_row + y_idx == cm->mb_rows - 1))) {
         /* Pick a mode assuming that it applies to all 4 of the MBs in the SB */
@@ -1183,12 +1151,8 @@
         is_sb[i] = 1;
         sb32_rate += sb_rate;
         sb32_dist += sb_dist;
-      } else
-#endif
-      {
-#if CONFIG_SUPERBLOCKS
+      } else {
         is_sb[i] = 0;
-#endif
         sb32_rate += mb_rate;
         sb32_dist += mb_dist;
       }
@@ -1200,11 +1164,10 @@
       // instead of small->big) means we can use as threshold for small, which
       // may enable breakouts if RD is not good enough (i.e. faster)
       encode_sb(cpi, mb_row + y_idx, mb_col + x_idx,
-                !(CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64),
-                tp, is_sb[i]);
+                !CONFIG_SUPERBLOCKS64, tp, is_sb[i]);
     }
 
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
     memcpy(cm->above_context + mb_col, &a, sizeof(a));
     memcpy(cm->left_context, &l, sizeof(l));
     sb32_rate += vp9_cost_bit(cm->sb64_coded, 0);
@@ -1227,11 +1190,11 @@
       *totalrate += sb32_rate;
     }
 
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
     assert(tp_orig == *tp);
     encode_sb64(cpi, mb_row, mb_col, tp, is_sb);
     assert(tp_orig < *tp);
-#endif  // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif  // CONFIG_SUPERBLOCKS64
   }
 }
 
@@ -1279,13 +1242,11 @@
   vp9_zero(cpi->sub_mv_ref_count)
   vp9_zero(cpi->mbsplit_count)
   vp9_zero(cpi->common.fc.mv_ref_ct)
-#if CONFIG_SUPERBLOCKS
   vp9_zero(cpi->sb_ymode_count)
   vp9_zero(cpi->sb32_count);
 #if CONFIG_SUPERBLOCKS64
   vp9_zero(cpi->sb64_count);
 #endif  // CONFIG_SUPERBLOCKS64
-#endif  // CONFIG_SUPERBLOCKS
 #if CONFIG_COMP_INTERINTRA_PRED
   vp9_zero(cpi->interintra_count);
   vp9_zero(cpi->interintra_select_count);
@@ -1362,7 +1323,7 @@
   vp9_zero(cpi->hybrid_coef_counts_8x8);
   vp9_zero(cpi->coef_counts_16x16);
   vp9_zero(cpi->hybrid_coef_counts_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_zero(cpi->coef_counts_32x32);
 #endif
 #if CONFIG_NEW_MVREF
@@ -1462,7 +1423,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
   int x, y;
 
@@ -1530,7 +1490,6 @@
   }
 }
 #endif
-#endif
 
 static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
   VP9_COMMON *const cm = &cpi->common;
@@ -1541,33 +1500,28 @@
   for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
     mi = mi_ptr;
     for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
       if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
         reset_skip_txfm_size_sb64(cpi, mi, mis, txfm_max,
                                   cm->mb_rows - mb_row, cm->mb_cols - mb_col);
       } else
-#endif  // CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#endif  // CONFIG_SUPERBLOCKS64
       {
         int i;
 
         for (i = 0; i < 4; i++) {
           const int x_idx_sb = (i & 1) << 1, y_idx_sb = i & 2;
-#if CONFIG_SUPERBLOCKS
           MODE_INFO *sb_mi = mi + y_idx_sb * mis + x_idx_sb;
-#endif
 
           if (mb_row + y_idx_sb >= cm->mb_rows ||
               mb_col + x_idx_sb >= cm->mb_cols)
             continue;
 
-#if CONFIG_SUPERBLOCKS
           if (sb_mi->mbmi.sb_type) {
             reset_skip_txfm_size_sb32(cpi, sb_mi, mis, txfm_max,
                                       cm->mb_rows - mb_row - y_idx_sb,
                                       cm->mb_cols - mb_col - x_idx_sb);
-          } else
-#endif
-          {
+          } else {
             int m;
 
             for (m = 0; m < 4; m++) {
@@ -1579,9 +1533,7 @@
                 continue;
 
               mb_mi = mi + y_idx * mis + x_idx;
-#if CONFIG_SUPERBLOCKS
               assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
               reset_skip_txfm_size_mb(cpi, mb_mi, txfm_max);
             }
           }
@@ -1647,7 +1599,7 @@
      * keyframe's probabilities as an estimate of what the current keyframe's
      * coefficient cost distributions may look like. */
     if (frame_type == 0) {
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       txfm_type = ALLOW_32X32;
 #else
       txfm_type = ALLOW_16X16;
@@ -1682,7 +1634,7 @@
     } else
       txfm_type = ALLOW_8X8;
 #else
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
                  cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
     ALLOW_32X32 : TX_MODE_SELECT;
@@ -1742,7 +1694,7 @@
       const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
       const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
       const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
 #else
       const int count32x32 = 0;
@@ -1756,13 +1708,13 @@
                  count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
         cpi->common.txfm_mode = ONLY_4X4;
         reset_skip_txfm_size(cpi, TX_4X4);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
         cpi->common.txfm_mode = ALLOW_32X32;
 #endif
       } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
         cpi->common.txfm_mode = ALLOW_16X16;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
         reset_skip_txfm_size(cpi, TX_16X16);
 #endif
       }
@@ -1814,9 +1766,6 @@
 
   vp9_build_block_doffsets(&x->e_mbd);
 
-#if !CONFIG_SUPERBLOCKS
-  // y blocks
-  x->thismb_ptr = &x->thismb[0];
   for (br = 0; br < 4; br++) {
     for (bc = 0; bc < 4; bc++) {
       BLOCK *this_block = &x->block[block];
@@ -1823,19 +1772,6 @@
       // this_block->base_src = &x->src.y_buffer;
       // this_block->src_stride = x->src.y_stride;
       // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
-      this_block->base_src = &x->thismb_ptr;
-      this_block->src_stride = 16;
-      this_block->src = 4 * br * 16 + 4 * bc;
-      ++block;
-    }
-  }
-#else
-  for (br = 0; br < 4; br++) {
-    for (bc = 0; bc < 4; bc++) {
-      BLOCK *this_block = &x->block[block];
-      // this_block->base_src = &x->src.y_buffer;
-      // this_block->src_stride = x->src.y_stride;
-      // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
       this_block->base_src = &x->src.y_buffer;
       this_block->src_stride = x->src.y_stride;
       this_block->src = 4 * br * this_block->src_stride + 4 * bc;
@@ -1842,7 +1778,6 @@
       ++block;
     }
   }
-#endif
 
   // u blocks
   for (br = 0; br < 2; br++) {
@@ -1896,12 +1831,11 @@
   }
 #endif
 
-#if CONFIG_SUPERBLOCKS
   if (xd->mode_info_context->mbmi.sb_type) {
     ++cpi->sb_ymode_count[m];
-  } else
-#endif
+  } else {
     ++cpi->ymode_count[m];
+  }
   if (m != I8X8_PRED)
     ++cpi->y_uv_mode_count[m][uvm];
   else {
@@ -1943,7 +1877,6 @@
 #endif
 }
 
-#if CONFIG_SUPERBLOCKS
 static void update_sb_skip_coeff_state(VP9_COMP *cpi,
                                        ENTROPY_CONTEXT_PLANES ta[4],
                                        ENTROPY_CONTEXT_PLANES tl[4],
@@ -2010,6 +1943,7 @@
                                          int skip[16], int output_enabled) {
   MACROBLOCK *const x = &cpi->mb;
 
+#if CONFIG_TX32X32
   if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_32X32) {
     TOKENEXTRA tokens[4][1024+512];
     int n_tokens[4], n;
@@ -2057,7 +1991,9 @@
         (*tp) += n_tokens[n];
       }
     }
-  } else {
+  } else
+#endif  // CONFIG_TX32X32
+  {
     TOKENEXTRA tokens[16][16 * 25];
     int n_tokens[16], n;
 
@@ -2113,7 +2049,6 @@
   }
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif /* CONFIG_SUPERBLOCKS */
 
 static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
                               int recon_yoffset, int recon_uvoffset,
@@ -2125,9 +2060,7 @@
   MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
   unsigned char ref_pred_flag;
 
-#if CONFIG_SUPERBLOCKS
   assert(!xd->mode_info_context->mbmi.sb_type);
-#endif
 
 #ifdef ENC_DEBUG
   enc_debug = (cpi->common.current_video_frame == 46 &&
@@ -2375,7 +2308,6 @@
   }
 }
 
-#if CONFIG_SUPERBLOCKS
 static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
                                 int recon_yoffset, int recon_uvoffset,
                                 int output_enabled, int mb_row, int mb_col) {
@@ -2769,8 +2701,7 @@
         vp9_inverse_transform_sbuv_16x16(&x->e_mbd.sb_coeff_data);
         vp9_inverse_transform_sby_32x32(&x->e_mbd.sb_coeff_data);
         vp9_recon_sby_s_c(&x->e_mbd,
-                          dst + 32 * x_idx + 32 * y_idx * dst_y_stride,
-                          dst_y_stride);
+                          dst + 32 * x_idx + 32 * y_idx * dst_y_stride);
         vp9_recon_sbuv_s_c(&x->e_mbd,
                            udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
                            vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride);
@@ -2867,9 +2798,15 @@
   if (output_enabled) {
     if (cm->txfm_mode == TX_MODE_SELECT &&
         !((cm->mb_no_coeff_skip &&
-           ((mi->mbmi.txfm_size == TX_32X32 &&
+           (
+#if CONFIG_TX32X32
+            (mi->mbmi.txfm_size == TX_32X32 &&
              skip[0] && skip[1] && skip[2] && skip[3]) ||
-            (mi->mbmi.txfm_size != TX_32X32 &&
+#endif  // CONFIG_TX32X32
+            (
+#if CONFIG_TX32X32
+             mi->mbmi.txfm_size != TX_32X32 &&
+#endif  // CONFIG_TX32X32
              skip[0] && skip[1] && skip[2] && skip[3] &&
              skip[4] && skip[5] && skip[6] && skip[7] &&
              skip[8] && skip[9] && skip[10] && skip[11] &&
@@ -2897,4 +2834,3 @@
   }
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -108,7 +108,7 @@
   }
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 void vp9_subtract_sby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
                           const uint8_t *pred, int dst_stride) {
   int r, c;
@@ -311,7 +311,7 @@
   vp9_transform_mbuv_8x8(x);
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 void vp9_transform_sby_32x32(MACROBLOCK *x) {
   SUPERBLOCK * const x_sb = &x->sb_coeff_data;
   vp9_short_fdct32x32(x_sb->src_diff, x_sb->coeff, 64);
--- a/vp9/encoder/vp9_encodemb.h
+++ b/vp9/encoder/vp9_encodemb.h
@@ -46,7 +46,7 @@
 void vp9_transform_mby_16x16(MACROBLOCK *x);
 void vp9_optimize_mby_16x16(MACROBLOCK *x);
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 void vp9_transform_sby_32x32(MACROBLOCK *x);
 void vp9_transform_sbuv_16x16(MACROBLOCK *x);
 #endif
@@ -55,7 +55,6 @@
 
 void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
 
-#if CONFIG_SUPERBLOCKS
 void vp9_subtract_mbuv_s_c(int16_t *diff, const uint8_t *usrc,
                            const uint8_t *vsrc, int src_stride,
                            const uint8_t *upred,
@@ -71,6 +70,5 @@
                            const uint8_t *upred,
                            const uint8_t *vpred, int dst_stride);
 #endif  // CONFIG_TX32X32
-#endif  // CONFIG_SUPERBLOCKS
 
 #endif  // VP9_ENCODER_VP9_ENCODEMB_H_
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -516,11 +516,6 @@
       xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
       xd->left_available = (mb_col != 0);
 
-#if !CONFIG_SUPERBLOCKS
-      // Copy current mb to a buffer
-      vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#endif
-
       // do intra 16x16 prediction
       this_error = vp9_encode_intra(cpi, x, use_dc_pred);
 
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -267,31 +267,9 @@
   int offset;
   int usehp = xd->allow_high_precision_mv;
 
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
-  uint8_t *y0 = *(d->base_pre) + d->pre +
-                (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-  uint8_t *y;
-  int buf_r1, buf_r2, buf_c1, buf_c2;
-
-  // Clamping to avoid out-of-range data access
-  buf_r1 = ((bestmv->as_mv.row - VP9_INTERP_EXTEND) < x->mv_row_min) ?
-      (bestmv->as_mv.row - x->mv_row_min) : VP9_INTERP_EXTEND - 1;
-  buf_r2 = ((bestmv->as_mv.row + VP9_INTERP_EXTEND) > x->mv_row_max) ?
-      (x->mv_row_max - bestmv->as_mv.row) : VP9_INTERP_EXTEND - 1;
-  buf_c1 = ((bestmv->as_mv.col - VP9_INTERP_EXTEND) < x->mv_col_min) ?
-      (bestmv->as_mv.col - x->mv_col_min) : VP9_INTERP_EXTEND - 1;
-  buf_c2 = ((bestmv->as_mv.col + VP9_INTERP_EXTEND) > x->mv_col_max) ?
-      (x->mv_col_max - bestmv->as_mv.col) : VP9_INTERP_EXTEND - 1;
-  y_stride = 32;
-
-  /* Copy to intermediate buffer before searching. */
-  vfp->copymem(y0 - buf_c1 - d->pre_stride * buf_r1, d->pre_stride, xd->y_buf, y_stride, 16 + buf_r1 + buf_r2);
-  y = xd->y_buf + y_stride * buf_r1 + buf_c1;
-#else
   uint8_t *y = *(d->base_pre) + d->pre +
                (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
   y_stride = d->pre_stride;
-#endif
 
   rr = ref_mv->as_mv.row;
   rc = ref_mv->as_mv.col;
@@ -463,20 +441,9 @@
   MACROBLOCKD *xd = &x->e_mbd;
   int usehp = xd->allow_high_precision_mv;
 
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
-  uint8_t *y0 = *(d->base_pre) + d->pre +
-                (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-  uint8_t *y;
-
-  y_stride = 32;
-  /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
-  vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
-  y = xd->y_buf + y_stride + 1;
-#else
   uint8_t *y = *(d->base_pre) + d->pre +
                (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
   y_stride = d->pre_stride;
-#endif
 
   // central mv
   bestmv->as_mv.row <<= 3;
@@ -943,20 +910,9 @@
   int y_stride;
   MACROBLOCKD *xd = &x->e_mbd;
 
-#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
-  uint8_t *y0 = *(d->base_pre) + d->pre +
-      (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-  uint8_t *y;
-
-  y_stride = 32;
-  /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
-  vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
-  y = xd->y_buf + y_stride + 1;
-#else
   uint8_t *y = *(d->base_pre) + d->pre +
       (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
   y_stride = d->pre_stride;
-#endif
 
   // central mv
   bestmv->as_mv.row <<= 3;
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -1761,12 +1761,10 @@
   cm->prob_last_coded               = 128;
   cm->prob_gf_coded                 = 128;
   cm->prob_intra_coded              = 63;
-#if CONFIG_SUPERBLOCKS
   cm->sb32_coded                    = 200;
 #if CONFIG_SUPERBLOCKS64
   cm->sb64_coded                    = 200;
 #endif
-#endif
   for (i = 0; i < COMP_PRED_CONTEXTS; i++)
     cm->prob_comppred[i]         = 128;
   for (i = 0; i < TX_SIZE_MAX_SB - 1; i++)
@@ -1966,7 +1964,6 @@
     cpi->fn_ptr[BT].sdx4df         = SDX4DF;
 
 
-#if CONFIG_SUPERBLOCKS
   BFP(BLOCK_32X32, vp9_sad32x32, vp9_variance32x32, vp9_sub_pixel_variance32x32,
       vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v,
       vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
@@ -1978,7 +1975,6 @@
       vp9_variance_halfpixvar64x64_hv, vp9_sad64x64x3, vp9_sad64x64x8,
       vp9_sad64x64x4d)
 #endif
-#endif
 
   BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16,
        vp9_variance_halfpixvar16x16_h, vp9_variance_halfpixvar16x16_v,
@@ -3655,14 +3651,12 @@
   vp9_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
   vp9_copy(cpi->common.fc.hybrid_coef_counts_16x16,
            cpi->hybrid_coef_counts_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_copy(cpi->common.fc.coef_counts_32x32, cpi->coef_counts_32x32);
 #endif
   vp9_adapt_coef_probs(&cpi->common);
   if (cpi->common.frame_type != KEY_FRAME) {
-#if CONFIG_SUPERBLOCKS
     vp9_copy(cpi->common.fc.sb_ymode_counts, cpi->sb_ymode_count);
-#endif
     vp9_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
     vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
     vp9_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -100,13 +100,11 @@
   vp9_coeff_probs hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
   vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES_16X16];
   vp9_coeff_probs hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES_32X32];
 #endif
 
-#if CONFIG_SUPERBLOCKS
   vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
-#endif
   vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
   vp9_prob uv_mode_prob[VP9_YMODES][VP9_UV_MODES - 1];
   vp9_prob bmode_prob[VP9_NKF_BINTRAMODES - 1];
@@ -390,15 +388,11 @@
   BLOCK_4X4 = PARTITIONING_4X4,
   BLOCK_16X16,
   BLOCK_MAX_SEGMENTS,
-#if CONFIG_SUPERBLOCKS
   BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
 #if CONFIG_SUPERBLOCKS64
   BLOCK_64X64,
 #endif  // CONFIG_SUPERBLOCKS64
   BLOCK_MAX_SB_SEGMENTS,
-#else  // CONFIG_SUPERBLOCKS
-  BLOCK_MAX_SB_SEGMENTS = BLOCK_MAX_SEGMENTS,
-#endif  // CONFIG_SUPERBLOCKS
 };
 
 typedef struct VP9_COMP {
@@ -436,7 +430,7 @@
   DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_16x16[QINDEX_RANGE][256]);
   DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_16x16[QINDEX_RANGE][256]);
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   DECLARE_ALIGNED(16, short, Y1zbin_32x32[QINDEX_RANGE][1024]);
   DECLARE_ALIGNED(16, short, Y2zbin_32x32[QINDEX_RANGE][1024]);
   DECLARE_ALIGNED(16, short, UVzbin_32x32[QINDEX_RANGE][1024]);
@@ -577,13 +571,11 @@
 
   int cq_target_quality;
 
-#if CONFIG_SUPERBLOCKS
   int sb32_count[2];
 #if CONFIG_SUPERBLOCKS64
   int sb64_count[2];
 #endif
   int sb_ymode_count [VP9_I32X32_MODES];
-#endif
   int ymode_count[VP9_YMODES];        /* intra MB type cts this frame */
   int bmode_count[VP9_NKF_BINTRAMODES];
   int i8x8_mode_count[VP9_I8X8_MODES];
@@ -618,7 +610,7 @@
   vp9_coeff_probs frame_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
   vp9_coeff_stats frame_hybrid_branch_ct_16x16[BLOCK_TYPES_16X16];
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_coeff_count coef_counts_32x32[BLOCK_TYPES_32X32];
   vp9_coeff_probs frame_coef_probs_32x32[BLOCK_TYPES_32X32];
   vp9_coeff_stats frame_branch_ct_32x32[BLOCK_TYPES_32X32];
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -379,7 +379,7 @@
            &d->eob, vp9_default_zig_zag1d_16x16, 1);
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 void vp9_quantize_sby_32x32(MACROBLOCK *x) {
   x->e_mbd.block[0].eob = 0;
   quantize(x->block[0].zrun_zbin_boost_32x32,
@@ -472,7 +472,7 @@
     48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
     48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
   };
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   static const int zbin_boost_32x32[1024] = {
     0,  0,  0,  8,  8,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
     30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
@@ -569,7 +569,7 @@
       ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
     cpi->zrun_zbin_boost_y1_16x16[Q][0] =
       ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     cpi->Y1zbin_32x32[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
     cpi->zrun_zbin_boost_y1_32x32[Q][0] =
      ((quant_val * zbin_boost_32x32[0]) + 64) >> 7;
@@ -677,7 +677,7 @@
       cpi->zrun_zbin_boost_uv_16x16[Q][i] =
         ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
     }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     // 32x32 structures. Same comment above applies.
     for (i = 1; i < 1024; i++) {
       int rc = vp9_default_zig_zag1d_32x32[i];
@@ -727,7 +727,7 @@
     x->block[i].zbin = cpi->Y1zbin[QIndex];
     x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
     x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     x->block[i].zbin_32x32 = cpi->Y1zbin_32x32[QIndex];
 #endif
     x->block[i].round = cpi->Y1round[QIndex];
@@ -735,7 +735,7 @@
     x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
     x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
     x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     x->block[i].zrun_zbin_boost_32x32 = cpi->zrun_zbin_boost_y1_32x32[QIndex];
 #endif
     x->block[i].zbin_extra = (int16_t)zbin_extra;
@@ -748,7 +748,7 @@
         vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
       x->block[i].eob_max_offset_16x16 =
         vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       x->block[i].eob_max_offset_32x32 =
       vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
 #endif
@@ -756,7 +756,7 @@
       x->block[i].eob_max_offset = 16;
       x->block[i].eob_max_offset_8x8 = 64;
       x->block[i].eob_max_offset_16x16 = 256;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       x->block[i].eob_max_offset_32x32 = 1024;
 #endif
     }
--- a/vp9/encoder/vp9_quantize.h
+++ b/vp9/encoder/vp9_quantize.h
@@ -78,7 +78,7 @@
 extern prototype_quantize_block(vp9_quantize_quantb_16x16);
 extern prototype_quantize_mb(vp9_quantize_mby_16x16);
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 void vp9_quantize_sby_32x32(MACROBLOCK *x);
 void vp9_quantize_sbuv_16x16(MACROBLOCK *x);
 #endif
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -139,9 +139,7 @@
   vp9_copy(cc->vp9_mode_contexts, cm->fc.vp9_mode_contexts);
 
   vp9_copy(cc->ymode_prob, cm->fc.ymode_prob);
-#if CONFIG_SUPERBLOCKS
   vp9_copy(cc->sb_ymode_prob, cm->fc.sb_ymode_prob);
-#endif
   vp9_copy(cc->bmode_prob, cm->fc.bmode_prob);
   vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
   vp9_copy(cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob);
@@ -175,7 +173,7 @@
   vp9_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
   vp9_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
   vp9_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_copy(cc->coef_probs_32x32, cm->fc.coef_probs_32x32);
 #endif
   vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
@@ -200,9 +198,7 @@
   vp9_copy(cm->fc.vp9_mode_contexts, cc->vp9_mode_contexts);
 
   vp9_copy(cm->fc.ymode_prob, cc->ymode_prob);
-#if CONFIG_SUPERBLOCKS
   vp9_copy(cm->fc.sb_ymode_prob, cc->sb_ymode_prob);
-#endif
   vp9_copy(cm->fc.bmode_prob, cc->bmode_prob);
   vp9_copy(cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
   vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
@@ -237,7 +233,7 @@
   vp9_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
   vp9_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
   vp9_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   vp9_copy(cm->fc.coef_probs_32x32, cc->coef_probs_32x32);
 #endif
   vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -385,7 +385,7 @@
   fill_token_costs(cpi->mb.hybrid_token_costs[TX_16X16],
                    cpi->common.fc.hybrid_coef_probs_16x16, BLOCK_TYPES_16X16);
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   fill_token_costs(cpi->mb.token_costs[TX_32X32],
                    cpi->common.fc.coef_probs_32x32, BLOCK_TYPES_32X32);
 #endif
@@ -527,7 +527,7 @@
   int pt;
   const int eob = b->eob;
   MACROBLOCKD *xd = &mb->e_mbd;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   const int ib = (int)(b - xd->block);
 #endif
   int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
@@ -572,7 +572,7 @@
       scan = vp9_default_zig_zag1d_16x16;
       band = vp9_coef_bands_16x16;
       seg_eob = 256;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       if (type == PLANE_TYPE_UV) {
         const int uv_idx = ib - 16;
         qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 64 * uv_idx;
@@ -579,7 +579,7 @@
       }
 #endif
       break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     case TX_32X32:
       scan = vp9_default_zig_zag1d_32x32;
       band = vp9_coef_bands_32x32;
@@ -853,7 +853,7 @@
     }
   }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   if (max_txfm_size == TX_32X32 &&
       (cm->txfm_mode == ALLOW_32X32 ||
        (cm->txfm_mode == TX_MODE_SELECT &&
@@ -863,7 +863,7 @@
   } else
 #endif
   if ( cm->txfm_mode == ALLOW_16X16 ||
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
       (max_txfm_size == TX_16X16 && cm->txfm_mode == ALLOW_32X32) ||
 #endif
       (cm->txfm_mode == TX_MODE_SELECT &&
@@ -884,7 +884,7 @@
   txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
   txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
   txfm_cache[ALLOW_16X16] = rd[TX_16X16][0];
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   txfm_cache[ALLOW_32X32] = rd[max_txfm_size][0];
   if (max_txfm_size == TX_32X32 &&
       rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
@@ -925,7 +925,6 @@
   d[12] = p[12];
 }
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_TX32X32
 static int rdcost_sby_32x32(MACROBLOCK *x, int backup) {
   MACROBLOCKD * const xd = &x->e_mbd;
@@ -1075,6 +1074,7 @@
   xd->left_context = orig_left;
 }
 
+#if CONFIG_SUPERBLOCKS64
 static void super_block_64_yrd(VP9_COMP *cpi,
                                MACROBLOCK *x, int *rate, int *distortion,
                                int *skip,
@@ -1172,7 +1172,7 @@
   xd->above_context = orig_above;
   xd->left_context = orig_left;
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 static void copy_predictor_8x8(uint8_t *dst, const uint8_t *predictor) {
   const unsigned int *p = (const unsigned int *)predictor;
@@ -1426,7 +1426,6 @@
   return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
 }
 
-#if CONFIG_SUPERBLOCKS
 static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi,
                                       MACROBLOCK *x,
                                       int *rate,
@@ -1508,7 +1507,6 @@
   return best_rd;
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
 
 static int64_t rd_pick_intra16x16mby_mode(VP9_COMP *cpi,
                                           MACROBLOCK *x,
@@ -1882,7 +1880,6 @@
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_TX32X32
 static int rd_cost_sbuv_16x16(MACROBLOCK *x, int backup) {
   int b;
@@ -1939,56 +1936,56 @@
                           usrc, vsrc, src_uv_stride,
                           udst, vdst, dst_uv_stride);
     rd_inter32x32_uv_16x16(x, rate, distortion, skip, 1);
-  } else {
+  } else
 #endif
-  int n, r = 0, d = 0;
-  int skippable = 1;
-  ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
-  ENTROPY_CONTEXT_PLANES *ta = xd->above_context;
-  ENTROPY_CONTEXT_PLANES *tl = xd->left_context;
+  {
+    int n, r = 0, d = 0;
+    int skippable = 1;
+    ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
+    ENTROPY_CONTEXT_PLANES *ta = xd->above_context;
+    ENTROPY_CONTEXT_PLANES *tl = xd->left_context;
 
-  memcpy(t_above, xd->above_context, sizeof(t_above));
-  memcpy(t_left, xd->left_context, sizeof(t_left));
+    memcpy(t_above, xd->above_context, sizeof(t_above));
+    memcpy(t_left, xd->left_context, sizeof(t_left));
 
-  for (n = 0; n < 4; n++) {
-    int x_idx = n & 1, y_idx = n >> 1;
-    int d_tmp, s_tmp, r_tmp;
+    for (n = 0; n < 4; n++) {
+      int x_idx = n & 1, y_idx = n >> 1;
+      int d_tmp, s_tmp, r_tmp;
 
-    xd->above_context = ta + x_idx;
-    xd->left_context = tl + y_idx;
-    vp9_subtract_mbuv_s_c(x->src_diff,
-                          usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
-                          vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
-                          src_uv_stride,
-                          udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
-                          vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
-                          dst_uv_stride);
+      xd->above_context = ta + x_idx;
+      xd->left_context = tl + y_idx;
+      vp9_subtract_mbuv_s_c(x->src_diff,
+                            usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+                            vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+                            src_uv_stride,
+                            udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+                            vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+                            dst_uv_stride);
 
-    if (mbmi->txfm_size == TX_4X4) {
-      rd_inter16x16_uv_4x4(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
-    } else {
-      rd_inter16x16_uv_8x8(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
+      if (mbmi->txfm_size == TX_4X4) {
+        rd_inter16x16_uv_4x4(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
+      } else {
+        rd_inter16x16_uv_8x8(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
+      }
+
+      r += r_tmp;
+      d += d_tmp;
+      skippable = skippable && s_tmp;
     }
 
-    r += r_tmp;
-    d += d_tmp;
-    skippable = skippable && s_tmp;
+    *rate = r;
+    *distortion = d;
+    *skip = skippable;
+    xd->left_context = tl;
+    xd->above_context = ta;
+    memcpy(xd->above_context, t_above, sizeof(t_above));
+    memcpy(xd->left_context, t_left, sizeof(t_left));
   }
 
-  *rate = r;
-  *distortion = d;
-  *skip = skippable;
-  xd->left_context = tl;
-  xd->above_context = ta;
-  memcpy(xd->above_context, t_above, sizeof(t_above));
-  memcpy(xd->left_context, t_left, sizeof(t_left));
-#if CONFIG_TX32X32
-  }
-#endif
-
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
 
+#if CONFIG_SUPERBLOCKS64
 static void super_block_64_uvrd(MACROBLOCK *x, int *rate,
                                 int *distortion, int *skip);
 static int64_t rd_inter64x64_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
@@ -1996,7 +1993,7 @@
   super_block_64_uvrd(x, rate, distortion, skip);
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 static int64_t rd_inter4x4_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                               int *distortion, int *skip, int fullpixel) {
@@ -2130,7 +2127,6 @@
   mbmi->uv_mode = mode_selected;
 }
 
-#if CONFIG_SUPERBLOCKS
 // TODO(rbultje) very similar to rd_inter32x32_uv(), merge?
 static void super_block_uvrd(MACROBLOCK *x,
                              int *rate,
@@ -2148,57 +2144,57 @@
                           usrc, vsrc, src_uv_stride,
                           udst, vdst, dst_uv_stride);
     rd_inter32x32_uv_16x16(x, rate, distortion, skippable, 1);
-  } else {
+  } else
 #endif
-  int d = 0, r = 0, n, s = 1;
-  ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
-  ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
-  ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
+  {
+    int d = 0, r = 0, n, s = 1;
+    ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
+    ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
+    ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
 
-  memcpy(t_above, xd->above_context, sizeof(t_above));
-  memcpy(t_left,  xd->left_context,  sizeof(t_left));
+    memcpy(t_above, xd->above_context, sizeof(t_above));
+    memcpy(t_left,  xd->left_context,  sizeof(t_left));
 
-  for (n = 0; n < 4; n++) {
-    int x_idx = n & 1, y_idx = n >> 1;
+    for (n = 0; n < 4; n++) {
+      int x_idx = n & 1, y_idx = n >> 1;
 
-    vp9_subtract_mbuv_s_c(x->src_diff,
-                          usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
-                          vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
-                          src_uv_stride,
-                          udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
-                          vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
-                          dst_uv_stride);
-    if (mbmi->txfm_size == TX_4X4) {
-      vp9_transform_mbuv_4x4(x);
-      vp9_quantize_mbuv_4x4(x);
-      s &= vp9_mbuv_is_skippable_4x4(xd);
-    } else {
-      vp9_transform_mbuv_8x8(x);
-      vp9_quantize_mbuv_8x8(x);
-      s &= vp9_mbuv_is_skippable_8x8(xd);
-    }
+      vp9_subtract_mbuv_s_c(x->src_diff,
+                            usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+                            vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
+                            src_uv_stride,
+                            udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+                            vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
+                            dst_uv_stride);
+      if (mbmi->txfm_size == TX_4X4) {
+        vp9_transform_mbuv_4x4(x);
+        vp9_quantize_mbuv_4x4(x);
+        s &= vp9_mbuv_is_skippable_4x4(xd);
+      } else {
+        vp9_transform_mbuv_8x8(x);
+        vp9_quantize_mbuv_8x8(x);
+        s &= vp9_mbuv_is_skippable_8x8(xd);
+      }
 
-    d += vp9_mbuverror(x) >> 2;
-    xd->above_context = t_above + x_idx;
-    xd->left_context = t_left + y_idx;
-    if (mbmi->txfm_size == TX_4X4) {
-      r += rd_cost_mbuv_4x4(x, 0);
-    } else {
-      r += rd_cost_mbuv_8x8(x, 0);
+      d += vp9_mbuverror(x) >> 2;
+      xd->above_context = t_above + x_idx;
+      xd->left_context = t_left + y_idx;
+      if (mbmi->txfm_size == TX_4X4) {
+        r += rd_cost_mbuv_4x4(x, 0);
+      } else {
+        r += rd_cost_mbuv_8x8(x, 0);
+      }
     }
-  }
 
-  xd->above_context = ta_orig;
-  xd->left_context = tl_orig;
+    xd->above_context = ta_orig;
+    xd->left_context = tl_orig;
 
-  *distortion = d;
-  *rate       = r;
-  *skippable  = s;
-#if CONFIG_TX32X32
+    *distortion = d;
+    *rate       = r;
+    *skippable  = s;
   }
-#endif
 }
 
+#if CONFIG_SUPERBLOCKS64
 static void super_block_64_uvrd(MACROBLOCK *x,
                                 int *rate,
                                 int *distortion,
@@ -2239,8 +2235,9 @@
       d += d_tmp;
       s = s && s_tmp;
     }
-  } else {
+  } else
 #endif
+  {
     for (n = 0; n < 16; n++) {
       int x_idx = n & 3, y_idx = n >> 2;
 
@@ -2270,9 +2267,7 @@
         r += rd_cost_mbuv_8x8(x, 0);
       }
     }
-#if CONFIG_TX32X32
   }
-#endif
 
   *distortion = d;
   *rate       = r;
@@ -2281,6 +2276,7 @@
   xd->left_context = tl_orig;
   xd->above_context = ta_orig;
 }
+#endif  // CONFIG_SUPERBLOCKS64
 
 static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi,
                                        MACROBLOCK *x,
@@ -2357,7 +2353,6 @@
   return best_rd;
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
 
 int vp9_cost_mv_ref(VP9_COMP *cpi,
                     MB_PREDICTION_MODE m,
@@ -3651,7 +3646,6 @@
   }
 #endif
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
   if (block_size == BLOCK_64X64) {
     vp9_build_inter64x64_predictors_sb(xd,
@@ -3669,9 +3663,7 @@
                                        xd->dst.v_buffer,
                                        xd->dst.y_stride,
                                        xd->dst.uv_stride);
-  } else
-#endif  // CONFIG_SUPERBLOCKS
-  {
+  } else {
     assert(block_size == BLOCK_16X16);
     vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
     if (is_comp_pred)
@@ -3693,7 +3685,6 @@
     if (threshold < x->encode_breakout)
       threshold = x->encode_breakout;
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
     if (block_size == BLOCK_64X64) {
       var = vp9_variance64x64(*(b->base_src), b->src_stride,
@@ -3703,9 +3694,7 @@
     if (block_size == BLOCK_32X32) {
       var = vp9_variance32x32(*(b->base_src), b->src_stride,
                               xd->dst.y_buffer, xd->dst.y_stride, &sse);
-    } else
-#endif  // CONFIG_SUPERBLOCK
-    {
+    } else {
       assert(block_size == BLOCK_16X16);
       var = vp9_variance16x16(*(b->base_src), b->src_stride,
                               xd->predictor, 16, &sse);
@@ -3720,7 +3709,6 @@
         // Check u and v to make sure skip is ok
         int sse2;
 
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
         if (block_size == BLOCK_64X64) {
           unsigned int sse2u, sse2v;
@@ -3738,9 +3726,7 @@
           var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
                                   xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
           sse2 = sse2u + sse2v;
-        } else
-#endif  // CONFIG_SUPERBLOCKS
-        {
+        } else {
           assert(block_size == BLOCK_16X16);
           sse2 = vp9_uvsse(x);
         }
@@ -3773,7 +3759,6 @@
   }
 
   if (!x->skip) {
-#if CONFIG_SUPERBLOCKS
 #if CONFIG_SUPERBLOCKS64
     if (block_size == BLOCK_64X64) {
       int skippable_y, skippable_uv;
@@ -3807,9 +3792,7 @@
       *rate2 += *rate_uv;
       *distortion += *distortion_uv;
       *skippable = skippable_y && skippable_uv;
-    } else
-#endif  // CONFIG_SUPERBLOCKS
-    {
+    } else {
       assert(block_size == BLOCK_16X16);
 
       vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
@@ -4652,7 +4635,6 @@
                        best_pred_diff, best_txfm_diff);
 }
 
-#if CONFIG_SUPERBLOCKS
 void vp9_rd_pick_intra_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
                                  int *returnrate,
                                  int *returndist) {
@@ -4711,8 +4693,7 @@
     *returndist = dist_y + (dist_uv >> 2);
   }
 }
-#endif
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
                             int *returnrate, int *returndist) {
@@ -4870,7 +4851,6 @@
   *returndist = dist;
 }
 
-#if CONFIG_SUPERBLOCKS
 static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
                                          int recon_yoffset, int recon_uvoffset,
                                          int *returnrate,
@@ -4920,7 +4900,7 @@
 #if CONFIG_TX32X32
   int rate_uv_16x16 = 0, rate_uv_tokenonly_16x16 = 0;
   int dist_uv_16x16 = 0, uv_skip_16x16 = 0;
-  MB_PREDICTION_MODE mode_uv_16x16;
+  MB_PREDICTION_MODE mode_uv_16x16 = NEARESTMV;
 #endif
 
   x->skip = 0;
@@ -5474,7 +5454,6 @@
                                    returnrate, returndistortion, BLOCK_64X64);
 }
 #endif  // CONFIG_SUPERBLOCKS64
-#endif
 
 void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
                                     int recon_yoffset,
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -221,7 +221,7 @@
   for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
     mi = mi_ptr;
     for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
-#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
+#if CONFIG_SUPERBLOCKS64
       if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
         count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
                    t_unpred_seg_counts, 4, mb_row, mb_col);
@@ -230,9 +230,7 @@
       {
         for (i = 0; i < 4; i++) {
           int x_idx = (i & 1) << 1, y_idx = i & 2;
-#if CONFIG_SUPERBLOCKS
           MODE_INFO *sb_mi = mi + y_idx * mis + x_idx;
-#endif
 
           if (mb_col + x_idx >= cm->mb_cols ||
               mb_row + y_idx >= cm->mb_rows) {
@@ -239,14 +237,11 @@
             continue;
           }
 
-#if CONFIG_SUPERBLOCKS
           if (sb_mi->mbmi.sb_type) {
             assert(sb_mi->mbmi.sb_type == BLOCK_SIZE_SB32X32);
             count_segs(cpi, sb_mi, no_pred_segcounts, temporal_predictor_count,
                        t_unpred_seg_counts, 2, mb_row + y_idx, mb_col + x_idx);
-          } else
-#endif
-          {
+          } else {
             int j;
 
             for (j = 0; j < 4; j++) {
@@ -258,9 +253,7 @@
                 continue;
               }
 
-#if CONFIG_SUPERBLOCKS
               assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
-#endif
               count_segs(cpi, mb_mi, no_pred_segcounts,
                          temporal_predictor_count, t_unpred_seg_counts,
                          1, mb_row + y_idx_mb, mb_col + x_idx_mb);
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -141,7 +141,7 @@
       vp9_block2left[tx_size][ib];
   ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
   ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
       vp9_block2above[tx_size][ib];
   ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
@@ -195,7 +195,7 @@
       if (type != PLANE_TYPE_UV) {
         a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
         l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
       } else {
         a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
         l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
@@ -212,7 +212,7 @@
         counts = cpi->coef_counts_16x16;
         probs = cpi->common.fc.coef_probs_16x16;
       }
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
       if (type == PLANE_TYPE_UV) {
         int uv_idx = (ib - 16) >> 2;
         qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 256 * uv_idx;
@@ -219,7 +219,7 @@
       }
 #endif
       break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     case TX_32X32:
 #if CONFIG_CNVCONTEXT
       a_ec = a[0] + a[1] + a[2] + a[3] +
@@ -294,13 +294,13 @@
     if (type != PLANE_TYPE_UV) {
       a[1] = a[2] = a[3] = a_ec;
       l[1] = l[2] = l[3] = l_ec;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     } else {
       a1[0] = a1[1] = a[1] = a_ec;
       l1[0] = l1[1] = l[1] = l_ec;
 #endif
     }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   } else if (tx_size == TX_32X32) {
     a[1] = a[2] = a[3] = a_ec;
     l[1] = l[2] = l[3] = l_ec;
@@ -378,7 +378,7 @@
   return (vp9_mby_is_skippable_16x16(xd) & vp9_mbuv_is_skippable_8x8(xd));
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd) {
   int skip = 1;
   skip &= !xd->block[0].eob;
@@ -768,7 +768,7 @@
   ENTROPY_CONTEXT *const l = (ENTROPY_CONTEXT *)xd->left_context +
       vp9_block2left[tx_size][ib];
   ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
   ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
       vp9_block2above[tx_size][ib];
   ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
@@ -808,7 +808,7 @@
       if (type != PLANE_TYPE_UV) {
         a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
         l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
       } else {
         a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
         l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
@@ -824,7 +824,7 @@
         probs = cpi->common.fc.coef_probs_16x16;
       }
       break;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     case TX_32X32:
 #if CONFIG_CNVCONTEXT
       a_ec = a[0] + a[1] + a[2] + a[3] +
@@ -857,13 +857,13 @@
     if (type != PLANE_TYPE_UV) {
       a[1] = a[2] = a[3] = 0;
       l[1] = l[2] = l[3] = 0;
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
     } else {
       a1[0] = a1[1] = a[1] = a_ec;
       l1[0] = l1[1] = l[1] = l_ec;
 #endif
     }
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
   } else if (tx_size == TX_32X32) {
     a[1] = a[2] = a[3] = a_ec;
     l[1] = l[2] = l[3] = l_ec;
@@ -983,7 +983,7 @@
   }
 }
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 static void stuff_sb_32x32(VP9_COMP *cpi, MACROBLOCKD *xd,
                                TOKENEXTRA **t, int dry_run) {
   int b;
@@ -1005,7 +1005,7 @@
 }
 #endif
 
-#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+#if CONFIG_TX32X32
 void vp9_fix_contexts_sb(MACROBLOCKD *xd) {
   vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
   vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -36,7 +36,7 @@
 extern int vp9_mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
 extern int vp9_mbuv_is_skippable_8x8(MACROBLOCKD *xd);
 extern int vp9_mby_is_skippable_16x16(MACROBLOCKD *xd);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 extern int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd);
 extern int vp9_sbuv_is_skippable_16x16(MACROBLOCKD *xd);
 #endif
@@ -50,12 +50,12 @@
 
 extern void vp9_stuff_mb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
                          TOKENEXTRA **t, int dry_run);
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 extern void vp9_stuff_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
                          TOKENEXTRA **t, int dry_run);
 #endif
 
-#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
+#if CONFIG_TX32X32
 extern void vp9_fix_contexts_sb(MACROBLOCKD *xd);
 #endif
 #ifdef ENTROPY_STATS
--- a/vp9/encoder/vp9_variance_c.c
+++ b/vp9/encoder/vp9_variance_c.c
@@ -24,7 +24,7 @@
   return sum;
 }
 
-#if CONFIG_SUPERBLOCKS
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
                                  int  source_stride,
                                  const uint8_t *ref_ptr,
@@ -37,6 +37,7 @@
   *sse = var;
   return (var - (((int64_t)avg * avg) >> 12));
 }
+#endif  // CONFIG_SUPERBLOCKS64
 
 unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
                                  int  source_stride,
@@ -50,7 +51,6 @@
   *sse = var;
   return (var - (((int64_t)avg * avg) >> 10));
 }
-#endif
 
 unsigned int vp9_variance16x16_c(const uint8_t *src_ptr,
                                  int  source_stride,
@@ -197,7 +197,7 @@
   return vp9_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
 }
 
-#if CONFIG_SUPERBLOCKS
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
                                            int  src_pixels_per_line,
                                            int  xoffset,
@@ -218,6 +218,7 @@
 
   return vp9_variance64x64_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
 }
+#endif  // CONFIG_SUPERBLOCKS64
 
 unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
                                            int  src_pixels_per_line,
@@ -238,7 +239,6 @@
 
   return vp9_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
 }
-#endif
 
 unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
                                               int  source_stride,
@@ -249,7 +249,6 @@
                                        ref_ptr, recon_stride, sse);
 }
 
-#if CONFIG_SUPERBLOCKS
 unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
                                               int  source_stride,
                                               const uint8_t *ref_ptr,
@@ -259,6 +258,7 @@
                                        ref_ptr, recon_stride, sse);
 }
 
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
                                               int  source_stride,
                                               const uint8_t *ref_ptr,
@@ -267,7 +267,7 @@
   return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 0,
                                        ref_ptr, recon_stride, sse);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 
 unsigned int vp9_variance_halfpixvar16x16_v_c(const uint8_t *src_ptr,
@@ -279,7 +279,6 @@
                                        ref_ptr, recon_stride, sse);
 }
 
-#if CONFIG_SUPERBLOCKS
 unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
                                               int  source_stride,
                                               const uint8_t *ref_ptr,
@@ -289,6 +288,7 @@
                                        ref_ptr, recon_stride, sse);
 }
 
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
                                               int  source_stride,
                                               const uint8_t *ref_ptr,
@@ -297,8 +297,9 @@
   return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 0, 8,
                                        ref_ptr, recon_stride, sse);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
+
 unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
                                                int  source_stride,
                                                const uint8_t *ref_ptr,
@@ -308,7 +309,6 @@
                                        ref_ptr, recon_stride, sse);
 }
 
-#if CONFIG_SUPERBLOCKS
 unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
                                                int  source_stride,
                                                const uint8_t *ref_ptr,
@@ -318,6 +318,7 @@
                                        ref_ptr, recon_stride, sse);
 }
 
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
                                                int  source_stride,
                                                const uint8_t *ref_ptr,
@@ -326,7 +327,7 @@
   return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 8,
                                        ref_ptr, recon_stride, sse);
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
                                       int  src_pixels_per_line,
@@ -341,7 +342,6 @@
   return *sse;
 }
 
-#if CONFIG_SUPERBLOCKS
 unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
                                       int  src_pixels_per_line,
                                       int  xoffset,
@@ -355,6 +355,7 @@
   return *sse;
 }
 
+#if CONFIG_SUPERBLOCKS64
 unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
                                       int  src_pixels_per_line,
                                       int  xoffset,
@@ -367,7 +368,7 @@
                                 dst_pixels_per_line, sse);
   return *sse;
 }
-#endif
+#endif  // CONFIG_SUPERBLOCKS64
 
 unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
                                           int  src_pixels_per_line,
--