shithub: libvpx

ref: 749bc98618c8b75bc44dedecb5f18dd3b6c78bf1
parent: 57d459ba8262635ac23b65f3b3098e4b2973faab
author: Scott LaVarnway <slavarnway@google.com>
date: Fri Jan 20 08:52:16 EST 2012

BLOCKD structure cleanup

Removed redundancies: the per-block base_pre/pre/pre_stride and
base_dst/dst/dst_stride fields are gone.  All of that information is
already available in the MACROBLOCKD structure, so each BLOCKD now
carries only a single offset into the frame buffers.

Change-Id: I7556392c6f67b43bef2a5e9932180a737466ef93
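
For context (not part of the patch), here is a minimal sketch of the access
pattern this cleanup changes, assuming the field names in vp8/common/blockd.h
before and after this commit.  The helpers pre_old and pre_new are hypothetical
and only one of them compiles against a given tree, since base_pre/pre/pre_stride
exist only before this change and offset only after it.

    /* Hypothetical illustration only -- not part of this patch.
     * BLOCKD and MACROBLOCKD are declared in vp8/common/blockd.h. */

    /* Before this change: each BLOCKD carried its own reference pointers. */
    static unsigned char *pre_old(BLOCKD *d)
    {
        return *(d->base_pre) + d->pre
               + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride
               + (d->bmi.mv.as_mv.col >> 3);
    }

    /* After this change: BLOCKD keeps only 'offset'; the base pointer and
     * stride come from the enclosing MACROBLOCKD (y_buffer/y_stride for the
     * 16 Y blocks, u_buffer/v_buffer and uv_stride for the chroma blocks). */
    static unsigned char *pre_new(MACROBLOCKD *xd, BLOCKD *d)
    {
        unsigned char *base_pre   = xd->pre.y_buffer;
        int            pre_stride = xd->pre.y_stride;

        return base_pre + d->offset
               + (d->bmi.mv.as_mv.row >> 3) * pre_stride
               + (d->bmi.mv.as_mv.col >> 3);
    }

Callers that previously read the pointers out of BLOCKD now pass base_pre and
pre_stride explicitly (see the new vp8_build_inter_predictors_b signature below),
which is why most hunks in this patch simply thread those two values through.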

--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -185,15 +185,7 @@
     unsigned char  *predictor;
     short *dequant;
 
-    /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
-    unsigned char **base_pre;
-    int pre;
-    int pre_stride;
-
-    unsigned char **base_dst;
-    int dst;
-    int dst_stride;
-
+    int offset;
     char *eob;
 
     union b_mode_info bmi;
--- a/vp8/common/mbpitch.c
+++ b/vp8/common/mbpitch.c
@@ -17,33 +17,6 @@
     DEST = 1
 } BLOCKSET;
 
-static void setup_block
-(
-    BLOCKD *b,
-    int mv_stride,
-    unsigned char **base,
-    int Stride,
-    int offset,
-    BLOCKSET bs
-)
-{
-
-    if (bs == DEST)
-    {
-        b->dst_stride = Stride;
-        b->dst = offset;
-        b->base_dst = base;
-    }
-    else
-    {
-        b->pre_stride = Stride;
-        b->pre = offset;
-        b->base_pre = base;
-    }
-
-}
-
-
 static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs)
 {
     int block;
@@ -65,17 +38,15 @@
 
     for (block = 0; block < 16; block++) /* y blocks */
     {
-        setup_block(&x->block[block], x->dst.y_stride, y, x->dst.y_stride,
-                        (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs);
+        x->block[block].offset =
+            (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4;
     }
 
     for (block = 16; block < 20; block++) /* U and V blocks */
     {
-        setup_block(&x->block[block], x->dst.uv_stride, u, x->dst.uv_stride,
-                        ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
-
-        setup_block(&x->block[block+4], x->dst.uv_stride, v, x->dst.uv_stride,
-                        ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
+        x->block[block+4].offset =
+        x->block[block].offset =
+            ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4;
     }
 }
 
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -122,25 +122,19 @@
 }
 
 
-void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
+void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
 {
     int r;
-    unsigned char *ptr_base;
-    unsigned char *ptr;
     unsigned char *pred_ptr = d->predictor;
+    unsigned char *ptr;
+    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
 
-    ptr_base = *(d->base_pre);
-
     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
     {
-        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
     }
     else
     {
-        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        ptr = ptr_base;
-
         for (r = 0; r < 4; r++)
         {
 #if !(CONFIG_FAST_UNALIGNED)
@@ -152,65 +146,53 @@
             *(uint32_t *)pred_ptr = *(uint32_t *)ptr ;
 #endif
             pred_ptr     += pitch;
-            ptr         += d->pre_stride;
+            ptr         += pre_stride;
         }
     }
 }
 
-static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride)
+static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
 {
-    unsigned char *ptr_base;
     unsigned char *ptr;
+    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
 
-    ptr_base = *(d->base_pre);
-    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-
     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
     {
-        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
+        x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
     }
     else
     {
-        vp8_copy_mem8x8(ptr, d->pre_stride, dst, dst_stride);
+        vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
     }
 }
 
-static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride)
+static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
 {
-    unsigned char *ptr_base;
     unsigned char *ptr;
+    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
 
-    ptr_base = *(d->base_pre);
-    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-
     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
     {
-        x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
+        x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
     }
     else
     {
-        vp8_copy_mem8x4(ptr, d->pre_stride, dst, dst_stride);
+        vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
     }
 }
 
-static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, vp8_subpix_fn_t sppf)
+static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
 {
     int r;
-    unsigned char *ptr_base;
     unsigned char *ptr;
+    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
 
-    ptr_base = *(d->base_pre);
-
     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
     {
-        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
+        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
     }
     else
     {
-        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        ptr = ptr_base;
-
         for (r = 0; r < 4; r++)
         {
 #if !(CONFIG_FAST_UNALIGNED)
@@ -222,7 +204,7 @@
             *(uint32_t *)dst = *(uint32_t *)ptr ;
 #endif
             dst     += dst_stride;
-            ptr         += d->pre_stride;
+            ptr     += pre_stride;
         }
     }
 }
@@ -238,7 +220,7 @@
     int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
     int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
     int offset;
-    int pre_stride = x->block[16].pre_stride;
+    int pre_stride = x->pre.uv_stride;
 
     /* calc uv motion vectors */
     if (mv_row < 0)
@@ -277,6 +259,8 @@
 void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
 {
     int i, j;
+    int pre_stride = x->pre.uv_stride;
+    unsigned char *base_pre;
 
     /* build uv mvs */
     for (i = 0; i < 2; i++)
@@ -316,19 +300,35 @@
         }
     }
 
-    for (i = 16; i < 24; i += 2)
+    base_pre = x->pre.u_buffer;
+    for (i = 16; i < 20; i += 2)
     {
         BLOCKD *d0 = &x->block[i];
         BLOCKD *d1 = &x->block[i+1];
 
         if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-            build_inter_predictors2b(x, d0, d0->predictor, 8);
+            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
         else
         {
-            vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
-            vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
+            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
+            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
         }
     }
+
+    base_pre = x->pre.v_buffer;
+    for (i = 20; i < 24; i += 2)
+    {
+        BLOCKD *d0 = &x->block[i];
+        BLOCKD *d1 = &x->block[i+1];
+
+        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
+        else
+        {
+            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
+            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
+        }
+    }
 }
 
 
@@ -341,7 +341,7 @@
     unsigned char *ptr;
     int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
     int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
-    int pre_stride = x->block[0].pre_stride;
+    int pre_stride = x->pre.y_stride;
 
     ptr_base = x->pre.y_buffer;
     ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
@@ -408,7 +408,7 @@
     int_mv _16x16mv;
 
     unsigned char *ptr_base = x->pre.y_buffer;
-    int pre_stride = x->block[0].pre_stride;
+    int pre_stride = x->pre.y_stride;
 
     _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;
 
@@ -465,11 +465,13 @@
 static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
 {
     int i;
+    unsigned char *base_dst = x->dst.y_buffer;
+    unsigned char *base_pre = x->pre.y_buffer;
 
     if (x->mode_info_context->mbmi.partitioning < 3)
     {
         BLOCKD *b;
-        int dst_stride = x->block[ 0].dst_stride;
+        int dst_stride = x->dst.y_stride;
 
         x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
         x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
@@ -484,13 +486,13 @@
         }
 
         b = &x->block[ 0];
-        build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride);
+        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
         b = &x->block[ 2];
-        build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride);
+        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
         b = &x->block[ 8];
-        build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride);
+        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
         b = &x->block[10];
-        build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride);
+        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
     }
     else
     {
@@ -498,7 +500,7 @@
         {
             BLOCKD *d0 = &x->block[i];
             BLOCKD *d1 = &x->block[i+1];
-            int dst_stride = x->block[ 0].dst_stride;
+            int dst_stride = x->dst.y_stride;
 
             x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
             x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
@@ -509,31 +511,51 @@
             }
 
             if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-                build_inter_predictors2b(x, d0, *(d0->base_dst) + d0->dst, dst_stride);
+                build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
             else
             {
-                build_inter_predictors_b(d0, *(d0->base_dst) + d0->dst, dst_stride, x->subpixel_predict);
-                build_inter_predictors_b(d1, *(d1->base_dst) + d1->dst, dst_stride, x->subpixel_predict);
+                build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
+                build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
             }
 
         }
 
     }
+    base_dst = x->dst.u_buffer;
+    base_pre = x->pre.u_buffer;
+    for (i = 16; i < 20; i += 2)
+    {
+        BLOCKD *d0 = &x->block[i];
+        BLOCKD *d1 = &x->block[i+1];
+        int dst_stride = x->dst.uv_stride;
 
-    for (i = 16; i < 24; i += 2)
+        /* Note: uv mvs already clamped in build_4x4uvmvs() */
+
+        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
+        else
+        {
+            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
+            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
+        }
+    }
+
+    base_dst = x->dst.v_buffer;
+    base_pre = x->pre.v_buffer;
+    for (i = 20; i < 24; i += 2)
     {
         BLOCKD *d0 = &x->block[i];
         BLOCKD *d1 = &x->block[i+1];
-        int dst_stride = x->block[ 16].dst_stride;
+        int dst_stride = x->dst.uv_stride;
 
         /* Note: uv mvs already clamped in build_4x4uvmvs() */
 
         if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-            build_inter_predictors2b(x, d0, *(d0->base_dst) + d0->dst, dst_stride);
+            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
         else
         {
-            build_inter_predictors_b(d0, *(d0->base_dst) + d0->dst, dst_stride, x->subpixel_predict);
-            build_inter_predictors_b(d1, *(d1->base_dst) + d1->dst, dst_stride, x->subpixel_predict);
+            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
+            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
         }
     }
 }
--- a/vp8/common/reconinter.h
+++ b/vp8/common/reconinter.h
@@ -25,6 +25,8 @@
                                                 unsigned char *dst_y,
                                                 int dst_ystride);
 extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch,
+                                         unsigned char *base_pre,
+                                         int pre_stride,
                                          vp8_subpix_fn_t sppf);
 
 extern void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x);
--- a/vp8/common/reconintra4x4.c
+++ b/vp8/common/reconintra4x4.c
@@ -304,12 +304,13 @@
  */
 void vp8_intra_prediction_down_copy(MACROBLOCKD *x)
 {
-    unsigned char *above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
+    int dst_stride = x->dst.y_stride;
+    unsigned char *above_right = x->dst.y_buffer - dst_stride + 16;
 
     unsigned int *src_ptr = (unsigned int *)above_right;
-    unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
-    unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
-    unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);
+    unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * dst_stride);
+    unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * dst_stride);
+    unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * dst_stride);
 
     *dst_ptr0 = *src_ptr;
     *dst_ptr1 = *src_ptr;
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -165,6 +165,8 @@
         else
         {
             short *DQC = xd->dequant_y1;
+            int dst_stride = xd->dst.y_stride;
+            unsigned char *base_dst = xd->dst.y_buffer;
 
             /* clear out residual eob info */
             if(xd->mode_info_context->mbmi.mb_skip_coeff)
@@ -177,10 +179,10 @@
                 BLOCKD *b = &xd->block[i];
                 int b_mode = xd->mode_info_context->bmi[i].as_mode;
 
-                vp8_intra4x4_predict
-                              ( *(b->base_dst) + b->dst, b->dst_stride, b_mode,
-                                *(b->base_dst) + b->dst, b->dst_stride );
 
+                vp8_intra4x4_predict (base_dst + b->offset, dst_stride, b_mode,
+                                      base_dst + b->offset, dst_stride );
+
                 if (xd->eobs[i])
                 {
                     if (xd->eobs[i] > 1)
@@ -187,14 +189,14 @@
                     {
                     vp8_dequant_idct_add
                             (b->qcoeff, DQC,
-                            *(b->base_dst) + b->dst, b->dst_stride);
+                                base_dst + b->offset, dst_stride);
                     }
                     else
                     {
                         vp8_dc_only_idct_add
                             (b->qcoeff[0] * DQC[0],
-                            *(b->base_dst) + b->dst, b->dst_stride,
-                            *(b->base_dst) + b->dst, b->dst_stride);
+                                base_dst + b->offset, dst_stride,
+                                base_dst + b->offset, dst_stride);
                         ((int *)b->qcoeff)[0] = 0;
                     }
                 }
--- a/vp8/decoder/reconintra_mt.c
+++ b/vp8/decoder/reconintra_mt.c
@@ -617,12 +617,15 @@
     unsigned char top_left; /* = Above[-1]; */
 
     BLOCKD *x = &xd->block[num];
+    int dst_stride = xd->dst.y_stride;
+    unsigned char *base_dst = xd->dst.y_buffer;
 
+
     /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
     if (num < 4 && pbi->common.filter_level)
         Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32;
     else
-        Above = *(x->base_dst) + x->dst - x->dst_stride;
+        Above = base_dst + x->offset - dst_stride;
 
     if (num%4==0 && pbi->common.filter_level)
     {
@@ -630,10 +633,10 @@
             Left[i] = pbi->mt_yleft_col[mb_row][num + i];
     }else
     {
-        Left[0] = (*(x->base_dst))[x->dst - 1];
-        Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
-        Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
-        Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
+        Left[0] = (base_dst)[x->offset - 1];
+        Left[1] = (base_dst)[x->offset - 1 + dst_stride];
+        Left[2] = (base_dst)[x->offset - 1 + 2 * dst_stride];
+        Left[3] = (base_dst)[x->offset - 1 + 3 * dst_stride];
     }
 
     if ((num==4 || num==8 || num==12) && pbi->common.filter_level)
@@ -918,19 +921,22 @@
     unsigned int *dst_ptr0;
     unsigned int *dst_ptr1;
     unsigned int *dst_ptr2;
+    int dst_stride = x->dst.y_stride;
+    unsigned char *base_dst = x->dst.y_buffer;
 
+
     if (pbi->common.filter_level)
         above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16;
     else
-        above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
+        above_right = base_dst + x->block[0].offset - dst_stride + 16;
 
     src_ptr = (unsigned int *)above_right;
     /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
     dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
     dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/
-    dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride);
-    dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride);
-    dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride);
+    dst_ptr0 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 3 * dst_stride);
+    dst_ptr1 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 7 * dst_stride);
+    dst_ptr2 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 11 * dst_stride);
     *dst_ptr0 = *src_ptr;
     *dst_ptr1 = *src_ptr;
     *dst_ptr2 = *src_ptr;
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -171,6 +171,8 @@
     if (xd->mode_info_context->mbmi.mode == B_PRED)
     {
         short *DQC = xd->dequant_y1;
+        int dst_stride = xd->dst.y_stride;
+        unsigned char *base_dst = xd->dst.y_buffer;
 
         for (i = 0; i < 16; i++)
         {
@@ -177,8 +179,8 @@
             BLOCKD *b = &xd->block[i];
             int b_mode = xd->mode_info_context->bmi[i].as_mode;
 
-            vp8mt_predict_intra4x4(pbi, xd, b_mode, *(b->base_dst) + b->dst,
-                                   b->dst_stride, mb_row, mb_col, i);
+            vp8mt_predict_intra4x4(pbi, xd, b_mode, base_dst + b->offset,
+                                   dst_stride, mb_row, mb_col, i);
 
             if (xd->eobs[i] )
             {
@@ -186,14 +188,14 @@
                 {
                     vp8_dequant_idct_add
                         (b->qcoeff, DQC,
-                        *(b->base_dst) + b->dst, b->dst_stride);
+                        base_dst + b->offset, dst_stride);
                 }
                 else
                 {
                     vp8_dc_only_idct_add
                         (b->qcoeff[0] * DQC[0],
-                        *(b->base_dst) + b->dst, b->dst_stride,
-                        *(b->base_dst) + b->dst, b->dst_stride);
+                        base_dst + b->offset, dst_stride,
+                        base_dst + b->offset, dst_stride);
                     ((int *)b->qcoeff)[0] = 0;
                 }
             }
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -53,9 +53,10 @@
 {
     BLOCKD *b = &x->e_mbd.block[ib];
     BLOCK *be = &x->block[ib];
+    int dst_stride = x->e_mbd.dst.y_stride;
+    unsigned char *base_dst = x->e_mbd.dst.y_buffer;
 
-    vp8_intra4x4_predict
-                (*(b->base_dst) + b->dst, b->dst_stride,
+    vp8_intra4x4_predict(base_dst + b->offset, dst_stride,
                  b->bmi.as_mode, b->predictor, 16);
 
     vp8_subtract_b(be, b, 16);
@@ -66,14 +67,14 @@
 
     if (*b->eob > 1)
     {
-        vp8_short_idct4x4llm(b->dqcoeff,
-            b->predictor, 16, *(b->base_dst) + b->dst, b->dst_stride);
+      vp8_short_idct4x4llm(b->dqcoeff,
+            b->predictor, 16, base_dst + b->offset, dst_stride);
     }
     else
     {
-        vp8_dc_only_idct_add
-            (b->dqcoeff[0], b->predictor, 16, *(b->base_dst) + b->dst,
-                b->dst_stride);
+      vp8_dc_only_idct_add
+            (b->dqcoeff[0], b->predictor, 16, base_dst + b->offset,
+                dst_stride);
     }
 }
 
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -396,12 +396,12 @@
     unsigned char *src_ptr = (*(b->base_src) + b->src);
     int src_stride = b->src_stride;
     unsigned char *ref_ptr;
-    int ref_stride=d->pre_stride;
+    int ref_stride = x->e_mbd.pre.y_stride;
 
     // Set up pointers for this macro block recon buffer
     xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
 
-    ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
+    ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset );
 
     vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
 }
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -211,10 +211,13 @@
 
     int y_stride;
     int offset;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
 
+
 #if ARCH_X86 || ARCH_X86_64
     MACROBLOCKD *xd = &x->e_mbd;
-    unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+    unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
     unsigned char *y;
     int buf_r1, buf_r2, buf_c1, buf_c2;
 
@@ -226,11 +229,11 @@
     y_stride = 32;
 
     /* Copy to intermediate buffer before searching. */
-    vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
+    vfp->copymem(y0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
     y = xd->y_buf + y_stride*buf_r1 +buf_c1;
 #else
-    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-    y_stride = d->pre_stride;
+    unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+    y_stride = pre_stride;
 #endif
 
     offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
@@ -347,19 +350,21 @@
     int whichdir ;
     int thismse;
     int y_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
 
 #if ARCH_X86 || ARCH_X86_64
     MACROBLOCKD *xd = &x->e_mbd;
-    unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+    unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
     unsigned char *y;
 
     y_stride = 32;
     /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
-     vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+     vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
      y = xd->y_buf + y_stride + 1;
 #else
-     unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-     y_stride = d->pre_stride;
+     unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+     y_stride = pre_stride;
 #endif
 
     // central mv
@@ -662,19 +667,21 @@
     int whichdir ;
     int thismse;
     int y_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
 
 #if ARCH_X86 || ARCH_X86_64
     MACROBLOCKD *xd = &x->e_mbd;
-    unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+    unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
     unsigned char *y;
 
     y_stride = 32;
     /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
-    vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+    vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
     y = xd->y_buf + y_stride + 1;
 #else
-    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
-    y_stride = d->pre_stride;
+    unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+    y_stride = pre_stride;
 #endif
 
     // central mv
@@ -842,7 +849,10 @@
 
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+
+    int in_what_stride = pre_stride;
     int br, bc;
     int_mv this_mv;
     unsigned int bestsad = 0x7fffffff;
@@ -865,8 +875,8 @@
     bc = ref_mv->as_mv.col;
 
     // Work out the start point for the search
-    base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
-    this_offset = base_offset + (br * (d->pre_stride)) + bc;
+    base_offset = (unsigned char *)(base_pre + d->offset);
+    this_offset = base_offset + (br * (pre_stride)) + bc;
     this_mv.as_mv.row = br;
     this_mv.as_mv.col = bc;
     bestsad = vfp->sdf( what, what_stride, this_offset,
@@ -1029,7 +1039,9 @@
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
     unsigned char *in_what;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
     unsigned char *best_address;
 
     int tot_steps;
@@ -1061,7 +1073,7 @@
     best_mv->as_mv.col = ref_col;
 
     // Work out the start point for the search
-    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
+    in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
     best_address = in_what;
 
     // Check the starting position
@@ -1150,7 +1162,9 @@
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
     unsigned char *in_what;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
     unsigned char *best_address;
 
     int tot_steps;
@@ -1182,7 +1196,7 @@
     best_mv->as_mv.col = ref_col;
 
     // Work out the start point for the search
-    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
+    in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
     best_address = in_what;
 
     // Check the starting position
@@ -1300,8 +1314,10 @@
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
     unsigned char *in_what;
-    int in_what_stride = d->pre_stride;
-    int mv_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
+    int mv_stride = pre_stride;
     unsigned char *bestaddress;
     int_mv *best_mv = &d->bmi.mv;
     int_mv this_mv;
@@ -1325,8 +1341,8 @@
     fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
 
     // Work out the mid point for the search
-    in_what = *(d->base_pre) + d->pre;
-    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+    in_what = base_pre + d->offset;
+    bestaddress = in_what + (ref_row * pre_stride) + ref_col;
 
     best_mv->as_mv.row = ref_row;
     best_mv->as_mv.col = ref_col;
@@ -1392,8 +1408,10 @@
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
     unsigned char *in_what;
-    int in_what_stride = d->pre_stride;
-    int mv_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
+    int mv_stride = pre_stride;
     unsigned char *bestaddress;
     int_mv *best_mv = &d->bmi.mv;
     int_mv this_mv;
@@ -1419,8 +1437,8 @@
     fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
 
     // Work out the mid point for the search
-    in_what = *(d->base_pre) + d->pre;
-    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+    in_what = base_pre + d->offset;
+    bestaddress = in_what + (ref_row * pre_stride) + ref_col;
 
     best_mv->as_mv.row = ref_row;
     best_mv->as_mv.col = ref_col;
@@ -1521,9 +1539,11 @@
 {
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
     unsigned char *in_what;
-    int in_what_stride = d->pre_stride;
-    int mv_stride = d->pre_stride;
+    int in_what_stride = pre_stride;
+    int mv_stride = pre_stride;
     unsigned char *bestaddress;
     int_mv *best_mv = &d->bmi.mv;
     int_mv this_mv;
@@ -1550,8 +1570,8 @@
     fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
 
     // Work out the mid point for the search
-    in_what = *(d->base_pre) + d->pre;
-    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+    in_what = base_pre + d->offset;
+    bestaddress = in_what + (ref_row * pre_stride) + ref_col;
 
     best_mv->as_mv.row = ref_row;
     best_mv->as_mv.col = ref_col;
@@ -1684,10 +1704,12 @@
     short this_row_offset, this_col_offset;
 
     int what_stride = b->src_stride;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
     unsigned char *what = (*(b->base_src) + b->src);
-    unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
-        (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
+    unsigned char *best_address = (unsigned char *)(base_pre + d->offset +
+        (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
     unsigned char *check_here;
     unsigned int thissad;
     int_mv this_mv;
@@ -1761,10 +1783,12 @@
     short this_row_offset, this_col_offset;
 
     int what_stride = b->src_stride;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int in_what_stride = pre_stride;
     unsigned char *what = (*(b->base_src) + b->src);
-    unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
-        (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
+    unsigned char *best_address = (unsigned char *)(base_pre + d->offset +
+        (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
     unsigned char *check_here;
     unsigned int thissad;
     int_mv this_mv;
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -68,12 +68,13 @@
     BLOCKD *d = &mb->e_mbd.block[0];
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;
-    unsigned char *in_what = *(d->base_pre) + d->pre ;
-    int in_what_stride = d->pre_stride;
+    int pre_stride = mb->e_mbd.pre.y_stride;
+    unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset ;
+    int in_what_stride = pre_stride;
     int xoffset = this_mv.as_mv.col & 7;
     int yoffset = this_mv.as_mv.row & 7;
 
-    in_what += (this_mv.as_mv.row >> 3) * d->pre_stride + (this_mv.as_mv.col >> 3);
+    in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
 
     if (xoffset | yoffset)
     {
@@ -136,6 +137,8 @@
 
     BLOCKD *b = &x->e_mbd.block[ib];
     BLOCK *be = &x->block[ib];
+    int dst_stride = x->e_mbd.dst.y_stride;
+    unsigned char *base_dst = x->e_mbd.dst.y_buffer;
     B_PREDICTION_MODE mode;
     int best_rd = INT_MAX;       // 1<<30
     int rate;
@@ -147,7 +150,7 @@
 
         rate = mode_costs[mode];
         vp8_intra4x4_predict
-                     (*(b->base_dst) + b->dst, b->dst_stride,
+                     (base_dst + b->offset, dst_stride,
                       mode, b->predictor, 16);
         distortion = get_prediction_error(be, b);
         this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -457,7 +457,7 @@
     int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
     int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
     int offset;
-    int pre_stride = x->e_mbd.block[16].pre_stride;
+    int pre_stride = x->e_mbd.pre.uv_stride;
 
     if (mv_row < 0)
         mv_row -= 1;
@@ -635,6 +635,8 @@
      * */
     DECLARE_ALIGNED_ARRAY(16, unsigned char,  best_predictor, 16*4);
     DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16);
+    int dst_stride = x->e_mbd.dst.y_stride;
+    unsigned char *base_dst = x->e_mbd.dst.y_buffer;
 
     for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++)
     {
@@ -643,9 +645,8 @@
 
         rate = bmode_costs[mode];
 
-        vp8_intra4x4_predict
-                     (*(b->base_dst) + b->dst, b->dst_stride,
-                      mode, b->predictor, 16);
+        vp8_intra4x4_predict(base_dst + b->offset, dst_stride, mode,
+                             b->predictor, 16);
         vp8_subtract_b(be, b, 16);
         x->short_fdct4x4(be->src_diff, be->coeff, 32);
         x->quantize_b(be, b);
@@ -674,8 +675,8 @@
     }
     b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
 
-    vp8_short_idct4x4llm(best_dqcoeff,
-        best_predictor, 16, *(b->base_dst) + b->dst, b->dst_stride);
+    vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, base_dst + b->offset,
+                         dst_stride);
 
     return best_rd;
 }
@@ -1008,7 +1009,10 @@
 {
     int i;
     unsigned int distortion = 0;
+    int pre_stride = x->e_mbd.pre.y_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
 
+
     for (i = 0; i < 16; i++)
     {
         if (labels[i] == which_label)
@@ -1016,8 +1020,7 @@
             BLOCKD *bd = &x->e_mbd.block[i];
             BLOCK *be = &x->block[i];
 
-
-            vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
+            vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, x->e_mbd.subpixel_predict);
             vp8_subtract_b(be, bd, 16);
             x->short_fdct4x4(be->src_diff, be->coeff, 32);
 
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -164,9 +164,9 @@
     unsigned char **base_src = b->base_src;
     int src = b->src;
     int src_stride = b->src_stride;
-    unsigned char **base_pre = d->base_pre;
-    int pre = d->pre;
-    int pre_stride = d->pre_stride;
+    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
+    int pre = d->offset;
+    int pre_stride = x->e_mbd.pre.y_stride;
 
     best_ref_mv1.as_int = 0;
     best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3;
@@ -177,9 +177,9 @@
     b->src_stride = arf_frame->y_stride;
     b->src = mb_offset;
 
-    d->base_pre = &frame_ptr->y_buffer;
-    d->pre_stride = frame_ptr->y_stride;
-    d->pre = mb_offset;
+    x->e_mbd.pre.y_buffer = frame_ptr->y_buffer;
+    x->e_mbd.pre.y_stride = frame_ptr->y_stride;
+    d->offset = mb_offset;
 
     // Further step/diamond searches as necessary
     if (cpi->Speed < 8)
@@ -221,9 +221,9 @@
     b->base_src = base_src;
     b->src = src;
     b->src_stride = src_stride;
-    d->base_pre = base_pre;
-    d->pre = pre;
-    d->pre_stride = pre_stride;
+    x->e_mbd.pre.y_buffer = base_pre;
+    d->offset = pre;
+    x->e_mbd.pre.y_stride = pre_stride;
 
     return bestsme;
 }