shithub: libvpx

ref: 664cd5ac91d432bad9f4f375b583a490464288c5
parent: 058566797a18e226acd1868a562b16e0903b73db
parent: 773bcc300d88b47b3b465b7627e794b62b01938f
author: John Koleszar <jkoleszar@google.com>
date: Fri Jul 22 20:05:14 EDT 2011

Merge remote branch 'internal/upstream' into HEAD

--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -200,7 +200,7 @@
     union b_mode_info bmi;
 } BLOCKD;
 
-typedef struct
+typedef struct MacroBlockD
 {
     DECLARE_ALIGNED(16, short, diff[400]);      /* from idct diff */
     DECLARE_ALIGNED(16, unsigned char,  predictor[384]);
@@ -277,6 +277,14 @@
     void *current_bc;
 
     int corrupted;
+
+#if ARCH_X86 || ARCH_X86_64
+    /* This is an intermediate buffer currently used in sub-pixel motion search
+     * to keep a copy of the reference area. The buffer could be used for
+     * other purposes as well.
+     */
+    DECLARE_ALIGNED(32, unsigned char, y_buf[22*32]);
+#endif
 
 #if CONFIG_RUNTIME_CPU_DETECT
     struct VP8_COMMON_RTCD  *rtcd;
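
The 22x32 sizing follows from the sub-pixel search range introduced in mcomp.c below: a -3 ~ 3 search around a 16x16 macroblock needs 16+3+3 = 22 rows, and the 22 needed columns are padded to a 32-byte stride so every row starts aligned. A minimal sketch of the arithmetic (the macro names here are illustrative, not from the source):

    /* Illustrative sizing only; the source declares just y_buf itself.     */
    #define SUBPEL_MARGIN 3                        /* search range is -3 ~ 3 */
    #define MB_ROWS       16                       /* 16x16 luma macroblock  */
    #define BUF_ROWS  (MB_ROWS + 2 * SUBPEL_MARGIN)   /* 16 + 3 + 3 = 22     */
    #define BUF_STRIDE 32          /* 22 cols padded to a 32-byte stride     */
    /* => unsigned char y_buf[BUF_ROWS * BUF_STRIDE], i.e. y_buf[22*32]      */
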
--- a/vp8/common/loopfilter.c
+++ b/vp8/common/loopfilter.c
@@ -195,8 +195,7 @@
 
 void vp8_loop_filter_frame_init(VP8_COMMON *cm,
                                 MACROBLOCKD *mbd,
-                                int default_filt_lvl,
-                                int sharpness_lvl)
+                                int default_filt_lvl)
 {
     int seg,  /* segment number */
         ref,  /* index in ref_lf_deltas */
@@ -205,10 +204,10 @@
     loop_filter_info_n *lfi = &cm->lf_info;
 
     /* update limits if sharpness has changed */
-    if(cm->last_sharpness_level != sharpness_lvl)
+    if(cm->last_sharpness_level != cm->sharpness_level)
     {
-        vp8_loop_filter_update_sharpness(lfi, sharpness_lvl);
-        cm->last_sharpness_level = sharpness_lvl;
+        vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level);
+        cm->last_sharpness_level = cm->sharpness_level;
     }
 
     for(seg = 0; seg < MAX_MB_SEGMENTS; seg++)
@@ -283,8 +282,7 @@
 void vp8_loop_filter_frame
 (
     VP8_COMMON *cm,
-    MACROBLOCKD *mbd,
-    int default_filt_lvl
+    MACROBLOCKD *mbd
 )
 {
     YV12_BUFFER_CONFIG *post = cm->frame_to_show;
@@ -304,7 +302,7 @@
     const MODE_INFO *mode_info_context = cm->mi;
 
     /* Initialize the loop filter for this frame. */
-    vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl, cm->sharpness_level);
+    vp8_loop_filter_frame_init(cm, mbd, cm->filter_level);
 
     /* Set up the buffer pointers */
     y_ptr = post->y_buffer;
@@ -393,8 +391,7 @@
 (
     VP8_COMMON *cm,
     MACROBLOCKD *mbd,
-    int default_filt_lvl,
-    int sharpness_lvl
+    int default_filt_lvl
 )
 {
     YV12_BUFFER_CONFIG *post = cm->frame_to_show;
@@ -412,8 +409,6 @@
     /* Point at base of Mb MODE_INFO list */
     const MODE_INFO *mode_info_context = cm->mi;
 
-    sharpness_lvl = cm->sharpness_level;
-
 #if 0
     if(default_filt_lvl == 0) /* no filter applied */
         return;
@@ -420,7 +415,7 @@
 #endif
 
     /* Initialize the loop filter for this frame. */
-    vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl, sharpness_lvl);
+    vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl);
 
     /* Set up the buffer pointers */
     y_ptr = post->y_buffer;
@@ -503,9 +498,7 @@
 (
     VP8_COMMON *cm,
     MACROBLOCKD *mbd,
-    int default_filt_lvl,
-    int sharpness_lvl,
-    int Fraction
+    int default_filt_lvl
 )
 {
     YV12_BUFFER_CONFIG *post = cm->frame_to_show;
@@ -528,16 +521,10 @@
 
     int lvl_seg[MAX_MB_SEGMENTS];
 
-    sharpness_lvl = cm->sharpness_level;
-
-#if 0
-    if(default_filt_lvl == 0) /* no filter applied */
-        return;
-#endif
-
     mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1);
 
-    linestocopy = (post->y_height >> (4 + Fraction));
+    /* 3 is a magic number. 4 is probably magic too */
+    linestocopy = (post->y_height >> (4 + 3));
 
     if (linestocopy < 1)
         linestocopy = 1;
@@ -545,6 +532,9 @@
     linestocopy <<= 4;
 
     /* Note the baseline filter values for each segment */
+    /* See vp8_loop_filter_frame_init. Rather than call that for each change
+     * to default_filt_lvl, copy the relevant calculation here.
+     */
     if (alt_flt_enabled)
     {
         for (i = 0; i < MAX_MB_SEGMENTS; i++)
@@ -563,10 +553,7 @@
             }
         }
     }
-    else
-        lvl_seg[0] = default_filt_lvl;
 
-
     /* Set up the buffer pointers */
     y_ptr = post->y_buffer + (post->y_height >> 5) * 16 * post->y_stride;
 
@@ -582,7 +569,7 @@
             if (alt_flt_enabled)
                 filter_level = lvl_seg[mode_info_context->mbmi.segment_id];
             else
-                filter_level = lvl_seg[0];
+                filter_level = default_filt_lvl;
 
             if (filter_level)
             {
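
With the Fraction argument hard-coded to 3, the partial-frame filter always operates on roughly 1/8 of the frame height. A worked instance of the computation above, assuming a hypothetical 720-pixel-high frame:

    /* Illustrative arithmetic; 720 is a hypothetical frame height.          */
    int y_height    = 720;
    int linestocopy = y_height >> (4 + 3);  /* 720 >> 7 = 5 (16-row units)    */
    if (linestocopy < 1)
        linestocopy = 1;
    linestocopy <<= 4;                      /* 5 * 16 = 80 pixel rows filtered */
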
--- a/vp8/common/loopfilter.h
+++ b/vp8/common/loopfilter.h
@@ -142,4 +142,27 @@
     unsigned char *v
 );
 
+/* assorted loopfilter functions which get used elsewhere */
+struct VP8Common;
+struct MacroBlockD;
+
+void vp8_loop_filter_init(struct VP8Common *cm);
+
+void vp8_loop_filter_frame_init(struct VP8Common *cm,
+                                struct MacroBlockD *mbd,
+                                int default_filt_lvl);
+
+void vp8_loop_filter_frame(struct VP8Common *cm, struct MacroBlockD *mbd);
+
+void vp8_loop_filter_partial_frame(struct VP8Common *cm,
+                                   struct MacroBlockD *mbd,
+                                   int default_filt_lvl);
+
+void vp8_loop_filter_frame_yonly(struct VP8Common *cm,
+                                 struct MacroBlockD *mbd,
+                                 int default_filt_lvl);
+
+void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
+                                      int sharpness_lvl);
+
 #endif
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -152,7 +152,6 @@
 
 
     INTERPOLATIONFILTERTYPE mcomp_filter_type;
-    LOOPFILTERTYPE last_filter_type;
     LOOPFILTERTYPE filter_type;
 
     loop_filter_info_n lf_info;
@@ -208,10 +207,5 @@
 #endif
     struct postproc_state  postproc_state;
 } VP8_COMMON;
-
-void vp8_loop_filter_init(VP8_COMMON *cm);
-void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd,
-                                int default_filt_lvl, int sharpness_lvl);
-void vp8_loop_filter_frame(VP8_COMMON *cm,    MACROBLOCKD *mbd,  int filt_val);
 
 #endif
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -1152,14 +1152,6 @@
     if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
     {
         vp8mt_decode_mb_rows(pbi, xd);
-        if(pbi->common.filter_level)
-        {
-            /*vp8_mt_loop_filter_frame(pbi);*/ /*cm, &pbi->mb, cm->filter_level);*/
-
-            pc->last_frame_type = pc->frame_type;
-            pc->last_filter_type = pc->filter_type;
-            pc->last_sharpness_level = pc->sharpness_level;
-        }
         vp8_yv12_extend_frame_borders_ptr(&pc->yv12_fb[pc->new_fb_idx]);    /*cm->frame_to_show);*/
     }
     else
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -128,15 +128,8 @@
      */
     vp8cx_init_de_quantizer(pbi);
 
-    {
-        VP8_COMMON *cm = &pbi->common;
+    vp8_loop_filter_init(&pbi->common);
 
-        vp8_loop_filter_init(cm);
-        cm->last_frame_type = KEY_FRAME;
-        cm->last_filter_type = cm->filter_type;
-        cm->last_sharpness_level = cm->sharpness_level;
-    }
-
     pbi->common.error.setjmp = 0;
 
 #if CONFIG_ERROR_CONCEALMENT
@@ -510,14 +503,10 @@
             return -1;
         }
 
-        if(pbi->common.filter_level)
+        if(cm->filter_level)
         {
             /* Apply the loop filter if appropriate. */
-            vp8_loop_filter_frame(cm, &pbi->mb, cm->filter_level);
-
-            cm->last_frame_type = cm->frame_type;
-            cm->last_filter_type = cm->filter_type;
-            cm->last_sharpness_level = cm->sharpness_level;
+            vp8_loop_filter_frame(cm, &pbi->mb);
         }
         vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
     }
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -746,7 +746,7 @@
         }
 
         /* Initialize the loop filter for this frame. */
-        vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level, pc->sharpness_level);
+        vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
     }
 
     setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count);
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -47,7 +47,9 @@
     cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c;
     cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c;
     cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c;
-
+#if ARCH_X86 || ARCH_X86_64
+    cpi->rtcd.variance.copy32xn              = vp8_copy32xn_c;
+#endif
     cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;
     cpi->rtcd.variance.var8x8                = vp8_variance8x8_c;
     cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -11,7 +11,7 @@
 
 #include "mcomp.h"
 #include "vpx_mem/vpx_mem.h"
-
+#include "vpx_ports/config.h"
 #include <stdio.h>
 #include <limits.h>
 #include <math.h>
@@ -165,11 +165,19 @@
     x->searches_per_step = 8;
 }
 
-
+/*
+ * To avoid the penalty of cache-line-crossing reads, preload the reference
+ * area into a small buffer that is aligned so that no read from it crosses
+ * a cache line. This reduces the CPU cycles spent reading reference data
+ * in the sub-pixel filter functions.
+ * TODO: since the sub-pixel search range here is -3 ~ 3, a 22-row x 32-col
+ * area is copied, which is enough for a 16x16 macroblock. Later, for
+ * SPLITMV, the area could be reduced.
+ */
 #define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
-#define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to predictor base of a motionvector
+#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
 #define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
-#define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
+#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
@@ -176,8 +184,6 @@
 #define MIN(x,y) (((x)<(y))?(x):(y))
 #define MAX(x,y) (((x)>(y))?(x):(y))
 
-//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
-
 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                              int_mv *bestmv, int_mv *ref_mv,
                                              int error_per_bit,
@@ -185,7 +191,6 @@
                                              int *mvcost[2], int *distortion,
                                              unsigned int *sse1)
 {
-    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
     unsigned char *z = (*(b->base_src) + b->src);
 
     int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
@@ -204,12 +209,38 @@
     int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
     int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
 
+    int y_stride;
+    int offset;
+
+#if ARCH_X86 || ARCH_X86_64
+    MACROBLOCKD *xd = &x->e_mbd;
+    unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+    unsigned char *y;
+    int buf_r1, buf_r2, buf_c1, buf_c2;
+
+    // Clamping to avoid out-of-range data access
+    buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
+    buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
+    buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
+    buf_c2 = ((bestmv->as_mv.col + 3) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):3;
+    y_stride = 32;
+
+    /* Copy to intermediate buffer before searching. */
+    vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
+    y = xd->y_buf + y_stride*buf_r1 + buf_c1;
+#else
+    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+    y_stride = d->pre_stride;
+#endif
+
+    offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
     // central mv
     bestmv->as_mv.row <<= 3;
     bestmv->as_mv.col <<= 3;
 
     // calculate central point error
-    besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
+    besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
     *distortion = besterr;
     besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
 
@@ -296,6 +327,7 @@
 #undef PRE
 #undef SP
 #undef DIST
+#undef IFMVCV
 #undef ERR
 #undef CHECK_BETTER
 #undef MIN
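
The buf_r1/buf_r2/buf_c1/buf_c2 clamps above shrink the preloaded window whenever the best motion vector sits within 3 of a search bound, so the copy never reads outside the valid reference area. A hedged example with made-up values:

    /* Hypothetical values, for illustration only.                           */
    int row = -15, mv_row_min = -16;   /* best MV one row inside the minimum  */
    int buf_r1 = ((row - 3) < mv_row_min) ? (row - mv_row_min) : 3;  /* = 1   */
    /* vfp->copymem then copies 16 + buf_r1 + buf_r2 rows, and y is offset    */
    /* into y_buf by y_stride*buf_r1 + buf_c1, so PRE(r,c) stays in bounds.   */
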
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -50,9 +50,6 @@
 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
 
-extern void vp8_init_loop_filter(VP8_COMMON *cm);
-extern void vp8_loop_filter_frame(VP8_COMMON *cm,    MACROBLOCKD *mbd,  int filt_val);
-extern void vp8_loop_filter_frame_yonly(VP8_COMMON *cm,    MACROBLOCKD *mbd,  int filt_val, int sharpness_lvl);
 extern void vp8_dmachine_specific_config(VP8_COMP *cpi);
 extern void vp8_cmachine_specific_config(VP8_COMP *cpi);
 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
@@ -2176,6 +2173,14 @@
     cpi->fn_ptr[BLOCK_4X4].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
     cpi->fn_ptr[BLOCK_4X4].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
 
+#if ARCH_X86 || ARCH_X86_64
+    cpi->fn_ptr[BLOCK_16X16].copymem      = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+    cpi->fn_ptr[BLOCK_16X8].copymem       = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+    cpi->fn_ptr[BLOCK_8X16].copymem       = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+    cpi->fn_ptr[BLOCK_8X8].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+    cpi->fn_ptr[BLOCK_4X4].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+#endif
+
     cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
     cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
     cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
@@ -2186,12 +2191,9 @@
     //vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later
     //when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame.
     vp8cx_init_quantizer(cpi);
-    {
-        vp8_loop_filter_init(cm);
-        cm->last_frame_type = KEY_FRAME;
-        cm->last_filter_type = cm->filter_type;
-        cm->last_sharpness_level = cm->sharpness_level;
-    }
+
+    vp8_loop_filter_init(cm);
+
     cpi->common.error.setjmp = 0;
     return (VP8_PTR) cpi;
 
@@ -3328,9 +3330,7 @@
     if (cm->filter_level > 0)
     {
         vp8cx_set_alt_lf_level(cpi, cm->filter_level);
-        vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, cm->filter_level);
-        cm->last_filter_type = cm->filter_type;
-        cm->last_sharpness_level = cm->sharpness_level;
+        vp8_loop_filter_frame(cm, &cpi->mb.e_mbd);
     }
 
     vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -16,12 +16,11 @@
 #include "vpx_scale/yv12extend.h"
 #include "vpx_scale/vpxscale.h"
 #include "vp8/common/alloccommon.h"
+#include "vp8/common/loopfilter.h"
 #if ARCH_ARM
 #include "vpx_ports/arm.h"
 #endif
 
-extern void vp8_loop_filter_frame(VP8_COMMON *cm,    MACROBLOCKD *mbd,  int filt_val);
-extern void vp8_loop_filter_frame_yonly(VP8_COMMON *cm,    MACROBLOCKD *mbd,  int filt_val, int sharpness_lvl);
 extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
 #if HAVE_ARMV7
 extern void vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
@@ -104,15 +103,6 @@
     return Total;
 }
 
-extern void vp8_loop_filter_partial_frame
-(
-    VP8_COMMON *cm,
-    MACROBLOCKD *mbd,
-    int default_filt_lvl,
-    int sharpness_lvl,
-    int Fraction
-);
-
 // Enforce a minimum filter level based upon baseline Q
 static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
 {
@@ -161,7 +151,6 @@
     int best_filt_val = cm->filter_level;
 
     //  Make a copy of the unfiltered / processed recon buffer
-    //vp8_yv12_copy_frame_ptr( cm->frame_to_show, &cpi->last_frame_uf  );
     vp8_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);
 
     if (cm->frame_type == KEY_FRAME)
@@ -169,6 +158,12 @@
     else
         cm->sharpness_level = cpi->oxcf.Sharpness;
 
+    if (cm->sharpness_level != cm->last_sharpness_level)
+    {
+        vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
+        cm->last_sharpness_level = cm->sharpness_level;
+    }
+
     // Start the search at the previous frame filter level unless it is now out of range.
     if (cm->filter_level < min_filter_level)
         cm->filter_level = min_filter_level;
@@ -178,13 +173,8 @@
     filt_val = cm->filter_level;
     best_filt_val = filt_val;
 
-    // Set up alternate filter values
-
     // Get the err using the previous frame's filter value.
-    vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0  , 3);
-    cm->last_frame_type = cm->frame_type;
-    cm->last_filter_type = cm->filter_type;
-    cm->last_sharpness_level = cm->sharpness_level;
+    vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
     best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
 
@@ -197,15 +187,11 @@
     while (filt_val >= min_filter_level)
     {
         // Apply the loop filter
-        vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0, 3);
-        cm->last_frame_type = cm->frame_type;
-        cm->last_filter_type = cm->filter_type;
-        cm->last_sharpness_level = cm->sharpness_level;
+        vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
         // Get the err for filtered frame
         filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
 
-
         //  Re-instate the unfiltered frame
         vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
 
@@ -234,10 +220,7 @@
         while (filt_val < max_filter_level)
         {
             // Apply the loop filter
-            vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val, 0, 3);
-            cm->last_frame_type = cm->frame_type;
-            cm->last_filter_type = cm->filter_type;
-            cm->last_sharpness_level = cm->sharpness_level;
+            vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
             // Get the err for filtered frame
             filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
@@ -336,10 +319,7 @@
 
     // Get baseline error score
     vp8cx_set_alt_lf_level(cpi, filt_mid);
-    vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid, 0);
-    cm->last_frame_type = cm->frame_type;
-    cm->last_filter_type = cm->filter_type;
-    cm->last_sharpness_level = cm->sharpness_level;
+    vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
 
     best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
     filt_best = filt_mid;
@@ -377,10 +357,7 @@
         {
             // Get Low filter error score
             vp8cx_set_alt_lf_level(cpi, filt_low);
-            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low, 0);
-            cm->last_frame_type = cm->frame_type;
-            cm->last_filter_type = cm->filter_type;
-            cm->last_sharpness_level = cm->sharpness_level;
+            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
 
             filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
 
@@ -417,10 +394,7 @@
         if ((filt_direction >= 0) && (filt_high != filt_mid))
         {
             vp8cx_set_alt_lf_level(cpi, filt_high);
-            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high, 0);
-            cm->last_frame_type = cm->frame_type;
-            cm->last_filter_type = cm->filter_type;
-            cm->last_sharpness_level = cm->sharpness_level;
+            vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
 
             filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
 
--- a/vp8/encoder/sad_c.c
+++ b/vp8/encoder/sad_c.c
@@ -10,6 +10,8 @@
 
 
 #include <stdlib.h>
+#include "vpx_ports/config.h"
+#include "vpx/vpx_integer.h"
 
 unsigned int vp8_sad16x16_c(
     const unsigned char *src_ptr,
@@ -336,4 +338,65 @@
     sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
     sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
     sad_array[3] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+/* Copy a 32-byte-wide area (two macroblocks side by side) of 'height' rows
+ * to a buffer */
+void vp8_copy32xn_c(
+    unsigned char *src_ptr,
+    int  src_stride,
+    unsigned char *dst_ptr,
+    int  dst_stride,
+    int height)
+{
+    int r;
+
+    for (r = 0; r < height; r++)
+    {
+#if !(CONFIG_FAST_UNALIGNED)
+        dst_ptr[0] = src_ptr[0];
+        dst_ptr[1] = src_ptr[1];
+        dst_ptr[2] = src_ptr[2];
+        dst_ptr[3] = src_ptr[3];
+        dst_ptr[4] = src_ptr[4];
+        dst_ptr[5] = src_ptr[5];
+        dst_ptr[6] = src_ptr[6];
+        dst_ptr[7] = src_ptr[7];
+        dst_ptr[8] = src_ptr[8];
+        dst_ptr[9] = src_ptr[9];
+        dst_ptr[10] = src_ptr[10];
+        dst_ptr[11] = src_ptr[11];
+        dst_ptr[12] = src_ptr[12];
+        dst_ptr[13] = src_ptr[13];
+        dst_ptr[14] = src_ptr[14];
+        dst_ptr[15] = src_ptr[15];
+        dst_ptr[16] = src_ptr[16];
+        dst_ptr[17] = src_ptr[17];
+        dst_ptr[18] = src_ptr[18];
+        dst_ptr[19] = src_ptr[19];
+        dst_ptr[20] = src_ptr[20];
+        dst_ptr[21] = src_ptr[21];
+        dst_ptr[22] = src_ptr[22];
+        dst_ptr[23] = src_ptr[23];
+        dst_ptr[24] = src_ptr[24];
+        dst_ptr[25] = src_ptr[25];
+        dst_ptr[26] = src_ptr[26];
+        dst_ptr[27] = src_ptr[27];
+        dst_ptr[28] = src_ptr[28];
+        dst_ptr[29] = src_ptr[29];
+        dst_ptr[30] = src_ptr[30];
+        dst_ptr[31] = src_ptr[31];
+#else
+        ((uint32_t *)dst_ptr)[0] = ((uint32_t *)src_ptr)[0] ;
+        ((uint32_t *)dst_ptr)[1] = ((uint32_t *)src_ptr)[1] ;
+        ((uint32_t *)dst_ptr)[2] = ((uint32_t *)src_ptr)[2] ;
+        ((uint32_t *)dst_ptr)[3] = ((uint32_t *)src_ptr)[3] ;
+        ((uint32_t *)dst_ptr)[4] = ((uint32_t *)src_ptr)[4] ;
+        ((uint32_t *)dst_ptr)[5] = ((uint32_t *)src_ptr)[5] ;
+        ((uint32_t *)dst_ptr)[6] = ((uint32_t *)src_ptr)[6] ;
+        ((uint32_t *)dst_ptr)[7] = ((uint32_t *)src_ptr)[7] ;
+#endif
+        src_ptr += src_stride;
+        dst_ptr += dst_stride;
+
+    }
 }
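
Both branches above copy the same 32 bytes per row; the uint32_t path merely batches them into eight word-sized loads and stores where unaligned access is known to be cheap. A reference version equivalent to either branch (a sketch, not part of the patch):

    #include <string.h>

    /* Reference sketch equivalent to vp8_copy32xn_c; illustration only.     */
    static void copy32xn_ref(const unsigned char *src_ptr, int src_stride,
                             unsigned char *dst_ptr, int dst_stride, int height)
    {
        int r;

        for (r = 0; r < height; r++)
        {
            memcpy(dst_ptr, src_ptr, 32);   /* one 32-byte-wide row */
            src_ptr += src_stride;
            dst_ptr += dst_stride;
        }
    }
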
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -222,6 +222,13 @@
 #endif
 extern prototype_sad_multi_dif_address(vp8_variance_sad4x4x4d);
 
+#if ARCH_X86 || ARCH_X86_64
+#ifndef vp8_variance_copy32xn
+#define vp8_variance_copy32xn vp8_copy32xn_c
+#endif
+extern prototype_sad(vp8_variance_copy32xn);
+#endif
+
 //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 
 #ifndef vp8_variance_var4x4
@@ -381,6 +388,10 @@
     vp8_sad_multi_d_fn_t     sad8x8x4d;
     vp8_sad_multi_d_fn_t     sad4x4x4d;
 
+#if ARCH_X86 || ARCH_X86_64
+    vp8_sad_fn_t             copy32xn;
+#endif
+
 #if CONFIG_INTERNAL_STATS
     vp8_ssimpf_fn_t          ssimpf_8x8;
     vp8_ssimpf_fn_t          ssimpf;
@@ -399,7 +410,9 @@
     vp8_sad_multi_fn_t      sdx3f;
     vp8_sad_multi1_fn_t     sdx8f;
     vp8_sad_multi_d_fn_t    sdx4df;
-
+#if ARCH_X86 || ARCH_X86_64
+    vp8_sad_fn_t            copymem;
+#endif
 } vp8_variance_fn_ptr_t;
 
 #if CONFIG_RUNTIME_CPU_DETECT
--- a/vp8/encoder/x86/sad_sse2.asm
+++ b/vp8/encoder/x86/sad_sse2.asm
@@ -328,3 +328,83 @@
     UNSHADOW_ARGS
     pop         rbp
     ret
+
+;void vp8_copy32xn_sse2(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *dst_ptr,
+;    int  dst_stride,
+;    int height);
+global sym(vp8_copy32xn_sse2)
+sym(vp8_copy32xn_sse2):
+    push        rbp
+    mov         rbp, rsp
+    SHADOW_ARGS_TO_STACK 5
+    SAVE_XMM 7
+    push        rsi
+    push        rdi
+    ; end prolog
+
+        mov             rsi,        arg(0) ;src_ptr
+        mov             rdi,        arg(2) ;dst_ptr
+
+        movsxd          rax,        dword ptr arg(1) ;src_stride
+        movsxd          rdx,        dword ptr arg(3) ;dst_stride
+        movsxd          rcx,        dword ptr arg(4) ;height
+
+block_copy_sse2_loopx4:
+        movdqu          xmm0,       XMMWORD PTR [rsi]
+        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
+        movdqu          xmm2,       XMMWORD PTR [rsi + rax]
+        movdqu          xmm3,       XMMWORD PTR [rsi + rax + 16]
+
+        lea             rsi,        [rsi+rax*2]
+
+        movdqu          xmm4,       XMMWORD PTR [rsi]
+        movdqu          xmm5,       XMMWORD PTR [rsi + 16]
+        movdqu          xmm6,       XMMWORD PTR [rsi + rax]
+        movdqu          xmm7,       XMMWORD PTR [rsi + rax + 16]
+
+        lea             rsi,    [rsi+rax*2]
+
+        movdqa          XMMWORD PTR [rdi], xmm0
+        movdqa          XMMWORD PTR [rdi + 16], xmm1
+        movdqa          XMMWORD PTR [rdi + rdx], xmm2
+        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm3
+
+        lea             rdi,    [rdi+rdx*2]
+
+        movdqa          XMMWORD PTR [rdi], xmm4
+        movdqa          XMMWORD PTR [rdi + 16], xmm5
+        movdqa          XMMWORD PTR [rdi + rdx], xmm6
+        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm7
+
+        lea             rdi,    [rdi+rdx*2]
+
+        sub             rcx,     4
+        cmp             rcx,     4
+        jge             block_copy_sse2_loopx4
+
+        cmp             rcx, 0
+        je              copy_is_done
+
+block_copy_sse2_loop:
+        movdqu          xmm0,       XMMWORD PTR [rsi]
+        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
+        lea             rsi,    [rsi+rax]
+
+        movdqa          XMMWORD PTR [rdi], xmm0
+        movdqa          XMMWORD PTR [rdi + 16], xmm1
+        lea             rdi,    [rdi+rdx]
+
+        sub             rcx,     1
+        jne             block_copy_sse2_loop
+
+copy_is_done:
+    ; begin epilog
+    pop rdi
+    pop rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop         rbp
+    ret
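
For readers more comfortable with intrinsics, a hedged C rendering of the copy above; the shipped code is the assembly, and this sketch skips its 4-row unrolling:

    #include <emmintrin.h>

    /* Sketch of vp8_copy32xn_sse2 in SSE2 intrinsics; illustration only.    */
    /* dst_ptr must be 16-byte aligned to match the movdqa aligned stores.   */
    static void copy32xn_sse2_sketch(const unsigned char *src_ptr, int src_stride,
                                     unsigned char *dst_ptr, int dst_stride,
                                     int height)
    {
        int r;

        for (r = 0; r < height; r++)
        {
            __m128i lo = _mm_loadu_si128((const __m128i *)src_ptr);      /* movdqu */
            __m128i hi = _mm_loadu_si128((const __m128i *)(src_ptr + 16));
            _mm_store_si128((__m128i *)dst_ptr, lo);                     /* movdqa */
            _mm_store_si128((__m128i *)(dst_ptr + 16), hi);
            src_ptr += src_stride;
            dst_ptr += dst_stride;
        }
    }
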
--- a/vp8/encoder/x86/sad_sse3.asm
+++ b/vp8/encoder/x86/sad_sse3.asm
@@ -20,6 +20,7 @@
   %define     ret_var       rbx
   %define     result_ptr    arg(4)
   %define     max_err       arg(4)
+  %define     height        dword ptr arg(4)
     push        rbp
     mov         rbp,        rsp
     push        rsi
@@ -42,6 +43,7 @@
     %define     ret_var     r11
     %define     result_ptr  [rsp+xmm_stack_space+8+4*8]
     %define     max_err     [rsp+xmm_stack_space+8+4*8]
+    %define     height      [rsp+xmm_stack_space+8+4*8]
   %else
     %define     src_ptr     rdi
     %define     src_stride  rsi
@@ -51,6 +53,7 @@
     %define     ret_var     r10
     %define     result_ptr  r8
     %define     max_err     r8
+    %define     height      r8
   %endif
 %endif
 
@@ -65,6 +68,7 @@
   %define     ret_var
   %define     result_ptr
   %define     max_err
+  %define     height
 
 %if ABI_IS_32BIT
     pop         rbx
@@ -632,6 +636,67 @@
 
     STACK_FRAME_DESTROY_X3
 
+;void vp8_copy32xn_sse3(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *dst_ptr,
+;    int  dst_stride,
+;    int height);
+global sym(vp8_copy32xn_sse3)
+sym(vp8_copy32xn_sse3):
+
+    STACK_FRAME_CREATE_X3
+
+block_copy_sse3_loopx4:
+        lea             end_ptr,    [src_ptr+src_stride*2]
+
+        movdqu          xmm0,       XMMWORD PTR [src_ptr]
+        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
+        movdqu          xmm2,       XMMWORD PTR [src_ptr + src_stride]
+        movdqu          xmm3,       XMMWORD PTR [src_ptr + src_stride + 16]
+        movdqu          xmm4,       XMMWORD PTR [end_ptr]
+        movdqu          xmm5,       XMMWORD PTR [end_ptr + 16]
+        movdqu          xmm6,       XMMWORD PTR [end_ptr + src_stride]
+        movdqu          xmm7,       XMMWORD PTR [end_ptr + src_stride + 16]
+
+        lea             src_ptr,    [src_ptr+src_stride*4]
+
+        lea             end_ptr,    [ref_ptr+ref_stride*2]
+
+        movdqa          XMMWORD PTR [ref_ptr], xmm0
+        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
+        movdqa          XMMWORD PTR [ref_ptr + ref_stride], xmm2
+        movdqa          XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3
+        movdqa          XMMWORD PTR [end_ptr], xmm4
+        movdqa          XMMWORD PTR [end_ptr + 16], xmm5
+        movdqa          XMMWORD PTR [end_ptr + ref_stride], xmm6
+        movdqa          XMMWORD PTR [end_ptr + ref_stride + 16], xmm7
+
+        lea             ref_ptr,    [ref_ptr+ref_stride*4]
+
+        sub             height,     4
+        cmp             height,     4
+        jge             block_copy_sse3_loopx4
+
+        ;Check to see if there are more rows that need to be copied.
+        cmp             height, 0
+        je              copy_is_done
+
+block_copy_sse3_loop:
+        movdqu          xmm0,       XMMWORD PTR [src_ptr]
+        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
+        lea             src_ptr,    [src_ptr+src_stride]
+
+        movdqa          XMMWORD PTR [ref_ptr], xmm0
+        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
+        lea             ref_ptr,    [ref_ptr+ref_stride]
+
+        sub             height,     1
+        jne             block_copy_sse3_loop
+
+copy_is_done:
+    STACK_FRAME_DESTROY_X3
+
 ;void vp8_sad16x16x4d_sse3(
 ;    unsigned char *src_ptr,
 ;    int  src_stride,
@@ -892,3 +957,4 @@
 
 
     STACK_FRAME_DESTROY_X4
+
--- a/vp8/encoder/x86/variance_x86.h
+++ b/vp8/encoder/x86/variance_x86.h
@@ -121,6 +121,7 @@
 extern prototype_sad(vp8_sad8x16_wmt);
 extern prototype_sad(vp8_sad16x8_wmt);
 extern prototype_sad(vp8_sad16x16_wmt);
+extern prototype_sad(vp8_copy32xn_sse2);
 extern prototype_variance(vp8_variance4x4_wmt);
 extern prototype_variance(vp8_variance8x8_wmt);
 extern prototype_variance(vp8_variance8x16_wmt);
@@ -156,6 +157,9 @@
 #undef  vp8_variance_sad16x16
 #define vp8_variance_sad16x16 vp8_sad16x16_wmt
 
+#undef  vp8_variance_copy32xn
+#define vp8_variance_copy32xn vp8_copy32xn_sse2
+
 #undef  vp8_variance_var4x4
 #define vp8_variance_var4x4 vp8_variance4x4_wmt
 
@@ -222,6 +226,7 @@
 extern prototype_sad_multi_dif_address(vp8_sad8x16x4d_sse3);
 extern prototype_sad_multi_dif_address(vp8_sad8x8x4d_sse3);
 extern prototype_sad_multi_dif_address(vp8_sad4x4x4d_sse3);
+extern prototype_sad(vp8_copy32xn_sse3);
 
 #if !CONFIG_RUNTIME_CPU_DETECT
 
@@ -257,6 +262,9 @@
 
 #undef  vp8_variance_sad4x4x4d
 #define vp8_variance_sad4x4x4d vp8_sad4x4x4d_sse3
+
+#undef  vp8_variance_copy32xn
+#define vp8_variance_copy32xn vp8_copy32xn_sse3
 
 #endif
 #endif
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -203,6 +203,7 @@
         cpi->rtcd.variance.sad8x16               = vp8_sad8x16_wmt;
         cpi->rtcd.variance.sad8x8                = vp8_sad8x8_wmt;
         cpi->rtcd.variance.sad4x4                = vp8_sad4x4_wmt;
+        cpi->rtcd.variance.copy32xn              = vp8_copy32xn_sse2;
 
         cpi->rtcd.variance.var4x4                = vp8_variance4x4_wmt;
         cpi->rtcd.variance.var8x8                = vp8_variance8x8_wmt;
@@ -263,6 +264,7 @@
         cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_sse3;
         cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_sse3;
         cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_sse3;
+        cpi->rtcd.variance.copy32xn              = vp8_copy32xn_sse3;
         cpi->rtcd.search.diamond_search          = vp8_diamond_search_sadx4;
         cpi->rtcd.search.refining_search         = vp8_refining_search_sadx4;
     }