shithub: libvpx

ref: 27331e1377465aad1d97f9796dca56542ba389af
parent: e05fd0fc36ef7e17be6dc87a7ff2ae72a5fa2473
parent: 1f3f18443db5820157448515579c14efd19eea1b
author: John Koleszar <jkoleszar@google.com>
date: Thu May 19 20:05:16 EDT 2011

Merge remote branch 'internal/upstream' into HEAD

--- a/configure
+++ b/configure
@@ -31,12 +31,13 @@
   ${toggle_md5}                   support for output of checksum data
   ${toggle_static_msvcrt}         use static MSVCRT (VS builds only)
   ${toggle_vp8}                   VP8 codec support
-  ${toggle_psnr}                  output of PSNR data, if supported (encoders)
+  ${toggle_internal_stats}        output of encoder internal stats for debug, if supported (encoders)
   ${toggle_mem_tracker}           track memory usage
   ${toggle_postproc}              postprocessing
   ${toggle_multithread}           multithreaded encoding and decoding.
   ${toggle_spatial_resampling}    spatial sampling (scaling) support
   ${toggle_realtime_only}         enable this option while building for real-time encoding
+  ${toggle_error_concealment}     enable this option to get a decoder which is able to conceal losses
   ${toggle_runtime_cpu_detect}    runtime cpu detection
   ${toggle_shared}                shared library support
   ${toggle_small}                 favor smaller size over speed
@@ -246,7 +247,7 @@
     runtime_cpu_detect
     postproc
     multithread
-    psnr
+    internal_stats
     ${CODECS}
     ${CODEC_FAMILIES}
     encoders
@@ -254,6 +255,7 @@
     static_msvcrt
     spatial_resampling
     realtime_only
+    error_concealment
     shared
     small
     postproc_visualizer
@@ -290,7 +292,7 @@
     dc_recon
     postproc
     multithread
-    psnr
+    internal_stats
     ${CODECS}
     ${CODEC_FAMILIES}
     static_msvcrt
@@ -297,6 +299,7 @@
     mem_tracker
     spatial_resampling
     realtime_only
+    error_concealment
     shared
     small
     postproc_visualizer
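
The configure hunks above rename the psnr toggle to internal_stats and add an
error_concealment toggle. Assuming the stock libvpx convention that every
toggle is exposed as an --enable-/--disable- pair, a decoder build with the
new concealment support would be configured roughly as:

  $ ./configure --enable-error-concealment
  $ make
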
--- a/examples.mk
+++ b/examples.mk
@@ -77,6 +77,11 @@
 endif
 decode_with_drops.GUID           = CE5C53C4-8DDA-438A-86ED-0DDD3CDB8D26
 decode_with_drops.DESCRIPTION    = Drops frames while decoding
+ifeq ($(CONFIG_DECODERS),yes)
+GEN_EXAMPLES-$(CONFIG_ERROR_CONCEALMENT) += decode_with_partial_drops.c
+endif
+decode_with_partial_drops.GUID           = 61C2D026-5754-46AC-916F-1343ECC5537E
+decode_with_partial_drops.DESCRIPTION    = Drops parts of frames while decoding
 GEN_EXAMPLES-$(CONFIG_ENCODERS) += error_resilient.c
 error_resilient.GUID             = DF5837B9-4145-4F92-A031-44E4F832E00C
 error_resilient.DESCRIPTION      = Error Resiliency Feature
--- /dev/null
+++ b/examples/decode_with_partial_drops.txt
@@ -1,0 +1,232 @@
+@TEMPLATE decoder_tmpl.c
+Decode With Partial Drops Example
+=========================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTRODUCTION
+This is an example utility which drops a series of frames (or parts of frames),
+as specified on the command line. This is useful for observing the error
+recovery features of the codec.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTRODUCTION
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EXTRA_INCLUDES
+#include <time.h>
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EXTRA_INCLUDES
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HELPERS
+struct parsed_header
+{
+    char key_frame;
+    int version;
+    char show_frame;
+    int first_part_size;
+};
+
+int next_packet(struct parsed_header* hdr, int pos, int length, int mtu)
+{
+    int size = 0;
+    int remaining = length - pos;
+    /* Uncompressed part is 3 bytes for P frames and 10 bytes for I frames */
+    int uncomp_part_size = (hdr->key_frame ? 10 : 3);
+    /* number of bytes yet to send from header and the first partition */
+    int remainFirst = uncomp_part_size + hdr->first_part_size - pos;
+    if (remainFirst > 0)
+    {
+        if (remainFirst <= mtu)
+        {
+            size = remainFirst;
+        }
+        else
+        {
+            size = mtu;
+        }
+
+        return size;
+    }
+
+    /* second partition; just slot it up according to MTU */
+    if (remaining <= mtu)
+    {
+        size = remaining;
+        return size;
+    }
+    return mtu;
+}
+
+void throw_packets(unsigned char* frame, int* size, int loss_rate,
+                   int* thrown, int* kept)
+{
+    unsigned char loss_frame[256*1024];
+    int pkg_size = 1;
+    int pos = 0;
+    int loss_pos = 0;
+    struct parsed_header hdr;
+    unsigned int tmp;
+    int mtu = 1500;
+
+    if (*size < 3)
+    {
+        return;
+    }
+    putc('|', stdout);
+    /* parse uncompressed 3 bytes */
+    tmp = (frame[2] << 16) | (frame[1] << 8) | frame[0];
+    hdr.key_frame = !(tmp & 0x1); /* inverse logic */
+    hdr.version = (tmp >> 1) & 0x7;
+    hdr.show_frame = (tmp >> 4) & 0x1;
+    hdr.first_part_size = (tmp >> 5) & 0x7FFFF;
+
+    /* don't drop key frames */
+    if (hdr.key_frame)
+    {
+        int i;
+        *kept = *size/mtu + ((*size % mtu > 0) ? 1 : 0); /* approximate */
+        for (i=0; i < *kept; i++)
+            putc('.', stdout);
+        return;
+    }
+
+    while ((pkg_size = next_packet(&hdr, pos, *size, mtu)) > 0)
+    {
+        int loss_event = ((rand() + 1.0)/(RAND_MAX + 1.0) < loss_rate/100.0);
+        if (*thrown == 0 && !loss_event)
+        {
+            memcpy(loss_frame + loss_pos, frame + pos, pkg_size);
+            loss_pos += pkg_size;
+            (*kept)++;
+            putc('.', stdout);
+        }
+        else
+        {
+            (*thrown)++;
+            putc('X', stdout);
+        }
+        pos += pkg_size;
+    }
+    memcpy(frame, loss_frame, loss_pos);
+    memset(frame + loss_pos, 0, *size - loss_pos);
+    *size = loss_pos;
+}
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HELPERS
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INIT
+/* Initialize codec */
+flags = VPX_CODEC_USE_ERROR_CONCEALMENT;
+res = vpx_codec_dec_init(&codec, interface, NULL, flags);
+if(res)
+    die_codec(&codec, "Failed to initialize decoder");
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INIT
+
+Usage
+-----
+This example adds a single argument to the `simple_decoder` example,
+which specifies the range or pattern of frames to drop. The parameter is
+parsed as follows:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ USAGE
+if(argc!=4 && argc != 5)
+    die("Usage: %s <infile> <outfile> <N-M|N/M|L,S>\n", argv[0]);
+{
+    char *nptr;
+    n = strtol(argv[3], &nptr, 0);
+    mode = (*nptr == '\0' || *nptr == ',') ? 2 : (*nptr == '-') ? 1 : 0;
+
+    m = strtol(nptr+1, NULL, 0);
+    if((!n && !m) || (*nptr != '-' && *nptr != '/' &&
+        *nptr != '\0' && *nptr != ','))
+        die("Couldn't parse pattern %s\n", argv[3]);
+}
+seed = (m > 0) ? m : (unsigned int)time(NULL);
+srand(seed);thrown_frame = 0;
+printf("Seed: %u\n", seed);
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ USAGE
+
+
+Dropping A Range Of Frames
+--------------------------
+To drop a range of frames, specify the starting frame and the ending
+frame to drop, separated by a dash. The following command will drop
+frames 5 through 10 (base 1).
+
+  $ ./decode_with_partial_drops in.ivf out.i420 5-10
+
+
+Dropping A Pattern Of Frames
+----------------------------
+To drop a pattern of frames, specify the number of frames to drop and
+the number of frames after which to repeat the pattern, separated by
+a forward-slash. The following command will drop 3 of 7 frames.
+Specifically, it will decode 4 frames, then drop 3 frames, and then
+repeat.
+
+  $ ./decode_with_partial_drops in.ivf out.i420 3/7
+
+Dropping Random Parts Of Frames
+-------------------------------
+A third argument form is available to split the frame into 1500-byte pieces
+and randomly drop pieces rather than whole frames. The frame will be split at
+partition boundaries where possible. The following example will seed the RNG
+with the seed 123 and drop approximately 5% of the pieces. Pieces which
+depend on an already dropped piece will also be dropped.
+
+  $ ./decode_with_partial_drops in.ivf out.i420 5,123
+
+
+Extra Variables
+---------------
+This example maintains the pattern passed on the command line in the
+`n`, `m`, and `mode` variables:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EXTRA_VARS
+int              n, m, mode;
+unsigned int     seed;
+int              thrown=0, kept=0;
+int              thrown_frame=0, kept_frame=0;
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ EXTRA_VARS
+
+
+Making The Drop Decision
+------------------------
+The example decides whether to drop the frame based on the current
+frame number, immediately before decoding the frame.
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PRE_DECODE
+/* Decide whether to throw parts of the frame or the whole frame
+   depending on the drop mode */
+thrown_frame = 0;
+kept_frame = 0;
+switch (mode)
+{
+case 0:
+    if (m - (frame_cnt-1)%m <= n)
+    {
+        frame_sz = 0;
+    }
+    break;
+case 1:
+    if (frame_cnt >= n && frame_cnt <= m)
+    {
+        frame_sz = 0;
+    }
+    break;
+case 2:
+    throw_packets(frame, &frame_sz, n, &thrown_frame, &kept_frame);
+    break;
+default: break;
+}
+if (mode < 2)
+{
+    if (frame_sz == 0)
+    {
+        putc('X', stdout);
+        thrown_frame++;
+    }
+    else
+    {
+        putc('.', stdout);
+        kept_frame++;
+    }
+}
+thrown += thrown_frame;
+kept += kept_frame;
+fflush(stdout);
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PRE_DECODE
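
The DEC_INIT block above requests concealment unconditionally, so the example
only initializes against a decoder that was built with the new toggle. A
minimal sketch of a more defensive initialization, assuming the companion
VPX_CODEC_CAP_ERROR_CONCEALMENT capability bit is exported by the public
decoder header:

  #include "vpx/vpx_decoder.h"
  #include "vpx/vp8dx.h"

  /* Sketch: request error concealment only when the interface reports the
   * capability; otherwise vpx_codec_dec_init() would fail with
   * VPX_CODEC_INCAPABLE on builds configured without the feature.
   * (VPX_CODEC_CAP_ERROR_CONCEALMENT is assumed to exist alongside
   * VPX_CODEC_USE_ERROR_CONCEALMENT.) */
  static int init_concealing_decoder(vpx_codec_ctx_t *codec)
  {
      vpx_codec_iface_t *iface = vpx_codec_vp8_dx();
      vpx_codec_flags_t  flags = 0;

      if (vpx_codec_get_caps(iface) & VPX_CODEC_CAP_ERROR_CONCEALMENT)
          flags |= VPX_CODEC_USE_ERROR_CONCEALMENT;

      return vpx_codec_dec_init(codec, iface, NULL, flags) ? -1 : 0;
  }
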
--- a/examples/decoder_tmpl.c
+++ b/examples/decoder_tmpl.c
@@ -42,6 +42,8 @@
 
 @DIE_CODEC
 
+@HELPERS
+
 int main(int argc, char **argv) {
     FILE            *infile, *outfile;
     vpx_codec_ctx_t  codec;
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -27,6 +27,9 @@
 
     for (i = 0; i < rows; i++)
     {
+        /* TODO(holmer): Bug? This updates the last element of each row
+         * rather than the border element!
+         */
         vpx_memset(&mi[i*cols-1], 0, sizeof(MODE_INFO));
     }
 }
@@ -43,9 +46,11 @@
 
     vpx_free(oci->above_context);
     vpx_free(oci->mip);
+    vpx_free(oci->prev_mip);
 
     oci->above_context = 0;
     oci->mip = 0;
+    oci->prev_mip = 0;
 
 }
 
@@ -110,7 +115,22 @@
 
     oci->mi = oci->mip + oci->mode_info_stride + 1;
 
+    /* allocate memory for last frame MODE_INFO array */
+#if CONFIG_ERROR_CONCEALMENT
+    oci->prev_mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
 
+    if (!oci->prev_mip)
+    {
+        vp8_de_alloc_frame_buffers(oci);
+        return 1;
+    }
+
+    oci->prev_mi = oci->prev_mip + oci->mode_info_stride + 1;
+#else
+    oci->prev_mip = NULL;
+    oci->prev_mi = NULL;
+#endif
+
     oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
 
     if (!oci->above_context)
@@ -120,6 +140,9 @@
     }
 
     update_mode_info_border(oci->mi, oci->mb_rows, oci->mb_cols);
+#if CONFIG_ERROR_CONCEALMENT
+    update_mode_info_border(oci->prev_mi, oci->mb_rows, oci->mb_cols);
+#endif
 
     return 0;
 }
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -113,7 +113,7 @@
     rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_c;
     rtcd->loopfilter.simple_b_h  = vp8_loop_filter_bhs_c;
 
-#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_PSNR)
+#if CONFIG_POSTPROC || (CONFIG_VP8_ENCODER && CONFIG_INTERNAL_STATS)
     rtcd->postproc.down             = vp8_mbpost_proc_down_c;
     rtcd->postproc.across           = vp8_mbpost_proc_across_ip_c;
     rtcd->postproc.downacross       = vp8_post_proc_down_and_across_c;
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -140,6 +140,8 @@
 
     MODE_INFO *mip; /* Base of allocated array */
     MODE_INFO *mi;  /* Corresponds to upper left visible macroblock */
+    MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+    MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */
 
 
     INTERPOLATIONFILTERTYPE mcomp_filter_type;
--- a/vp8/common/onyxd.h
+++ b/vp8/common/onyxd.h
@@ -31,6 +31,7 @@
         int     Version;
         int     postprocess;
         int     max_threads;
+        int     error_concealment;
     } VP8D_CONFIG;
     typedef enum
     {
--- a/vp8/common/x86/recon_sse2.asm
+++ b/vp8/common/x86/recon_sse2.asm
@@ -584,23 +584,35 @@
 ;    unsigned char *src,
 ;    int src_stride,
 ;    )
-global sym(vp8_intra_pred_uv_ho_mmx2)
-sym(vp8_intra_pred_uv_ho_mmx2):
+%macro vp8_intra_pred_uv_ho 1
+global sym(vp8_intra_pred_uv_ho_%1)
+sym(vp8_intra_pred_uv_ho_%1):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 4
     push        rsi
     push        rdi
+%ifidn %1, ssse3
+    push        rbx
+%endif
     ; end prolog
 
     ; read from left and write out
+%ifidn %1, mmx2
     mov         edx,        4
+%endif
     mov         rsi,        arg(2) ;src;
     movsxd      rax,        dword ptr arg(3) ;src_stride;
     mov         rdi,        arg(0) ;dst;
     movsxd      rcx,        dword ptr arg(1) ;dst_stride
+%ifidn %1, ssse3
+    lea         rbx,        [rax*3]
+    lea         rdx,        [rcx*3]
+    movdqa      xmm2,       [GLOBAL(dc_00001111)]
+%endif
     dec         rsi
-vp8_intra_pred_uv_ho_mmx2_loop:
+%ifidn %1, mmx2
+vp8_intra_pred_uv_ho_%1_loop:
     movd        mm0,        [rsi]
     movd        mm1,        [rsi+rax]
     punpcklbw   mm0,        mm0
@@ -612,15 +624,50 @@
     lea         rsi,        [rsi+rax*2]
     lea         rdi,        [rdi+rcx*2]
     dec         edx
-    jnz vp8_intra_pred_uv_ho_mmx2_loop
+    jnz vp8_intra_pred_uv_ho_%1_loop
+%else
+    movd        xmm0,       [rsi]
+    movd        xmm3,       [rsi+rax]
+    movd        xmm1,       [rsi+rax*2]
+    movd        xmm4,       [rsi+rbx]
+    punpcklbw   xmm0,       xmm3
+    punpcklbw   xmm1,       xmm4
+    pshufb      xmm0,       xmm2
+    pshufb      xmm1,       xmm2
+    movq   [rdi    ],       xmm0
+    movhps [rdi+rcx],       xmm0
+    movq [rdi+rcx*2],       xmm1
+    movhps [rdi+rdx],       xmm1
+    lea         rsi,        [rsi+rax*4]
+    lea         rdi,        [rdi+rcx*4]
+    movd        xmm0,       [rsi]
+    movd        xmm3,       [rsi+rax]
+    movd        xmm1,       [rsi+rax*2]
+    movd        xmm4,       [rsi+rbx]
+    punpcklbw   xmm0,       xmm3
+    punpcklbw   xmm1,       xmm4
+    pshufb      xmm0,       xmm2
+    pshufb      xmm1,       xmm2
+    movq   [rdi    ],       xmm0
+    movhps [rdi+rcx],       xmm0
+    movq [rdi+rcx*2],       xmm1
+    movhps [rdi+rdx],       xmm1
+%endif
 
     ; begin epilog
+%ifidn %1, ssse3
+    pop         rbx
+%endif
     pop         rdi
     pop         rsi
     UNSHADOW_ARGS
     pop         rbp
     ret
+%endmacro
 
+vp8_intra_pred_uv_ho mmx2
+vp8_intra_pred_uv_ho ssse3
+
 SECTION_RODATA
 dc_128:
     times 8 db 128
@@ -629,3 +676,7 @@
 align 16
 dc_1024:
     times 8 dw 0x400
+align 16
+dc_00001111:
+    times 8 db 0
+    times 8 db 1
--- a/vp8/common/x86/recon_wrapper_sse2.c
+++ b/vp8/common/x86/recon_wrapper_sse2.c
@@ -23,6 +23,7 @@
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dcleft_mmx2);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_dc128_mmx);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ho_ssse3);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ve_mmx);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_sse2);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_ssse3);
@@ -31,7 +32,8 @@
                                                 unsigned char *dst_u,
                                                 unsigned char *dst_v,
                                                 int dst_stride,
-                                                build_intra_predictors_mbuv_fn_t tm_func)
+                                                build_intra_predictors_mbuv_fn_t tm_func,
+                                                build_intra_predictors_mbuv_fn_t ho_func)
 {
     int mode = x->mode_info_context->mbmi.uv_mode;
     build_intra_predictors_mbuv_fn_t fn;
@@ -39,7 +41,7 @@
 
     switch (mode) {
         case  V_PRED: fn = vp8_intra_pred_uv_ve_mmx; break;
-        case  H_PRED: fn = vp8_intra_pred_uv_ho_mmx2; break;
+        case  H_PRED: fn = ho_func; break;
         case TM_PRED: fn = tm_func; break;
         case DC_PRED:
             if (x->up_available) {
@@ -65,7 +67,8 @@
 {
     vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
                                         &x->predictor[320], 8,
-                                        vp8_intra_pred_uv_tm_sse2);
+                                        vp8_intra_pred_uv_tm_sse2,
+                                        vp8_intra_pred_uv_ho_mmx2);
 }
 
 void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *x)
@@ -72,7 +75,8 @@
 {
     vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
                                         &x->predictor[320], 8,
-                                        vp8_intra_pred_uv_tm_ssse3);
+                                        vp8_intra_pred_uv_tm_ssse3,
+                                        vp8_intra_pred_uv_ho_ssse3);
 }
 
 void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *x)
@@ -79,7 +83,8 @@
 {
     vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
                                         x->dst.v_buffer, x->dst.uv_stride,
-                                        vp8_intra_pred_uv_tm_sse2);
+                                        vp8_intra_pred_uv_tm_sse2,
+                                        vp8_intra_pred_uv_ho_mmx2);
 }
 
 void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *x)
@@ -86,5 +91,6 @@
 {
     vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
                                         x->dst.v_buffer, x->dst.uv_stride,
-                                        vp8_intra_pred_uv_tm_ssse3);
+                                        vp8_intra_pred_uv_tm_ssse3,
+                                        vp8_intra_pred_uv_ho_ssse3);
 }
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -246,6 +246,12 @@
     MACROBLOCKD *const xd  = & pbi->mb;
 #endif
 
+#if CONFIG_ERROR_CONCEALMENT
+    /* Default is that no macroblock is corrupt, therefore we initialize
+     * mvs_corrupt_from_mb to something very big, which we can be sure is
+     * outside the frame. */
+    pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
     pbi->prob_skip_false = 0;
     if (pbi->common.mb_no_coeff_skip)
         pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
@@ -285,6 +291,7 @@
     }
 }
 
+
 static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                             int mb_row, int mb_col)
 {
@@ -445,7 +452,7 @@
 
                     do {
                         mi->bmi[ *fill_offset] = bmi;
-                      fill_offset++;
+                        fill_offset++;
 
                     }while (--fill_count);
                 }
@@ -566,11 +573,25 @@
 
         while (++mb_col < pbi->common.mb_cols)
         {
+            int mb_num = mb_row * pbi->common.mb_cols + mb_col;
             /*read_mb_modes_mv(pbi, xd->mode_info_context, &xd->mode_info_context->mbmi, mb_row, mb_col);*/
             if(pbi->common.frame_type == KEY_FRAME)
                 vp8_kfread_modes(pbi, mi, mb_row, mb_col);
             else
                 read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col);
+
+#if CONFIG_ERROR_CONCEALMENT
+            /* look for corruption. set mvs_corrupt_from_mb to the current
+             * mb_num if the frame is corrupt from this macroblock. */
+            if (vp8dx_bool_error(&pbi->bc) && mb_num < pbi->mvs_corrupt_from_mb)
+            {
+                pbi->mvs_corrupt_from_mb = mb_num;
+                /* no need to continue since the partition is corrupt from
+                 * here on.
+                 */
+                return;
+            }
+#endif
 
             mi++;       /* next macroblock */
         }
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -27,6 +27,9 @@
 
 #include "decodemv.h"
 #include "vp8/common/extend.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
 #include "vpx_mem/vpx_mem.h"
 #include "vp8/common/idct.h"
 #include "dequantize.h"
@@ -176,7 +179,8 @@
 
 }
 
-static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
+static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                              unsigned int mb_idx)
 {
     int eobtotal = 0;
     int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs;
@@ -233,6 +237,19 @@
         vp8_build_inter_predictors_mb(xd);
     }
 
+#if CONFIG_ERROR_CONCEALMENT
+    if (pbi->ec_enabled &&
+        (mb_idx > pbi->mvs_corrupt_from_mb ||
+        vp8dx_bool_error(xd->current_bc)))
+    {
+        /* MB with corrupt residuals or corrupt mode/motion vectors.
+         * Better to use the predictor as reconstruction.
+         */
+        vp8_conceal_corrupt_mb(xd);
+        return;
+    }
+#endif
+
     /* dequantization and idct */
     if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
     {
@@ -355,7 +372,33 @@
 
     for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
     {
+        /* Distance of Mb to the various image edges.
+         * These are specified to 8th pel as they are always compared to values
+         * that are in 1/8th pel units
+         */
+        xd->mb_to_left_edge = -((mb_col * 16) << 3);
+        xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
 
+#if CONFIG_ERROR_CONCEALMENT
+        if (pbi->ec_enabled &&
+            xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
+            vp8dx_bool_error(xd->current_bc))
+        {
+            /* We have an intra block with corrupt coefficients, better to
+             * conceal with an inter block. Interpolate MVs from neighboring MBs
+             *
+             * Note that for the first mb with corrupt residual in a frame,
+             * we might not discover that before decoding the residual. That
+             * happens after this check, and therefore no inter concealment will
+             * be done.
+             */
+            vp8_interpolate_motion(xd,
+                                   mb_row, mb_col,
+                                   pc->mb_rows, pc->mb_cols,
+                                   pc->mode_info_stride);
+        }
+#endif
+
         if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
         {
             for (i = 0; i < 16; i++)
@@ -365,12 +408,6 @@
             }
         }
 
-        /* Distance of Mb to the various image edges.
-         * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units
-         */
-        xd->mb_to_left_edge = -((mb_col * 16) << 3);
-        xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
-
         xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
         xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
         xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
@@ -403,7 +440,7 @@
         else
         pbi->debugoutput =0;
         */
-        decode_macroblock(pbi, xd);
+        decode_macroblock(pbi, xd, mb_row * pc->mb_cols  + mb_col);
 
         /* check if the boolean decoder has suffered an error */
         xd->corrupted |= vp8dx_bool_error(xd->current_bc);
@@ -477,8 +514,8 @@
             partition_size = user_data_end - partition;
         }
 
-        if (partition + partition_size > user_data_end
-            || partition + partition_size < partition)
+        if (!pbi->ec_enabled && (partition + partition_size > user_data_end
+            || partition + partition_size < partition))
             vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
                                "Truncated packet or corrupt partition "
                                "%d length", i + 1);
@@ -592,63 +629,105 @@
     pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
 
     if (data_end - data < 3)
-        vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                           "Truncated packet");
-    pc->frame_type = (FRAME_TYPE)(data[0] & 1);
-    pc->version = (data[0] >> 1) & 7;
-    pc->show_frame = (data[0] >> 4) & 1;
-    first_partition_length_in_bytes =
-        (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
-    data += 3;
-
-    if (data + first_partition_length_in_bytes > data_end
-        || data + first_partition_length_in_bytes < data)
-        vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                           "Truncated packet or corrupt partition 0 length");
-    vp8_setup_version(pc);
-
-    if (pc->frame_type == KEY_FRAME)
     {
-        const int Width = pc->Width;
-        const int Height = pc->Height;
+        if (pbi->ec_enabled)
+        {
+            /* Declare the missing frame as an inter frame since it will
+               be handled as an inter frame when we have estimated its
+               motion vectors. */
+            pc->frame_type = INTER_FRAME;
+            pc->version = 0;
+            pc->show_frame = 1;
+            first_partition_length_in_bytes = 0;
+        }
+        else
+        {
+            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+                               "Truncated packet");
+        }
+    }
+    else
+    {
+        pc->frame_type = (FRAME_TYPE)(data[0] & 1);
+        pc->version = (data[0] >> 1) & 7;
+        pc->show_frame = (data[0] >> 4) & 1;
+        first_partition_length_in_bytes =
+            (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
+        data += 3;
 
-        /* vet via sync code */
-        if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
-            vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
-                               "Invalid frame sync code");
+        if (!pbi->ec_enabled && (data + first_partition_length_in_bytes > data_end
+            || data + first_partition_length_in_bytes < data))
+            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+                               "Truncated packet or corrupt partition 0 length");
+        vp8_setup_version(pc);
 
-        pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
-        pc->horiz_scale = data[4] >> 6;
-        pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
-        pc->vert_scale = data[6] >> 6;
-        data += 7;
-
-        if (Width != pc->Width  ||  Height != pc->Height)
+        if (pc->frame_type == KEY_FRAME)
         {
-            int prev_mb_rows = pc->mb_rows;
+            const int Width = pc->Width;
+            const int Height = pc->Height;
 
-            if (pc->Width <= 0)
+            /* vet via sync code */
+            /* When error concealment is enabled we should only check the sync
+             * code if we have enough bits available
+             */
+            if (!pbi->ec_enabled || data + 3 < data_end)
             {
-                pc->Width = Width;
-                vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                                   "Invalid frame width");
+                if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
+                    vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
+                                   "Invalid frame sync code");
             }
 
-            if (pc->Height <= 0)
+            /* If error concealment is enabled we should only parse the new size
+             * if we have enough data. Otherwise we will end up with the wrong
+             * size.
+             */
+            if (!pbi->ec_enabled || data + 6 < data_end)
             {
-                pc->Height = Height;
-                vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                                   "Invalid frame height");
+                pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
+                pc->horiz_scale = data[4] >> 6;
+                pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
+                pc->vert_scale = data[6] >> 6;
             }
+            data += 7;
 
-            if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
-                vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
-                                   "Failed to allocate frame buffers");
+            if (Width != pc->Width  ||  Height != pc->Height)
+            {
+                int prev_mb_rows = pc->mb_rows;
 
+                if (pc->Width <= 0)
+                {
+                    pc->Width = Width;
+                    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+                                       "Invalid frame width");
+                }
+
+                if (pc->Height <= 0)
+                {
+                    pc->Height = Height;
+                    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+                                       "Invalid frame height");
+                }
+
+                if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
+                    vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+                                       "Failed to allocate frame buffers");
+
+#if CONFIG_ERROR_CONCEALMENT
+                pbi->overlaps = NULL;
+                if (pbi->ec_enabled)
+                {
+                    if (vp8_alloc_overlap_lists(pbi))
+                        vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+                                           "Failed to allocate overlap lists "
+                                           "for error concealment");
+                }
+#endif
+
 #if CONFIG_MULTITHREAD
-            if (pbi->b_multithreaded_rd)
-                vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
+                if (pbi->b_multithreaded_rd)
+                    vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
 #endif
+            }
         }
     }
 
@@ -793,7 +872,20 @@
     {
         /* Should the GF or ARF be updated from the current frame */
         pc->refresh_golden_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+        /* Assume we shouldn't refresh golden if the bit is missing */
+        xd->corrupted |= vp8dx_bool_error(bc);
+        if (pbi->ec_enabled && xd->corrupted)
+            pc->refresh_golden_frame = 0;
+#endif
+
         pc->refresh_alt_ref_frame = vp8_read_bit(bc);
+#if CONFIG_ERROR_CONCEALMENT
+        /* Assume we shouldn't refresh altref if the bit is missing */
+        xd->corrupted |= vp8dx_bool_error(bc);
+        if (pbi->ec_enabled && xd->corrupted)
+            pc->refresh_alt_ref_frame = 0;
+#endif
 
         /* Buffer to buffer copy flags. */
         pc->copy_buffer_to_gf = 0;
@@ -818,6 +910,13 @@
 
     pc->refresh_last_frame = pc->frame_type == KEY_FRAME  ||  vp8_read_bit(bc);
 
+#if CONFIG_ERROR_CONCEALMENT
+    /* Assume we should refresh the last frame if the bit is missing */
+    xd->corrupted |= vp8dx_bool_error(bc);
+    if (pbi->ec_enabled && xd->corrupted)
+        pc->refresh_last_frame = 1;
+#endif
+
     if (0)
     {
         FILE *z = fopen("decodestats.stt", "a");
@@ -876,6 +975,16 @@
 
 
     vp8_decode_mode_mvs(pbi);
+
+#if CONFIG_ERROR_CONCEALMENT
+    if (pbi->ec_enabled &&
+            pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows)
+    {
+        /* Motion vectors are missing in this frame. We will try to estimate
+         * them and then continue decoding the frame as usual */
+        vp8_estimate_missing_mvs(pbi);
+    }
+#endif
 
     vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
 
--- /dev/null
+++ b/vp8/decoder/ec_types.h
@@ -1,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP8_DEC_EC_TYPES_H
+#define VP8_DEC_EC_TYPES_H
+
+#define MAX_OVERLAPS 16
+
+/* The area (pixel area in Q6) the block pointed to by bmi overlaps
+ * another block with.
+ */
+typedef struct
+{
+    int overlap;
+    B_MODE_INFO *bmi;
+} OVERLAP_NODE;
+
+/* Structure to keep track of overlapping blocks on a block level. */
+typedef struct
+{
+    /* TODO(holmer): This array should be exchanged for a linked list */
+    OVERLAP_NODE overlaps[MAX_OVERLAPS];
+} B_OVERLAP;
+
+/* Structure used to hold all the overlaps of a macroblock. The overlaps of a
+ * macroblock are further divided into block overlaps.
+ */
+typedef struct
+{
+    B_OVERLAP overlaps[16];
+} MB_OVERLAP;
+
+/* Structure for keeping track of motion vectors and which reference frame they
+ * refer to. Used for motion vector interpolation.
+ */
+typedef struct
+{
+    MV mv;
+    MV_REFERENCE_FRAME ref_frame;
+} EC_BLOCK;
+
+#endif /* VP8_DEC_EC_TYPES_H */
--- /dev/null
+++ b/vp8/decoder/error_concealment.c
@@ -1,0 +1,613 @@
+/*
+ *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "error_concealment.h"
+#include "onyxd_int.h"
+#include "decodemv.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/recon.h"
+#include "vp8/common/findnearmv.h"
+
+#include <assert.h>
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+#define MAX(x,y) (((x)>(y))?(x):(y))
+
+#define FLOOR(x,q) ((x) & -(1 << (q)))
+
+#define NUM_NEIGHBORS 20
+
+typedef struct ec_position
+{
+    int row;
+    int col;
+} EC_POS;
+
+/*
+ * Regenerate the table in Matlab with:
+ * x = meshgrid((1:4), (1:4));
+ * y = meshgrid((1:4), (1:4))';
+ * W = round((1./(sqrt(x.^2 + y.^2))*2^7));
+ * W(1,1) = 0;
+ */
+static const int weights_q7[5][5] = {
+       {  0,   128,    64,    43,    32 },
+       {128,    91,    57,    40,    31 },
+       { 64,    57,    45,    36,    29 },
+       { 43,    40,    36,    30,    26 },
+       { 32,    31,    29,    26,    23 }
+};
+
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+    if (pbi->overlaps != NULL)
+    {
+        vpx_free(pbi->overlaps);
+        pbi->overlaps = NULL;
+    }
+    pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols,
+                               sizeof(MB_OVERLAP));
+    if (pbi->overlaps == NULL)
+        return -1;
+    vpx_memset(pbi->overlaps, 0,
+               sizeof(MB_OVERLAP) * pbi->common.mb_rows * pbi->common.mb_cols);
+    return 0;
+}
+
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi)
+{
+    vpx_free(pbi->overlaps);
+    pbi->overlaps = NULL;
+}
+
+/* Inserts a new overlap area value to the list of overlaps of a block */
+static void assign_overlap(OVERLAP_NODE* overlaps,
+                           B_MODE_INFO *bmi,
+                           int overlap)
+{
+    int i;
+    if (overlap <= 0)
+        return;
+    /* Find and assign to the next empty overlap node in the list of overlaps.
+     * Empty is defined as bmi == NULL */
+    for (i = 0; i < MAX_OVERLAPS; i++)
+    {
+        if (overlaps[i].bmi == NULL)
+        {
+            overlaps[i].bmi = bmi;
+            overlaps[i].overlap = overlap;
+            break;
+        }
+    }
+}
+
+/* Calculates the overlap area between two 4x4 squares, where the first
+ * square has its upper-left corner at (b1_row, b1_col) and the second
+ * square has its upper-left corner at (b2_row, b2_col). Doesn't
+ * properly handle squares which do not overlap.
+ */
+static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col)
+{
+    const int int_top = MAX(b1_row, b2_row); // top
+    const int int_left = MAX(b1_col, b2_col); // left
+    /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge
+     * gives us the right/bottom edge.
+     */
+    const int int_right = MIN(b1_col + (4<<3), b2_col + (4<<3)); // right
+    const int int_bottom = MIN(b1_row + (4<<3), b2_row + (4<<3)); // bottom
+    return (int_bottom - int_top) * (int_right - int_left);
+}
+
+/* Calculates the overlap area for all blocks in a macroblock at position
+ * (mb_row, mb_col) in macroblocks, which are being overlapped by a given
+ * overlapping block at position (new_row, new_col) (in pixels, Q3). The
+ * first block being overlapped in the macroblock has position (first_blk_row,
+ * first_blk_col) in blocks relative the upper-left corner of the image.
+ */
+static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, B_MODE_INFO *bmi,
+                                  int new_row, int new_col,
+                                  int mb_row, int mb_col,
+                                  int first_blk_row, int first_blk_col)
+{
+    /* Find the blocks within this MB (defined by mb_row, mb_col) which are
+     * overlapped by bmi and calculate and assign overlap for each of those
+     * blocks. */
+
+    /* Block coordinates relative the upper-left block */
+    const int rel_ol_blk_row = first_blk_row - mb_row * 4;
+    const int rel_ol_blk_col = first_blk_col - mb_col * 4;
+    /* If the block partly overlaps any previous MB, these coordinates
+     * can be < 0. We don't want to access blocks in previous MBs.
+     */
+    const int blk_idx = MAX(rel_ol_blk_row,0) * 4 + MAX(rel_ol_blk_col,0);
+    /* Upper left overlapping block */
+    B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]);
+
+    /* Calculate and assign overlaps for all blocks in this MB
+     * which the motion compensated block overlaps
+     */
+    /* Avoid calculating overlaps for blocks in later MBs */
+    int end_row = MIN(4 + mb_row * 4 - first_blk_row, 2);
+    int end_col = MIN(4 + mb_col * 4 - first_blk_col, 2);
+    int row, col;
+
+    /* Check if new_row and new_col are evenly divisible by 4 (Q3),
+     * and if so we shouldn't check neighboring blocks
+     */
+    if (new_row >= 0 && (new_row & 0x1F) == 0)
+        end_row = 1;
+    if (new_col >= 0 && (new_col & 0x1F) == 0)
+        end_col = 1;
+
+    /* Check if the overlapping block partly overlaps a previous MB
+     * and if so, we're overlapping fewer blocks in this MB.
+     */
+    if (new_row < (mb_row*16)<<3)
+        end_row = 1;
+    if (new_col < (mb_col*16)<<3)
+        end_col = 1;
+
+    for (row = 0; row < end_row; ++row)
+    {
+        for (col = 0; col < end_col; ++col)
+        {
+            /* input in Q3, result in Q6 */
+            const int overlap = block_overlap(new_row, new_col,
+                                                  (((first_blk_row + row) *
+                                                      4) << 3),
+                                                  (((first_blk_col + col) *
+                                                      4) << 3));
+            assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap);
+        }
+    }
+}
+
+void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul,
+                            int mb_rows, int mb_cols,
+                            B_MODE_INFO *bmi,
+                            int b_row, int b_col)
+{
+    MB_OVERLAP *mb_overlap;
+    int row, col, rel_row, rel_col;
+    int new_row, new_col;
+    int end_row, end_col;
+    int overlap_b_row, overlap_b_col;
+    int overlap_mb_row, overlap_mb_col;
+
+    /* mb subpixel position */
+    row = (4 * b_row) << 3; /* Q3 */
+    col = (4 * b_col) << 3; /* Q3 */
+
+    /* reverse compensate for motion */
+    new_row = row - bmi->mv.as_mv.row;
+    new_col = col - bmi->mv.as_mv.col;
+
+    if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3))
+    {
+        /* the new block ended up outside the frame */
+        return;
+    }
+
+    if (new_row <= (-4 << 3) || new_col <= (-4 << 3))
+    {
+        /* outside the frame */
+        return;
+    }
+    /* overlapping block's position in blocks */
+    overlap_b_row = FLOOR(new_row / 4, 3) >> 3;
+    overlap_b_col = FLOOR(new_col / 4, 3) >> 3;
+
+    /* overlapping block's MB position in MBs
+     * operations are done in Q3
+     */
+    overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3;
+    overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3;
+
+    end_row = MIN(mb_rows - overlap_mb_row, 2);
+    end_col = MIN(mb_cols - overlap_mb_col, 2);
+
+    /* Don't calculate overlap for MBs we don't overlap */
+    /* Check if the new block row starts at the last block row of the MB */
+    if (abs(new_row - ((16*overlap_mb_row) << 3)) < ((3*4) << 3))
+        end_row = 1;
+    /* Check if the new block col starts at the last block col of the MB */
+    if (abs(new_col - ((16*overlap_mb_col) << 3)) < ((3*4) << 3))
+        end_col = 1;
+
+    /* find the MB(s) this block is overlapping */
+    for (rel_row = 0; rel_row < end_row; ++rel_row)
+    {
+        for (rel_col = 0; rel_col < end_col; ++rel_col)
+        {
+            if (overlap_mb_row + rel_row < 0 ||
+                overlap_mb_col + rel_col < 0)
+                continue;
+            mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols +
+                 overlap_mb_col + rel_col;
+
+            calculate_overlaps_mb(mb_overlap->overlaps, bmi,
+                                  new_row, new_col,
+                                  overlap_mb_row + rel_row,
+                                  overlap_mb_col + rel_col,
+                                  overlap_b_row + rel_row,
+                                  overlap_b_col + rel_col);
+        }
+    }
+}
+
+/* Estimates a motion vector given the overlapping blocks' motion vectors.
+ * Filters out all overlapping blocks which do not refer to the correct
+ * reference frame type.
+ */
+static void estimate_mv(const OVERLAP_NODE *overlaps, B_MODE_INFO *bmi)
+{
+    int i;
+    int overlap_sum = 0;
+    int row_acc = 0;
+    int col_acc = 0;
+
+    bmi->mv.as_int = 0;
+    for (i=0; i < MAX_OVERLAPS; ++i)
+    {
+        if (overlaps[i].bmi == NULL)
+            break;
+        col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col;
+        row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;
+        overlap_sum += overlaps[i].overlap;
+    }
+    if (overlap_sum > 0)
+    {
+        /* Q9 / Q6 = Q3 */
+        bmi->mv.as_mv.col = col_acc / overlap_sum;
+        bmi->mv.as_mv.row = row_acc / overlap_sum;
+        bmi->mode = NEW4X4;
+    }
+    else
+    {
+        bmi->mv.as_mv.col = 0;
+        bmi->mv.as_mv.row = 0;
+        bmi->mode = NEW4X4;
+    }
+}
+
+/* Estimates all motion vectors for a macroblock given the lists of
+ * overlaps for each block. Decides whether or not the MVs must be clamped.
+ */
+static void estimate_mb_mvs(const B_OVERLAP *block_overlaps,
+                            MODE_INFO *mi,
+                            int mb_to_left_edge,
+                            int mb_to_right_edge,
+                            int mb_to_top_edge,
+                            int mb_to_bottom_edge)
+{
+    int i;
+    int non_zero_count = 0;
+    MV * const filtered_mv = &(mi->mbmi.mv.as_mv);
+    B_MODE_INFO * const bmi = mi->bmi;
+    filtered_mv->col = 0;
+    filtered_mv->row = 0;
+    for (i = 0; i < 16; ++i)
+    {
+        /* Estimate vectors for all blocks which are overlapped by this type */
+        /* Interpolate/extrapolate the rest of the block's MVs */
+        estimate_mv(block_overlaps[i].overlaps, &(bmi[i]));
+        mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(&bmi[i].mv,
+                                                         mb_to_left_edge,
+                                                         mb_to_right_edge,
+                                                         mb_to_top_edge,
+                                                         mb_to_bottom_edge);
+        if (bmi[i].mv.as_int != 0)
+        {
+            ++non_zero_count;
+            filtered_mv->col += bmi[i].mv.as_mv.col;
+            filtered_mv->row += bmi[i].mv.as_mv.row;
+        }
+    }
+    if (non_zero_count > 0)
+    {
+        filtered_mv->col /= non_zero_count;
+        filtered_mv->row /= non_zero_count;
+    }
+}
+
+static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi,
+                                    int mb_row, int mb_col,
+                                    int mb_rows, int mb_cols)
+{
+    int sub_row;
+    int sub_col;
+    for (sub_row = 0; sub_row < 4; ++sub_row)
+    {
+        for (sub_col = 0; sub_col < 4; ++sub_col)
+        {
+            vp8_calculate_overlaps(
+                                overlaps, mb_rows, mb_cols,
+                                &(prev_mi->bmi[sub_row * 4 + sub_col]),
+                                4 * mb_row + sub_row,
+                                4 * mb_col + sub_col);
+        }
+    }
+}
+
+/* Estimate all missing motion vectors. This function does the same as the one
+ * above, but has different input arguments. */
+static void estimate_missing_mvs(MB_OVERLAP *overlaps,
+                                 MODE_INFO *mi, MODE_INFO *prev_mi,
+                                 int mb_rows, int mb_cols,
+                                 unsigned int first_corrupt)
+{
+    int mb_row, mb_col;
+    vpx_memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols);
+    /* First calculate the overlaps for all blocks */
+    for (mb_row = 0; mb_row < mb_rows; ++mb_row)
+    {
+        for (mb_col = 0; mb_col < mb_cols; ++mb_col)
+        {
+            /* We're only able to use blocks referring to the last frame
+             * when extrapolating new vectors.
+             */
+            if (prev_mi->mbmi.ref_frame == LAST_FRAME)
+            {
+                calc_prev_mb_overlaps(overlaps, prev_mi,
+                                      mb_row, mb_col,
+                                      mb_rows, mb_cols);
+            }
+            ++prev_mi;
+        }
+        ++prev_mi;
+    }
+
+    mb_row = first_corrupt / mb_cols;
+    mb_col = first_corrupt - mb_row * mb_cols;
+    mi += mb_row*(mb_cols + 1) + mb_col;
+    /* Go through all macroblocks in the current image with missing MVs
+     * and calculate new MVs using the overlaps.
+     */
+    for (; mb_row < mb_rows; ++mb_row)
+    {
+        int mb_to_top_edge = -((mb_row * 16)) << 3;
+        int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;
+        for (; mb_col < mb_cols; ++mb_col)
+        {
+            int mb_to_left_edge = -((mb_col * 16) << 3);
+            int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
+            const B_OVERLAP *block_overlaps =
+                    overlaps[mb_row*mb_cols + mb_col].overlaps;
+            mi->mbmi.ref_frame = LAST_FRAME;
+            mi->mbmi.mode = SPLITMV;
+            mi->mbmi.uv_mode = DC_PRED;
+            mi->mbmi.partitioning = 3;
+            estimate_mb_mvs(block_overlaps,
+                                mi,
+                                mb_to_left_edge,
+                                mb_to_right_edge,
+                                mb_to_top_edge,
+                                mb_to_bottom_edge);
+            ++mi;
+        }
+        mb_col = 0;
+        ++mi;
+    }
+}
+
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi)
+{
+    VP8_COMMON * const pc = &pbi->common;
+    estimate_missing_mvs(pbi->overlaps,
+                         pc->mi, pc->prev_mi,
+                         pc->mb_rows, pc->mb_cols,
+                         pbi->mvs_corrupt_from_mb);
+}
+
+static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx)
+{
+    assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
+    neighbor->ref_frame = mi->mbmi.ref_frame;
+    neighbor->mv = mi->bmi[block_idx].mv.as_mv;
+}
+
+/* Finds the neighboring blocks of a macroblock. In the general case
+ * 20 blocks are found. If fewer blocks are found due to
+ * image boundaries, those positions in the EC_BLOCK array are left "empty".
+ * The neighbors are enumerated with the upper-left neighbor as the first
+ * element, the second element refers to the neighbor to right of the previous
+ * neighbor, and so on. The last element refers to the neighbor below the first
+ * neighbor.
+ */
+static void find_neighboring_blocks(MODE_INFO *mi,
+                                    EC_BLOCK *neighbors,
+                                    int mb_row, int mb_col,
+                                    int mb_rows, int mb_cols,
+                                    int mi_stride)
+{
+    int i = 0;
+    int j;
+    if (mb_row > 0)
+    {
+        /* upper left */
+        if (mb_col > 0)
+            assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15);
+        ++i;
+        /* above */
+        for (j = 12; j < 16; ++j, ++i)
+            assign_neighbor(&neighbors[i], mi - mi_stride, j);
+    }
+    else
+        i += 5;
+    if (mb_col < mb_cols - 1)
+    {
+        /* upper right */
+        if (mb_row > 0)
+            assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12);
+        ++i;
+        /* right */
+        for (j = 0; j <= 12; j += 4, ++i)
+            assign_neighbor(&neighbors[i], mi + 1, j);
+    }
+    else
+        i += 5;
+    if (mb_row < mb_rows - 1)
+    {
+        /* lower right */
+        if (mb_col < mb_cols - 1)
+            assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0);
+        ++i;
+        /* below */
+        for (j = 0; j < 4; ++j, ++i)
+            assign_neighbor(&neighbors[i], mi + mi_stride, j);
+    }
+    else
+        i += 5;
+    if (mb_col > 0)
+    {
+        /* lower left */
+        if (mb_row < mb_rows - 1)
+            assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4);
+        ++i;
+        /* left */
+        for (j = 3; j < 16; j += 4, ++i)
+        {
+            assign_neighbor(&neighbors[i], mi - 1, j);
+        }
+    }
+    else
+        i += 5;
+    assert(i == 20);
+}
+
+/* Calculates which reference frame type is dominating among the neighbors */
+static MV_REFERENCE_FRAME dominant_ref_frame(EC_BLOCK *neighbors)
+{
+    /* Default to referring to "skip" */
+    MV_REFERENCE_FRAME dom_ref_frame = LAST_FRAME;
+    int max_ref_frame_cnt = 0;
+    int ref_frame_cnt[MAX_REF_FRAMES] = {0};
+    int i;
+    /* Count neighboring reference frames */
+    for (i = 0; i < NUM_NEIGHBORS; ++i)
+    {
+        if (neighbors[i].ref_frame < MAX_REF_FRAMES &&
+            neighbors[i].ref_frame != INTRA_FRAME)
+            ++ref_frame_cnt[neighbors[i].ref_frame];
+    }
+    /* Find maximum */
+    for (i = 0; i < MAX_REF_FRAMES; ++i)
+    {
+        if (ref_frame_cnt[i] > max_ref_frame_cnt)
+        {
+            dom_ref_frame = i;
+            max_ref_frame_cnt = ref_frame_cnt[i];
+        }
+    }
+    return dom_ref_frame;
+}
+
+/* Interpolates all motion vectors for a macroblock from the neighboring blocks'
+ * motion vectors.
+ */
+static void interpolate_mvs(MACROBLOCKD *mb,
+                         EC_BLOCK *neighbors,
+                         MV_REFERENCE_FRAME dom_ref_frame)
+{
+    int row, col, i;
+    MODE_INFO * const mi = mb->mode_info_context;
+    /* Table with the position of the neighboring blocks relative the position
+     * of the upper left block of the current MB. Starting with the upper left
+     * neighbor and going to the right.
+     */
+    const EC_POS neigh_pos[NUM_NEIGHBORS] = {
+                                        {-1,-1}, {-1,0}, {-1,1}, {-1,2}, {-1,3},
+                                        {-1,4}, {0,4}, {1,4}, {2,4}, {3,4},
+                                        {4,4}, {4,3}, {4,2}, {4,1}, {4,0},
+                                        {4,-1}, {3,-1}, {2,-1}, {1,-1}, {0,-1}
+                                      };
+    for (row = 0; row < 4; ++row)
+    {
+        for (col = 0; col < 4; ++col)
+        {
+            int w_sum = 0;
+            int mv_row_sum = 0;
+            int mv_col_sum = 0;
+            int_mv * const mv = &(mi->bmi[row*4 + col].mv);
+            for (i = 0; i < NUM_NEIGHBORS; ++i)
+            {
+                /* Calculate the weighted sum of neighboring MVs referring
+                 * to the dominant frame type.
+                 */
+                const int w = weights_q7[abs(row - neigh_pos[i].row)]
+                                        [abs(col - neigh_pos[i].col)];
+                if (neighbors[i].ref_frame != dom_ref_frame)
+                    continue;
+                w_sum += w;
+                /* Q7 * Q3 = Q10 */
+                mv_row_sum += w*neighbors[i].mv.row;
+                mv_col_sum += w*neighbors[i].mv.col;
+            }
+            if (w_sum > 0)
+            {
+                /* Avoid division by zero.
+                 * Normalize with the sum of the coefficients
+                 * Q3 = Q10 / Q7
+                 */
+                mv->as_mv.row = mv_row_sum / w_sum;
+                mv->as_mv.col = mv_col_sum / w_sum;
+                mi->bmi[row*4 + col].mode = NEW4X4;
+                mi->mbmi.need_to_clamp_mvs = vp8_check_mv_bounds(mv,
+                                                       mb->mb_to_left_edge,
+                                                       mb->mb_to_right_edge,
+                                                       mb->mb_to_top_edge,
+                                                       mb->mb_to_bottom_edge);
+            }
+        }
+    }
+}
+
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+                        int mb_row, int mb_col,
+                        int mb_rows, int mb_cols,
+                        int mi_stride)
+{
+    /* Find relevant neighboring blocks */
+    EC_BLOCK neighbors[NUM_NEIGHBORS];
+    MV_REFERENCE_FRAME dom_ref_frame;
+    int i;
+    /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */
+    for (i = 0; i < NUM_NEIGHBORS; ++i)
+    {
+        neighbors[i].ref_frame = MAX_REF_FRAMES;
+        neighbors[i].mv.row = neighbors[i].mv.col = 0;
+    }
+    find_neighboring_blocks(mb->mode_info_context,
+                                neighbors,
+                                mb_row, mb_col,
+                                mb_rows, mb_cols,
+                                mb->mode_info_stride);
+    /* Determine the dominant block type */
+    dom_ref_frame = dominant_ref_frame(neighbors);
+    /* Interpolate MVs for the missing blocks
+     * from the dominating MVs */
+    interpolate_mvs(mb, neighbors, dom_ref_frame);
+
+    mb->mode_info_context->mbmi.ref_frame = dom_ref_frame;
+    mb->mode_info_context->mbmi.mode = SPLITMV;
+    mb->mode_info_context->mbmi.uv_mode = DC_PRED;
+    mb->mode_info_context->mbmi.partitioning = 3;
+}
+
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd)
+{
+    /* This macroblock has corrupt residual, use the motion compensated
+       image (predictor) for concealment */
+    vp8_recon_copy16x16(xd->predictor, 16, xd->dst.y_buffer, xd->dst.y_stride);
+    vp8_recon_copy8x8(xd->predictor + 256, 8,
+                      xd->dst.u_buffer, xd->dst.uv_stride);
+    vp8_recon_copy8x8(xd->predictor + 320, 8,
+                      xd->dst.v_buffer, xd->dst.uv_stride);
+}
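
The weights_q7 table near the top of error_concealment.c is documented with a
Matlab recipe over the range 1..4, while the table itself is 5x5 and indexed
by the absolute row/column distance between a block and its neighbor. A small
C sketch that reproduces the same Q7 values under that indexing assumption
(distances 0..4, zero-distance entry forced to 0):

  #include <math.h>
  #include <stdio.h>

  /* Sketch: print the 5x5 Q7 weight table, round(2^7 / euclidean distance),
   * with the [0][0] entry set to 0 as in the committed table. */
  int main(void)
  {
      int row, col;
      for (row = 0; row < 5; ++row)
      {
          for (col = 0; col < 5; ++col)
          {
              int w = (row || col) ?
                  (int)floor(128.0 / sqrt(row * row + col * col) + 0.5) : 0;
              printf("%4d,", w);
          }
          printf("\n");
      }
      return 0;
  }
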
--- /dev/null
+++ b/vp8/decoder/error_concealment.h
@@ -1,0 +1,41 @@
+/*
+ *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef ERROR_CONCEALMENT_H
+#define ERROR_CONCEALMENT_H
+
+#include "onyxd_int.h"
+#include "ec_types.h"
+
+/* Allocate memory for the overlap lists */
+int vp8_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Deallocate the overlap lists */
+void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi);
+
+/* Estimate all missing motion vectors. */
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi);
+
+/* Functions for spatial MV interpolation */
+
+/* Interpolates all motion vectors for a macroblock mb at position
+ * (mb_row, mb_col). */
+void vp8_interpolate_motion(MACROBLOCKD *mb,
+                            int mb_row, int mb_col,
+                            int mb_rows, int mb_cols,
+                            int mi_stride);
+
+/* Conceal a macroblock with corrupt residual.
+ * Copies the prediction signal to the reconstructed image.
+ */
+void vp8_conceal_corrupt_mb(MACROBLOCKD *xd);
+
+#endif
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -30,6 +30,9 @@
 #include "vp8/common/systemdependent.h"
 #include "vpx_ports/vpx_timer.h"
 #include "detokenize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "error_concealment.h"
+#endif
 #if ARCH_ARM
 #include "vpx_ports/arm.h"
 #endif
@@ -95,6 +98,13 @@
     }
 
     pbi->common.error.setjmp = 0;
+
+#if CONFIG_ERROR_CONCEALMENT
+    pbi->ec_enabled = oxcf->error_concealment;
+#else
+    pbi->ec_enabled = 0;
+#endif
+
     return (VP8D_PTR) pbi;
 }
 
@@ -115,6 +125,9 @@
         vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
     vp8_decoder_remove_threads(pbi);
 #endif
+#if CONFIG_ERROR_CONCEALMENT
+    vp8_de_alloc_overlap_lists(pbi);
+#endif
     vp8_remove_common(&pbi->common);
     vpx_free(pbi);
 }
@@ -274,11 +287,17 @@
         */
         cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
 
-        /* Signal that we have no frame to show. */
-        cm->show_frame = 0;
+        /* If error concealment is disabled we won't signal missing frames to
+         * the decoder.
+         */
+        if (!pbi->ec_enabled)
+        {
+            /* Signal that we have no frame to show. */
+            cm->show_frame = 0;
 
-        /* Nothing more to do. */
-        return 0;
+            /* Nothing more to do. */
+            return 0;
+        }
     }
 
 
@@ -392,6 +411,28 @@
 
 
     vp8_clear_system_state();
+
+#if CONFIG_ERROR_CONCEALMENT
+    /* swap the mode infos to storage for future error concealment */
+    if (pbi->ec_enabled && pbi->common.prev_mi)
+    {
+        const MODE_INFO* tmp = pbi->common.prev_mi;
+        int row, col;
+        pbi->common.prev_mi = pbi->common.mi;
+        pbi->common.mi = tmp;
+
+        /* Propagate the segment_ids to the next frame */
+        for (row = 0; row < pbi->common.mb_rows; ++row)
+        {
+            for (col = 0; col < pbi->common.mb_cols; ++col)
+            {
+                const int i = row*pbi->common.mode_info_stride + col;
+                pbi->common.mi[i].mbmi.segment_id =
+                        pbi->common.prev_mi[i].mbmi.segment_id;
+            }
+        }
+    }
+#endif
 
     /*vp8_print_modes_and_motion_vectors( cm->mi, cm->mb_rows,cm->mb_cols, cm->current_video_frame);*/
 
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -17,6 +17,9 @@
 #include "vp8/common/onyxc_int.h"
 #include "vp8/common/threading.h"
 #include "dequantize.h"
+#if CONFIG_ERROR_CONCEALMENT
+#include "ec_types.h"
+#endif
 
 typedef struct
 {
@@ -127,6 +130,13 @@
     vp8_prob prob_last;
     vp8_prob prob_gf;
     vp8_prob prob_skip_false;
+
+#if CONFIG_ERROR_CONCEALMENT
+    MB_OVERLAP *overlaps;
+    /* the mb num from which modes and mvs (first partition) are corrupt */
+    unsigned int mvs_corrupt_from_mb;
+#endif
+    int ec_enabled;
 
 } VP8D_COMP;
 
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -68,371 +68,7 @@
 unsigned int b_modes[14]  = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 #endif
 
-static const int qrounding_factors[129] =
-{
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48
-};
 
-static const int qzbin_factors[129] =
-{
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80
-};
-
-static const int qrounding_factors_y2[129] =
-{
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48, 48, 48, 48, 48, 48, 48, 48,
-    48
-};
-
-static const int qzbin_factors_y2[129] =
-{
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    84, 84, 84, 84, 84, 84, 84, 84,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80, 80, 80, 80, 80, 80, 80, 80,
-    80
-};
-
-#define EXACT_QUANT
-#ifdef EXACT_QUANT
-static void vp8cx_invert_quant(int improved_quant, short *quant,
-                               unsigned char *shift, short d)
-{
-    if(improved_quant)
-    {
-        unsigned t;
-        int l;
-        t = d;
-        for(l = 0; t > 1; l++)
-            t>>=1;
-        t = 1 + (1<<(16+l))/d;
-        *quant = (short)(t - (1<<16));
-        *shift = l;
-    }
-    else
-    {
-        *quant = (1 << 16) / d;
-        *shift = 0;
-    }
-}
-
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
-    int i;
-    int quant_val;
-    int Q;
-
-    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
-    for (Q = 0; Q < QINDEX_RANGE; Q++)
-    {
-        // dc values
-        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
-        cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
-        vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
-                           cpi->Y1quant_shift[Q] + 0, quant_val);
-        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
-        cpi->common.Y1dequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
-        cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
-        vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
-                           cpi->Y2quant_shift[Q] + 0, quant_val);
-        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
-        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
-        cpi->common.Y2dequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
-        cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
-        vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
-                           cpi->UVquant_shift[Q] + 0, quant_val);
-        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
-        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
-        cpi->common.UVdequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        // all the ac values = ;
-        for (i = 1; i < 16; i++)
-        {
-            int rc = vp8_default_zig_zag1d[i];
-
-            quant_val = vp8_ac_yquant(Q);
-            cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
-            vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
-                               cpi->Y1quant_shift[Q] + rc, quant_val);
-            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
-            cpi->common.Y1dequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
-            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
-            cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
-            vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
-                               cpi->Y2quant_shift[Q] + rc, quant_val);
-            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
-            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
-            cpi->common.Y2dequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
-            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
-            cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
-            vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
-                               cpi->UVquant_shift[Q] + rc, quant_val);
-            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
-            cpi->common.UVdequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-        }
-    }
-}
-#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
-    int i;
-    int quant_val;
-    int Q;
-
-    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
-
-    for (Q = 0; Q < QINDEX_RANGE; Q++)
-    {
-        // dc values
-        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
-        cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
-        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
-        cpi->common.Y1dequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
-        cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
-        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
-        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
-        cpi->common.Y2dequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
-        cpi->UVquant[Q][0] = (1 << 16) / quant_val;
-        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
-        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
-        cpi->common.UVdequant[Q][0] = quant_val;
-        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
-        // all the ac values = ;
-        for (i = 1; i < 16; i++)
-        {
-            int rc = vp8_default_zig_zag1d[i];
-
-            quant_val = vp8_ac_yquant(Q);
-            cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
-            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
-            cpi->common.Y1dequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
-            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
-            cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
-            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
-            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
-            cpi->common.Y2dequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
-            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
-            cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
-            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
-            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
-            cpi->common.UVdequant[Q][rc] = quant_val;
-            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-        }
-    }
-}
-#endif
-void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
-{
-    int i;
-    int QIndex;
-    MACROBLOCKD *xd = &x->e_mbd;
-    int zbin_extra;
-
-    // Select the baseline MB Q index.
-    if (xd->segmentation_enabled)
-    {
-        // Abs Value
-        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
-
-            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
-        // Delta Value
-        else
-        {
-            QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
-            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;    // Clamp to valid range
-        }
-    }
-    else
-        QIndex = cpi->common.base_qindex;
-
-    // Y
-    zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
-                   ( cpi->zbin_over_quant +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-
-    for (i = 0; i < 16; i++)
-    {
-        x->block[i].quant = cpi->Y1quant[QIndex];
-        x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
-        x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
-        x->block[i].zbin = cpi->Y1zbin[QIndex];
-        x->block[i].round = cpi->Y1round[QIndex];
-        x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
-        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
-        x->block[i].zbin_extra = (short)zbin_extra;
-    }
-
-    // UV
-    zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
-                   ( cpi->zbin_over_quant +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-
-    for (i = 16; i < 24; i++)
-    {
-        x->block[i].quant = cpi->UVquant[QIndex];
-        x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
-        x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
-        x->block[i].zbin = cpi->UVzbin[QIndex];
-        x->block[i].round = cpi->UVround[QIndex];
-        x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
-        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
-        x->block[i].zbin_extra = (short)zbin_extra;
-    }
-
-    // Y2
-    zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
-                   ( (cpi->zbin_over_quant / 2) +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-
-    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
-    x->block[24].quant = cpi->Y2quant[QIndex];
-    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
-    x->block[24].zbin = cpi->Y2zbin[QIndex];
-    x->block[24].round = cpi->Y2round[QIndex];
-    x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
-    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
-    x->block[24].zbin_extra = (short)zbin_extra;
-
-    /* save this macroblock QIndex for vp8_update_zbin_extra() */
-    x->q_index = QIndex;
-}
-void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
-{
-    int i;
-    int QIndex = x->q_index;
-    int zbin_extra;
-
-    // Y
-    zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
-                   ( cpi->zbin_over_quant +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-    for (i = 0; i < 16; i++)
-    {
-        x->block[i].zbin_extra = (short)zbin_extra;
-    }
-
-    // UV
-    zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
-                   ( cpi->zbin_over_quant +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-
-    for (i = 16; i < 24; i++)
-    {
-        x->block[i].zbin_extra = (short)zbin_extra;
-    }
-
-    // Y2
-    zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
-                   ( (cpi->zbin_over_quant / 2) +
-                     cpi->zbin_mode_boost +
-                     x->act_zbin_adj ) ) >> 7;
-
-    x->block[24].zbin_extra = (short)zbin_extra;
-}
-
-void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
-{
-    // Clear Zbin mode boost for default case
-    cpi->zbin_mode_boost = 0;
-
-    // MB level quantizer setup
-    vp8cx_mb_init_quantizer(cpi, &cpi->mb);
-}
-
-
 /* activity_avg must be positive, or flat regions could get a zero weight
  *  (infinite lambda), which confounds analysis.
  * This also avoids the need for divide by zero checks in
@@ -452,7 +88,7 @@
 
 
 // Original activity measure from Tim T's code.
-unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
 {
     unsigned int act;
     unsigned int sse;
@@ -482,7 +118,7 @@
 }
 
 // Stub for alternative experimental activity measures.
-unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int alt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
 {
     unsigned int mb_activity = VP8_ACTIVITY_AVG_MIN;
 
@@ -500,7 +136,7 @@
 
 // Measure the activity of the current macroblock
 // What we measure here is TBD so abstracted to this function
-unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
+static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
 {
     unsigned int mb_activity;
 
@@ -519,7 +155,7 @@
 }
 
 // Calculate an "average" mb activity value for the frame
-void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
+static void calc_av_activity( VP8_COMP *cpi, INT64 activity_sum )
 {
     // Simple mean for now
     cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
@@ -529,7 +165,7 @@
 
 #define OUTPUT_NORM_ACT_STATS   0
 // Calculate a normalized activity value for each mb
-void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
+static void calc_norm_activity( VP8_COMP *cpi, MACROBLOCK *x )
 {
     VP8_COMMON *const cm = & cpi->common;
     int mb_row, mb_col;
@@ -593,7 +229,7 @@
 
 // Loop through all MBs. Note activity of each, average activity and
 // calculate a normalized activity for each
-void build_activity_map( VP8_COMP *cpi )
+static void build_activity_map( VP8_COMP *cpi )
 {
     MACROBLOCK *const x = & cpi->mb;
     VP8_COMMON *const cm = & cpi->common;
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -21,9 +21,6 @@
 #include "vp8/common/g_common.h"
 #include "encodeintra.h"
 
-#define intra4x4ibias_rate    128
-#define intra4x4pbias_rate    256
-
 
 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -17,6 +17,5 @@
 void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
 
 #endif
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -100,7 +100,7 @@
     // Pure C:
     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
     cpi->rtcd.variance.ssimpf_8x8            = ssim_parms_8x8_c;
     cpi->rtcd.variance.ssimpf                = ssim_parms_c;
 #endif
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -75,7 +75,7 @@
 
 extern const int vp8_gf_interval_table[101];
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
 #include "math.h"
 
 extern double vp8_calc_ssim
@@ -1298,7 +1298,7 @@
         cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
     }
 
-    if (cpi->sf.optimize_coefficients == 1)
+    if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
         cpi->mb.optimize = 1;
     else
         cpi->mb.optimize = 0;
@@ -1982,8 +1982,8 @@
     cpi->source_alt_ref_active = FALSE;
     cpi->common.refresh_alt_ref_frame = 0;
 
-    cpi->b_calculate_psnr = CONFIG_PSNR;
-#if CONFIG_PSNR
+    cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
     cpi->b_calculate_ssimg = 0;
 
     cpi->count = 0;
@@ -2202,7 +2202,7 @@
         print_mode_context();
 #endif
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
 
         if (cpi->pass != 1)
         {
@@ -2718,45 +2718,6 @@
 }
 
 
-static void set_quantizer(VP8_COMP *cpi, int Q)
-{
-    VP8_COMMON *cm = &cpi->common;
-    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
-    int update = 0;
-    int new_delta_q;
-    cm->base_qindex = Q;
-
-    /* if any of the delta_q values are changing update flag has to be set */
-    /* currently only y2dc_delta_q may change */
-
-    cm->y1dc_delta_q = 0;
-    cm->y2ac_delta_q = 0;
-    cm->uvdc_delta_q = 0;
-    cm->uvac_delta_q = 0;
-
-    if (Q < 4)
-    {
-        new_delta_q = 4-Q;
-    }
-    else
-        new_delta_q = 0;
-
-    update |= cm->y2dc_delta_q != new_delta_q;
-    cm->y2dc_delta_q = new_delta_q;
-
-
-    // Set Segment specific quatizers
-    mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
-    mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
-    mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
-    mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
-
-    /* quantizer has to be reinitialized for any delta_q changes */
-    if(update)
-        vp8cx_init_quantizer(cpi);
-
-}
-
 static void update_alt_ref_frame_and_stats(VP8_COMP *cpi)
 {
     VP8_COMMON *cm = &cpi->common;
@@ -3105,7 +3066,7 @@
     (void) size;
     (void) dest;
     (void) frame_flags;
-    set_quantizer(cpi, 26);
+    vp8_set_quantizer(cpi, 26);
 
     scale_and_extend_source(cpi->un_scaled_source, cpi);
     vp8_first_pass(cpi);
@@ -3502,7 +3463,7 @@
             cm->current_video_frame++;
             cpi->frames_since_key++;
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
             cpi->count ++;
 #endif
 
@@ -3769,7 +3730,7 @@
             Q = 127;
             */
 
-        set_quantizer(cpi, Q);
+        vp8_set_quantizer(cpi, Q);
         this_q = Q;
 
         // setup skip prob for costing in mode/mv decision
@@ -4114,7 +4075,7 @@
         {
             vp8_restore_coding_context(cpi);
             loop_count++;
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
             cpi->tot_recode_hits++;
 #endif
         }
@@ -4388,7 +4349,7 @@
         }
     }
 
-#if 0 && CONFIG_PSNR
+#if 0 && CONFIG_INTERNAL_STATS
     {
         FILE *f = fopen("tmp.stt", "a");
 
@@ -4958,7 +4919,7 @@
         generate_psnr_packet(cpi);
     }
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
 
     if (cpi->pass != 1)
     {
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -244,7 +244,7 @@
     BLOCK_MAX_SEGMENTS
 };
 
-typedef struct
+typedef struct VP8_COMP
 {
 
     DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
@@ -624,7 +624,7 @@
     int fixed_divide[512];
 #endif
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
     int    count;
     double total_y;
     double total_u;
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -426,17 +426,22 @@
 
 }
 
-static void vp8_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
 {
-    /* Split MV modes currently not supported when RD is nopt enabled, therefore, only need to modify MVcount in NEWMV mode. */
+    /* Split MV modes are currently not supported when RD is not enabled;
+     * therefore we only need to modify the MVcount in NEWMV mode. */
     if (xd->mode_info_context->mbmi.mode == NEWMV)
     {
-        cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
-        cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+        cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row -
+                                      best_ref_mv->as_mv.row) >> 1)]++;
+        cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col -
+                                      best_ref_mv->as_mv.col) >> 1)]++;
     }
 }
 
-void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
+void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+                         int recon_uvoffset, int *returnrate,
+                         int *returndistortion, int *returnintra)
 {
     BLOCK *b = &x->block[0];
     BLOCKD *d = &x->e_mbd.block[0];
@@ -443,7 +448,7 @@
     MACROBLOCKD *xd = &x->e_mbd;
     B_MODE_INFO best_bmodes[16];
     MB_MODE_INFO best_mbmode;
-    PARTITION_INFO best_partition;
+
     int_mv best_ref_mv;
     int_mv mode_mv[MB_MODE_COUNT];
     MB_PREDICTION_MODE this_mode;
@@ -878,9 +883,8 @@
             *returndistortion = distortion2;
             best_rd = this_rd;
             vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
-            vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
 
-            if (this_mode == B_PRED || this_mode == SPLITMV)
+            if (this_mode == B_PRED)
                 for (i = 0; i < 16; i++)
                 {
                     vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
@@ -952,7 +956,6 @@
         best_mbmode.partitioning = 0;
 
         vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
-        vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
 
         for (i = 0; i < 16; i++)
         {
@@ -963,12 +966,10 @@
         return;
     }
 
-
     // macroblock modes
     vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
-    vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
 
-    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
+    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
         for (i = 0; i < 16; i++)
         {
             vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
@@ -979,7 +980,5 @@
         vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv);
     }
 
-    x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
-
-    vp8_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
+    update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
 }
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -12,8 +12,9 @@
 #include <math.h>
 #include "vpx_mem/vpx_mem.h"
 
+#include "onyx_int.h"
 #include "quantize.h"
-#include "vp8/common/entropy.h"
+#include "vp8/common/quant_common.h"
 
 #define EXACT_QUANT
 
@@ -298,4 +299,419 @@
 
     for (i = 16; i < 24; i++)
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+}
+
+
+static const int qrounding_factors[129] =
+{
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48
+};
+
+
+static const int qzbin_factors[129] =
+{
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80
+};
+
+
+static const int qrounding_factors_y2[129] =
+{
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48, 48, 48, 48, 48, 48, 48, 48,
+    48
+};
+
+
+static const int qzbin_factors_y2[129] =
+{
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    84, 84, 84, 84, 84, 84, 84, 84,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80, 80, 80, 80, 80, 80, 80, 80,
+    80
+};
+
+
+#define EXACT_QUANT
+#ifdef EXACT_QUANT
+static void invert_quant(int improved_quant, short *quant,
+                               unsigned char *shift, short d)
+{
+    if(improved_quant)
+    {
+        unsigned t;
+        int l;
+        t = d;
+        for(l = 0; t > 1; l++)
+            t>>=1;
+        t = 1 + (1<<(16+l))/d;
+        *quant = (short)(t - (1<<16));
+        *shift = l;
+    }
+    else
+    {
+        *quant = (1 << 16) / d;
+        *shift = 0;
+    }
+}
+
+
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+    int i;
+    int quant_val;
+    int Q;
+
+    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+    for (Q = 0; Q < QINDEX_RANGE; Q++)
+    {
+        // dc values
+        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+        cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+        invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+                     cpi->Y1quant_shift[Q] + 0, quant_val);
+        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+        cpi->common.Y1dequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+        cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+        invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+                     cpi->Y2quant_shift[Q] + 0, quant_val);
+        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+        cpi->common.Y2dequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+        cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+        invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+                     cpi->UVquant_shift[Q] + 0, quant_val);
+        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+        cpi->common.UVdequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        // all the ac values
+        for (i = 1; i < 16; i++)
+        {
+            int rc = vp8_default_zig_zag1d[i];
+
+            quant_val = vp8_ac_yquant(Q);
+            cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
+            invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
+                         cpi->Y1quant_shift[Q] + rc, quant_val);
+            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+            cpi->common.Y1dequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+            cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
+            invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
+                         cpi->Y2quant_shift[Q] + rc, quant_val);
+            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+            cpi->common.Y2dequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+            cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
+            invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
+                         cpi->UVquant_shift[Q] + rc, quant_val);
+            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+            cpi->common.UVdequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+        }
+    }
+}
+#else
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+    int i;
+    int quant_val;
+    int Q;
+
+    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+    for (Q = 0; Q < QINDEX_RANGE; Q++)
+    {
+        // dc values
+        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+        cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
+        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+        cpi->common.Y1dequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+        cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
+        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+        cpi->common.Y2dequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+        cpi->UVquant[Q][0] = (1 << 16) / quant_val;
+        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+        cpi->common.UVdequant[Q][0] = quant_val;
+        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+        // all the ac values
+        for (i = 1; i < 16; i++)
+        {
+            int rc = vp8_default_zig_zag1d[i];
+
+            quant_val = vp8_ac_yquant(Q);
+            cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
+            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+            cpi->common.Y1dequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+            cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
+            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+            cpi->common.Y2dequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+            cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
+            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+            cpi->common.UVdequant[Q][rc] = quant_val;
+            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+        }
+    }
+}
+#endif
+
+
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
+{
+    int i;
+    int QIndex;
+    MACROBLOCKD *xd = &x->e_mbd;
+    int zbin_extra;
+
+    // Select the baseline MB Q index.
+    if (xd->segmentation_enabled)
+    {
+        // Abs Value
+        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
+
+            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+        // Delta Value
+        else
+        {
+            QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
+            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;    // Clamp to valid range
+        }
+    }
+    else
+        QIndex = cpi->common.base_qindex;
+
+    // Y
+    zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+                   ( cpi->zbin_over_quant +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+
+    for (i = 0; i < 16; i++)
+    {
+        x->block[i].quant = cpi->Y1quant[QIndex];
+        x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
+        x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+        x->block[i].zbin = cpi->Y1zbin[QIndex];
+        x->block[i].round = cpi->Y1round[QIndex];
+        x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
+        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+        x->block[i].zbin_extra = (short)zbin_extra;
+    }
+
+    // UV
+    zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+                   ( cpi->zbin_over_quant +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+
+    for (i = 16; i < 24; i++)
+    {
+        x->block[i].quant = cpi->UVquant[QIndex];
+        x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
+        x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+        x->block[i].zbin = cpi->UVzbin[QIndex];
+        x->block[i].round = cpi->UVround[QIndex];
+        x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
+        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+        x->block[i].zbin_extra = (short)zbin_extra;
+    }
+
+    // Y2
+    zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+                   ( (cpi->zbin_over_quant / 2) +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+
+    x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
+    x->block[24].quant = cpi->Y2quant[QIndex];
+    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+    x->block[24].zbin = cpi->Y2zbin[QIndex];
+    x->block[24].round = cpi->Y2round[QIndex];
+    x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
+    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+    x->block[24].zbin_extra = (short)zbin_extra;
+
+    /* save this macroblock QIndex for vp8_update_zbin_extra() */
+    x->q_index = QIndex;
+}
+
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+    int i;
+    int QIndex = x->q_index;
+    int zbin_extra;
+
+    // Y
+    zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
+                   ( cpi->zbin_over_quant +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+    for (i = 0; i < 16; i++)
+    {
+        x->block[i].zbin_extra = (short)zbin_extra;
+    }
+
+    // UV
+    zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
+                   ( cpi->zbin_over_quant +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+
+    for (i = 16; i < 24; i++)
+    {
+        x->block[i].zbin_extra = (short)zbin_extra;
+    }
+
+    // Y2
+    zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
+                   ( (cpi->zbin_over_quant / 2) +
+                     cpi->zbin_mode_boost +
+                     x->act_zbin_adj ) ) >> 7;
+
+    x->block[24].zbin_extra = (short)zbin_extra;
+}
+
+
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
+{
+    // Clear Zbin mode boost for default case
+    cpi->zbin_mode_boost = 0;
+
+    // MB level quantizer setup
+    vp8cx_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
+{
+    VP8_COMMON *cm = &cpi->common;
+    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+    int update = 0;
+    int new_delta_q;
+    cm->base_qindex = Q;
+
+    /* if any of the delta_q values are changing, the update flag has to be set */
+    /* currently only y2dc_delta_q may change */
+
+    cm->y1dc_delta_q = 0;
+    cm->y2ac_delta_q = 0;
+    cm->uvdc_delta_q = 0;
+    cm->uvac_delta_q = 0;
+
+    if (Q < 4)
+    {
+        new_delta_q = 4-Q;
+    }
+    else
+        new_delta_q = 0;
+
+    update |= cm->y2dc_delta_q != new_delta_q;
+    cm->y2dc_delta_q = new_delta_q;
+
+
+    // Set segment specific quantizers
+    mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
+    mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
+    mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
+    mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
+
+    /* quantizer has to be reinitialized for any delta_q changes */
+    if(update)
+        vp8cx_init_quantizer(cpi);
+
 }
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -55,4 +55,11 @@
 extern void vp8_quantize_mbuv(MACROBLOCK *x);
 extern void vp8_quantize_mby(MACROBLOCK *x);
 
+struct VP8_COMP;
+extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
+extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
+extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_init_quantizer(struct VP8_COMP *cpi);
+
 #endif
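
The "struct VP8_COMP;" line added above is a forward declaration: it lets quantize.h declare functions taking a pointer to the encoder context without including onyx_int.h, which is why the typedef in onyx_int.h now carries the VP8_COMP struct tag. A minimal, self-contained illustration of the pattern, with hypothetical names:

/* header side: an incomplete type is enough to declare the interface */
struct widget;
void widget_reset(struct widget *w);

/* source side: the full definition is needed only where members are accessed */
struct widget { int state; };
void widget_reset(struct widget *w) { w->state = 0; }
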
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -994,9 +994,11 @@
             cost = x->inter_bmode_costs[ m];
         }
 
-        d->bmi.mode = m;
         d->bmi.mv.as_int = this_mv->as_int;
 
+        x->partition_info->bmi[i].mode = m;
+        x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+
     }
     while (++i < 16);
 
@@ -1340,8 +1342,8 @@
         {
             BLOCKD *bd = &x->e_mbd.block[i];
 
-            bsi->mvs[i].as_mv = bd->bmi.mv.as_mv;
-            bsi->modes[i] = bd->bmi.mode;
+            bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+            bsi->modes[i] = x->partition_info->bmi[i].mode;
             bsi->eobs[i] = bd->eob;
         }
     }
@@ -1471,7 +1473,6 @@
         BLOCKD *bd = &x->e_mbd.block[i];
 
         bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
-        bd->bmi.mode = bsi.modes[i];
         bd->eob = bsi.eobs[i];
     }
 
@@ -1489,9 +1490,13 @@
 
         j = vp8_mbsplit_offset[bsi.segment_num][i];
 
-        x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
-        x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
+        x->partition_info->bmi[i].mode = bsi.modes[j];
+        x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
     }
+    /*
+     * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+     */
+    x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
 
     return bsi.segment_rd;
 }
@@ -1751,25 +1756,29 @@
     }
 }
 
-static void vp8_rd_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
 {
-    int i;
-
-    if (xd->mode_info_context->mbmi.mode == SPLITMV)
+    if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
     {
-        for (i = 0; i < 16; i++)
+        int i;
+
+        for (i = 0; i < x->partition_info->count; i++)
         {
-            if (xd->block[i].bmi.mode == NEW4X4)
+            if (x->partition_info->bmi[i].mode == NEW4X4)
             {
-                cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
-                cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+                cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row
+                                          - best_ref_mv->as_mv.row) >> 1)]++;
+                cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col
+                                          - best_ref_mv->as_mv.col) >> 1)]++;
             }
         }
     }
-    else if (xd->mode_info_context->mbmi.mode == NEWMV)
+    else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
     {
-        cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
-        cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+        cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+                                          - best_ref_mv->as_mv.row) >> 1)]++;
+        cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+                                          - best_ref_mv->as_mv.col) >> 1)]++;
     }
 }
 
@@ -2479,7 +2488,6 @@
 
     // macroblock modes
     vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
-    vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
 
     for (i = 0; i < 16; i++)
     {
@@ -2486,7 +2494,13 @@
         vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
     }
 
-    x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
+    if (best_mbmode.mode == SPLITMV)
+    {
+        vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+        x->e_mbd.mode_info_context->mbmi.mv.as_int =
+                                      x->partition_info->bmi[15].mv.as_int;
+    }
 
-    vp8_rd_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
+    rd_update_mvcount(cpi, x, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
+
 }
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -391,7 +391,7 @@
     vp8_sad_multi_d_fn_t     sad8x8x4d;
     vp8_sad_multi_d_fn_t     sad4x4x4d;
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
     vp8_ssimpf_fn_t          ssimpf_8x8;
     vp8_ssimpf_fn_t          ssimpf;
 #endif
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -112,7 +112,7 @@
 #endif
 
 #if HAVE_SSSE3
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
 #if ARCH_X86_64
 typedef void ssimpf
 (
@@ -287,7 +287,7 @@
 
         cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_ssse3;
 
-#if CONFIG_PSNR
+#if CONFIG_INTERNAL_STATS
 #if ARCH_X86_64
         cpi->rtcd.variance.ssimpf_8x8            = vp8_ssim_parms_8x8_sse3;
         cpi->rtcd.variance.ssimpf                = vp8_ssim_parms_16x16_sse3;
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -19,6 +19,8 @@
 #include "decoder/onyxd_int.h"
 
 #define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+#define VP8_CAP_ERROR_CONCEALMENT (CONFIG_ERROR_CONCEALMENT ? \
+                                    VPX_CODEC_CAP_ERROR_CONCEALMENT : 0)
 
 typedef vpx_codec_stream_info_t  vp8_stream_info_t;
 
@@ -364,6 +366,8 @@
             oxcf.Version = 9;
             oxcf.postprocess = 0;
             oxcf.max_threads = ctx->cfg.threads;
+            oxcf.error_concealment =
+                    (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
 
             optr = vp8dx_create_decompressor(&oxcf);
 
@@ -719,7 +723,7 @@
 {
     "WebM Project VP8 Decoder" VERSION_STRING,
     VPX_CODEC_INTERNAL_ABI_VERSION,
-    VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+    VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
     /* vpx_codec_caps_t          caps; */
     vp8_init,         /* vpx_codec_init_fn_t       init; */
     vp8_destroy,      /* vpx_codec_destroy_fn_t    destroy; */
@@ -749,7 +753,7 @@
 {
     "WebM Project VP8 Decoder (Deprecated API)" VERSION_STRING,
     VPX_CODEC_INTERNAL_ABI_VERSION,
-    VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+    VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT,
     /* vpx_codec_caps_t          caps; */
     vp8_init,         /* vpx_codec_init_fn_t       init; */
     vp8_destroy,      /* vpx_codec_destroy_fn_t    destroy; */
--- a/vp8/vp8cx.mk
+++ b/vp8/vp8cx.mk
@@ -77,12 +77,12 @@
 VP8_CX_SRCS-yes += encoder/sad_c.c
 VP8_CX_SRCS-yes += encoder/segmentation.c
 VP8_CX_SRCS-yes += encoder/segmentation.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += encoder/ssim.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
 VP8_CX_SRCS-yes += encoder/tokenize.c
 VP8_CX_SRCS-yes += encoder/treewriter.c
 VP8_CX_SRCS-yes += encoder/variance_c.c
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.h
-VP8_CX_SRCS-$(CONFIG_PSNR) += common/postproc.c
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
+VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
 VP8_CX_SRCS-yes += encoder/temporal_filter.c
 VP8_CX_SRCS-yes += encoder/temporal_filter.h
 
--- a/vp8/vp8dx.mk
+++ b/vp8/vp8dx.mk
@@ -53,6 +53,9 @@
 VP8_DX_SRCS-yes += decoder/decodframe.c
 VP8_DX_SRCS-yes += decoder/dequantize.c
 VP8_DX_SRCS-yes += decoder/detokenize.c
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/ec_types.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.h
+VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT) += decoder/error_concealment.c
 VP8_DX_SRCS-yes += decoder/generic/dsystemdependent.c
 VP8_DX_SRCS-yes += decoder/dboolhuff.h
 VP8_DX_SRCS-yes += decoder/decodemv.h
--- a/vpx/src/vpx_decoder.c
+++ b/vpx/src/vpx_decoder.c
@@ -36,6 +36,9 @@
         res = VPX_CODEC_INCAPABLE;
     else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
         res = VPX_CODEC_INCAPABLE;
+    else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
+            !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
+        res = VPX_CODEC_INCAPABLE;
     else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
         res = VPX_CODEC_INCAPABLE;
     else
--- a/vpx/vpx_decoder.h
+++ b/vpx/vpx_decoder.h
@@ -53,6 +53,8 @@
 #define VPX_CODEC_CAP_PUT_SLICE  0x10000 /**< Will issue put_slice callbacks */
 #define VPX_CODEC_CAP_PUT_FRAME  0x20000 /**< Will issue put_frame callbacks */
 #define VPX_CODEC_CAP_POSTPROC   0x40000 /**< Can postprocess decoded frame */
+#define VPX_CODEC_CAP_ERROR_CONCEALMENT   0x80000 /**< Can conceal errors due to
+                                                       packet loss */
 
     /*! \brief Initialization-time Feature Enabling
      *
@@ -62,6 +64,8 @@
      *  The available flags are specified by VPX_CODEC_USE_* defines.
      */
 #define VPX_CODEC_USE_POSTPROC   0x10000 /**< Postprocess decoded frame */
+#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
+                                                     frames */
 
     /*!\brief Stream properties
      *
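
Together with the capability check added to vpx/src/vpx_decoder.c, an application requests concealment at init time roughly as sketched below (error handling elided; a decoder built without error-concealment support rejects the flag with VPX_CODEC_INCAPABLE):

#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

static int init_ec_decoder(vpx_codec_ctx_t *decoder)
{
    vpx_codec_dec_cfg_t cfg = {0};

    /* Pass the new init-time flag; returns nonzero on failure. */
    return vpx_codec_dec_init(decoder, vpx_codec_vp8_dx(), &cfg,
                              VPX_CODEC_USE_ERROR_CONCEALMENT);
}
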
--- a/vpxdec.c
+++ b/vpxdec.c
@@ -87,7 +87,10 @@
                                     "Max threads to use");
 static const arg_def_t verbosearg = ARG_DEF("v", "verbose", 0,
                                   "Show version string");
+static const arg_def_t error_concealment = ARG_DEF(NULL, "error-concealment", 0,
+                                       "Enable decoder error-concealment");
 
+
 #if CONFIG_MD5
 static const arg_def_t md5arg = ARG_DEF(NULL, "md5", 0,
                                         "Compute the MD5 sum of the decoded frame");
@@ -100,6 +103,7 @@
 #if CONFIG_MD5
     &md5arg,
 #endif
+    &error_concealment,
     NULL
 };
 
@@ -700,6 +704,7 @@
     FILE                  *infile;
     int                    frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0, do_md5 = 0, progress = 0;
     int                    stop_after = 0, postproc = 0, summary = 0, quiet = 1;
+    int                    ec_enabled = 0;
     vpx_codec_iface_t       *iface = NULL;
     unsigned int           fourcc;
     unsigned long          dx_time = 0;
@@ -724,6 +729,7 @@
 #endif
     struct input_ctx        input = {0};
     int                     frames_corrupted = 0;
+    int                     dec_flags = 0;
 
     /* Parse command line */
     exec_name = argv_[0];
@@ -843,6 +849,10 @@
                 vp8_dbg_display_mv = flags;
             }
         }
+        else if (arg_match(&arg, &error_concealment, argi))
+        {
+            ec_enabled = 1;
+        }
 
 #endif
         else
@@ -963,8 +973,10 @@
             break;
         }
 
+    dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
+                (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0);
     if (vpx_codec_dec_init(&decoder, iface ? iface :  ifaces[0].iface, &cfg,
-                           postproc ? VPX_CODEC_USE_POSTPROC : 0))
+                           dec_flags))
     {
         fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder));
         return EXIT_FAILURE;
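
With the argument wiring above, the feature can be exercised from the command line by passing the new option to vpxdec, for example (file name illustrative, other options as needed):

    vpxdec --error-concealment [other options] input.ivf

which simply maps to VPX_CODEC_USE_ERROR_CONCEALMENT in the vpx_codec_dec_init() call shown in the last hunk.
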