shithub: libvpx

ref: 518c5519033d16c677c1cca4142d6f4c34fb0fe1
parent: 89c3269636c1c1859345a73bbd5eb8f71df29306
parent: dfa9e2c5ea4a4282d931e382c327fc2d149ebcf8
author: John Koleszar <jkoleszar@google.com>
date: Fri Apr 29 20:05:05 EDT 2011

Merge remote branch 'origin/master' into experimental

Change-Id: I9c995f1fdb46c098b0c519bf333318dff651cb40

--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -279,102 +279,113 @@
     }
 }
 
-void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+                                        unsigned char *dst_y,
+                                        unsigned char *dst_u,
+                                        unsigned char *dst_v,
+                                        int dst_ystride,
+                                        int dst_uvstride)
 {
+    int offset;
+    unsigned char *ptr;
+    unsigned char *uptr, *vptr;
 
-    if (x->mode_info_context->mbmi.mode != SPLITMV)
-    {
-        int offset;
-        unsigned char *ptr_base;
-        unsigned char *ptr;
-        unsigned char *uptr, *vptr;
-        unsigned char *pred_ptr = x->predictor;
-        unsigned char *upred_ptr = &x->predictor[256];
-        unsigned char *vpred_ptr = &x->predictor[320];
+    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
+    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
 
-        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
-        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
-        int pre_stride = x->block[0].pre_stride;
+    unsigned char *ptr_base = x->pre.y_buffer;
+    int pre_stride = x->block[0].pre_stride;
 
-        ptr_base = x->pre.y_buffer;
-        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
+    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
 
-        if ((mv_row | mv_col) & 7)
-        {
-            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
-        }
-        else
-        {
-            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
-        }
+    if ((mv_row | mv_col) & 7)
+    {
+        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride);
+    }
+    else
+    {
+        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride);
+    }
 
-        mv_row = x->block[16].bmi.mv.as_mv.row;
-        mv_col = x->block[16].bmi.mv.as_mv.col;
-        pre_stride >>= 1;
-        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
-        uptr = x->pre.u_buffer + offset;
-        vptr = x->pre.v_buffer + offset;
+    mv_row = x->block[16].bmi.mv.as_mv.row;
+    mv_col = x->block[16].bmi.mv.as_mv.col;
+    pre_stride >>= 1;
+    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
+    uptr = x->pre.u_buffer + offset;
+    vptr = x->pre.v_buffer + offset;
 
-        if ((mv_row | mv_col) & 7)
-        {
-            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
-            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
-        }
-        else
-        {
-            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
-            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
-        }
+    if ((mv_row | mv_col) & 7)
+    {
+        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
+        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
     }
     else
     {
-        int i;
+        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
+        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
+    }
 
-        if (x->mode_info_context->mbmi.partitioning < 3)
-        {
-            for (i = 0; i < 4; i++)
-            {
-                BLOCKD *d = &x->block[bbb[i]];
-                build_inter_predictors4b(x, d, 16);
-            }
-        }
-        else
-        {
-            for (i = 0; i < 16; i += 2)
-            {
-                BLOCKD *d0 = &x->block[i];
-                BLOCKD *d1 = &x->block[i+1];
+}
 
-                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-                    build_inter_predictors2b(x, d0, 16);
-                else
-                {
-                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
-                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
-                }
+void vp8_build_inter4x4_predictors_mb(MACROBLOCKD *x)
+{
+    int i;
 
-            }
-
+    if (x->mode_info_context->mbmi.partitioning < 3)
+    {
+        for (i = 0; i < 4; i++)
+        {
+            BLOCKD *d = &x->block[bbb[i]];
+            build_inter_predictors4b(x, d, 16);
         }
-
-        for (i = 16; i < 24; i += 2)
+    }
+    else
+    {
+        for (i = 0; i < 16; i += 2)
         {
             BLOCKD *d0 = &x->block[i];
             BLOCKD *d1 = &x->block[i+1];
 
             if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-                build_inter_predictors2b(x, d0, 8);
+                build_inter_predictors2b(x, d0, 16);
             else
             {
-                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
-                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
+                vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
+                vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
             }
 
         }
 
     }
+
+    for (i = 16; i < 24; i += 2)
+    {
+        BLOCKD *d0 = &x->block[i];
+        BLOCKD *d1 = &x->block[i+1];
+
+        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+            build_inter_predictors2b(x, d0, 8);
+        else
+        {
+            vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
+            vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
+        }
+    }
 }
 
+void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
+{
+    if (x->mode_info_context->mbmi.mode != SPLITMV)
+    {
+        vp8_build_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256],
+                                           &x->predictor[320], 16, 8);
+    }
+    else
+    {
+        vp8_build_inter4x4_predictors_mb(x);
+    }
+}
+
 void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
 {
     int i, j;
@@ -455,91 +466,5 @@
 }
 
 
-/* The following functions are written for skip_recon_mb() to call. Since there is no recon in this
- * situation, we can write the result directly to dst buffer instead of writing it to predictor
- * buffer and then copying it to dst buffer.
- */
-static void vp8_build_inter_predictors_b_s(BLOCKD *d, unsigned char *dst_ptr, vp8_subpix_fn_t sppf)
-{
-    int r;
-    unsigned char *ptr_base;
-    unsigned char *ptr;
-    /*unsigned char *pred_ptr = d->predictor;*/
-    int dst_stride = d->dst_stride;
-    int pre_stride = d->pre_stride;
 
-    ptr_base = *(d->base_pre);
 
-    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
-    {
-        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, dst_stride);
-    }
-    else
-    {
-        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        ptr = ptr_base;
-
-        for (r = 0; r < 4; r++)
-        {
-#ifdef MUST_BE_ALIGNED
-            dst_ptr[0]   = ptr[0];
-            dst_ptr[1]   = ptr[1];
-            dst_ptr[2]   = ptr[2];
-            dst_ptr[3]   = ptr[3];
-#else
-            *(int *)dst_ptr = *(int *)ptr ;
-#endif
-            dst_ptr      += dst_stride;
-            ptr         += pre_stride;
-        }
-    }
-}
-
-
-
-void vp8_build_inter16x16_predictors_mb_s(MACROBLOCKD *x)
-{
-    unsigned char *dst_ptr = x->dst.y_buffer;
-
-    int offset;
-    unsigned char *ptr_base;
-    unsigned char *ptr;
-    unsigned char *uptr, *vptr;
-    unsigned char *udst_ptr = x->dst.u_buffer;
-    unsigned char *vdst_ptr = x->dst.v_buffer;
-
-    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
-    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
-    int pre_stride = x->dst.y_stride; /*x->block[0].pre_stride;*/
-
-    ptr_base = x->pre.y_buffer;
-    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
-
-    if ((mv_row | mv_col) & 7)
-    {
-        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
-    }
-    else
-    {
-        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
-    }
-
-    mv_row = x->block[16].bmi.mv.as_mv.row;
-    mv_col = x->block[16].bmi.mv.as_mv.col;
-    pre_stride >>= 1;
-    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
-    uptr = x->pre.u_buffer + offset;
-    vptr = x->pre.v_buffer + offset;
-
-    if ((mv_row | mv_col) & 7)
-    {
-        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, udst_ptr, x->dst.uv_stride);
-        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vdst_ptr, x->dst.uv_stride);
-    }
-    else
-    {
-        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride);
-        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vdst_ptr, x->dst.uv_stride);
-    }
-}
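The refactor above folds the skip-recon variant into the main routine: vp8_build_inter16x16_predictors_mb() now takes explicit destination pointers and strides instead of always writing into the fixed x->predictor scratch buffer, and the SPLITMV path moves into its own vp8_build_inter4x4_predictors_mb(). That parameterization is what lets the hand-written *_mb_s copy above be deleted. A minimal sketch of the two call patterns, using only fields that appear in this patch (xd as in the decoder call sites):

    /* Main path: predict into the macroblock scratch buffer. Y fills
     * predictor[0..255], U starts at offset 256 and V at 320, with
     * strides 16 (Y) and 8 (U/V). */
    vp8_build_inter16x16_predictors_mb(xd, xd->predictor,
                                       &xd->predictor[256],
                                       &xd->predictor[320], 16, 8);

    /* Skip-recon path: predict straight into the destination frame,
     * which is what the deleted vp8_build_inter16x16_predictors_mb_s()
     * used to do with its own duplicated body. */
    vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                       xd->dst.u_buffer, xd->dst.v_buffer,
                                       xd->dst.y_stride, xd->dst.uv_stride);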
--- a/vp8/common/reconinter.h
+++ b/vp8/common/reconinter.h
@@ -13,7 +13,13 @@
 #define __INC_RECONINTER_H
 
 extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x);
-extern void vp8_build_inter16x16_predictors_mb_s(MACROBLOCKD *x);
+extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+                                               unsigned char *dst_y,
+                                               unsigned char *dst_u,
+                                               unsigned char *dst_v,
+                                               int dst_ystride,
+                                               int dst_uvstride);
+
 
 extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x);
 extern void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel);
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -119,7 +119,9 @@
     }
     else
     {
-        vp8_build_inter16x16_predictors_mb_s(xd);
+        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+                                           xd->dst.u_buffer, xd->dst.v_buffer,
+                                           xd->dst.y_stride, xd->dst.uv_stride);
     }
 }
 
@@ -221,6 +223,9 @@
                          build_intra_predictors_mby)(xd);
         } else {
             vp8_intra_prediction_down_copy(xd);
+
+
+
         }
     }
     else
@@ -232,6 +237,7 @@
     if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
     {
         BLOCKD *b = &xd->block[24];
+
         DEQUANT_INVOKE(&pbi->dequant, block)(b);
 
         /* do 2nd order transform on the dc block */
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -118,7 +118,7 @@
         xd->mode_info_context->mbmi.mb_skip_coeff = 1;
 
         /*mt_skip_recon_mb(pbi, xd, mb_row, mb_col);*/
-        if (xd->frame_type == KEY_FRAME  ||  xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
+        if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
         {
             vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col);
             vp8mt_build_intra_predictors_mby_s(pbi, xd, mb_row, mb_col);
@@ -125,7 +125,9 @@
         }
         else
         {
-            vp8_build_inter16x16_predictors_mb_s(xd);
+            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+                                               xd->dst.u_buffer, xd->dst.v_buffer,
+                                               xd->dst.y_stride, xd->dst.uv_stride);
         }
         return;
     }
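Dropping the explicit frame_type test here is a simplification rather than a behavior change, assuming the usual VP8 invariant that every macroblock on a key frame is intra-coded: on a KEY_FRAME, mbmi.ref_frame is always INTRA_FRAME, so the remaining test already covers that case.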
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1391,7 +1391,10 @@
 
         }
         else
-            vp8_build_inter16x16_predictors_mb_s(xd);
+            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+                                           xd->dst.u_buffer, xd->dst.v_buffer,
+                                           xd->dst.y_stride, xd->dst.uv_stride);
+
     }
 
     if (!x->skip)
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -1497,88 +1497,57 @@
     return bsi.segment_rd;
 }
 
-static void swap(int *x,int *y)
+static void insertsortmv(int arr[], int len)
 {
-   int tmp;
+    int i, j, k;
 
-   tmp = *x;
-   *x = *y;
-   *y = tmp;
-}
+    for ( i = 1 ; i <= len-1 ; i++ )
+    {
+        for ( j = 0 ; j < i ; j++ )
+        {
+            if ( arr[j] > arr[i] )
+            {
+                int temp;
 
-static void quicksortmv(int arr[],int left, int right)
-{
-   int lidx,ridx,pivot;
+                temp = arr[i];
 
-   lidx = left;
-   ridx = right;
+                for ( k = i; k >j; k--)
+                    arr[k] = arr[k - 1] ;
 
-   if( left < right)
-   {
-      pivot = (left + right)/2;
-
-      while(lidx <=pivot && ridx >=pivot)
-      {
-          while(arr[lidx] < arr[pivot] && lidx <= pivot)
-              lidx++;
-          while(arr[ridx] > arr[pivot] && ridx >= pivot)
-              ridx--;
-          swap(&arr[lidx], &arr[ridx]);
-          lidx++;
-          ridx--;
-          if(lidx-1 == pivot)
-          {
-              ridx++;
-              pivot = ridx;
-          }
-          else if(ridx+1 == pivot)
-          {
-              lidx--;
-              pivot = lidx;
-          }
-      }
-      quicksortmv(arr, left, pivot - 1);
-      quicksortmv(arr, pivot + 1, right);
-   }
+                arr[j] = temp ;
+            }
+        }
+    }
 }
 
-static void quicksortsad(int arr[],int idx[], int left, int right)
+static void insertsortsad(int arr[],int idx[], int len)
 {
-   int lidx,ridx,pivot;
+    int i, j, k;
 
-   lidx = left;
-   ridx = right;
+    for ( i = 1 ; i <= len-1 ; i++ )
+    {
+        for ( j = 0 ; j < i ; j++ )
+        {
+            if ( arr[j] > arr[i] )
+            {
+                int temp, tempi;
 
-   if( left < right)
-   {
-      pivot = (left + right)/2;
+                temp = arr[i];
+                tempi = idx[i];
 
-      while(lidx <=pivot && ridx >=pivot)
-      {
-          while(arr[lidx] < arr[pivot] && lidx <= pivot)
-              lidx++;
-          while(arr[ridx] > arr[pivot] && ridx >= pivot)
-              ridx--;
-          swap(&arr[lidx], &arr[ridx]);
-          swap(&idx[lidx], &idx[ridx]);
-          lidx++;
-          ridx--;
-          if(lidx-1 == pivot)
-          {
-              ridx++;
-              pivot = ridx;
-          }
-          else if(ridx+1 == pivot)
-          {
-              lidx--;
-              pivot = lidx;
-          }
-      }
-      quicksortsad(arr, idx, left, pivot - 1);
-      quicksortsad(arr, idx, pivot + 1, right);
-   }
-}
+                for ( k = i; k >j; k--)
+                {
+                    arr[k] = arr[k - 1] ;
+                    idx[k] = idx[k - 1];
+                }
 
+                arr[j] = temp ;
+                idx[j] = tempi;
+            }
+        }
+    }
+}
+
 //The improved MV prediction
 void vp8_mv_pred
 (
@@ -1712,8 +1681,8 @@
                 mvy[i] = near_mvs[i].as_mv.col;
             }
 
-            quicksortmv (mvx, 0, vcnt-1);
-            quicksortmv (mvy, 0, vcnt-1);
+            insertsortmv(mvx, vcnt);
+            insertsortmv(mvy, vcnt);
             mv.as_mv.row = mvx[vcnt/2];
             mv.as_mv.col = mvy[vcnt/2];
 
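The sorted component arrays are consumed immediately as a component-wise median: mvx[vcnt/2] and mvy[vcnt/2] pick the middle row and column values of the candidate vectors. With vcnt this small, a full sort is a cheap way to select that median.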
@@ -1776,10 +1745,10 @@
 
     if(cpi->common.last_frame_type != KEY_FRAME)
     {
-        quicksortsad(near_sad, near_sadidx, 0, 7);
+        insertsortsad(near_sad, near_sadidx, 8);
     }else
     {
-        quicksortsad(near_sad, near_sadidx, 0, 2);
+        insertsortsad(near_sad, near_sadidx, 3);
     }
 }
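The sorts replaced above only ever see tiny arrays — the call sites pass 8 or 3 SAD entries, and at most vcnt motion-vector components — so a simple insertion sort avoids the recursion and pivot bookkeeping of the old quicksort at no practical cost. A standalone sketch of the index-tracking variant, with the function body as in the patch and a hypothetical test driver:

    #include <stdio.h>

    /* Insertion sort that keeps idx[] in step with arr[], as in the patch:
     * for each element, find the first larger prefix entry, shift the
     * prefix right by one, and drop the element (and its index) in. */
    static void insertsortsad(int arr[], int idx[], int len)
    {
        int i, j, k;

        for (i = 1; i <= len - 1; i++)
        {
            for (j = 0; j < i; j++)
            {
                if (arr[j] > arr[i])
                {
                    int temp  = arr[i];
                    int tempi = idx[i];

                    for (k = i; k > j; k--)
                    {
                        arr[k] = arr[k - 1];
                        idx[k] = idx[k - 1];
                    }

                    arr[j] = temp;
                    idx[j] = tempi;
                }
            }
        }
    }

    int main(void)
    {
        /* Hypothetical SADs for the eight near candidates. */
        int near_sad[8]    = { 120, 35, 77, 35, 900, 12, 64, 300 };
        int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        int i;

        insertsortsad(near_sad, near_sadidx, 8);

        /* SADs come out ascending; idx[i] records which candidate
         * each value came from. */
        for (i = 0; i < 8; i++)
            printf("sad=%d idx=%d\n", near_sad[i], near_sadidx[i]);

        return 0;
    }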