ref: 18433aef17d9c4674de98a329e4e46e5677f846e
parent: 1cc406ab4a16549fc3b44c0b20f7e81dfc2b649c
author: Ronald S. Bultje <rbultje@google.com>
date: Wed Apr 18 09:51:58 EDT 2012
Compound prediction for splitmv macroblocks.

Each partition of a SPLITMV-coded macroblock now carries a second motion
vector into a second reference frame; the second-reference predictor is
averaged into the first ((a + b + 1) >> 1), as was already done for the
whole-macroblock compound modes.

Change-Id: I0af3395500b1cb0ed629249eb6636a0c9322cb18
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -168,7 +168,10 @@
B_PREDICTION_MODE second;
#endif
} as_mode;
- int_mv mv;
+ struct {
+ int_mv first;
+ int_mv second;
+ } as_mv;
};
typedef enum
@@ -222,6 +225,7 @@
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
unsigned char **base_pre;
+ unsigned char **base_second_pre;
int pre;
int pre_stride;
@@ -316,6 +320,8 @@
vp8_subpix_fn_t subpixel_predict8x4;
vp8_subpix_fn_t subpixel_predict8x8;
vp8_subpix_fn_t subpixel_predict16x16;
+ vp8_subpix_fn_t subpixel_predict_avg;
+ vp8_subpix_fn_t subpixel_predict_avg8x4;
vp8_subpix_fn_t subpixel_predict_avg8x8;
vp8_subpix_fn_t subpixel_predict_avg16x16;
#if CONFIG_HIGH_PRECISION_MV
--- a/vp8/common/debugmodes.c
+++ b/vp8/common/debugmodes.c
@@ -148,7 +148,9 @@
{
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
- fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row, mi[mb_index].bmi[bindex].mv.as_mv.col);
+ fprintf(mvs, "%3d:%-3d ",
+ mi[mb_index].bmi[bindex].as_mv.first.as_mv.row,
+ mi[mb_index].bmi[bindex].as_mv.first.as_mv.col);
}
--- a/vp8/common/filter.c
+++ b/vp8/common/filter.c
@@ -456,6 +456,56 @@
filter_block2d_6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter);
}
+
+/*
+ * filter_block2d_avg_6() is identical to filter_block2d_6(), except that
+ * instead of storing the 6-tap filter result directly in the output
+ * buffer, it averages the filter result with the content already present
+ * in the output ((filter_result + dest + 1) >> 1) and stores that in the
+ * output instead.
+ */
+static void filter_block2d_avg_6
+(
+ unsigned char *src_ptr,
+ unsigned char *output_ptr,
+ unsigned int src_pixels_per_line,
+ int output_pitch,
+ const short *HFilter,
+ const short *VFilter
+)
+{
+ int FData[(3+Interp_Extend*2)*4]; /* Temp data buffer used in filtering */
+
+ /* First filter 1-D horizontally... */
+ filter_block2d_first_pass_6(src_ptr - ((Interp_Extend-1) * src_pixels_per_line),
+ FData, src_pixels_per_line, 1,
+ 3+Interp_Extend*2, 4, HFilter);
+
+ /* then filter vertically... */
+ filter_block2d_second_pass_avg_6(FData + 4*(Interp_Extend-1), output_ptr,
+ output_pitch, 4, 4, 4, 4, VFilter);
+}
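
filter_block2d_second_pass_avg_6() is not shown in this hunk; a sketch of the
intended difference, assuming it mirrors filter_block2d_second_pass_6() apart
from the final store:

/* In the plain second pass, the 6-tap vertical filter result Temp is
 * rounded, shifted and clamped to [0, 255], then stored:
 *     output_ptr[j] = (unsigned char)Temp;
 * The _avg variant instead rounds it into the pixel already present:
 *     output_ptr[j] = (unsigned char)((output_ptr[j] + Temp + 1) >> 1);
 */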
+
+void vp8_sixtap_predict_avg_c
+(
+ unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char *dst_ptr,
+ int dst_pitch
+)
+{
+ const short *HFilter;
+ const short *VFilter;
+
+ HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+
+ filter_block2d_avg_6(src_ptr, dst_ptr, src_pixels_per_line,
+ dst_pitch, HFilter, VFilter);
+}
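
This hooks into the RTCD vtable as sixtap_avg4x4 and is used for
second-reference 4x4 prediction; the wiring elsewhere in this patch:

    xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg4x4);
    /* ... */
    vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg);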
+
void vp8_sixtap_predict8x8_c
(
unsigned char *src_ptr,
@@ -1364,6 +1414,26 @@
#endif
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4);
+}
+
+void vp8_bilinear_predict_avg4x4_c
+(
+ unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char *dst_ptr,
+ int dst_pitch
+)
+{
+ const short *HFilter;
+ const short *VFilter;
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line,
+ dst_pitch, HFilter, VFilter, 4, 4);
}
void vp8_bilinear_predict8x8_c
--- a/vp8/common/findnearmv.h
+++ b/vp8/common/findnearmv.h
@@ -102,9 +102,24 @@
b += 4;
}
- return (cur_mb->bmi + b - 1)->mv.as_int;
+ return (cur_mb->bmi + b - 1)->as_mv.first.as_int;
}
+static int left_block_second_mv(const MODE_INFO *cur_mb, int b)
+{
+ if (!(b & 3))
+ {
+ /* On L edge, get from MB to left of us */
+ --cur_mb;
+
+ if (cur_mb->mbmi.mode != SPLITMV)
+ return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.second_mv.as_int : cur_mb->mbmi.mv.as_int;
+ b += 4;
+ }
+
+ return cur_mb->mbmi.second_ref_frame ? (cur_mb->bmi + b - 1)->as_mv.second.as_int : (cur_mb->bmi + b - 1)->as_mv.first.as_int;
+}
+
static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
{
if (!(b >> 2))
@@ -117,8 +132,24 @@
b += 16;
}
- return (cur_mb->bmi + b - 4)->mv.as_int;
+ return (cur_mb->bmi + b - 4)->as_mv.first.as_int;
}
+
+static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
+{
+ if (!(b >> 2))
+ {
+ /* On top edge, get from MB above us */
+ cur_mb -= mi_stride;
+
+ if (cur_mb->mbmi.mode != SPLITMV)
+ return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.second_mv.as_int : cur_mb->mbmi.mv.as_int;
+ b += 16;
+ }
+
+ return cur_mb->mbmi.second_ref_frame ? (cur_mb->bmi + b - 4)->as_mv.second.as_int : (cur_mb->bmi + b - 4)->as_mv.first.as_int;
+}
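
Both helpers index the 16 Y sub-blocks of a macroblock as a 4x4 raster, just
like the existing left_block_mv()/above_block_mv():

/*    0  1  2  3        b & 3  == 0 : left neighbour is in the MB to the left
 *    4  5  6  7        b >> 2 == 0 : above neighbour is in the MB above
 *    8  9 10 11        otherwise: left is b - 1, above is b - 4
 *   12 13 14 15
 * A non-SPLITMV neighbour MB contributes its whole-MB vector instead,
 * using second_mv only when that MB is itself compound-predicted. */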
+
static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b)
{
if (!(b & 3))
--- a/vp8/common/generic/systemdependent.c
+++ b/vp8/common/generic/systemdependent.c
@@ -99,6 +99,7 @@
rtcd->subpix.sixtap_avg8x8 = vp8_sixtap_predict_avg8x8_c;
rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_c;
rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_c;
+ rtcd->subpix.sixtap_avg4x4 = vp8_sixtap_predict_avg_c;
rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_c;
rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_c;
rtcd->subpix.bilinear_avg16x16 = vp8_bilinear_predict_avg16x16_c;
@@ -105,6 +106,7 @@
rtcd->subpix.bilinear_avg8x8 = vp8_bilinear_predict_avg8x8_c;
rtcd->subpix.bilinear8x4 = vp8_bilinear_predict8x4_c;
rtcd->subpix.bilinear4x4 = vp8_bilinear_predict4x4_c;
+ rtcd->subpix.bilinear_avg4x4 = vp8_bilinear_predict_avg4x4_c;
rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_c;
rtcd->loopfilter.normal_b_v = vp8_loop_filter_bv_c;
--- a/vp8/common/mbpitch.c
+++ b/vp8/common/mbpitch.c
@@ -22,6 +22,7 @@
BLOCKD *b,
int mv_stride,
unsigned char **base,
+ unsigned char **base2,
int Stride,
int offset,
BLOCKSET bs
@@ -39,6 +40,7 @@
b->pre_stride = Stride;
b->pre = offset;
b->base_pre = base;
+ b->base_second_pre = base2;
}
}
@@ -49,6 +51,7 @@
int block;
unsigned char **y, **u, **v;
+ unsigned char **y2, **u2, **v2;
if (bs == DEST)
{
@@ -61,20 +64,24 @@
y = &x->pre.y_buffer;
u = &x->pre.u_buffer;
v = &x->pre.v_buffer;
+
+ y2 = &x->second_pre.y_buffer;
+ u2 = &x->second_pre.u_buffer;
+ v2 = &x->second_pre.v_buffer;
}
for (block = 0; block < 16; block++) /* y blocks */
{
- setup_block(&x->block[block], x->dst.y_stride, y, x->dst.y_stride,
+ setup_block(&x->block[block], x->dst.y_stride, y, y2, x->dst.y_stride,
(block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs);
}
for (block = 16; block < 20; block++) /* U and V blocks */
{
- setup_block(&x->block[block], x->dst.uv_stride, u, x->dst.uv_stride,
+ setup_block(&x->block[block], x->dst.uv_stride, u, u2, x->dst.uv_stride,
((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
- setup_block(&x->block[block+4], x->dst.uv_stride, v, x->dst.uv_stride,
+ setup_block(&x->block[block+4], x->dst.uv_stride, v, v2, x->dst.uv_stride,
((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
}
}
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -174,21 +174,23 @@
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
+ int_mv mv;
ptr_base = *(d->base_pre);
+ mv.as_int = d->bmi.as_mv.first.as_int;
- if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
+ if (mv.as_mv.row & 7 || mv.as_mv.col & 7)
{
- ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
+ ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
#if CONFIG_SIXTEENTH_SUBPEL_UV
- sppf(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7)<<1, (d->bmi.mv.as_mv.row & 7)<<1, pred_ptr, pitch);
+ sppf(ptr, d->pre_stride, (mv.as_mv.col & 7)<<1, (mv.as_mv.row & 7)<<1, pred_ptr, pitch);
#else
- sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+ sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
#endif
}
else
{
- ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
+ ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
ptr = ptr_base;
for (r = 0; r < 4; r++)
@@ -207,21 +209,66 @@
}
}
+/*
+ * Similar to vp8_build_inter_predictors_b(), but instead of storing the
+ * results in d->predictor, we average the contents of d->predictor (which
+ * come from an earlier call to vp8_build_inter_predictors_b()) with the
+ * predictor of the second reference frame / motion vector.
+ */
+void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
+{
+ int r;
+ unsigned char *ptr_base;
+ unsigned char *ptr;
+ unsigned char *pred_ptr = d->predictor;
+ int_mv mv;
+
+ ptr_base = *(d->base_second_pre);
+ mv.as_int = d->bmi.as_mv.second.as_int;
+
+ if (mv.as_mv.row & 7 || mv.as_mv.col & 7)
+ {
+ ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+#if CONFIG_SIXTEENTH_SUBPEL_UV
+ sppf(ptr, d->pre_stride, (mv.as_mv.col & 7)<<1, (mv.as_mv.row & 7)<<1, pred_ptr, pitch);
+#else
+ sppf(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
+#endif
+ }
+ else
+ {
+ ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+ ptr = ptr_base;
+
+ for (r = 0; r < 4; r++)
+ {
+ pred_ptr[0] = (pred_ptr[0] + ptr[0] + 1) >> 1;
+ pred_ptr[1] = (pred_ptr[1] + ptr[1] + 1) >> 1;
+ pred_ptr[2] = (pred_ptr[2] + ptr[2] + 1) >> 1;
+ pred_ptr[3] = (pred_ptr[3] + ptr[3] + 1) >> 1;
+ pred_ptr += pitch;
+ ptr += d->pre_stride;
+ }
+ }
+}
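
The calling pattern, used verbatim further down in this file: build the
first-reference prediction, then average in the second reference when
compound prediction is active:

    vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
    if (x->mode_info_context->mbmi.second_ref_frame)
        vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg);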
+
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
+ int_mv mv;
ptr_base = *(d->base_pre);
- ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
+ mv.as_int = d->bmi.as_mv.first.as_int;
+ ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
- if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
+ if (mv.as_mv.row & 7 || mv.as_mv.col & 7)
{
#if CONFIG_SIXTEENTH_SUBPEL_UV
- x->subpixel_predict8x8(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7)<<1, (d->bmi.mv.as_mv.row & 7)<<1, pred_ptr, pitch);
+ x->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7)<<1, (mv.as_mv.row & 7)<<1, pred_ptr, pitch);
#else
- x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+ x->subpixel_predict8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
#endif
}
else
@@ -230,21 +277,54 @@
}
}
+/*
+ * Similar to build_inter_predictors4b(), but instead of storing the
+ * results in d->predictor, we average the contents of d->predictor (which
+ * come from an earlier call to build_inter_predictors4b()) with the
+ * predictor of the second reference frame / motion vector.
+ */
+static void build_2nd_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
+{
+ unsigned char *ptr_base;
+ unsigned char *ptr;
+ unsigned char *pred_ptr = d->predictor;
+ int_mv mv;
+
+ ptr_base = *(d->base_second_pre);
+ mv.as_int = d->bmi.as_mv.second.as_int;
+ ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+
+ if (mv.as_mv.row & 7 || mv.as_mv.col & 7)
+ {
+#if CONFIG_SIXTEENTH_SUBPEL_UV
+ x->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7)<<1, (mv.as_mv.row & 7)<<1, pred_ptr, pitch);
+#else
+ x->subpixel_predict_avg8x8(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
+#endif
+ }
+ else
+ {
+ RECON_INVOKE(&x->rtcd->recon, avg8x8)(ptr, d->pre_stride, pred_ptr, pitch);
+ }
+}
+
static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = d->predictor;
+ int_mv mv;
ptr_base = *(d->base_pre);
- ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
+ mv.as_int = d->bmi.as_mv.first.as_int;
+ ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
- if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
+ if (mv.as_mv.row & 7 || mv.as_mv.col & 7)
{
#if CONFIG_SIXTEENTH_SUBPEL_UV
- x->subpixel_predict8x4(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7)<<1, (d->bmi.mv.as_mv.row & 7)<<1, pred_ptr, pitch);
+ x->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7)<<1, (mv.as_mv.row & 7)<<1, pred_ptr, pitch);
#else
- x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
+ x->subpixel_predict8x4(ptr, d->pre_stride, mv.as_mv.col & 7, mv.as_mv.row & 7, pred_ptr, pitch);
#endif
}
else
@@ -322,33 +402,72 @@
int yoffset = i * 8 + j * 2;
int uoffset = 16 + i * 2 + j;
int voffset = 20 + i * 2 + j;
-
int temp;
- temp = x->block[yoffset ].bmi.mv.as_mv.row
- + x->block[yoffset+1].bmi.mv.as_mv.row
- + x->block[yoffset+4].bmi.mv.as_mv.row
- + x->block[yoffset+5].bmi.mv.as_mv.row;
+ temp = x->block[yoffset ].bmi.as_mv.first.as_mv.row
+ + x->block[yoffset+1].bmi.as_mv.first.as_mv.row
+ + x->block[yoffset+4].bmi.as_mv.first.as_mv.row
+ + x->block[yoffset+5].bmi.as_mv.first.as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
- x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
+ x->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & x->fullpixel_mask;
- temp = x->block[yoffset ].bmi.mv.as_mv.col
- + x->block[yoffset+1].bmi.mv.as_mv.col
- + x->block[yoffset+4].bmi.mv.as_mv.col
- + x->block[yoffset+5].bmi.mv.as_mv.col;
+ temp = x->block[yoffset ].bmi.as_mv.first.as_mv.col
+ + x->block[yoffset+1].bmi.as_mv.first.as_mv.col
+ + x->block[yoffset+4].bmi.as_mv.first.as_mv.col
+ + x->block[yoffset+5].bmi.as_mv.first.as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
- x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
+ x->block[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & x->fullpixel_mask;
- x->block[voffset].bmi.mv.as_mv.row =
- x->block[uoffset].bmi.mv.as_mv.row ;
- x->block[voffset].bmi.mv.as_mv.col =
- x->block[uoffset].bmi.mv.as_mv.col ;
+ x->block[voffset].bmi.as_mv.first.as_mv.row =
+ x->block[uoffset].bmi.as_mv.first.as_mv.row ;
+ x->block[voffset].bmi.as_mv.first.as_mv.col =
+ x->block[uoffset].bmi.as_mv.first.as_mv.col ;
+
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ temp = x->block[yoffset ].bmi.as_mv.second.as_mv.row
+ + x->block[yoffset+1].bmi.as_mv.second.as_mv.row
+ + x->block[yoffset+4].bmi.as_mv.second.as_mv.row
+ + x->block[yoffset+5].bmi.as_mv.second.as_mv.row;
+
+ if (temp < 0)
+ {
+ temp -= 4;
+ }
+ else
+ {
+ temp += 4;
+ }
+
+ x->block[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & x->fullpixel_mask;
+
+ temp = x->block[yoffset ].bmi.as_mv.second.as_mv.col
+ + x->block[yoffset+1].bmi.as_mv.second.as_mv.col
+ + x->block[yoffset+4].bmi.as_mv.second.as_mv.col
+ + x->block[yoffset+5].bmi.as_mv.second.as_mv.col;
+
+ if (temp < 0)
+ {
+ temp -= 4;
+ }
+ else
+ {
+ temp += 4;
+ }
+
+ x->block[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & x->fullpixel_mask;
+
+ x->block[voffset].bmi.as_mv.second.as_mv.row =
+ x->block[uoffset].bmi.as_mv.second.as_mv.row ;
+ x->block[voffset].bmi.as_mv.second.as_mv.col =
+ x->block[uoffset].bmi.as_mv.second.as_mv.col ;
+ }
}
}
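
The U/V vector is the average of the four covering Y vectors, halved because
chroma is subsampled 2x in each dimension -- hence (sum of four MVs) / 8, with
the +/-4 bias turning C's truncating division into round-to-nearest. A worked
example:

/* Y rows 3, 5, 6, 6 (1/8-pel units):
 *     temp = 3 + 5 + 6 + 6 = 20;   temp += 4;   20 -> 24;   24 / 8 = 3
 * and a negative case:
 *     temp = -20;   temp -= 4;   -20 -> -24;   -24 / 8 = -3
 * fullpixel_mask then clears the sub-pel bits when the frame is coded
 * at full-pel precision. */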
@@ -357,7 +476,7 @@
BLOCKD *d0 = &x->block[i];
BLOCKD *d1 = &x->block[i+1];
- if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+ if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(x, d0, 8);
else
{
@@ -364,6 +483,12 @@
vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
}
+
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg);
+ vp8_build_2nd_inter_predictors_b(d1, 8, x->subpixel_predict_avg);
+ }
}
}
@@ -622,10 +747,17 @@
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
{
- clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
- clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
- clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
- clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[ 0].bmi.as_mv.first.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[ 2].bmi.as_mv.first.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[ 8].bmi.as_mv.first.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[10].bmi.as_mv.first.as_mv, x);
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ clamp_mv_to_umv_border(&x->block[ 0].bmi.as_mv.second.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[ 2].bmi.as_mv.second.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[ 8].bmi.as_mv.second.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[10].bmi.as_mv.second.as_mv, x);
+ }
}
@@ -633,6 +765,14 @@
build_inter_predictors4b(x, &x->block[ 2], 16);
build_inter_predictors4b(x, &x->block[ 8], 16);
build_inter_predictors4b(x, &x->block[10], 16);
+
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ build_2nd_inter_predictors4b(x, &x->block[ 0], 16);
+ build_2nd_inter_predictors4b(x, &x->block[ 2], 16);
+ build_2nd_inter_predictors4b(x, &x->block[ 8], 16);
+ build_2nd_inter_predictors4b(x, &x->block[10], 16);
+ }
}
else
{
@@ -646,11 +786,16 @@
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
{
- clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
- clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[i+0].bmi.as_mv.first.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[i+1].bmi.as_mv.first.as_mv, x);
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ clamp_mv_to_umv_border(&x->block[i+0].bmi.as_mv.second.as_mv, x);
+ clamp_mv_to_umv_border(&x->block[i+1].bmi.as_mv.second.as_mv, x);
+ }
}
- if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+ if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(x, d0, 16);
else
{
@@ -658,8 +803,12 @@
vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
}
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ vp8_build_2nd_inter_predictors_b(d0, 16, x->subpixel_predict_avg);
+ vp8_build_2nd_inter_predictors_b(d1, 16, x->subpixel_predict_avg);
+ }
}
-
}
for (i = 16; i < 24; i += 2)
@@ -667,7 +816,7 @@
BLOCKD *d0 = &x->block[i];
BLOCKD *d1 = &x->block[i+1];
- if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+ if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
build_inter_predictors2b(x, d0, 8);
else
{
@@ -674,6 +823,12 @@
vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
}
+
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg);
+ vp8_build_2nd_inter_predictors_b(d1, 8, x->subpixel_predict_avg);
+ }
}
}
@@ -692,36 +847,82 @@
int temp;
- temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
- + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
- + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
- + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
+ temp = x->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
- x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
+ x->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & x->fullpixel_mask;
- temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
- + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
- + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
- + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
+ temp = x->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
- x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
+ x->block[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & x->fullpixel_mask;
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
+ clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.first.as_mv, x);
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
+ clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.first.as_mv, x);
- x->block[voffset].bmi.mv.as_mv.row =
- x->block[uoffset].bmi.mv.as_mv.row ;
- x->block[voffset].bmi.mv.as_mv.col =
- x->block[uoffset].bmi.mv.as_mv.col ;
+ x->block[voffset].bmi.as_mv.first.as_mv.row =
+ x->block[uoffset].bmi.as_mv.first.as_mv.row ;
+ x->block[voffset].bmi.as_mv.first.as_mv.col =
+ x->block[uoffset].bmi.as_mv.first.as_mv.col ;
+
+ if (x->mode_info_context->mbmi.second_ref_frame)
+ {
+ temp = x->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row
+ + x->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row;
+
+ if (temp < 0)
+ {
+ temp -= 4;
+ }
+ else
+ {
+ temp += 4;
+ }
+
+ x->block[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & x->fullpixel_mask;
+
+ temp = x->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col
+ + x->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col;
+
+ if (temp < 0)
+ {
+ temp -= 4;
+ }
+ else
+ {
+ temp += 4;
+ }
+
+ x->block[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & x->fullpixel_mask;
+
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs)
+ clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.second.as_mv, x);
+
+ if (x->mode_info_context->mbmi.need_to_clamp_mvs)
+ clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.second.as_mv, x);
+
+ x->block[voffset].bmi.as_mv.second.as_mv.row =
+ x->block[uoffset].bmi.as_mv.second.as_mv.row ;
+ x->block[voffset].bmi.as_mv.second.as_mv.col =
+ x->block[uoffset].bmi.as_mv.second.as_mv.col ;
+ }
}
}
}
--- a/vp8/common/reconinter.h
+++ b/vp8/common/reconinter.h
@@ -29,6 +29,7 @@
extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x);
extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf);
+extern void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf);
extern void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x);
extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x);
--- a/vp8/common/subpixel.h
+++ b/vp8/common/subpixel.h
@@ -53,6 +53,11 @@
#endif
extern prototype_subpixel_predict(vp8_subpix_sixtap4x4);
+#ifndef vp8_subpix_sixtap_avg4x4
+#define vp8_subpix_sixtap_avg4x4 vp8_sixtap_predict_avg_c
+#endif
+extern prototype_subpixel_predict(vp8_subpix_sixtap_avg4x4);
+
#if CONFIG_ENHANCED_INTERP
#ifndef vp8_subpix_eighttap16x16
#define vp8_subpix_eighttap16x16 vp8_eighttap_predict16x16_c
@@ -145,6 +150,11 @@
#endif
extern prototype_subpixel_predict(vp8_subpix_bilinear4x4);
+#ifndef vp8_subpix_bilinear_avg4x4
+#define vp8_subpix_bilinear_avg4x4 vp8_bilinear_predict_avg4x4_c
+#endif
+extern prototype_subpixel_predict(vp8_subpix_bilinear_avg4x4);
+
typedef prototype_subpixel_predict((*vp8_subpix_fn_t));
typedef struct
{
@@ -168,6 +178,7 @@
vp8_subpix_fn_t sixtap_avg8x8;
vp8_subpix_fn_t sixtap8x4;
vp8_subpix_fn_t sixtap4x4;
+ vp8_subpix_fn_t sixtap_avg4x4;
vp8_subpix_fn_t bilinear16x16;
vp8_subpix_fn_t bilinear8x8;
vp8_subpix_fn_t bilinear_avg16x16;
@@ -174,6 +185,7 @@
vp8_subpix_fn_t bilinear_avg8x8;
vp8_subpix_fn_t bilinear8x4;
vp8_subpix_fn_t bilinear4x4;
+ vp8_subpix_fn_t bilinear_avg4x4;
} vp8_subpix_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -730,6 +730,7 @@
{
int rct[4];
int_mv nearest, nearby, best_mv;
+ int_mv nearest_second, nearby_second, best_mv_second;
vp8_prob mv_ref_p [VP8_MVREFS-1];
vp8_find_near_mvs(xd, mi,
@@ -751,6 +752,31 @@
vp8_accum_mv_refs(&pbi->common, mbmi->mode, rct);
}
+ if ( cm->comp_pred_mode == COMP_PREDICTION_ONLY ||
+ (cm->comp_pred_mode == HYBRID_PREDICTION &&
+ vp8_read(bc, get_pred_prob( cm, xd, PRED_COMP ))) )
+ {
+ /* Since we have 3 reference frames, there are only 3 unique
+ * combinations of 2 different reference frames (A-G, G-L or
+ * A-L). In the bitstream, we use this to derive the second
+ * reference frame from the first one: it is the next one in
+ * the enumerator, wrapping back to the first reference frame
+ * once we pass the number of reference frames. */
+ mbmi->second_ref_frame = mbmi->ref_frame + 1;
+ if (mbmi->second_ref_frame == 4)
+ mbmi->second_ref_frame = 1;
+
+ vp8_find_near_mvs(xd, mi,
+ prev_mi,
+ &nearest_second, &nearby_second, &best_mv_second, rct,
+ mbmi->second_ref_frame, pbi->common.ref_frame_sign_bias);
+ }
+ else
+ {
+ mbmi->second_ref_frame = 0;
+ }
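
With INTRA_FRAME = 0, LAST_FRAME = 1, GOLDEN_FRAME = 2 and ALTREF_FRAME = 3,
this works out to the three pairs:

/* ref_frame == LAST_FRAME   (1)  ->  second_ref_frame == GOLDEN_FRAME (2)
 * ref_frame == GOLDEN_FRAME (2)  ->  second_ref_frame == ALTREF_FRAME (3)
 * ref_frame == ALTREF_FRAME (3)  ->  second_ref_frame == LAST_FRAME   (1), wrapping at 4 */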
+
mbmi->uv_mode = DC_PRED;
switch (mbmi->mode)
{
@@ -764,17 +790,25 @@
mbmi->need_to_clamp_mvs = 0;
do /* for each subset j */
{
- int_mv leftmv, abovemv;
- int_mv blockmv;
+ int_mv leftmv, abovemv, second_leftmv, second_abovemv;
+ int_mv blockmv, secondmv;
int k; /* first block in subset j */
int mv_contz;
+ int blockmode;
+
k = vp8_mbsplit_offset[s][j];
leftmv.as_int = left_block_mv(mi, k);
abovemv.as_int = above_block_mv(mi, k, mis);
+ if (mbmi->second_ref_frame)
+ {
+ second_leftmv.as_int = left_block_second_mv(mi, k);
+ second_abovemv.as_int = above_block_second_mv(mi, k, mis);
+ }
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
+ blockmode = sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz]);
- switch (sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
+ switch (blockmode)
{
case NEW4X4:
#if CONFIG_HIGH_PRECISION_MV
@@ -785,6 +819,18 @@
read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
blockmv.as_mv.row += best_mv.as_mv.row;
blockmv.as_mv.col += best_mv.as_mv.col;
+
+ if (mbmi->second_ref_frame)
+ {
+#if CONFIG_HIGH_PRECISION_MV
+ if (xd->allow_high_precision_mv)
+ read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
+ else
+#endif
+ read_mv(bc, &secondmv.as_mv, (const MV_CONTEXT *) mvc);
+ secondmv.as_mv.row += best_mv_second.as_mv.row;
+ secondmv.as_mv.col += best_mv_second.as_mv.col;
+ }
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][3]++;
#endif
@@ -791,6 +837,8 @@
break;
case LEFT4X4:
blockmv.as_int = leftmv.as_int;
+ if (mbmi->second_ref_frame)
+ secondmv.as_int = second_leftmv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][0]++;
#endif
@@ -797,6 +845,8 @@
break;
case ABOVE4X4:
blockmv.as_int = abovemv.as_int;
+ if (mbmi->second_ref_frame)
+ secondmv.as_int = second_abovemv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][1]++;
#endif
@@ -803,6 +853,8 @@
break;
case ZERO4X4:
blockmv.as_int = 0;
+ if (mbmi->second_ref_frame)
+ secondmv.as_int = 0;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][2]++;
#endif
@@ -816,6 +868,14 @@
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
+ if (mbmi->second_ref_frame)
+ {
+ mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&secondmv,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ }
{
/* Fill (uniform) modes, mvs of jth subset.
@@ -827,7 +887,9 @@
fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
do {
- mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
+ mi->bmi[ *fill_offset].as_mv.first.as_int = blockmv.as_int;
+ if (mbmi->second_ref_frame)
+ mi->bmi[ *fill_offset].as_mv.second.as_int = secondmv.as_int;
fill_offset++;
}while (--fill_count);
}
@@ -836,7 +898,8 @@
while (++j < num_p);
}
- mv->as_int = mi->bmi[15].mv.as_int;
+ mv->as_int = mi->bmi[15].as_mv.first.as_int;
+ mbmi->second_mv.as_int = mi->bmi[15].as_mv.second.as_int;
break; /* done with SPLITMV */
@@ -845,7 +908,13 @@
/* Clip "next_nearest" so that it does not extend to far out of image */
vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
mb_to_top_edge, mb_to_bottom_edge);
- goto propagate_mv;
+ if (mbmi->second_ref_frame)
+ {
+ mbmi->second_mv.as_int = nearby_second.as_int;
+ vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ }
+ break;
case NEARESTMV:
mv->as_int = nearest.as_int;
@@ -852,11 +921,19 @@
/* Clip "next_nearest" so that it does not extend to far out of image */
vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
mb_to_top_edge, mb_to_bottom_edge);
- goto propagate_mv;
+ if (mbmi->second_ref_frame)
+ {
+ mbmi->second_mv.as_int = nearest_second.as_int;
+ vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
+ }
+ break;
case ZEROMV:
mv->as_int = 0;
- goto propagate_mv;
+ if (mbmi->second_ref_frame)
+ mbmi->second_mv.as_int = 0;
+ break;
case NEWMV:
#if CONFIG_HIGH_PRECISION_MV
@@ -878,39 +955,8 @@
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
-
- propagate_mv: /* same MV throughout */
-
- if ( cm->comp_pred_mode == COMP_PREDICTION_ONLY ||
- (cm->comp_pred_mode == HYBRID_PREDICTION &&
- vp8_read(bc, get_pred_prob( cm, xd, PRED_COMP ))) )
- {
- mbmi->second_ref_frame = mbmi->ref_frame + 1;
- if (mbmi->second_ref_frame == 4)
- mbmi->second_ref_frame = 1;
- }
if (mbmi->second_ref_frame)
{
- vp8_find_near_mvs(xd, mi,
- prev_mi,
- &nearest, &nearby, &best_mv, rct,
- (int)mbmi->second_ref_frame,
- pbi->common.ref_frame_sign_bias);
- switch (mbmi->mode) {
- case ZEROMV:
- mbmi->second_mv.as_int = 0;
- break;
- case NEARMV:
- mbmi->second_mv.as_int = nearby.as_int;
- vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
- mb_to_top_edge, mb_to_bottom_edge);
- break;
- case NEARESTMV:
- mbmi->second_mv.as_int = nearest.as_int;
- vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
- mb_to_top_edge, mb_to_bottom_edge);
- break;
- case NEWMV:
#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv)
read_mv_hp(bc, &mbmi->second_mv.as_mv,
@@ -918,19 +964,14 @@
else
#endif
read_mv(bc, &mbmi->second_mv.as_mv, (const MV_CONTEXT *) mvc);
- mbmi->second_mv.as_mv.row += best_mv.as_mv.row;
- mbmi->second_mv.as_mv.col += best_mv.as_mv.col;
- mbmi->need_to_clamp_secondmv = vp8_check_mv_bounds(&mbmi->second_mv,
+ mbmi->second_mv.as_mv.row += best_mv_second.as_mv.row;
+ mbmi->second_mv.as_mv.col += best_mv_second.as_mv.col;
+ mbmi->need_to_clamp_secondmv |= vp8_check_mv_bounds(&mbmi->second_mv,
mb_to_left_edge,
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
- break;
- default:
- break;
- }
}
-
break;
default:;
#if CONFIG_DEBUG
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -714,6 +714,7 @@
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg16x16);
}
@@ -747,6 +748,7 @@
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg16x16);
}
@@ -1138,6 +1140,7 @@
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg16x16);
}
@@ -1165,6 +1168,7 @@
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg16x16);
}
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -963,7 +963,7 @@
}
else
{
- int_mv best_mv;
+ int_mv best_mv, best_second_mv;
int ct[4];
vp8_prob mv_ref_p [VP8_MVREFS-1];
@@ -975,7 +975,6 @@
rf, cpi->common.ref_frame_sign_bias);
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
-
#ifdef ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
@@ -992,7 +991,26 @@
vp8_accum_mv_refs(&cpi->common, mode, ct);
}
+ if (mi->second_ref_frame &&
+ (mode == NEWMV || mode == SPLITMV))
{
+ int_mv n1, n2;
+
+ vp8_find_near_mvs(xd, m,
+ prev_m,
+ &n1, &n2, &best_second_mv, ct,
+ mi->second_ref_frame, cpi->common.ref_frame_sign_bias);
+ }
+
+ // signal whether this MB uses compound prediction, if not already
+ // specified at the frame/segment level; written once here, ahead of
+ // the mode-specific MVs, to match the decoder, which now reads this
+ // flag before parsing the mode's MVs
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
+ {
+ vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
+ get_pred_prob( pc, xd, PRED_COMP ) );
+ }
+
+ {
switch (mode) /* new, split require MVs */
{
case NEWMV:
@@ -1007,30 +1025,16 @@
#endif
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w,
- mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob( pc, xd, PRED_COMP ) );
- }
if (mi->second_ref_frame)
{
- const int second_rf = mi->second_ref_frame;
- int_mv n1, n2;
- int ct[4];
- vp8_find_near_mvs(xd, m,
- prev_m,
- &n1, &n2, &best_mv,
- ct, second_rf,
- cpi->common.ref_frame_sign_bias);
#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv)
write_mv_hp(w, &mi->second_mv.as_mv,
- &best_mv, mvc_hp);
+ &best_second_mv, mvc_hp);
else
#endif
- write_mv(w, &mi->second_mv.as_mv, &best_mv,
- mvc);
+ write_mv(w, &mi->second_mv.as_mv,
+ &best_second_mv, mvc);
}
break;
case SPLITMV:
@@ -1082,6 +1086,18 @@
#endif
write_mv(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT *) mvc);
+
+ if (mi->second_ref_frame)
+ {
+#if CONFIG_HIGH_PRECISION_MV
+ if (xd->allow_high_precision_mv)
+ write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
+ else
+#endif
+ write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT *) mvc);
+ }
}
}
while (++j < cpi->mb.partition_info->count);
@@ -1088,12 +1104,6 @@
}
break;
default:
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w,
- mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob( pc, xd, PRED_COMP ) );
- }
break;
}
}
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -45,6 +45,7 @@
short zbin_extra;
unsigned char **base_src;
+ unsigned char **base_second_src;
int src;
int src_stride;
@@ -60,6 +61,7 @@
{
B_PREDICTION_MODE mode;
int_mv mv;
+ int_mv second_mv;
} bmi[16];
} PARTITION_INFO;
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -428,6 +428,8 @@
xd->mode_info_context->mbmi.mv.as_int =
x->partition_info->bmi[15].mv.as_int;
+ xd->mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
}
if (cpi->common.frame_type == KEY_FRAME)
@@ -1085,6 +1087,8 @@
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
@@ -1132,6 +1136,8 @@
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -583,20 +583,18 @@
// Other than for the first frame do a motion search
if (cm->current_video_frame > 0)
{
- BLOCKD *d = &x->e_mbd.block[0];
- MV tmp_mv = {0, 0};
int tmp_err;
int motion_error = INT_MAX;
+ int_mv mv, tmp_mv;
// Simple 0,0 motion with no mv overhead
zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
- d->bmi.mv.as_mv.row = 0;
- d->bmi.mv.as_mv.col = 0;
+ mv.as_int = tmp_mv.as_int = 0;
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
first_pass_motion_search(cpi, x, &best_ref_mv,
- &d->bmi.mv.as_mv, lst_yv12,
+ &mv.as_mv, lst_yv12,
&motion_error, recon_yoffset);
// If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
@@ -603,14 +601,13 @@
if (best_ref_mv.as_int)
{
tmp_err = INT_MAX;
- first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv,
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
lst_yv12, &tmp_err, recon_yoffset);
if ( tmp_err < motion_error )
{
motion_error = tmp_err;
- d->bmi.mv.as_mv.row = tmp_mv.row;
- d->bmi.mv.as_mv.col = tmp_mv.col;
+ mv.as_int = tmp_mv.as_int;
}
}
@@ -617,23 +614,13 @@
// Experimental search in a second reference frame ((0,0) based only)
if (cm->current_video_frame > 1)
{
- first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv, gld_yv12, &gf_motion_error, recon_yoffset);
if ((gf_motion_error < motion_error) && (gf_motion_error < this_error))
{
second_ref_count++;
- //motion_error = gf_motion_error;
- //d->bmi.mv.as_mv.row = tmp_mv.row;
- //d->bmi.mv.as_mv.col = tmp_mv.col;
}
- /*else
- {
- xd->pre.y_buffer = cm->last_frame.y_buffer + recon_yoffset;
- xd->pre.u_buffer = cm->last_frame.u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cm->last_frame.v_buffer + recon_uvoffset;
- }*/
-
// Reset to last frame as reference buffer
xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
@@ -656,44 +643,44 @@
neutral_count++;
}
- d->bmi.mv.as_mv.row <<= 3;
- d->bmi.mv.as_mv.col <<= 3;
+ mv.as_mv.row <<= 3;
+ mv.as_mv.col <<= 3;
this_error = motion_error;
- vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
+ vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
- sum_mvr += d->bmi.mv.as_mv.row;
- sum_mvr_abs += abs(d->bmi.mv.as_mv.row);
- sum_mvc += d->bmi.mv.as_mv.col;
- sum_mvc_abs += abs(d->bmi.mv.as_mv.col);
- sum_mvrs += d->bmi.mv.as_mv.row * d->bmi.mv.as_mv.row;
- sum_mvcs += d->bmi.mv.as_mv.col * d->bmi.mv.as_mv.col;
+ sum_mvr += mv.as_mv.row;
+ sum_mvr_abs += abs(mv.as_mv.row);
+ sum_mvc += mv.as_mv.col;
+ sum_mvc_abs += abs(mv.as_mv.col);
+ sum_mvrs += mv.as_mv.row * mv.as_mv.row;
+ sum_mvcs += mv.as_mv.col * mv.as_mv.col;
intercount++;
- best_ref_mv.as_int = d->bmi.mv.as_int;
+ best_ref_mv.as_int = mv.as_int;
// Was the vector non-zero
- if (d->bmi.mv.as_int)
+ if (mv.as_int)
{
mvcount++;
// Was it different from the last non zero vector
- if ( d->bmi.mv.as_int != lastmv_as_int )
+ if ( mv.as_int != lastmv_as_int )
new_mv_count++;
- lastmv_as_int = d->bmi.mv.as_int;
+ lastmv_as_int = mv.as_int;
// Does the Row vector point inwards or outwards
if (mb_row < cm->mb_rows / 2)
{
- if (d->bmi.mv.as_mv.row > 0)
+ if (mv.as_mv.row > 0)
sum_in_vectors--;
- else if (d->bmi.mv.as_mv.row < 0)
+ else if (mv.as_mv.row < 0)
sum_in_vectors++;
}
else if (mb_row > cm->mb_rows / 2)
{
- if (d->bmi.mv.as_mv.row > 0)
+ if (mv.as_mv.row > 0)
sum_in_vectors++;
- else if (d->bmi.mv.as_mv.row < 0)
+ else if (mv.as_mv.row < 0)
sum_in_vectors--;
}
@@ -700,16 +687,16 @@
// Does the Row vector point inwards or outwards
if (mb_col < cm->mb_cols / 2)
{
- if (d->bmi.mv.as_mv.col > 0)
+ if (mv.as_mv.col > 0)
sum_in_vectors--;
- else if (d->bmi.mv.as_mv.col < 0)
+ else if (mv.as_mv.col < 0)
sum_in_vectors++;
}
else if (mb_col > cm->mb_cols / 2)
{
- if (d->bmi.mv.as_mv.col > 0)
+ if (mv.as_mv.col > 0)
sum_in_vectors++;
- else if (d->bmi.mv.as_mv.col < 0)
+ else if (mv.as_mv.col < 0)
sum_in_vectors--;
}
}
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -1720,7 +1720,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1825,7 +1825,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1968,7 +1968,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -780,6 +780,10 @@
sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 2500;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 5000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 5000;
+
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
break;
@@ -834,6 +838,10 @@
sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 1700;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 4500;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 4500;
+
if (Speed > 0)
{
/* Disable coefficient optimization above speed 0 */
@@ -845,6 +853,10 @@
cpi->mode_check_freq[THR_SPLITG] = 2;
cpi->mode_check_freq[THR_SPLITA] = 2;
cpi->mode_check_freq[THR_SPLITMV] = 0;
+
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 0;
}
if (Speed > 1)
@@ -853,6 +865,10 @@
cpi->mode_check_freq[THR_SPLITA] = 4;
cpi->mode_check_freq[THR_SPLITMV] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 2;
+
sf->thresh_mult[THR_TM ] = 1500;
sf->thresh_mult[THR_V_PRED ] = 1500;
sf->thresh_mult[THR_H_PRED ] = 1500;
@@ -863,6 +879,7 @@
{
sf->thresh_mult[THR_NEWMV ] = 2000;
sf->thresh_mult[THR_SPLITMV ] = 10000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 20000;
}
if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
@@ -872,6 +889,7 @@
sf->thresh_mult[THR_NEARG ] = 1500;
sf->thresh_mult[THR_NEWG ] = 2000;
sf->thresh_mult[THR_SPLITG ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 20000;
}
if (cpi->ref_frame_flags & VP8_ALT_FLAG)
@@ -881,6 +899,7 @@
sf->thresh_mult[THR_NEARA ] = 1500;
sf->thresh_mult[THR_NEWA ] = 2000;
sf->thresh_mult[THR_SPLITA ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 10000;
}
sf->thresh_mult[THR_COMP_ZEROLG ] = 1500;
@@ -904,6 +923,10 @@
cpi->mode_check_freq[THR_SPLITA] = 15;
cpi->mode_check_freq[THR_SPLITMV] = 7;
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 7;
+
sf->thresh_mult[THR_TM ] = 2000;
sf->thresh_mult[THR_V_PRED ] = 2000;
sf->thresh_mult[THR_H_PRED ] = 2000;
@@ -914,6 +937,7 @@
{
sf->thresh_mult[THR_NEWMV ] = 2000;
sf->thresh_mult[THR_SPLITMV ] = 25000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 50000;
}
if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
@@ -923,6 +947,7 @@
sf->thresh_mult[THR_NEARG ] = 2000;
sf->thresh_mult[THR_NEWG ] = 2500;
sf->thresh_mult[THR_SPLITG ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 50000;
}
if (cpi->ref_frame_flags & VP8_ALT_FLAG)
@@ -932,6 +957,7 @@
sf->thresh_mult[THR_NEARA ] = 2000;
sf->thresh_mult[THR_NEWA ] = 2500;
sf->thresh_mult[THR_SPLITA ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 25000;
}
sf->thresh_mult[THR_COMP_ZEROLG ] = 2000;
@@ -994,6 +1020,7 @@
sf->thresh_mult[THR_COMP_NEARESTLG] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARLG ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWLG ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = INT_MAX;
}
if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_ALT_FLAG)) != (VP8_LAST_FLAG | VP8_ALT_FLAG))
@@ -1002,6 +1029,7 @@
sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
}
if ((cpi->ref_frame_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) != (VP8_GOLD_FLAG | VP8_ALT_FLAG))
@@ -1010,6 +1038,7 @@
sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
}
// Slow quant, dct and trellis not worthwhile for first pass
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -41,7 +41,7 @@
#define AF_THRESH 25
#define AF_THRESH2 100
#define ARF_DECAY_THRESH 12
-#define MAX_MODES 33
+#define MAX_MODES 36
#define MIN_THRESHMULT 32
#define MAX_THRESHMULT 512
@@ -204,6 +204,10 @@
THR_COMP_NEWLG = 30,
THR_COMP_NEWLA = 31,
THR_COMP_NEWGA = 32,
+
+ THR_COMP_SPLITLG = 33,
+ THR_COMP_SPLITLA = 34,
+ THR_COMP_SPLITGA = 35,
}
THR_MODES;
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -58,6 +58,8 @@
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
+#define INVALID_MV 0x80008000
+
static const int auto_speed_thresh[17] =
{
1000,
@@ -127,6 +129,10 @@
NEWMV,
NEWMV,
NEWMV,
+
+ SPLITMV,
+ SPLITMV,
+ SPLITMV,
};
const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES] =
@@ -177,6 +183,10 @@
LAST_FRAME,
ALTREF_FRAME,
GOLDEN_FRAME,
+
+ LAST_FRAME,
+ ALTREF_FRAME,
+ GOLDEN_FRAME,
};
const MV_REFERENCE_FRAME vp8_second_ref_frame_order[MAX_MODES] =
@@ -201,6 +211,10 @@
GOLDEN_FRAME,
LAST_FRAME,
ALTREF_FRAME,
+
+ GOLDEN_FRAME,
+ LAST_FRAME,
+ ALTREF_FRAME,
};
static void fill_token_costs(
@@ -1554,7 +1568,10 @@
MACROBLOCK *x,
int const *labelings, int which_label,
B_PREDICTION_MODE this_mode,
- int_mv *this_mv, int_mv *best_ref_mv,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv seg_mvs[MAX_REF_FRAMES - 1],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
int *mvcost[2]
)
{
@@ -1592,21 +1609,42 @@
switch (m = this_mode)
{
case NEW4X4 :
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ this_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.ref_frame - 1].as_int;
+ this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
+ }
+
#if CONFIG_HIGH_PRECISION_MV
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
+ 102, xd->allow_high_precision_mv);
+ }
#else
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
+ }
#endif
break;
case LEFT4X4:
- this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
+ this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int : left_block_second_mv(mic, i);
break;
case ABOVE4X4:
- this_mv->as_int = row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis);
+ this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int : above_block_mv(mic, i, mis);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int : above_block_second_mv(mic, i, mis);
break;
case ZERO4X4:
this_mv->as_int = 0;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = 0;
break;
default:
break;
@@ -1614,12 +1652,17 @@
if (m == ABOVE4X4) // replace above with left if same
{
- int_mv left_mv;
+ int_mv left_mv, left_second_mv;
- left_mv.as_int = col ? d[-1].bmi.mv.as_int :
+ left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
+ left_block_second_mv(mic, i);
- if (left_mv.as_int == this_mv->as_int)
+ if (left_mv.as_int == this_mv->as_int &&
+ (!xd->mode_info_context->mbmi.second_ref_frame ||
+ left_second_mv.as_int == this_second_mv->as_int))
m = LEFT4X4;
}
@@ -1626,11 +1669,14 @@
cost = x->inter_bmode_costs[ m];
}
- d->bmi.mv.as_int = this_mv->as_int;
+ d->bmi.as_mv.first.as_int = this_mv->as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ d->bmi.as_mv.second.as_int = this_second_mv->as_int;
x->partition_info->bmi[i].mode = m;
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
-
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
}
while (++i < 16);
@@ -1673,6 +1719,8 @@
int thisdistortion;
vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ vp8_build_2nd_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict_avg);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
@@ -1694,7 +1742,7 @@
typedef struct
{
- int_mv *ref_mv;
+ int_mv *ref_mv, *second_ref_mv;
int_mv mvp;
int segment_rd;
@@ -1703,7 +1751,7 @@
int d;
int segment_yrate;
B_PREDICTION_MODE modes[16];
- int_mv mvs[16];
+ int_mv mvs[16], second_mvs[16];
unsigned char eobs[16];
int mvthresh;
@@ -1716,7 +1764,8 @@
static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
- BEST_SEG_INFO *bsi, unsigned int segmentation)
+ BEST_SEG_INFO *bsi, unsigned int segmentation,
+ int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1])
{
int i;
int const *labels;
@@ -1771,7 +1820,7 @@
for (i = 0; i < label_count; i++)
{
- int_mv mode_mv[B_MODE_COUNT];
+ int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
int best_label_rd = INT_MAX;
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
@@ -1792,7 +1841,8 @@
ta_s = (ENTROPY_CONTEXT *)&t_above_s;
tl_s = (ENTROPY_CONTEXT *)&t_left_s;
- if (this_mode == NEW4X4)
+ // motion search for newmv (single predictor case only)
+ if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
{
int sseshift;
int num00;
@@ -1823,9 +1873,9 @@
// use previous block's result as next block's MV predictor.
if (segmentation == BLOCK_4X4 && i>0)
{
- bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.as_mv.first.as_int;
if (i==4 || i==8 || i==12)
- bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.mv.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.as_mv.first.as_int;
step_param = 2;
}
}
@@ -1894,12 +1944,12 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
+ mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
}
else
{
// The full search result is actually worse so re-instate the previous best vector
- e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
+ e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
}
}
}
@@ -1911,11 +1961,23 @@
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
&distortion, &sse);
+
+ // save the motion search result for later use in compound prediction
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
}
} /* NEW4X4 */
+ else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
+ {
+ // if no motion search result was stored for either reference frame,
+ // skip NEW4X4 for this block under compound prediction
+ if (seg_mvs[i][x->e_mbd.mode_info_context->mbmi.second_ref_frame - 1].as_int == INVALID_MV ||
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int == INVALID_MV)
+ {
+ continue;
+ }
+ }
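
seg_mvs acts as a cache between the two passes over the modes: the
single-prediction pass above stores its NEW4X4 search result per block and
per reference frame, and the compound pass reuses those vectors via
labels2mode() rather than searching again. Entries left at INVALID_MV
(0x80008000, i.e. row and col both -32768, outside any legal vector) mean no
search ran for that block/reference. Abbreviating
xd->mode_info_context->mbmi.ref_frame to ref_frame:

    /* filled by the single-reference pass (rd_check_segment): */
    seg_mvs[i][ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
    /* consumed for compound prediction (labels2mode, per-block slice): */
    this_mv->as_int        = seg_mvs[ref_frame - 1].as_int;
    this_second_mv->as_int = seg_mvs[second_ref_frame - 1].as_int;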
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
- bsi->ref_mv, XMVCOST);
+ &second_mode_mv[this_mode], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
// Trap vectors that reach beyond the UMV borders
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
@@ -1923,6 +1985,16 @@
{
continue;
}
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ {
+ if (((second_mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((second_mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
+ {
+ continue;
+ }
+ }
distortion = vp8_encode_inter_mb_segment(
x, labels, i,
@@ -1951,7 +2023,7 @@
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- bsi->ref_mv, XMVCOST);
+ &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
br += sbr;
bd += sbd;
@@ -1979,6 +2051,8 @@
BLOCKD *bd = &x->e_mbd.block[i];
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
}
@@ -2000,10 +2074,11 @@
}
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int best_rd,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv, int best_rd,
int *mdcounts, int *returntotrate,
int *returnyrate, int *returndistortion,
- int mvthresh)
+ int mvthresh,
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1])
{
int i;
BEST_SEG_INFO bsi;
@@ -2012,6 +2087,7 @@
bsi.segment_rd = best_rd;
bsi.ref_mv = best_ref_mv;
+ bsi.second_ref_mv = second_best_ref_mv;
bsi.mvp.as_int = best_ref_mv->as_int;
bsi.mvthresh = mvthresh;
bsi.mdcounts = mdcounts;
@@ -2025,16 +2101,16 @@
{
/* for now, we will keep the original segmentation order
when in best quality mode */
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
}
else
{
int sr;
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
if (bsi.segment_rd < best_rd)
@@ -2074,7 +2150,7 @@
sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
}
/* block 16X8 */
@@ -2085,7 +2161,7 @@
sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
}
/* If 8x8 is better than 16x8/8x16, then do 4x4 search */
@@ -2093,7 +2169,7 @@
if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
{
bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
}
/* restore UMV window */
@@ -2109,7 +2185,9 @@
{
BLOCKD *bd = &x->e_mbd.block[i];
- bd->bmi.mv.as_int = bsi.mvs[i].as_int;
+ bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
@@ -2129,11 +2207,15 @@
x->partition_info->bmi[i].mode = bsi.modes[j];
x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
}
/*
* used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
*/
x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;
return bsi.segment_rd;
}
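
The copy-back loops above drain the search's bookkeeping struct. As a reading aid, here is a minimal sketch of the fields this patch exercises; the member names mirror the bsi accesses above, but the layout is an illustration, not the actual BEST_SEG_INFO definition in rdopt.c:

    typedef struct
    {
        int_mv *ref_mv;           /* MV predictor for the first reference frame */
        int_mv *second_ref_mv;    /* NEW: MV predictor for the second reference */
        int_mv mvp;               /* starting point for the motion search */
        int_mv sv_mvp[4];         /* saved 8x8 winners reused by 8x16/16x8/4x4 */
        int sv_istep[2];          /* step params derived from the sv_mvp spread */
        int_mv mvs[16];           /* per-block winners, first reference */
        int_mv second_mvs[16];    /* NEW: per-block winners, second reference */
        B_PREDICTION_MODE modes[16];
        int eobs[16];             /* end-of-block positions of the coded residual */
        int segment_rd;           /* best rd cost found so far */
        int segment_num;          /* which segmentation won */
        int mvthresh;
        int *mdcounts;
    } BEST_SEG_INFO_sketch;       /* hypothetical name for this illustration */

The two second_* members are the substance of the change: everywhere the segmentation search used to record a single motion vector, it now records one per reference frame.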
@@ -2564,7 +2646,7 @@
union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
- int_mv best_ref_mv;
+ int_mv best_ref_mv, second_best_ref_mv;
int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
int num00;
@@ -2615,6 +2697,7 @@
unsigned char *v_buffer[4];
unsigned int ref_costs[MAX_REF_FRAMES];
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
@@ -2622,10 +2705,24 @@
for (i = 0; i < 4; i++)
{
-#define INVALID_MV 0x80008000
mc_search_result[i].as_int = INVALID_MV;
}
+ for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++)
+ {
+ int j;
+
+ for (j = 0; j < 16; j++)
+ {
+ int k;
+
+ for (k = 0; k < MAX_REF_FRAMES - 1; k++)
+ {
+ seg_mvs[i][j][k].as_int = INVALID_MV;
+ }
+ }
+ }
+
if (cpi->ref_frame_flags & VP8_LAST_FLAG)
{
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
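
The new seg_mvs cache above is seeded with INVALID_MV so that each (segmentation, block, reference) motion search runs at most once and later passes can reuse the result. A hedged sketch of the intended lookup discipline follows; the helper name and the ref_frame - 1 indexing are assumptions for illustration, since the patch indexes the array inline:

    /* Hypothetical accessor; not part of this patch. */
    static int_mv *cached_seg_mv(int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16][MAX_REF_FRAMES - 1],
                                 int segmentation, int block, int ref_frame)
    {
        /* ref_frame is 1-based (intra is 0 and never cached), hence the -1 */
        int_mv *mv = &seg_mvs[segmentation][block][ref_frame - 1];

        /* 0x80008000 packs an out-of-range row/col pair, so a single
         * 32-bit compare detects "no search has filled this slot yet" */
        if (mv->as_int == INVALID_MV)
        {
            /* caller runs the motion search and stores the result here */
        }
        return mv;
    }

Caching matters here because compound SPLITMV revisits the same per-block searches once per reference frame combination.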
@@ -2709,7 +2806,9 @@
// Test best rd so far against threshold for trying this mode.
if (best_rd <= cpi->rd_threshes[mode_index])
+ {
continue;
+ }
// These variables hold our rolling total cost and distortion for this mode
rate2 = 0;
@@ -2756,7 +2855,9 @@
{
if (this_mode != ZEROMV ||
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
+ {
continue;
+ }
}
}
@@ -2763,15 +2864,27 @@
/* everything but intra */
if (x->e_mbd.mode_info_context->mbmi.ref_frame)
{
- x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- mode_mv[NEARESTMV] = frame_nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- mode_mv[NEARMV] = frame_near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- vpx_memcpy(mdcounts, frame_mdcounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
+ int ref = x->e_mbd.mode_info_context->mbmi.ref_frame;
+
+ x->e_mbd.pre.y_buffer = y_buffer[ref];
+ x->e_mbd.pre.u_buffer = u_buffer[ref];
+ x->e_mbd.pre.v_buffer = v_buffer[ref];
+ mode_mv[NEARESTMV] = frame_nearest_mv[ref];
+ mode_mv[NEARMV] = frame_near_mv[ref];
+ best_ref_mv = frame_best_ref_mv[ref];
+ vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
}
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ {
+ int ref = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+
+ x->e_mbd.second_pre.y_buffer = y_buffer[ref];
+ x->e_mbd.second_pre.u_buffer = u_buffer[ref];
+ x->e_mbd.second_pre.v_buffer = v_buffer[ref];
+ second_best_ref_mv = frame_best_ref_mv[ref];
+ }
+
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled)
@@ -2867,9 +2980,9 @@
this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, NULL,
best_yrd, mdcounts,
- &rate, &rate_y, &distortion, this_rd_thresh) ;
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs);
rate2 += rate;
distortion2 += distortion;
@@ -2887,6 +3000,9 @@
this_rd = INT_MAX;
disable_skip = 1;
}
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ compmode_cost =
+ vp8_cost_bit( get_pred_prob( cm, xd, PRED_COMP ), 0 );
}
break;
case DC_PRED:
@@ -2973,11 +3089,11 @@
// Initial step/diamond search
{
- bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
+ bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.as_mv.first,
step_param, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
// Further step/diamond searches as necessary
n = 0;
@@ -2999,7 +3115,7 @@
else
{
thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
- &d->bmi.mv, step_param + n, sadpb, &num00,
+ &d->bmi.as_mv.first, step_param + n, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
@@ -3010,11 +3126,11 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
}
else
{
- d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
}
}
@@ -3030,7 +3146,7 @@
search_range = 8;
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
- thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
+ thissme = cpi->refining_search_sad(x, b, d, &d->bmi.as_mv.first, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
@@ -3037,11 +3153,11 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
}
else
{
- d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
}
@@ -3054,14 +3170,14 @@
{
int dis; /* TODO: use dis in distortion calculation later. */
unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first, &best_ref_mv,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &dis, &sse);
}
- mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.mv.as_int;
+ mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.as_mv.first.as_int;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
// Add the new motion vector cost to our rolling cost variable
#if CONFIG_HIGH_PRECISION_MV
@@ -3081,7 +3197,9 @@
// Do not bother proceeding if the vector (from newmv, nearest or near) is 0,0 as this should then be coded using the zeromv mode.
if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
+ {
continue;
+ }
case ZEROMV:
@@ -3090,7 +3208,9 @@
// because of the lack of break statements in the previous two cases.
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
+ {
continue;
+ }
vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
vp8_build_inter16x16_predictors_mby(&x->e_mbd);
@@ -3216,68 +3336,104 @@
break;
case NEARMV:
if (frame_near_mv[ref1].as_int == 0 || frame_near_mv[ref2].as_int == 0)
+ {
continue;
+ }
x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_near_mv[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_near_mv[ref2].as_int;
break;
case NEARESTMV:
if (frame_nearest_mv[ref1].as_int == 0 || frame_nearest_mv[ref2].as_int == 0)
+ {
continue;
+ }
x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_nearest_mv[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_nearest_mv[ref2].as_int;
break;
+ case SPLITMV:
+ {
+ int tmp_rd;
+ int this_rd_thresh;
+
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
+
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, &second_best_ref_mv,
+ best_yrd, mdcounts,
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs);
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+ // If even the 'Y' rd value of split is higher than the best so far, then don't bother looking at UV
+ if (tmp_rd < best_yrd)
+ {
+ // Now work out UV cost and add it in
+ rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ }
+ else
+ {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
+ }
+ break;
default:
break;
}
- /* Add in the Mv/mode cost */
- rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
+ if (this_mode != SPLITMV)
+ {
+ /* Add in the Mv/mode cost */
+ rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
- if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max))
- continue;
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
+ if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max))
+ {
+ continue;
+ }
- /* build first and second prediction */
- vp8_build_inter16x16_predictors_mby(&x->e_mbd);
- vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
- /* do second round and average the results */
- x->e_mbd.second_pre.y_buffer = y_buffer[ref2];
- x->e_mbd.second_pre.u_buffer = u_buffer[ref2];
- x->e_mbd.second_pre.v_buffer = v_buffer[ref2];
- vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
- &x->e_mbd.predictor[256],
- &x->e_mbd.predictor[320], 16, 8);
+ /* build first and second prediction */
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd);
+ vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
+ /* do second round and average the results */
+ vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
+ &x->e_mbd.predictor[256],
+ &x->e_mbd.predictor[320], 16, 8);
- /* Y cost and distortion */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
+ /* Y cost and distortion */
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ macro_block_yrd_8x8(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ else
+ macro_block_yrd(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
- rate2 += rate_y;
- distortion2 += distortion;
+ rate2 += rate_y;
+ distortion2 += distortion;
- /* UV cost and distortion */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- else
- rd_inter16x16_uv(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
+ /* UV cost and distortion */
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ else
+ rd_inter16x16_uv(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ }
/* don't bother w/ skip, we would never have come here if skip were enabled */
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
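
The second_pre round above builds the prediction from the second reference into the same predictor buffers and averages it with the first. A standalone sketch of that combine step for the 16x16 luma plane, assuming the usual (a + b + 1) >> 1 rounding average; the buffer names are hypothetical, and the real work happens inside vp8_build_2nd_inter16x16_predictors_mb:

    /* Average a second 16x16 prediction into the first, in place. */
    static void average_16x16(unsigned char *first,        /* in/out */
                              const unsigned char *second, /* in */
                              int stride)
    {
        int r, c;

        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
                first[c] = (unsigned char)((first[c] + second[c] + 1) >> 1);

            first += stride;
            second += stride;
        }
    }

The +1 makes the average round to nearest rather than truncate, so ties bias upward instead of systematically darkening the combined prediction.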
@@ -3399,8 +3555,7 @@
*returnintra = distortion2 ;
}
- if (!disable_skip &&
- (this_mode == SPLITMV || x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME))
+ if (!disable_skip && x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
if (this_rd < best_comp_rd)
best_comp_rd = this_rd;
@@ -3470,8 +3625,7 @@
/* keep record of best compound/single-only prediction */
if (!disable_skip &&
- x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
- this_mode != SPLITMV)
+ x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
{
int single_rd, hybrid_rd, single_rate, hybrid_rate;
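
For the record keeping that starts here: single_rd and hybrid_rd weigh the same distortion against rates that differ only in how the compound-prediction flag is costed. A sketch of the arithmetic, with the caveat that the exact expressions are an assumption and not copied from this patch; RDCOST, vp8_cost_bit and get_pred_prob are the existing helpers used elsewhere in this file:

    /* Illustrative only; the signature is invented for the sketch. */
    static void record_pred_rd(int rate2, int distortion2, int compmode_cost,
                               vp8_prob comp_prob, int rdmult, int rddiv,
                               int *single_rd, int *hybrid_rd)
    {
        /* assume rate2 excludes the prediction-flag cost */
        int single_rate = rate2 + vp8_cost_bit(comp_prob, 0); /* flag: "single" */
        int hybrid_rate = rate2 + compmode_cost;              /* flag as coded */

        *single_rd = RDCOST(rdmult, rddiv, single_rate, distortion2);
        *hybrid_rd = RDCOST(rdmult, rddiv, hybrid_rate, distortion2);
    }

With SPLITMV no longer excluded, these per-mode diffs now accumulate over split macroblocks as well, which is what the rd_single_diff/rd_comp_diff/rd_hybrid_diff hunk below relies on.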
@@ -3581,12 +3735,17 @@
if (best_mbmode.mode == SPLITMV)
{
for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].mv.as_int = best_bmodes[i].mv.as_int;
+ xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
x->e_mbd.mode_info_context->mbmi.mv.as_int =
x->partition_info->bmi[15].mv.as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
}
if (best_single_rd == INT_MAX)
@@ -3729,8 +3888,7 @@
cpi->rd_single_diff += single;
cpi->rd_comp_diff += compound;
cpi->rd_hybrid_diff += hybrid;
- if (xd->mode_info_context->mbmi.ref_frame &&
- xd->mode_info_context->mbmi.mode != SPLITMV)
+ if (xd->mode_info_context->mbmi.ref_frame)
{
unsigned char pred_context;
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -224,7 +224,7 @@
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
bestsme = vp8_hex_search(x, b, d,
- &best_ref_mv1_full, &d->bmi.mv,
+ &best_ref_mv1_full, &d->bmi.as_mv.first,
step_param,
sadpb,
&cpi->fn_ptr[BLOCK_16X16],
@@ -243,7 +243,7 @@
int distortion;
unsigned int sse;
bestsme = cpi->find_fractional_mv_step(x, b, d,
- &d->bmi.mv, &best_ref_mv1,
+ &d->bmi.as_mv.first, &best_ref_mv1,
x->errorperbit, &cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?mvcost_hp:mvcost,
@@ -333,8 +333,8 @@
if (cpi->frames[frame] == NULL)
continue;
- mbd->block[0].bmi.mv.as_mv.row = 0;
- mbd->block[0].bmi.mv.as_mv.col = 0;
+ mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
+ mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
#if ALT_REF_MC_ENABLED
#define THRESH_LOW 10000
@@ -364,8 +364,8 @@
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
- mbd->block[0].bmi.mv.as_mv.row,
- mbd->block[0].bmi.mv.as_mv.col,
+ mbd->block[0].bmi.as_mv.first.as_mv.row,
+ mbd->block[0].bmi.as_mv.first.as_mv.col,
predictor);
// Apply the filter (YUV)
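
All of the bmi accesses in these temporal filter hunks lean on int_mv being a union: .as_int moves or compares both components in one 32-bit operation, while .as_mv.row and .as_mv.col give component access for the search code. A paraphrased sketch of the layout; the real definitions live in the common headers and may differ in detail:

    /* Paraphrase of the mv types used above; field names match,
     * exact declarations may not. */
    typedef struct
    {
        short row;
        short col;
    } MV_sketch;

    typedef union
    {
        unsigned int as_int;  /* both halves at once: cheap copy, compare, sentinel */
        MV_sketch    as_mv;   /* component view for search arithmetic */
    } int_mv_sketch;

    static void clear_block0_mv(int_mv_sketch *mv)
    {
        mv->as_int = 0;  /* equivalent to zeroing row and col separately */
    }

This is also why INVALID_MV (0x80008000) works as a sentinel: it sets the sign bit of both shorts, a row/col pair no search produces, and a single integer compare detects it.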
--