ref: 7ca517f75592eaddbe008117790d834cdbed9199
parent: dc836109e4b8013ee28f4f36b4a48fd18ca209e4
author: John Koleszar <jkoleszar@google.com>
date: Fri Feb 8 14:46:36 EST 2013
Replace as_mv struct with array

Replace as_mv.{first, second} with a two-element array, so that the motion
vectors can easily be processed with an index variable.

Change-Id: I1e429155544d2a94a5b72a5b467c53d8b8728190
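
As an illustration of the intent (a sketch, not part of the patch itself):
with as_mv as a two-element array, callers that currently duplicate
.first/.second code paths can handle both inter predictor motion vectors
with a loop index. The snippet below reuses identifiers that appear in this
change (blockd, bmi, as_mv, mbmi, clamp_mv_to_umv_border, xd); the local
variable num_refs is hypothetical.

    int ref, num_refs;
    /* one MV for single prediction, two for compound prediction */
    num_refs = mbmi->second_ref_frame > 0 ? 2 : 1;
    for (ref = 0; ref < num_refs; ++ref)
      clamp_mv_to_umv_border(&blockd[0].bmi.as_mv[ref].as_mv, xd);
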
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -218,10 +218,7 @@
B_PREDICTION_MODE context;
#endif
} as_mode;
- struct {
- int_mv first;
- int_mv second;
- } as_mv;
+ int_mv as_mv[2]; // first, second inter predictor motion vectors
};
typedef enum {
--- a/vp9/common/vp9_debugmodes.c
+++ b/vp9/common/vp9_debugmodes.c
@@ -129,8 +129,8 @@
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
fprintf(mvs, "%3d:%-3d ",
- mi[mb_index].bmi[bindex].as_mv.first.as_mv.row,
- mi[mb_index].bmi[bindex].as_mv.first.as_mv.col);
+ mi[mb_index].bmi[bindex].as_mv[0].as_mv.row,
+ mi[mb_index].bmi[bindex].as_mv[0].as_mv.col);
}
--- a/vp9/common/vp9_findnearmv.h
+++ b/vp9/common/vp9_findnearmv.h
@@ -98,7 +98,7 @@
b += 4;
}
- return (cur_mb->bmi + b - 1)->as_mv.first.as_int;
+ return (cur_mb->bmi + b - 1)->as_mv[0].as_int;
}
static int left_block_second_mv(const MACROBLOCKD *xd,
@@ -117,8 +117,8 @@
}
return cur_mb->mbmi.second_ref_frame > 0 ?
- (cur_mb->bmi + b - 1)->as_mv.second.as_int :
- (cur_mb->bmi + b - 1)->as_mv.first.as_int;
+ (cur_mb->bmi + b - 1)->as_mv[1].as_int :
+ (cur_mb->bmi + b - 1)->as_mv[0].as_int;
}
static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
@@ -131,7 +131,7 @@
b += 16;
}
- return (cur_mb->bmi + b - 4)->as_mv.first.as_int;
+ return (cur_mb->bmi + b - 4)->as_mv[0].as_int;
}
static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
@@ -146,8 +146,8 @@
}
return cur_mb->mbmi.second_ref_frame > 0 ?
- (cur_mb->bmi + b - 4)->as_mv.second.as_int :
- (cur_mb->bmi + b - 4)->as_mv.first.as_int;
+ (cur_mb->bmi + b - 4)->as_mv[1].as_int :
+ (cur_mb->bmi + b - 4)->as_mv[0].as_int;
}
static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -154,7 +154,7 @@
int_mv mv;
ptr_base = *(d->base_pre);
- mv.as_int = d->bmi.as_mv.first.as_int;
+ mv.as_int = d->bmi.as_mv[0].as_int;
ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
(mv.as_mv.col >> 3);
@@ -179,7 +179,7 @@
int_mv mv;
ptr_base = *(d->base_second_pre);
- mv.as_int = d->bmi.as_mv.second.as_int;
+ mv.as_int = d->bmi.as_mv[1].as_int;
ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
(mv.as_mv.col >> 3);
@@ -197,7 +197,7 @@
int_mv mv;
ptr_base = *(d->base_pre);
- mv.as_int = d->bmi.as_mv.first.as_int;
+ mv.as_int = d->bmi.as_mv[0].as_int;
ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
(mv.as_mv.col >> 3);
@@ -222,7 +222,7 @@
int_mv mv;
ptr_base = *(d->base_second_pre);
- mv.as_int = d->bmi.as_mv.second.as_int;
+ mv.as_int = d->bmi.as_mv[1].as_int;
ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
(mv.as_mv.col >> 3);
@@ -240,7 +240,7 @@
int_mv mv;
ptr_base = *(d->base_pre);
- mv.as_int = d->bmi.as_mv.first.as_int;
+ mv.as_int = d->bmi.as_mv[0].as_int;
ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
(mv.as_mv.col >> 3);
@@ -264,38 +264,38 @@
int voffset = 20 + i * 2 + j;
int temp;
- temp = blockd[yoffset ].bmi.as_mv.first.as_mv.row
- + blockd[yoffset + 1].bmi.as_mv.first.as_mv.row
- + blockd[yoffset + 4].bmi.as_mv.first.as_mv.row
- + blockd[yoffset + 5].bmi.as_mv.first.as_mv.row;
+ temp = blockd[yoffset ].bmi.as_mv[0].as_mv.row
+ + blockd[yoffset + 1].bmi.as_mv[0].as_mv.row
+ + blockd[yoffset + 4].bmi.as_mv[0].as_mv.row
+ + blockd[yoffset + 5].bmi.as_mv[0].as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
- xd->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
+ xd->block[uoffset].bmi.as_mv[0].as_mv.row = (temp / 8) &
xd->fullpixel_mask;
- temp = blockd[yoffset ].bmi.as_mv.first.as_mv.col
- + blockd[yoffset + 1].bmi.as_mv.first.as_mv.col
- + blockd[yoffset + 4].bmi.as_mv.first.as_mv.col
- + blockd[yoffset + 5].bmi.as_mv.first.as_mv.col;
+ temp = blockd[yoffset ].bmi.as_mv[0].as_mv.col
+ + blockd[yoffset + 1].bmi.as_mv[0].as_mv.col
+ + blockd[yoffset + 4].bmi.as_mv[0].as_mv.col
+ + blockd[yoffset + 5].bmi.as_mv[0].as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
- blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[0].as_mv.col = (temp / 8) &
xd->fullpixel_mask;
- blockd[voffset].bmi.as_mv.first.as_mv.row =
- blockd[uoffset].bmi.as_mv.first.as_mv.row;
- blockd[voffset].bmi.as_mv.first.as_mv.col =
- blockd[uoffset].bmi.as_mv.first.as_mv.col;
+ blockd[voffset].bmi.as_mv[0].as_mv.row =
+ blockd[uoffset].bmi.as_mv[0].as_mv.row;
+ blockd[voffset].bmi.as_mv[0].as_mv.col =
+ blockd[uoffset].bmi.as_mv[0].as_mv.col;
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- temp = blockd[yoffset ].bmi.as_mv.second.as_mv.row
- + blockd[yoffset + 1].bmi.as_mv.second.as_mv.row
- + blockd[yoffset + 4].bmi.as_mv.second.as_mv.row
- + blockd[yoffset + 5].bmi.as_mv.second.as_mv.row;
+ temp = blockd[yoffset ].bmi.as_mv[1].as_mv.row
+ + blockd[yoffset + 1].bmi.as_mv[1].as_mv.row
+ + blockd[yoffset + 4].bmi.as_mv[1].as_mv.row
+ + blockd[yoffset + 5].bmi.as_mv[1].as_mv.row;
if (temp < 0) {
temp -= 4;
@@ -303,13 +303,13 @@
temp += 4;
}
- blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[1].as_mv.row = (temp / 8) &
xd->fullpixel_mask;
- temp = blockd[yoffset ].bmi.as_mv.second.as_mv.col
- + blockd[yoffset + 1].bmi.as_mv.second.as_mv.col
- + blockd[yoffset + 4].bmi.as_mv.second.as_mv.col
- + blockd[yoffset + 5].bmi.as_mv.second.as_mv.col;
+ temp = blockd[yoffset ].bmi.as_mv[1].as_mv.col
+ + blockd[yoffset + 1].bmi.as_mv[1].as_mv.col
+ + blockd[yoffset + 4].bmi.as_mv[1].as_mv.col
+ + blockd[yoffset + 5].bmi.as_mv[1].as_mv.col;
if (temp < 0) {
temp -= 4;
@@ -317,13 +317,13 @@
temp += 4;
}
- blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[1].as_mv.col = (temp / 8) &
xd->fullpixel_mask;
- blockd[voffset].bmi.as_mv.second.as_mv.row =
- blockd[uoffset].bmi.as_mv.second.as_mv.row;
- blockd[voffset].bmi.as_mv.second.as_mv.col =
- blockd[uoffset].bmi.as_mv.second.as_mv.col;
+ blockd[voffset].bmi.as_mv[1].as_mv.row =
+ blockd[uoffset].bmi.as_mv[1].as_mv.row;
+ blockd[voffset].bmi.as_mv[1].as_mv.col =
+ blockd[uoffset].bmi.as_mv[1].as_mv.col;
}
}
}
@@ -332,7 +332,7 @@
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 1];
- if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+ if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
build_inter_predictors2b(xd, d0, 8);
else {
vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
@@ -717,15 +717,15 @@
blockd[10].bmi = xd->mode_info_context->bmi[10];
if (mbmi->need_to_clamp_mvs) {
- clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.first.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.first.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.first.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.first.as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv[0].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv[0].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv[0].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[0].as_mv, xd);
if (mbmi->second_ref_frame > 0) {
- clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.second.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.second.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.second.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.second.as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv[1].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv[1].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv[1].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[1].as_mv, xd);
}
}
@@ -750,15 +750,15 @@
blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
if (mbmi->need_to_clamp_mvs) {
- clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.first.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.first.as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[0].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[0].as_mv, xd);
if (mbmi->second_ref_frame > 0) {
- clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.second.as_mv, xd);
- clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.second.as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[1].as_mv, xd);
+ clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[1].as_mv, xd);
}
}
- if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+ if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
build_inter_predictors2b(xd, d0, 16);
else {
vp9_build_inter_predictors_b(d0, 16, &xd->subpix);
@@ -776,7 +776,7 @@
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 1];
- if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+ if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
build_inter_predictors2b(xd, d0, 8);
else {
vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
@@ -803,44 +803,44 @@
int temp;
- temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row;
+ temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[0].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 1].as_mv[0].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 4].as_mv[0].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 5].as_mv[0].as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
- blockd[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[0].as_mv.row = (temp / 8) &
xd->fullpixel_mask;
- temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col;
+ temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[0].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 1].as_mv[0].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 4].as_mv[0].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 5].as_mv[0].as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
- blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[0].as_mv.col = (temp / 8) &
xd->fullpixel_mask;
// if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
+ clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv[0].as_mv, xd);
// if (x->mode_info_context->mbmi.need_to_clamp_mvs)
- clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
+ clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv[0].as_mv, xd);
- blockd[voffset].bmi.as_mv.first.as_mv.row =
- blockd[uoffset].bmi.as_mv.first.as_mv.row;
- blockd[voffset].bmi.as_mv.first.as_mv.col =
- blockd[uoffset].bmi.as_mv.first.as_mv.col;
+ blockd[voffset].bmi.as_mv[0].as_mv.row =
+ blockd[uoffset].bmi.as_mv[0].as_mv.row;
+ blockd[voffset].bmi.as_mv[0].as_mv.col =
+ blockd[uoffset].bmi.as_mv[0].as_mv.col;
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row
- + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row;
+ temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[1].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 1].as_mv[1].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 4].as_mv[1].as_mv.row
+ + xd->mode_info_context->bmi[yoffset + 5].as_mv[1].as_mv.row;
if (temp < 0) {
temp -= 4;
@@ -848,13 +848,13 @@
temp += 4;
}
- blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[1].as_mv.row = (temp / 8) &
xd->fullpixel_mask;
- temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col
- + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col;
+ temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[1].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 1].as_mv[1].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 4].as_mv[1].as_mv.col
+ + xd->mode_info_context->bmi[yoffset + 5].as_mv[1].as_mv.col;
if (temp < 0) {
temp -= 4;
@@ -862,21 +862,21 @@
temp += 4;
}
- blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
+ blockd[uoffset].bmi.as_mv[1].as_mv.col = (temp / 8) &
xd->fullpixel_mask;
// if (mbmi->need_to_clamp_mvs)
clamp_uvmv_to_umv_border(
- &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
+ &blockd[uoffset].bmi.as_mv[1].as_mv, xd);
// if (mbmi->need_to_clamp_mvs)
clamp_uvmv_to_umv_border(
- &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
+ &blockd[uoffset].bmi.as_mv[1].as_mv, xd);
- blockd[voffset].bmi.as_mv.second.as_mv.row =
- blockd[uoffset].bmi.as_mv.second.as_mv.row;
- blockd[voffset].bmi.as_mv.second.as_mv.col =
- blockd[uoffset].bmi.as_mv.second.as_mv.col;
+ blockd[voffset].bmi.as_mv[1].as_mv.row =
+ blockd[uoffset].bmi.as_mv[1].as_mv.row;
+ blockd[voffset].bmi.as_mv[1].as_mv.col =
+ blockd[uoffset].bmi.as_mv[1].as_mv.col;
}
}
}
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -1041,9 +1041,9 @@
fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
do {
- mi->bmi[ *fill_offset].as_mv.first.as_int = blockmv.as_int;
+ mi->bmi[ *fill_offset].as_mv[0].as_int = blockmv.as_int;
if (mbmi->second_ref_frame > 0)
- mi->bmi[ *fill_offset].as_mv.second.as_int = secondmv.as_int;
+ mi->bmi[ *fill_offset].as_mv[1].as_int = secondmv.as_int;
fill_offset++;
} while (--fill_count);
}
@@ -1051,8 +1051,8 @@
} while (++j < num_p);
}
- mv->as_int = mi->bmi[15].as_mv.first.as_int;
- mbmi->mv[1].as_int = mi->bmi[15].as_mv.second.as_int;
+ mv->as_int = mi->bmi[15].as_mv[0].as_int;
+ mbmi->mv[1].as_int = mi->bmi[15].as_mv[1].as_int;
break; /* done with SPLITMV */
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -1546,7 +1546,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
uint8_t *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv *best_mv = &d->bmi.as_mv[0];
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1641,7 +1641,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
uint8_t *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv *best_mv = &d->bmi.as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
@@ -1770,7 +1770,7 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
uint8_t *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv *best_mv = &d->bmi.as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2162,17 +2162,17 @@
}
break;
case LEFT4X4:
- this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int :
+ this_mv->as_int = col ? d[-1].bmi.as_mv[0].as_int :
left_block_mv(xd, mic, i);
if (mbmi->second_ref_frame > 0)
- this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int :
+ this_second_mv->as_int = col ? d[-1].bmi.as_mv[1].as_int :
left_block_second_mv(xd, mic, i);
break;
case ABOVE4X4:
- this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int :
+ this_mv->as_int = row ? d[-4].bmi.as_mv[0].as_int :
above_block_mv(mic, i, mis);
if (mbmi->second_ref_frame > 0)
- this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int :
+ this_second_mv->as_int = row ? d[-4].bmi.as_mv[1].as_int :
above_block_second_mv(mic, i, mis);
break;
case ZERO4X4:
@@ -2188,10 +2188,10 @@
int_mv left_mv, left_second_mv;
left_second_mv.as_int = 0;
- left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
+ left_mv.as_int = col ? d[-1].bmi.as_mv[0].as_int :
left_block_mv(xd, mic, i);
if (mbmi->second_ref_frame > 0)
- left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
+ left_second_mv.as_int = col ? d[-1].bmi.as_mv[1].as_int :
left_block_second_mv(xd, mic, i);
if (left_mv.as_int == this_mv->as_int &&
@@ -2208,9 +2208,9 @@
#endif
}
- d->bmi.as_mv.first.as_int = this_mv->as_int;
+ d->bmi.as_mv[0].as_int = this_mv->as_int;
if (mbmi->second_ref_frame > 0)
- d->bmi.as_mv.second.as_int = this_second_mv->as_int;
+ d->bmi.as_mv[1].as_int = this_second_mv->as_int;
x->partition_info->bmi[i].mode = m;
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
@@ -2496,9 +2496,9 @@
// use previous block's result as next block's MV predictor.
if (segmentation == PARTITIONING_4X4 && i > 0) {
- bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv.first.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv[0].as_int;
if (i == 4 || i == 8 || i == 12)
- bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv.first.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv[0].as_int;
step_param = 2;
}
}
@@ -2537,11 +2537,11 @@
if (thissme < bestsme) {
bestsme = thissme;
- mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
+ mode_mv[NEW4X4].as_int = e->bmi.as_mv[0].as_int;
} else {
/* The full search result is actually worse so re-instate the
* previous best vector */
- e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
+ e->bmi.as_mv[0].as_int = mode_mv[NEW4X4].as_int;
}
}
}
@@ -2881,9 +2881,9 @@
for (i = 0; i < 16; i++) {
BLOCKD *bd = &x->e_mbd.block[i];
- bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
+ bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
if (mbmi->second_ref_frame > 0)
- bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
+ bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
@@ -3303,8 +3303,8 @@
x->nmvjointcost, x->mvcost,
&dis, &sse);
}
- d->bmi.as_mv.first.as_int = tmp_mv.as_int;
- frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv.first.as_int;
+ d->bmi.as_mv[0].as_int = tmp_mv.as_int;
+ frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv[0].as_int;
// Add the new motion vector cost to our rolling cost variable
*rate2 += vp9_mv_bit_cost(&tmp_mv, &ref_mv[0],
@@ -4247,10 +4247,12 @@
if (best_mbmode.mode == SPLITMV) {
for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
+ xd->mode_info_context->bmi[i].as_mv[0].as_int =
+ best_bmodes[i].as_mv[0].as_int;
if (mbmi->second_ref_frame > 0)
for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
+ xd->mode_info_context->bmi[i].as_mv[1].as_int =
+ best_bmodes[i].as_mv[1].as_int;
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -171,7 +171,7 @@
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
- bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
+ bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv[0],
step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
NULL, NULL, NULL, NULL,
&best_ref_mv1);
@@ -183,7 +183,7 @@
int distortion;
unsigned int sse;
// Ignore mv costing by sending NULL pointer instead of cost array
- bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first,
+ bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv[0],
&best_ref_mv1,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
@@ -263,8 +263,8 @@
if (cpi->frames[frame] == NULL)
continue;
- mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
- mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
+ mbd->block[0].bmi.as_mv[0].as_mv.row = 0;
+ mbd->block[0].bmi.as_mv[0].as_mv.col = 0;
if (frame == alt_ref_index) {
filter_weight = 2;
@@ -297,8 +297,8 @@
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
- mbd->block[0].bmi.as_mv.first.as_mv.row,
- mbd->block[0].bmi.as_mv.first.as_mv.col,
+ mbd->block[0].bmi.as_mv[0].as_mv.row,
+ mbd->block[0].bmi.as_mv[0].as_mv.col,
predictor);
// Apply the filter (YUV)
--