ref: 6b25501bf1b5f7f729665c39cb3e25a7b1edf385
parent: 6ed81fa5b3aba4c6be6950872a03d67ddbc8c990
author: Scott LaVarnway <slavarnway@google.com>
date: Thu May 12 06:50:16 EDT 2011
Using int_mv instead of MV. The compiler produces better assembly when using int_mv for assignments; when assigning an MV struct, the compiler shifts and ORs the two 16-bit values. Change-Id: I52ce4bc2bfbfaf3f1151204b2f21e1e0654f960f
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -140,11 +140,7 @@
typedef struct
{
B_PREDICTION_MODE mode;
- union
- {
- int as_int;
- MV as_mv;
- } mv;
+ int_mv mv;
} B_MODE_INFO;
@@ -161,11 +157,7 @@
{
MB_PREDICTION_MODE mode, uv_mode;
MV_REFERENCE_FRAME ref_frame;
- union
- {
- int as_int;
- MV as_mv;
- } mv;
+ int_mv mv;
unsigned char partitioning;
unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -25,9 +25,9 @@
(
MACROBLOCKD *xd,
const MODE_INFO *here,
- MV *nearest,
- MV *nearby,
- MV *best_mv,
+ int_mv *nearest,
+ int_mv *nearby,
+ int_mv *best_mv,
int cnt[4],
int refframe,
int *ref_frame_sign_bias
@@ -131,13 +131,14 @@
near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST];
/* Set up return values */
- *best_mv = near_mvs[0].as_mv;
- *nearest = near_mvs[CNT_NEAREST].as_mv;
- *nearby = near_mvs[CNT_NEAR].as_mv;
+ best_mv->as_int = near_mvs[0].as_int;
+ nearest->as_int = near_mvs[CNT_NEAREST].as_int;
+ nearby->as_int = near_mvs[CNT_NEAR].as_int;
- vp8_clamp_mv(nearest, xd);
- vp8_clamp_mv(nearby, xd);
- vp8_clamp_mv(best_mv, xd); /*TODO: move this up before the copy*/
+ //TODO: move clamp outside findnearmv
+ vp8_clamp_mv2(nearest, xd);
+ vp8_clamp_mv2(nearby, xd);
+ vp8_clamp_mv2(best_mv, xd);
}
vp8_prob *vp8_mv_ref_probs(
--- a/vp8/common/findnearmv.h
+++ b/vp8/common/findnearmv.h
@@ -17,11 +17,6 @@
#include "modecont.h"
#include "treecoder.h"
-typedef union
-{
- unsigned int as_int;
- MV as_mv;
-} int_mv; /* facilitates rapid equality tests */
static void mv_bias(int refmb_ref_frame_sign_bias, int refframe, int_mv *mvp, const int *ref_frame_sign_bias)
{
@@ -39,24 +34,48 @@
#define LEFT_TOP_MARGIN (16 << 3)
#define RIGHT_BOTTOM_MARGIN (16 << 3)
-static void vp8_clamp_mv(MV *mv, const MACROBLOCKD *xd)
+static void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd)
{
- if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
- mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
- else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
- mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+ if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
+ mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
+ else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
+ mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
- if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
- mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
- else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
- mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+ if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
+ mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
+ else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
+ mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
}
+static void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge, int mb_to_right_edge,
+ int mb_to_top_edge, int mb_to_bottom_edge)
+{
+ mv->as_mv.col = (mv->as_mv.col < mb_to_left_edge) ?
+ mb_to_left_edge : mv->as_mv.col;
+ mv->as_mv.col = (mv->as_mv.col > mb_to_right_edge) ?
+ mb_to_right_edge : mv->as_mv.col;
+ mv->as_mv.row = (mv->as_mv.row < mb_to_top_edge) ?
+ mb_to_top_edge : mv->as_mv.row;
+ mv->as_mv.row = (mv->as_mv.row > mb_to_bottom_edge) ?
+ mb_to_bottom_edge : mv->as_mv.row;
+}
+static unsigned int vp8_check_mv_bounds(int_mv *mv, int mb_to_left_edge,
+ int mb_to_right_edge, int mb_to_top_edge,
+ int mb_to_bottom_edge)
+{
+ unsigned int need_to_clamp;
+ need_to_clamp = (mv->as_mv.col < mb_to_left_edge) ? 1 : 0;
+ need_to_clamp |= (mv->as_mv.col > mb_to_right_edge) ? 1 : 0;
+ need_to_clamp |= (mv->as_mv.row < mb_to_top_edge) ? 1 : 0;
+ need_to_clamp |= (mv->as_mv.row > mb_to_bottom_edge) ? 1 : 0;
+ return need_to_clamp;
+}
+
void vp8_find_near_mvs
(
MACROBLOCKD *xd,
const MODE_INFO *here,
- MV *nearest, MV *nearby, MV *best,
+ int_mv *nearest, int_mv *nearby, int_mv *best,
int near_mv_ref_cts[4],
int refframe,
int *ref_frame_sign_bias
--- a/vp8/common/mv.h
+++ b/vp8/common/mv.h
@@ -11,6 +11,7 @@
#ifndef __INC_MV_H
#define __INC_MV_H
+#include "vpx/vpx_integer.h"
typedef struct
{
@@ -17,5 +18,11 @@
short row;
short col;
} MV;
+
+typedef union
+{
+ uint32_t as_int;
+ MV as_mv;
+} int_mv; /* facilitates faster equality tests and copies */
#endif
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -283,12 +283,11 @@
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col)
{
- const MV Zero = { 0, 0};
vp8_reader *const bc = & pbi->bc;
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
const int mis = pbi->common.mode_info_stride;
- MV *const mv = & mbmi->mv.as_mv;
+ int_mv *const mv = & mbmi->mv;
int mb_to_left_edge;
int mb_to_right_edge;
int mb_to_top_edge;
@@ -325,7 +324,7 @@
{
int rct[4];
vp8_prob mv_ref_p [VP8_MVREFS-1];
- MV nearest, nearby, best_mv;
+ int_mv nearest, nearby, best_mv;
if (vp8_read(bc, pbi->prob_last))
{
@@ -349,8 +348,6 @@
do /* for each subset j */
{
B_MODE_INFO bmi;
- MV *const mv = & bmi.mv.as_mv;
-
int k; /* first block in subset j */
int mv_contz;
k = vp8_mbsplit_offset[s][j];
@@ -360,27 +357,27 @@
switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
{
case NEW4X4:
- read_mv(bc, mv, (const MV_CONTEXT *) mvc);
- mv->row += best_mv.row;
- mv->col += best_mv.col;
+ read_mv(bc, &bmi.mv.as_mv, (const MV_CONTEXT *) mvc);
+ bmi.mv.as_mv.row += best_mv.as_mv.row;
+ bmi.mv.as_mv.col += best_mv.as_mv.col;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][3]++;
#endif
break;
case LEFT4X4:
- *mv = vp8_left_bmi(mi, k)->mv.as_mv;
+ bmi.mv.as_int = vp8_left_bmi(mi, k)->mv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][0]++;
#endif
break;
case ABOVE4X4:
- *mv = vp8_above_bmi(mi, k, mis)->mv.as_mv;
+ bmi.mv.as_int = vp8_above_bmi(mi, k, mis)->mv.as_int;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][1]++;
#endif
break;
case ZERO4X4:
- *mv = Zero;
+ bmi.mv.as_int = 0;
#ifdef VPX_MODE_COUNT
vp8_mv_cont_count[mv_contz][2]++;
#endif
@@ -389,10 +386,11 @@
break;
}
- mbmi->need_to_clamp_mvs |= (mv->col < mb_to_left_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0;
+ mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&bmi.mv,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
{
/* Fill (uniform) modes, mvs of jth subset.
@@ -414,36 +412,32 @@
while (++j < num_p);
}
- *mv = mi->bmi[15].mv.as_mv;
+ mv->as_int = mi->bmi[15].mv.as_int;
break; /* done with SPLITMV */
case NEARMV:
- *mv = nearby;
+ mv->as_int = nearby.as_int;
/* Clip "next_nearest" so that it does not extend to far out of image */
- mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col;
- mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col;
- mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row;
- mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row;
+ vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
goto propagate_mv;
case NEARESTMV:
- *mv = nearest;
+ mv->as_int = nearest.as_int;
/* Clip "next_nearest" so that it does not extend to far out of image */
- mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col;
- mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col;
- mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row;
- mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row;
+ vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
+ mb_to_top_edge, mb_to_bottom_edge);
goto propagate_mv;
case ZEROMV:
- *mv = Zero;
+ mv->as_int = 0;
goto propagate_mv;
case NEWMV:
- read_mv(bc, mv, (const MV_CONTEXT *) mvc);
- mv->row += best_mv.row;
- mv->col += best_mv.col;
+ read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
+ mv->as_mv.row += best_mv.as_mv.row;
+ mv->as_mv.col += best_mv.as_mv.col;
/* Don't need to check this on NEARMV and NEARESTMV modes
* since those modes clamp the MV. The NEWMV mode does not,
@@ -450,36 +444,30 @@
* so signal to the prediction stage whether special
* handling may be required.
*/
- mbmi->need_to_clamp_mvs = (mv->col < mb_to_left_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0;
- mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0;
+ mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(mv,
+ mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
propagate_mv: /* same MV throughout */
{
- /*int i=0;
- do
- {
- mi->bmi[i].mv.as_mv = *mv;
- }
- while( ++i < 16);*/
-
- mi->bmi[0].mv.as_mv = *mv;
- mi->bmi[1].mv.as_mv = *mv;
- mi->bmi[2].mv.as_mv = *mv;
- mi->bmi[3].mv.as_mv = *mv;
- mi->bmi[4].mv.as_mv = *mv;
- mi->bmi[5].mv.as_mv = *mv;
- mi->bmi[6].mv.as_mv = *mv;
- mi->bmi[7].mv.as_mv = *mv;
- mi->bmi[8].mv.as_mv = *mv;
- mi->bmi[9].mv.as_mv = *mv;
- mi->bmi[10].mv.as_mv = *mv;
- mi->bmi[11].mv.as_mv = *mv;
- mi->bmi[12].mv.as_mv = *mv;
- mi->bmi[13].mv.as_mv = *mv;
- mi->bmi[14].mv.as_mv = *mv;
- mi->bmi[15].mv.as_mv = *mv;
+ mi->bmi[ 0].mv.as_int =
+ mi->bmi[ 1].mv.as_int =
+ mi->bmi[ 2].mv.as_int =
+ mi->bmi[ 3].mv.as_int =
+ mi->bmi[ 4].mv.as_int =
+ mi->bmi[ 5].mv.as_int =
+ mi->bmi[ 6].mv.as_int =
+ mi->bmi[ 7].mv.as_int =
+ mi->bmi[ 8].mv.as_int =
+ mi->bmi[ 9].mv.as_int =
+ mi->bmi[10].mv.as_int =
+ mi->bmi[11].mv.as_int =
+ mi->bmi[12].mv.as_int =
+ mi->bmi[13].mv.as_int =
+ mi->bmi[14].mv.as_int =
+ mi->bmi[15].mv.as_int = mv->as_int;
}
break;
default:;
@@ -494,7 +482,7 @@
int j = 0;
do
{
- mi->bmi[j].mv.as_mv = Zero;
+ mi->bmi[j].mv.as_int = 0;
}
while (++j < 16);
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -799,12 +799,12 @@
static void write_mv
(
- vp8_writer *w, const MV *mv, const MV *ref, const MV_CONTEXT *mvc
+ vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
)
{
MV e;
- e.row = mv->row - ref->row;
- e.col = mv->col - ref->col;
+ e.row = mv->row - ref->as_mv.row;
+ e.col = mv->col - ref->as_mv.col;
vp8_encode_motion_vector(w, &e, mvc);
}
@@ -957,7 +957,7 @@
}
else /* inter coded */
{
- MV best_mv;
+ int_mv best_mv;
vp8_prob mv_ref_p [VP8_MVREFS-1];
vp8_write(w, 1, cpi->prob_intra_coded);
@@ -971,7 +971,7 @@
}
{
- MV n1, n2;
+ int_mv n1, n2;
int ct[4];
vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf, cpi->common.ref_frame_sign_bias);
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1342,8 +1342,8 @@
}
else
{
- MV best_ref_mv;
- MV nearest, nearby;
+ int_mv best_ref_mv;
+ int_mv nearest, nearby;
int mdcounts[4];
int ref_fb_idx;
@@ -1371,15 +1371,15 @@
{
if (xd->block[i].bmi.mode == NEW4X4)
{
- cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.as_mv.col) >> 1)]++;
}
}
}
else if (xd->mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.as_mv.col) >> 1)]++;
}
if (!x->skip)
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -39,7 +39,7 @@
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
-extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
+extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
extern void vp8_alloc_compressor_data(VP8_COMP *cpi);
//#define GFQ_ADJUSTMENT (40 + ((15*Q)/10))
@@ -423,7 +423,10 @@
VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
}
-static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *best_mv, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset )
+static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *ref_mv, MV *best_mv,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset )
{
MACROBLOCKD *const xd = & x->e_mbd;
BLOCK *b = &x->block[0];
@@ -430,7 +433,7 @@
BLOCKD *d = &x->e_mbd.block[0];
int num00;
- MV tmp_mv = {0, 0};
+ int_mv tmp_mv;
int tmp_err;
int step_param = 3; //3; // Dont search over full range for first pass
@@ -446,6 +449,7 @@
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
// Initial step/diamond search centred on best mv
+ tmp_mv.as_int = 0;
tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param, x->errorperbit, &num00, &v_fn_ptr, x->mvcost, ref_mv);
if ( tmp_err < INT_MAX-new_mv_mode_penalty )
tmp_err += new_mv_mode_penalty;
@@ -453,8 +457,8 @@
if (tmp_err < *best_motion_err)
{
*best_motion_err = tmp_err;
- best_mv->row = tmp_mv.row;
- best_mv->col = tmp_mv.col;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
}
// Further step/diamond searches as necessary
@@ -476,8 +480,8 @@
if (tmp_err < *best_motion_err)
{
*best_motion_err = tmp_err;
- best_mv->row = tmp_mv.row;
- best_mv->col = tmp_mv.col;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
}
}
}
@@ -510,8 +514,10 @@
int sum_in_vectors = 0;
- MV zero_ref_mv = {0, 0};
+ int_mv zero_ref_mv;
+ zero_ref_mv.as_int = 0;
+
vp8_clear_system_state(); //__asm emms;
x->src = * cpi->Source;
@@ -602,7 +608,7 @@
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
- first_pass_motion_search(cpi, x, &best_ref_mv.as_mv,
+ first_pass_motion_search(cpi, x, &best_ref_mv,
&d->bmi.mv.as_mv, lst_yv12,
&motion_error, recon_yoffset);
@@ -666,7 +672,7 @@
d->bmi.mv.as_mv.row <<= 3;
d->bmi.mv.as_mv.col <<= 3;
this_error = motion_error;
- vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv.as_mv);
+ vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
sum_mvr += d->bmi.mv.as_mv.row;
sum_mvr_abs += abs(d->bmi.mv.as_mv.row);
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -34,39 +34,30 @@
}
-int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
-static int mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
{
- //int i;
- //return ((mvcost[0][(mv->row - ref->row)>>1] + mvcost[1][(mv->col - ref->col)>>1] + 128) * error_per_bit) >> 8;
- //return ( (vp8_mv_bit_cost(mv, ref, mvcost, 100) + 128) * error_per_bit) >> 8;
-
- //i = (vp8_mv_bit_cost(mv, ref, mvcost, 100) * error_per_bit + 128) >> 8;
- return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit + 128) >> 8;
- //return (vp8_mv_bit_cost(mv, ref, mvcost, 128) * error_per_bit + 128) >> 8;
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
+ mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
+ * error_per_bit + 128) >> 8;
}
-static int mvsad_err_cost(MV *mv, MV *ref, int *mvsadcost[2], int error_per_bit)
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
{
/* Calculate sad error cost on full pixel basis. */
- return ((mvsadcost[0][(mv->row - ref->row)] + mvsadcost[1][(mv->col - ref->col)]) * error_per_bit + 128) >> 8;
+ return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
+ mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
+ * error_per_bit + 128) >> 8;
}
-static int mv_bits(MV *mv, MV *ref, int *mvcost[2])
-{
- // get the estimated number of bits for a motion vector, to be used for costing in SAD based
- // motion estimation
- return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col)>> 1]) + 128) >> 8;
-}
-
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
{
int Len;
@@ -200,13 +191,18 @@
//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
-int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
+int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1)
{
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
- int rr = ref_mv->row >> 1, rc = ref_mv->col >> 1;
- int br = bestmv->row << 2, bc = bestmv->col << 2;
+ int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
+ int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
int tr = br, tc = bc;
unsigned int besterr = INT_MAX;
unsigned int left, right, up, down, diag;
@@ -216,14 +212,14 @@
unsigned int quarteriters = 4;
int thismse;
- int minc = MAX(x->mv_col_min << 2, (ref_mv->col >> 1) - ((1 << mvlong_width) - 1));
- int maxc = MIN(x->mv_col_max << 2, (ref_mv->col >> 1) + ((1 << mvlong_width) - 1));
- int minr = MAX(x->mv_row_min << 2, (ref_mv->row >> 1) - ((1 << mvlong_width) - 1));
- int maxr = MIN(x->mv_row_max << 2, (ref_mv->row >> 1) + ((1 << mvlong_width) - 1));
+ int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
+ int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
+ int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
+ int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
// central mv
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
// calculate central point error
besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
@@ -300,10 +296,11 @@
tc = bc;
}
- bestmv->row = br << 1;
- bestmv->col = bc << 1;
+ bestmv->as_mv.row = br << 1;
+ bestmv->as_mv.col = bc << 1;
- if ((abs(bestmv->col - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs(bestmv->row - ref_mv->row) > MAX_FULL_PEL_VAL))
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
return INT_MAX;
return besterr;
@@ -316,13 +313,17 @@
#undef CHECK_BETTER
#undef MIN
#undef MAX
-int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
+int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1)
{
int bestmse = INT_MAX;
- MV startmv;
- //MV this_mv;
- MV this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
+ int_mv startmv;
+ int_mv this_mv;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
@@ -331,17 +332,18 @@
// Trap uncodable vectors
- if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
+ if ((abs((bestmv->as_mv.col << 3) - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL)
+ || (abs((bestmv->as_mv.row << 3) - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
{
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
*distortion = INT_MAX;
return INT_MAX;
}
// central mv
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
startmv = *bestmv;
// calculate central point error
@@ -350,8 +352,8 @@
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
- this_mv.row = startmv.row;
- this_mv.col = ((startmv.col - 8) | 4);
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -363,7 +365,7 @@
*sse1 = sse;
}
- this_mv.col += 8;
+ this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -376,8 +378,8 @@
}
// go up then down and check error
- this_mv.col = startmv.col;
- this_mv.row = ((startmv.row - 8) | 4);
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -389,7 +391,7 @@
*sse1 = sse;
}
- this_mv.row += 8;
+ this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -411,24 +413,24 @@
switch (whichdir)
{
case 0:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row = (this_mv.row - 8) | 4;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
break;
case 1:
- this_mv.col += 4;
- this_mv.row = (this_mv.row - 8) | 4;
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
break;
case 2:
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row += 4;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
break;
case 3:
default:
- this_mv.col += 4;
- this_mv.row += 4;
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
break;
}
@@ -447,10 +449,10 @@
// time to check quarter pels.
- if (bestmv->row < startmv.row)
+ if (bestmv->as_mv.row < startmv.as_mv.row)
y -= d->pre_stride;
- if (bestmv->col < startmv.col)
+ if (bestmv->as_mv.col < startmv.as_mv.col)
y--;
startmv = *bestmv;
@@ -458,17 +460,17 @@
// go left then right and check error
- this_mv.row = startmv.row;
+ this_mv.as_mv.row = startmv.as_mv.row;
- if (startmv.col & 7)
+ if (startmv.as_mv.col & 7)
{
- this_mv.col = startmv.col - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col = startmv.as_mv.col - 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
- this_mv.col = (startmv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -481,8 +483,8 @@
*sse1 = sse;
}
- this_mv.col += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 4;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (right < bestmse)
@@ -494,17 +496,17 @@
}
// go up then down and check error
- this_mv.col = startmv.col;
+ this_mv.as_mv.col = startmv.as_mv.col;
- if (startmv.row & 7)
+ if (startmv.as_mv.row & 7)
{
- this_mv.row = startmv.row - 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.row = startmv.as_mv.row - 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
- this_mv.row = (startmv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -517,8 +519,8 @@
*sse1 = sse;
}
- this_mv.row += 4;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
if (down < bestmse)
@@ -541,33 +543,33 @@
{
case 0:
- if (startmv.row & 7)
+ if (startmv.as_mv.row & 7)
{
- this_mv.row -= 2;
+ this_mv.as_mv.row -= 2;
- if (startmv.col & 7)
+ if (startmv.as_mv.col & 7)
{
- this_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
- this_mv.col = (startmv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);;
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
}
}
else
{
- this_mv.row = (startmv.row - 8) | 6;
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- if (startmv.col & 7)
+ if (startmv.as_mv.col & 7)
{
- this_mv.col -= 2;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
else
{
- this_mv.col = (startmv.col - 8) | 6;
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
thismse = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
}
}
@@ -574,39 +576,39 @@
break;
case 1:
- this_mv.col += 2;
+ this_mv.as_mv.col += 2;
- if (startmv.row & 7)
+ if (startmv.as_mv.row & 7)
{
- this_mv.row -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.row -= 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
- this_mv.row = (startmv.row - 8) | 6;
- thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
}
break;
case 2:
- this_mv.row += 2;
+ this_mv.as_mv.row += 2;
- if (startmv.col & 7)
+ if (startmv.as_mv.col & 7)
{
- this_mv.col -= 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
else
{
- this_mv.col = (startmv.col - 8) | 6;
- thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);;
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
}
break;
case 3:
- this_mv.col += 2;
- this_mv.row += 2;
- thismse = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 2;
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
break;
}
@@ -623,13 +625,17 @@
return bestmse;
}
-int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
+int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse1)
{
int bestmse = INT_MAX;
- MV startmv;
- //MV this_mv;
- MV this_mv;
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
+ int_mv startmv;
+ int_mv this_mv;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *z = (*(b->base_src) + b->src);
int left, right, up, down, diag;
unsigned int sse;
@@ -636,17 +642,18 @@
int thismse;
// Trap uncodable vectors
- if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
+ if ((abs((bestmv->as_mv.col << 3) - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL)
+ || (abs((bestmv->as_mv.row << 3) - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
{
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
*distortion = INT_MAX;
return INT_MAX;
}
// central mv
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
startmv = *bestmv;
// calculate central point error
@@ -655,8 +662,8 @@
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// go left then right and check error
- this_mv.row = startmv.row;
- this_mv.col = ((startmv.col - 8) | 4);
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -668,7 +675,7 @@
*sse1 = sse;
}
- this_mv.col += 8;
+ this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -681,8 +688,8 @@
}
// go up then down and check error
- this_mv.col = startmv.col;
- this_mv.row = ((startmv.row - 8) | 4);
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -694,7 +701,7 @@
*sse1 = sse;
}
- this_mv.row += 8;
+ this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -745,8 +752,8 @@
}
#else
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row = (this_mv.row - 8) | 4;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -758,7 +765,7 @@
*sse1 = sse;
}
- this_mv.col += 8;
+ this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -770,8 +777,8 @@
*sse1 = sse;
}
- this_mv.col = (this_mv.col - 8) | 4;
- this_mv.row = startmv.row + 4;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = startmv.as_mv.row + 4;
thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -783,7 +790,7 @@
*sse1 = sse;
}
- this_mv.col += 8;
+ this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
@@ -819,8 +826,8 @@
MACROBLOCK *x,
BLOCK *b,
BLOCKD *d,
- MV *ref_mv,
- MV *best_mv,
+ int_mv *ref_mv,
+ int_mv *best_mv,
int search_param,
int error_per_bit,
int *num00,
@@ -827,7 +834,7 @@
const vp8_variance_fn_ptr_t *vfp,
int *mvsadcost[2],
int *mvcost[2],
- MV *center_mv
+ int_mv *center_mv
)
{
MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
@@ -837,8 +844,8 @@
int i, j;
unsigned char *src = (*(b->base_src) + b->src);
int src_stride = b->src_stride;
- int rr = center_mv->row, rc = center_mv->col;
- int br = ref_mv->row >> 3, bc = ref_mv->col >> 3, tr, tc;
+ int rr = center_mv->as_mv.row, rc = center_mv->as_mv.col;
+ int br = ref_mv->as_mv.row >> 3, bc = ref_mv->as_mv.col >> 3, tr, tc;
unsigned int besterr, thiserr = 0x7fffffff;
int k = -1, tk;
@@ -947,8 +954,8 @@
break;
}
- best_mv->row = br;
- best_mv->col = bc;
+ best_mv->as_mv.row = br;
+ best_mv->as_mv.col = bc;
return vfp->vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + mv_err_cost(best_mv, center_mv, mvcost, error_per_bit) ;
}
@@ -965,14 +972,14 @@
MACROBLOCK *x,
BLOCK *b,
BLOCKD *d,
- MV *ref_mv,
- MV *best_mv,
+ int_mv *ref_mv,
+ int_mv *best_mv,
int search_param,
int error_per_bit,
int *num00,
vp8_variance_fn_ptr_t *fn_ptr,
int *mvcost[2],
- MV *center_mv
+ int_mv *center_mv
)
{
int i, j, step;
@@ -984,14 +991,14 @@
unsigned char *best_address;
int tot_steps;
- MV this_mv;
+ int_mv this_mv;
int bestsad = INT_MAX;
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->row >> 3;
- int ref_col = ref_mv->col >> 3;
+ int ref_row = ref_mv->as_mv.row >> 3;
+ int ref_col = ref_mv->as_mv.col >> 3;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1000,14 +1007,14 @@
int thissad;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
*num00 = 0;
- best_mv->row = ref_row;
- best_mv->col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
// Work out the start point for the search
in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
@@ -1033,8 +1040,8 @@
for (j = 0 ; j < x->searches_per_step ; j++)
{
// Trap illegal vectors
- this_row_offset = best_mv->row + ss[i].mv.row;
- this_col_offset = best_mv->col + ss[i].mv.col;
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
(this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
@@ -1045,8 +1052,8 @@
if (thissad < bestsad)
{
- this_mv.row = this_row_offset;
- this_mv.col = this_col_offset;
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
@@ -1062,8 +1069,8 @@
if (best_site != last_site)
{
- best_mv->row += ss[best_site].mv.row;
- best_mv->col += ss[best_site].mv.col;
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
best_address += ss[best_site].offset;
last_site = best_site;
}
@@ -1071,8 +1078,8 @@
(*num00)++;
}
- this_mv.row = best_mv->row << 3;
- this_mv.col = best_mv->col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad == INT_MAX)
return INT_MAX;
@@ -1086,14 +1093,14 @@
MACROBLOCK *x,
BLOCK *b,
BLOCKD *d,
- MV *ref_mv,
- MV *best_mv,
+ int_mv *ref_mv,
+ int_mv *best_mv,
int search_param,
int error_per_bit,
int *num00,
vp8_variance_fn_ptr_t *fn_ptr,
int *mvcost[2],
- MV *center_mv
+ int_mv *center_mv
)
{
int i, j, step;
@@ -1105,14 +1112,14 @@
unsigned char *best_address;
int tot_steps;
- MV this_mv;
+ int_mv this_mv;
int bestsad = INT_MAX;
int best_site = 0;
int last_site = 0;
- int ref_row = ref_mv->row >> 3;
- int ref_col = ref_mv->col >> 3;
+ int ref_row = ref_mv->as_mv.row >> 3;
+ int ref_col = ref_mv->as_mv.col >> 3;
int this_row_offset;
int this_col_offset;
search_site *ss;
@@ -1121,13 +1128,13 @@
unsigned int thissad;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
*num00 = 0;
- best_mv->row = ref_row;
- best_mv->col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
// Work out the start point for the search
in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
@@ -1154,10 +1161,10 @@
// To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
// checking 4 bounds for each points.
- all_in &= ((best_mv->row + ss[i].mv.row)> x->mv_row_min);
- all_in &= ((best_mv->row + ss[i+1].mv.row) < x->mv_row_max);
- all_in &= ((best_mv->col + ss[i+2].mv.col) > x->mv_col_min);
- all_in &= ((best_mv->col + ss[i+3].mv.col) < x->mv_col_max);
+ all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
+ all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
+ all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
+ all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
if (all_in)
{
@@ -1176,8 +1183,8 @@
{
if (sad_array[t] < bestsad)
{
- this_mv.row = best_mv->row + ss[i].mv.row;
- this_mv.col = best_mv->col + ss[i].mv.col;
+ this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
+ this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (sad_array[t] < bestsad)
@@ -1194,8 +1201,8 @@
for (j = 0 ; j < x->searches_per_step ; j++)
{
// Trap illegal vectors
- this_row_offset = best_mv->row + ss[i].mv.row;
- this_col_offset = best_mv->col + ss[i].mv.col;
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
(this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
@@ -1205,8 +1212,8 @@
if (thissad < bestsad)
{
- this_mv.row = this_row_offset;
- this_mv.col = this_col_offset;
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
@@ -1222,8 +1229,8 @@
if (best_site != last_site)
{
- best_mv->row += ss[best_site].mv.row;
- best_mv->col += ss[best_site].mv.col;
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
best_address += ss[best_site].offset;
last_site = best_site;
}
@@ -1231,8 +1238,8 @@
(*num00)++;
}
- this_mv.row = best_mv->row << 3;
- this_mv.col = best_mv->col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad == INT_MAX)
return INT_MAX;
@@ -1241,7 +1248,10 @@
+ mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
-int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
+int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int error_per_bit, int distance,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv)
{
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1249,8 +1259,8 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- MV *best_mv = &d->bmi.mv.as_mv;
- MV this_mv;
+ int_mv *best_mv = &d->bmi.mv;
+ int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1257,8 +1267,8 @@
unsigned char *check_here;
int thissad;
- int ref_row = ref_mv->row;
- int ref_col = ref_mv->col;
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
int row_min = ref_row - distance;
int row_max = ref_row + distance;
@@ -1266,16 +1276,16 @@
int col_max = ref_col + distance;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
in_what = *(d->base_pre) + d->pre;
bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
- best_mv->row = ref_row;
- best_mv->col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
// We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
@@ -1302,7 +1312,7 @@
for (r = row_min; r < row_max ; r++)
{
- this_mv.row = r;
+ this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
for (c = col_min; c < col_max; c++)
@@ -1309,7 +1319,7 @@
{
thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
- this_mv.col = c;
+ this_mv.as_mv.col = c;
//thissad += (int)sqrt(mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
//thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
@@ -1317,8 +1327,8 @@
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
@@ -1326,8 +1336,8 @@
}
}
- this_mv.row = best_mv->row << 3;
- this_mv.col = best_mv->col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
@@ -1336,7 +1346,10 @@
return INT_MAX;
}
-int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
+int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int error_per_bit, int distance,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv)
{
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1344,8 +1357,8 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- MV *best_mv = &d->bmi.mv.as_mv;
- MV this_mv;
+ int_mv *best_mv = &d->bmi.mv;
+ int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1352,8 +1365,8 @@
unsigned char *check_here;
unsigned int thissad;
- int ref_row = ref_mv->row;
- int ref_col = ref_mv->col;
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
int row_min = ref_row - distance;
int row_max = ref_row + distance;
@@ -1363,16 +1376,16 @@
unsigned int sad_array[3];
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
in_what = *(d->base_pre) + d->pre;
bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
- best_mv->row = ref_row;
- best_mv->col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
// We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
@@ -1397,7 +1410,7 @@
for (r = row_min; r < row_max ; r++)
{
- this_mv.row = r;
+ this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
c = col_min;
@@ -1413,14 +1426,14 @@
if (thissad < bestsad)
{
- this_mv.col = c;
+ this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
}
@@ -1436,14 +1449,14 @@
if (thissad < bestsad)
{
- this_mv.col = c;
+ this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
}
@@ -1454,8 +1467,8 @@
}
- this_mv.row = best_mv->row << 3;
- this_mv.col = best_mv->col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
@@ -1464,7 +1477,10 @@
return INT_MAX;
}
-int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
+int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int error_per_bit, int distance,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv)
{
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1472,8 +1488,8 @@
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- MV *best_mv = &d->bmi.mv.as_mv;
- MV this_mv;
+ int_mv *best_mv = &d->bmi.mv;
+ int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1480,8 +1496,8 @@
unsigned char *check_here;
unsigned int thissad;
- int ref_row = ref_mv->row;
- int ref_col = ref_mv->col;
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
int row_min = ref_row - distance;
int row_max = ref_row + distance;
@@ -1492,16 +1508,16 @@
unsigned int sad_array[3];
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Work out the mid point for the search
in_what = *(d->base_pre) + d->pre;
bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
- best_mv->row = ref_row;
- best_mv->col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
// We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
@@ -1526,7 +1542,7 @@
for (r = row_min; r < row_max ; r++)
{
- this_mv.row = r;
+ this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
c = col_min;
@@ -1542,14 +1558,14 @@
if (thissad < bestsad)
{
- this_mv.col = c;
+ this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
}
@@ -1571,14 +1587,14 @@
if (thissad < bestsad)
{
- this_mv.col = c;
+ this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
}
@@ -1594,14 +1610,14 @@
if (thissad < bestsad)
{
- this_mv.col = c;
+ this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
{
bestsad = thissad;
- best_mv->row = r;
- best_mv->col = c;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
bestaddress = check_here;
}
}
@@ -1611,8 +1627,8 @@
}
}
- this_mv.row = best_mv->row << 3;
- this_mv.col = best_mv->col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
@@ -1621,7 +1637,10 @@
return INT_MAX;
}
-int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int search_range, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
+int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int error_per_bit, int search_range,
+ vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ int_mv *center_mv)
{
MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
int i, j;
@@ -1630,17 +1649,18 @@
int what_stride = b->src_stride;
int in_what_stride = d->pre_stride;
unsigned char *what = (*(b->base_src) + b->src);
- unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre + (ref_mv->row * (d->pre_stride)) + ref_mv->col);
+ unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
+ (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
unsigned char *check_here;
unsigned int thissad;
- MV this_mv;
+ int_mv this_mv;
unsigned int bestsad = INT_MAX;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
+ int_mv fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
@@ -1650,8 +1670,8 @@
for (j = 0 ; j < 4 ; j++)
{
- this_row_offset = ref_mv->row + neighbors[j].row;
- this_col_offset = ref_mv->col + neighbors[j].col;
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
(this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
@@ -1661,8 +1681,8 @@
if (thissad < bestsad)
{
- this_mv.row = this_row_offset;
- this_mv.col = this_col_offset;
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
@@ -1678,14 +1698,14 @@
break;
else
{
- ref_mv->row += neighbors[best_site].row;
- ref_mv->col += neighbors[best_site].col;
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
}
}
- this_mv.row = ref_mv->row << 3;
- this_mv.col = ref_mv->col << 3;
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
@@ -1694,7 +1714,10 @@
return INT_MAX;
}
-int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int search_range, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
+int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2], int_mv *center_mv)
{
MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
int i, j;
@@ -1703,17 +1726,18 @@
int what_stride = b->src_stride;
int in_what_stride = d->pre_stride;
unsigned char *what = (*(b->base_src) + b->src);
- unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre + (ref_mv->row * (d->pre_stride)) + ref_mv->col);
+ unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
+ (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
unsigned char *check_here;
unsigned int thissad;
- MV this_mv;
+ int_mv this_mv;
unsigned int bestsad = INT_MAX;
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- MV fcenter_mv;
+ int_mv fcenter_mv;
- fcenter_mv.row = center_mv->row >> 3;
- fcenter_mv.col = center_mv->col >> 3;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
@@ -1722,10 +1746,10 @@
int best_site = -1;
int all_in = 1;
- all_in &= ((ref_mv->row - 1) > x->mv_row_min);
- all_in &= ((ref_mv->row + 1) < x->mv_row_max);
- all_in &= ((ref_mv->col - 1) > x->mv_col_min);
- all_in &= ((ref_mv->col + 1) < x->mv_col_max);
+ all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
+ all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
+ all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
+ all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
if(all_in)
{
@@ -1742,8 +1766,8 @@
{
if (sad_array[j] < bestsad)
{
- this_mv.row = ref_mv->row + neighbors[j].row;
- this_mv.col = ref_mv->col + neighbors[j].col;
+ this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
+ this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (sad_array[j] < bestsad)
@@ -1758,8 +1782,8 @@
{
for (j = 0 ; j < 4 ; j++)
{
- this_row_offset = ref_mv->row + neighbors[j].row;
- this_col_offset = ref_mv->col + neighbors[j].col;
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
(this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
@@ -1769,8 +1793,8 @@
if (thissad < bestsad)
{
- this_mv.row = this_row_offset;
- this_mv.col = this_col_offset;
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
if (thissad < bestsad)
@@ -1787,14 +1811,14 @@
break;
else
{
- ref_mv->row += neighbors[best_site].row;
- ref_mv->col += neighbors[best_site].col;
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
}
}
- this_mv.row = ref_mv->row << 3;
- this_mv.col = ref_mv->col << 3;
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -26,7 +26,7 @@
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
extern void print_mode_context(void);
-extern int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight);
+extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
@@ -36,8 +36,8 @@
MACROBLOCK *x,
BLOCK *b,
BLOCKD *d,
- MV *ref_mv,
- MV *best_mv,
+ int_mv *ref_mv,
+ int_mv *best_mv,
int search_param,
int error_per_bit,
int *num00,
@@ -44,12 +44,13 @@
const vp8_variance_fn_ptr_t *vf,
int *mvsadcost[2],
int *mvcost[2],
- MV *center_mv
+ int_mv *center_mv
);
typedef int (fractional_mv_step_fp)
- (MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv,
- int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse);
+ (MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2],
+ int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp8_find_best_half_pixel_step;
@@ -61,12 +62,12 @@
MACROBLOCK *x, \
BLOCK *b, \
BLOCKD *d, \
- MV *ref_mv, \
+ int_mv *ref_mv, \
int error_per_bit, \
int distance, \
vp8_variance_fn_ptr_t *fn_ptr, \
int *mvcost[2], \
- MV *center_mv \
+ int_mv *center_mv \
)
#define prototype_refining_search_sad(sym)\
@@ -75,12 +76,12 @@
MACROBLOCK *x, \
BLOCK *b, \
BLOCKD *d, \
- MV *ref_mv, \
+ int_mv *ref_mv, \
int error_per_bit, \
int distance, \
vp8_variance_fn_ptr_t *fn_ptr, \
int *mvcost[2], \
- MV *center_mv \
+ int_mv *center_mv \
)
#define prototype_diamond_search_sad(sym)\
@@ -89,14 +90,14 @@
MACROBLOCK *x, \
BLOCK *b, \
BLOCKD *d, \
- MV *ref_mv, \
- MV *best_mv, \
+ int_mv *ref_mv, \
+ int_mv *best_mv, \
int search_param, \
int error_per_bit, \
int *num00, \
vp8_variance_fn_ptr_t *fn_ptr, \
int *mvcost[2], \
- MV *center_mv \
+ int_mv *center_mv \
)
#if ARCH_X86 || ARCH_X86_64
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -47,10 +47,15 @@
extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
-extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
+extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
-int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse)
+int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvcost[2], int *distortion,
+ unsigned int *sse)
{
(void) b;
(void) d;
@@ -60,8 +65,8 @@
(void) mvcost;
(void) distortion;
(void) sse;
- bestmv->row <<= 3;
- bestmv->col <<= 3;
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
return 0;
}
@@ -429,8 +434,8 @@
B_MODE_INFO best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
- MV best_ref_mv;
- MV mode_mv[MB_MODE_COUNT];
+ int_mv best_ref_mv;
+ int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
int num00;
int i;
@@ -447,14 +452,14 @@
int best_mode_index = 0;
unsigned int sse = INT_MAX;
- MV mvp;
+ int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
- MV nearest_mv[4];
- MV near_mv[4];
- MV frame_best_ref_mv[4];
+ int_mv nearest_mv[4];
+ int_mv near_mv[4];
+ int_mv frame_best_ref_mv[4];
int MDCounts[4][4];
unsigned char *y_buffer[4];
unsigned char *u_buffer[4];
@@ -631,14 +636,11 @@
x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
/* adjust mvp to make sure it is within MV range */
- if(mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
- mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
- else if(mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
- mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;
- if(mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
- mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
- else if(mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
- mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
+ vp8_clamp_mv(&mvp,
+ best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL);
}
switch (this_mode)
@@ -723,10 +725,10 @@
if(sr > step_param)
step_param = sr;
- col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >>3;
- col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >>3;
- row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >>3;
- row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >>3;
+ col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
+ col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
+ row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
+ row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
// Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
if (x->mv_col_min < col_min )
@@ -739,8 +741,7 @@
x->mv_row_max = row_max;
}else
{
- mvp.row = best_ref_mv.row;
- mvp.col = best_ref_mv.col;
+ mvp.as_int = best_ref_mv.as_int;
}
further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param);
@@ -747,15 +748,13 @@
if (cpi->sf.search_method == HEX)
{
- bestsme = vp8_hex_search(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ bestsme = vp8_hex_search(x, b, d, &mvp, &d->bmi.mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
}
else
{
- bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb < 9
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb < 9
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
n = 0;
@@ -772,18 +771,21 @@
num00--;
else
{
- thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb = 9
-
+ thissme =
+ cpi->diamond_search_sad(x, b, d, &mvp,
+ &d->bmi.mv,
+ step_param + n,
+ sadpb / 4, &num00,
+ &cpi->fn_ptr[BLOCK_16X16],
+ x->mvcost, &best_ref_mv);
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
}
else
{
- d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
- d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
}
}
}
@@ -798,10 +800,9 @@
}
if (bestsme < INT_MAX)
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// mv cost;
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
@@ -810,7 +811,7 @@
case NEARESTMV:
case NEARMV:
- if (mode_mv[this_mode].row == 0 && mode_mv[this_mode].col == 0)
+ if (mode_mv[this_mode].as_int == 0)
continue;
case ZEROMV:
@@ -818,13 +819,13 @@
// Trap vectors that reach beyond the UMV borders
// Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
// because of the lack of break statements in the previous two cases.
- if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
continue;
rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode];
+ x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode].as_mv;
x->e_mbd.block[0].bmi.mode = this_mode;
x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;
@@ -965,7 +966,7 @@
}
else
{
- vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv.as_mv);
+ vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv);
}
x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -848,20 +848,18 @@
vp8_mv_ref_encoding_array - NEARESTMV + m);
}
-void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv)
+void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)
{
int i;
x->e_mbd.mode_info_context->mbmi.mode = mb;
- x->e_mbd.mode_info_context->mbmi.mv.as_mv.row = mv->row;
- x->e_mbd.mode_info_context->mbmi.mv.as_mv.col = mv->col;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
for (i = 0; i < 16; i++)
{
B_MODE_INFO *bmi = &x->e_mbd.block[i].bmi;
bmi->mode = (B_PREDICTION_MODE) mb;
- bmi->mv.as_mv.row = mv->row;
- bmi->mv.as_mv.col = mv->col;
+ bmi->mv.as_int = mv->as_int;
}
}
@@ -869,7 +867,7 @@
MACROBLOCK *x,
int const *labelings, int which_label,
B_PREDICTION_MODE this_mode,
- MV *this_mv, MV *best_ref_mv,
+ int_mv *this_mv, int_mv *best_ref_mv,
int *mvcost[2]
)
{
@@ -910,13 +908,13 @@
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
break;
case LEFT4X4:
- *this_mv = col ? d[-1].bmi.mv.as_mv : vp8_left_bmi(mic, i)->mv.as_mv;
+ this_mv->as_int = col ? d[-1].bmi.mv.as_int : vp8_left_bmi(mic, i)->mv.as_int;
break;
case ABOVE4X4:
- *this_mv = row ? d[-4].bmi.mv.as_mv : vp8_above_bmi(mic, i, mis)->mv.as_mv;
+ this_mv->as_int = row ? d[-4].bmi.mv.as_int : vp8_above_bmi(mic, i, mis)->mv.as_int;
break;
case ZERO4X4:
- this_mv->row = this_mv->col = 0;
+ this_mv->as_int = 0;
break;
default:
break;
@@ -924,9 +922,11 @@
if (m == ABOVE4X4) // replace above with left if same
{
- const MV mv = col ? d[-1].bmi.mv.as_mv : vp8_left_bmi(mic, i)->mv.as_mv;
+ int_mv left_mv;
+ left_mv.as_int = col ? d[-1].bmi.mv.as_int :
+ vp8_left_bmi(mic, i)->mv.as_int;
- if (mv.row == this_mv->row && mv.col == this_mv->col)
+ if (left_mv.as_int == this_mv->as_int)
m = LEFT4X4;
}
@@ -934,7 +934,7 @@
}
d->bmi.mode = m;
- d->bmi.mv.as_mv = *this_mv;
+ d->bmi.mv.as_int = this_mv->as_int;
}
while (++i < 16);
@@ -994,8 +994,8 @@
typedef struct
{
- MV *ref_mv;
- MV *mvp;
+ int_mv *ref_mv;
+ int_mv mvp;
int segment_rd;
int segment_num;
@@ -1009,7 +1009,7 @@
int mvthresh;
int *mdcounts;
- MV sv_mvp[4]; // save 4 mvp from 8x8
+ int_mv sv_mvp[4]; // save 4 mvp from 8x8
int sv_istep[2]; // save 2 initial step_param for 16x8/8x16
} BEST_SEG_INFO;
@@ -1071,7 +1071,7 @@
for (i = 0; i < label_count; i++)
{
- MV mode_mv[B_MODE_COUNT];
+ int_mv mode_mv[B_MODE_COUNT];
int best_label_rd = INT_MAX;
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
@@ -1101,7 +1101,7 @@
int n;
int thissme;
int bestsme = INT_MAX;
- MV temp_mv;
+ int_mv temp_mv;
BLOCK *c;
BLOCKD *e;
@@ -1113,8 +1113,9 @@
{
if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8)
{
- bsi->mvp = &bsi->sv_mvp[i];
- if (i==1 && segmentation == BLOCK_16X8) bsi->mvp = &bsi->sv_mvp[2];
+ bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
+ if (i==1 && segmentation == BLOCK_16X8)
+ bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
step_param = bsi->sv_istep[i];
}
@@ -1122,8 +1123,9 @@
// use previous block's result as next block's MV predictor.
if (segmentation == BLOCK_4X4 && i>0)
{
- bsi->mvp = &(x->e_mbd.block[i-1].bmi.mv.as_mv);
- if (i==4 || i==8 || i==12) bsi->mvp = &(x->e_mbd.block[i-4].bmi.mv.as_mv);
+ bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
+ if (i==4 || i==8 || i==12)
+ bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.mv.as_int;
step_param = 2;
}
}
@@ -1145,7 +1147,7 @@
else
{
- bestsme = cpi->diamond_search_sad(x, c, e, bsi->mvp,
+ bestsme = cpi->diamond_search_sad(x, c, e, &bsi->mvp,
&mode_mv[NEW4X4], step_param,
sadpb / 2, &num00, v_fn_ptr, x->mvcost, bsi->ref_mv);
@@ -1160,7 +1162,7 @@
num00--;
else
{
- thissme = cpi->diamond_search_sad(x, c, e, bsi->mvp,
+ thissme = cpi->diamond_search_sad(x, c, e, &bsi->mvp,
&temp_mv, step_param + n,
sadpb / 2, &num00, v_fn_ptr, x->mvcost, bsi->ref_mv);
@@ -1167,8 +1169,7 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEW4X4].row = temp_mv.row;
- mode_mv[NEW4X4].col = temp_mv.col;
+ mode_mv[NEW4X4].as_int = temp_mv.as_int;
}
}
}
@@ -1179,10 +1180,10 @@
// Should we do a full search (best quality only)
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
{
- MV full_mvp;
+ int_mv full_mvp;
- full_mvp.row = bsi->mvp->row >>3;
- full_mvp.col = bsi->mvp->col >>3;
+ full_mvp.as_mv.row = bsi->mvp.as_mv.row >>3;
+ full_mvp.as_mv.col = bsi->mvp.as_mv.col >>3;
thissme = cpi->full_search_sad(x, c, e, &full_mvp,
sadpb / 4, 16, v_fn_ptr, x->mvcost, bsi->ref_mv);
@@ -1190,12 +1191,12 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEW4X4] = e->bmi.mv.as_mv;
+ mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
}
else
{
// The full search result is actually worse so re-instate the previous best vector
- e->bmi.mv.as_mv = mode_mv[NEW4X4];
+ e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
}
}
}
@@ -1218,8 +1219,8 @@
bsi->ref_mv, x->mvcost);
// Trap vectors that reach beyond the UMV borders
- if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
{
continue;
}
@@ -1296,7 +1297,7 @@
}
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
- MV *best_ref_mv, int best_rd,
+ int_mv *best_ref_mv, int best_rd,
int *mdcounts, int *returntotrate,
int *returnyrate, int *returndistortion,
int mvthresh)
@@ -1308,7 +1309,7 @@
bsi.segment_rd = best_rd;
bsi.ref_mv = best_ref_mv;
- bsi.mvp = best_ref_mv;
+ bsi.mvp.as_int = best_ref_mv->as_int;
bsi.mvthresh = mvthresh;
bsi.mdcounts = mdcounts;
@@ -1334,10 +1335,10 @@
if (bsi.segment_rd < best_rd)
{
- int col_min = (best_ref_mv->col - MAX_FULL_PEL_VAL) >>3;
- int col_max = (best_ref_mv->col + MAX_FULL_PEL_VAL) >>3;
- int row_min = (best_ref_mv->row - MAX_FULL_PEL_VAL) >>3;
- int row_max = (best_ref_mv->row + MAX_FULL_PEL_VAL) >>3;
+ int col_min = (best_ref_mv->as_mv.col - MAX_FULL_PEL_VAL) >>3;
+ int col_max = (best_ref_mv->as_mv.col + MAX_FULL_PEL_VAL) >>3;
+ int row_min = (best_ref_mv->as_mv.row - MAX_FULL_PEL_VAL) >>3;
+ int row_max = (best_ref_mv->as_mv.row + MAX_FULL_PEL_VAL) >>3;
int tmp_col_min = x->mv_col_min;
int tmp_col_max = x->mv_col_max;
@@ -1355,18 +1356,18 @@
x->mv_row_max = row_max;
/* Get 8x8 result */
- bsi.sv_mvp[0] = bsi.mvs[0].as_mv;
- bsi.sv_mvp[1] = bsi.mvs[2].as_mv;
- bsi.sv_mvp[2] = bsi.mvs[8].as_mv;
- bsi.sv_mvp[3] = bsi.mvs[10].as_mv;
+ bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
+ bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
+ bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
+ bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
/* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
/* block 8X16 */
{
- sr = MAXF((abs(bsi.sv_mvp[0].row - bsi.sv_mvp[2].row))>>3, (abs(bsi.sv_mvp[0].col - bsi.sv_mvp[2].col))>>3);
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[1].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[1].col - bsi.sv_mvp[3].col))>>3);
+ sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
@@ -1374,10 +1375,10 @@
/* block 16X8 */
{
- sr = MAXF((abs(bsi.sv_mvp[0].row - bsi.sv_mvp[1].row))>>3, (abs(bsi.sv_mvp[0].col - bsi.sv_mvp[1].col))>>3);
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[2].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[2].col - bsi.sv_mvp[3].col))>>3);
+ sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
@@ -1387,7 +1388,7 @@
/* Not skip 4x4 if speed=0 (good quality) */
if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
{
- bsi.mvp = &bsi.sv_mvp[0];
+ bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
}
@@ -1487,7 +1488,7 @@
VP8_COMP *cpi,
MACROBLOCKD *xd,
const MODE_INFO *here,
- MV *mvp,
+ int_mv *mvp,
int refframe,
int *ref_frame_sign_bias,
int *sr,
@@ -1626,8 +1627,8 @@
}
/* Set up return values */
- *mvp = mv.as_mv;
- vp8_clamp_mv(mvp, xd);
+ mvp->as_int = mv.as_int;
+ vp8_clamp_mv2(mvp, xd);
}
void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
@@ -1693,8 +1694,8 @@
B_MODE_INFO best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
- MV best_ref_mv;
- MV mode_mv[MB_MODE_COUNT];
+ int_mv best_ref_mv;
+ int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
int num00;
int best_mode_index = 0;
@@ -1719,14 +1720,14 @@
//int intermodecost[MAX_MODES];
MB_PREDICTION_MODE uv_intra_mode;
- MV mvp;
+ int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
int saddone=0;
int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
- MV frame_nearest_mv[4];
- MV frame_near_mv[4];
- MV frame_best_ref_mv[4];
+ int_mv frame_nearest_mv[4];
+ int_mv frame_near_mv[4];
+ int_mv frame_best_ref_mv[4];
int frame_mdcounts[4][4];
int frame_lf_or_gf[4];
unsigned char *y_buffer[4];
@@ -1876,14 +1877,11 @@
x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
/* adjust mvp to make sure it is within MV range */
- if(mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
- mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
- else if(mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
- mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;
- if(mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
- mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
- else if(mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
- mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
+ vp8_clamp_mv(&mvp,
+ best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL,
+ best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL);
}
// Check to see if the testing frequency for this mode is at its max
@@ -2015,10 +2013,10 @@
int sadpb = x->sadperbit16;
- int col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >>3;
- int col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >>3;
- int row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >>3;
- int row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >>3;
+ int col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
+ int col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
+ int row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
+ int row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
int tmp_col_min = x->mv_col_min;
int tmp_col_max = x->mv_col_max;
@@ -2042,15 +2040,13 @@
// Initial step/diamond search
if (cpi->sf.search_method == HEX)
{
- bestsme = vp8_hex_search(x, b, d, &best_ref_mv, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ bestsme = vp8_hex_search(x, b, d, &best_ref_mv, &d->bmi.mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
}
else
{
- bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb < 9
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb < 9
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
n = 0;
@@ -2071,7 +2067,7 @@
num00--;
else
{
- thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb = 9
+ thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb = 9
/* check to see if refining search is needed. */
if (num00 > (further_steps-n))
@@ -2080,13 +2076,11 @@
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
}
else
{
- d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
- d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
}
}
}
@@ -2102,18 +2096,16 @@
search_range = 8;
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
- thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb/4, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+ thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb/4, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
}
else
{
- d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
- d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
+ d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
}
}
@@ -2126,11 +2118,10 @@
{
int dis; /* TODO: use dis in distortion calculation later. */
unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit / 4, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse);
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit / 4, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse);
}
- mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
- mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
+ mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Add the new motion vector cost to our rolling cost variable
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
@@ -2138,21 +2129,11 @@
case NEARESTMV:
case NEARMV:
-
// Clip "next_nearest" so that it does not extend to far out of image
- if (mode_mv[this_mode].col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
- mode_mv[this_mode].col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
- else if (mode_mv[this_mode].col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
- mode_mv[this_mode].col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+ vp8_clamp_mv2(&mode_mv[this_mode], xd);
- if (mode_mv[this_mode].row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
- mode_mv[this_mode].row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
- else if (mode_mv[this_mode].row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
- mode_mv[this_mode].row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
-
// Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
- if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
- ((mode_mv[this_mode].row == 0) && (mode_mv[this_mode].col == 0)))
+ if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
continue;
case ZEROMV:
@@ -2160,8 +2141,8 @@
// Trap vectors that reach beyond the UMV borders
// Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
// because of the lack of break statements in the previous two cases.
- if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
continue;
vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
--- a/vp8/encoder/rdopt.h
+++ b/vp8/encoder/rdopt.h
@@ -25,7 +25,7 @@
VP8_COMP *cpi,
MACROBLOCKD *xd,
const MODE_INFO *here,
- MV *mvp,
+ int_mv *mvp,
int refframe,
int *ref_frame_sign_bias,
int *sr,
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -157,7 +157,7 @@
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
- MV best_ref_mv1 = {0,0};
+ int_mv best_ref_mv1;
int *mvcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
int *mvsadcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
@@ -170,6 +170,8 @@
int pre = d->pre;
int pre_stride = d->pre_stride;
+ best_ref_mv1.as_int = 0;
+
// Setup frame pointers
b->base_src = &arf_frame->y_buffer;
b->src_stride = arf_frame->y_stride;
@@ -196,7 +198,7 @@
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
bestsme = vp8_hex_search(x, b, d,
- &best_ref_mv1, &d->bmi.mv.as_mv,
+ &best_ref_mv1, &d->bmi.mv,
step_param,
sadpb/*x->errorperbit*/,
&num00, &cpi->fn_ptr[BLOCK_16X16],
@@ -209,7 +211,7 @@
int distortion;
unsigned int sse;
bestsme = cpi->find_fractional_mv_step(x, b, d,
- &d->bmi.mv.as_mv, &best_ref_mv1,
+ &d->bmi.mv, &best_ref_mv1,
x->errorperbit, &cpi->fn_ptr[BLOCK_16X16],
mvcost, &distortion, &sse);
}
--