ref: 5a80e7a29232e5a717bf948d184d1848240d8882
parent: 09fb253b3951baab6f9384bbf1b93496908340a4
author: Ronald S. Bultje <rbultje@google.com>
date: Mon Oct 22 07:25:48 EDT 2012
Use SPLITMV_PARTITIONING instead of a plain integer type.

This can be used to distinguish between 16x8, 8x16, 8x8 and 4x4
partitioning modes when choosing splitmv as an MB mode.

Change-Id: Idc8b59772e1a80ccc4ad44d63c5c2ec3fc061a3c
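For reference, a minimal, self-contained sketch (not part of the patch) of the
new type and how a typed value reads at a call site; the enum mirrors the one
added to vp8/common/blockd.h below, while num_mv_partitions() is a hypothetical
helper introduced only for illustration:

    /* Mirrors the enum this patch adds to vp8/common/blockd.h. */
    typedef enum {
      PARTITIONING_16X8 = 0,  /* two 16x8 halves, one MV each    */
      PARTITIONING_8X16,      /* two 8x16 halves, one MV each    */
      PARTITIONING_8X8,       /* four 8x8 quarters, one MV each  */
      PARTITIONING_4X4,       /* sixteen 4x4 blocks, one MV each */
      NB_PARTITIONINGS,
    } SPLITMV_PARTITIONING_TYPE;

    /* Hypothetical helper, not in the patch: with an enum, checks such as
     * "partitioning < 3" can be written in terms of named modes instead of
     * magic numbers. */
    static int num_mv_partitions(SPLITMV_PARTITIONING_TYPE p) {
      switch (p) {
        case PARTITIONING_16X8:
        case PARTITIONING_8X16: return 2;
        case PARTITIONING_8X8:  return 4;
        case PARTITIONING_4X4:  return 16;
        default:                return 0;  /* NB_PARTITIONINGS is a count, not a mode */
      }
    }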
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -177,6 +177,14 @@
#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
+typedef enum {
+ PARTITIONING_16X8 = 0,
+ PARTITIONING_8X16,
+ PARTITIONING_8X8,
+ PARTITIONING_4X4,
+ NB_PARTITIONINGS,
+} SPLITMV_PARTITIONING_TYPE;
+
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
@@ -220,7 +228,7 @@
int mv_ref_index[MAX_REF_FRAMES];
#endif
- unsigned char partitioning;
+ SPLITMV_PARTITIONING_TYPE partitioning;
unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
unsigned char need_to_clamp_mvs;
unsigned char need_to_clamp_secondmv;
--- a/vp8/common/entropymode.c
+++ b/vp8/common/entropymode.c
@@ -215,9 +215,9 @@
};
const vp8_tree_index vp8_mbsplit_tree[6] = {
- -3, 2,
- -2, 4,
- -0, -1
+ -PARTITIONING_4X4, 2,
+ -PARTITIONING_8X8, 4,
+ -PARTITIONING_16X8, -PARTITIONING_8X16,
};
const vp8_tree_index vp8_mv_ref_tree[8] = {
--- a/vp8/common/postproc.c
+++ b/vp8/common/postproc.c
@@ -783,7 +783,7 @@
if (mi->mbmi.mode == SPLITMV) {
switch (mi->mbmi.partitioning) {
- case 0 : { /* mv_top_bottom */
+ case PARTITIONING_16X8 : { /* mv_top_bottom */
union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv;
@@ -803,7 +803,7 @@
break;
}
- case 1 : { /* mv_left_right */
+ case PARTITIONING_8X16 : { /* mv_left_right */
union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv;
@@ -823,7 +823,7 @@
break;
}
- case 2 : { /* mv_quarters */
+ case PARTITIONING_8X8 : { /* mv_quarters */
union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv;
@@ -858,6 +858,7 @@
vp8_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride);
break;
}
+ case PARTITIONING_4X4:
default : {
union b_mode_info *bmi = mi->bmi;
int bx0, by0;
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -965,7 +965,7 @@
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
BLOCKD *blockd = xd->block;
- if (xd->mode_info_context->mbmi.partitioning < 3) {
+ if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
blockd[ 0].bmi = xd->mode_info_context->bmi[ 0];
blockd[ 2].bmi = xd->mode_info_context->bmi[ 2];
blockd[ 8].bmi = xd->mode_info_context->bmi[ 8];
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -372,10 +372,10 @@
} VP8_ENCODER_RTCD;
enum {
- BLOCK_16X8,
- BLOCK_8X16,
- BLOCK_8X8,
- BLOCK_4X4,
+ BLOCK_16X8 = PARTITIONING_16X8,
+ BLOCK_8X16 = PARTITIONING_8X16,
+ BLOCK_8X8 = PARTITIONING_8X8,
+ BLOCK_4X4 = PARTITIONING_4X4,
BLOCK_16X16,
BLOCK_MAX_SEGMENTS,
BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -2272,7 +2272,7 @@
int_mv mvp;
int64_t segment_rd;
- int segment_num;
+ SPLITMV_PARTITIONING_TYPE segment_num;
int r;
int d;
int segment_yrate;
@@ -2299,8 +2299,10 @@
}
static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
- BEST_SEG_INFO *bsi, unsigned int segmentation,
- int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
+ BEST_SEG_INFO *bsi,
+ SPLITMV_PARTITIONING_TYPE segmentation,
+ int_mv seg_mvs[16 /* n_blocks */]
+ [MAX_REF_FRAMES - 1]) {
int i, j;
int const *labels;
int br = 0, bd = 0;
@@ -2341,7 +2343,8 @@
label_mv_thresh = 1 * bsi->mvthresh / label_count;
// Segmentation method overheads
- rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
+ rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
+ vp8_mbsplit_encodings + segmentation);
rate += vp8_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
@@ -2376,7 +2379,8 @@
BLOCK *c;
BLOCKD *e;
- // Is the best so far sufficiently good that we cant justify doing and new motion search.
+ /* Is the best so far sufficiently good that we can't justify doing
+ * a new motion search? */
if (best_label_rd < label_mv_thresh)
break;
@@ -2422,7 +2426,8 @@
// Should we do a full search (best quality only)
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
/* Check if mvp_full is within the range. */
- vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
+ x->mv_row_min, x->mv_row_max);
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
@@ -2432,7 +2437,8 @@
bestsme = thissme;
mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
} else {
- // The full search result is actually worse so re-instate the previous best vector
+ /* The full search result is actually worse so re-instate the
+ * previous best vector */
e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
}
}
@@ -2442,8 +2448,8 @@
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
- &distortion, &sse);
+ bsi->ref_mv, x->errorperbit, v_fn_ptr,
+ XMVCOST, &distortion, &sse);
// safe motion search result for use in compound prediction
seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
@@ -2450,7 +2456,8 @@
}
} /* NEW4X4 */
else if (mbmi->second_ref_frame && this_mode == NEW4X4) {
- // motion search not completed? Then skip newmv for this block with comppred
+ /* motion search not completed? Then skip newmv for this block with
+ * comppred */
if (seg_mvs[i][mbmi->second_ref_frame - 1].as_int == INVALID_MV ||
seg_mvs[i][mbmi->ref_frame - 1].as_int == INVALID_MV) {
continue;
@@ -2472,7 +2479,7 @@
mv_check_bounds(x, &second_mode_mv[this_mode]))
continue;
- if (segmentation == BLOCK_4X4) {
+ if (segmentation == PARTITIONING_4X4) {
this_rd = encode_inter_mb_segment(x, labels, i, &labelyrate,
&distortion,
ta_s, tl_s, IF_RTCD(&cpi->rtcd));
@@ -2504,7 +2511,8 @@
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
+ &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv,
+ bsi->second_ref_mv, XMVCOST);
br += sbr;
bd += sbd;
@@ -2551,12 +2559,18 @@
*sp = MAX_MVSEARCH_STEPS - 1 - step;
}
-static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int_mv *second_best_ref_mv, int64_t best_rd,
- int *mdcounts, int *returntotrate,
- int *returnyrate, int *returndistortion,
- int *skippable, int mvthresh,
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
+static int rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int64_t best_rd,
+ int *mdcounts,
+ int *returntotrate,
+ int *returnyrate,
+ int *returndistortion,
+ int *skippable, int mvthresh,
+ int_mv seg_mvs[NB_PARTITIONINGS]
+ [16 /* n_blocks */]
+ [MAX_REF_FRAMES - 1]) {
int i;
BEST_SEG_INFO bsi;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@@ -2576,14 +2590,19 @@
if (cpi->compressor_speed == 0) {
/* for now, we will keep the original segmentation order
when in best quality mode */
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
+ seg_mvs[PARTITIONING_16X8]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
+ seg_mvs[PARTITIONING_8X16]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
+ seg_mvs[PARTITIONING_8X8]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
+ seg_mvs[PARTITIONING_4X4]);
} else {
int sr;
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
+ seg_mvs[PARTITIONING_8X8]);
if (bsi.segment_rd < best_rd) {
@@ -2600,34 +2619,37 @@
bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
- /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
- /* block 8X16 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+ /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range
+ * according to the closeness of the two MVs. */
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+ sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- }
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
+ seg_mvs[PARTITIONING_8X16]);
- /* block 16X8 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+ sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
- }
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
+ seg_mvs[PARTITIONING_16X8]);
/* If 8x8 is better than 16x8/8x16, then do 4x4 search */
/* Not skip 4x4 if speed=0 (good quality) */
- if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) { /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
+ if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) {
+ /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
+ seg_mvs[PARTITIONING_4X4]);
}
/* restore UMV window */
@@ -3342,7 +3364,7 @@
unsigned char *y_buffer[4], *u_buffer[4], *v_buffer[4];
unsigned int ref_costs[MAX_REF_FRAMES];
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
+ int_mv seg_mvs[NB_PARTITIONINGS][16 /* n_blocks */][MAX_REF_FRAMES - 1];
vpx_memset(mode8x8, 0, sizeof(mode8x8));
vpx_memset(&frame_mv, 0, sizeof(frame_mv));
@@ -3357,7 +3379,7 @@
for (i = 0; i < NB_TXFM_MODES; i++)
best_txfm_rd[i] = INT64_MAX;
- for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++) {
+ for (i = 0; i < NB_PARTITIONINGS; i++) {
int j, k;
for (j = 0; j < 16; j++)
@@ -3724,11 +3746,11 @@
cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
mbmi->txfm_size = TX_4X4; // FIXME use 8x8 in case of 8x8/8x16/16x8
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
- second_ref, best_yrd, mdcounts,
- &rate, &rate_y, &distortion,
- &skippable,
- this_rd_thresh, seg_mvs);
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
+ second_ref, best_yrd, mdcounts,
+ &rate, &rate_y, &distortion,
+ &skippable,
+ this_rd_thresh, seg_mvs);
rate2 += rate;
distortion2 += distortion;
--
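The entropymode.c hunk above rewrites the leaves of vp8_mbsplit_tree as negated
enum values. As a rough, hypothetical sketch (not part of the patch), this is
the tree-walk convention those leaves rely on: non-negative entries index the
next node pair, non-positive entries are negated leaf values, which is also why
the old table could encode leaf 0 as "-0". Here read_bit() stands in for the
boolean decoder, the per-node probabilities are omitted, and the
SPLITMV_PARTITIONING_TYPE enum from blockd.h is assumed to be in scope.

    static const int mbsplit_tree[6] = {
      -PARTITIONING_4X4, 2,                     /* leaf 4x4, or descend to node pair at index 2 */
      -PARTITIONING_8X8, 4,                     /* leaf 8x8, or descend to node pair at index 4 */
      -PARTITIONING_16X8, -PARTITIONING_8X16,   /* remaining leaves */
    };

    /* Walk the tree one bit at a time until a non-positive (leaf) entry is hit. */
    static SPLITMV_PARTITIONING_TYPE read_mbsplit(int (*read_bit)(void)) {
      int i = 0;
      do {
        i = mbsplit_tree[i + read_bit()];
      } while (i > 0);
      return (SPLITMV_PARTITIONING_TYPE)(-i);   /* leaf values are stored negated */
    }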