ref: dc836109e4b8013ee28f4f36b4a48fd18ca209e4
parent: 02ff360b333a88ca8866b191d8928e4b980911dd
parent: 6125a1ed811df7ab5d99db5760a47c572ace7d9d
author: John Koleszar <jkoleszar@google.com>
date: Fri Feb 8 15:20:37 EST 2013
Merge "Pass macroblock index to pick inter functions" into experimental
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -323,9 +323,7 @@
int fullpixel_mask;
YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
- struct {
- uint8_t *y_buffer, *u_buffer, *v_buffer;
- } second_pre;
+ YV12_BUFFER_CONFIG second_pre;
YV12_BUFFER_CONFIG dst;
MODE_INFO *prev_mode_info_context;
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -47,15 +47,12 @@
extern void select_interp_filter_type(VP9_COMP *cpi);
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
@@ -623,24 +620,12 @@
}
static void set_offsets(VP9_COMP *cpi,
- int mb_row, int mb_col, int block_size,
- int *ref_yoffset, int *ref_uvoffset) {
+ int mb_row, int mb_col, int block_size) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int dst_fb_idx = cm->new_fb_idx;
- const int recon_y_stride = cm->yv12_fb[dst_fb_idx].y_stride;
- const int recon_uv_stride = cm->yv12_fb[dst_fb_idx].uv_stride;
- const int recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
- const int recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
- const int src_y_stride = x->src.y_stride;
- const int src_uv_stride = x->src.uv_stride;
- const int src_yoffset = 16 * mb_row * src_y_stride + 16 * mb_col;
- const int src_uvoffset = 8 * mb_row * src_uv_stride + 8 * mb_col;
- const int ref_fb_idx = cm->active_ref_idx[cpi->lst_fb_idx];
- const int ref_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
- const int ref_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
const int idx_map = mb_row * cm->mb_cols + mb_col;
const int idx_str = xd->mode_info_stride * mb_row + mb_col;
@@ -666,9 +651,9 @@
xd->prev_mode_info_context = cm->prev_mi + idx_str;
// Set up destination pointers
- xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
- xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
- xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+ setup_pred_block(&xd->dst,
+ &cm->yv12_fb[dst_fb_idx],
+ mb_row, mb_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
@@ -692,14 +677,8 @@
xd->left_available = (mb_col > cm->cur_tile_mb_col_start);
xd->right_available = (mb_col + block_size < cm->cur_tile_mb_col_end);
- /* Reference buffer offsets */
- *ref_yoffset = (mb_row * ref_y_stride * 16) + (mb_col * 16);
- *ref_uvoffset = (mb_row * ref_uv_stride * 8) + (mb_col * 8);
-
/* set up source buffers */
- x->src.y_buffer = cpi->Source->y_buffer + src_yoffset;
- x->src.u_buffer = cpi->Source->u_buffer + src_uvoffset;
- x->src.v_buffer = cpi->Source->v_buffer + src_uvoffset;
+ setup_pred_block(&x->src, cpi->Source, mb_row, mb_col);
/* R/D setup */
x->rddiv = cpi->RDDIV;
@@ -742,8 +721,8 @@
}
static int pick_mb_modes(VP9_COMP *cpi,
- int mb_row,
- int mb_col,
+ int mb_row0,
+ int mb_col0,
TOKENEXTRA **tp,
int *totalrate,
int *totaldist) {
@@ -751,16 +730,15 @@
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int i;
- int recon_yoffset, recon_uvoffset;
int splitmodes_used = 0;
ENTROPY_CONTEXT_PLANES left_context[2];
ENTROPY_CONTEXT_PLANES above_context[2];
ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
- + mb_col;
+ + mb_col0;
/* Function should not modify L & A contexts; save and restore on exit */
vpx_memcpy(left_context,
- cm->left_context + (mb_row & 2),
+ cm->left_context + (mb_row0 & 2),
sizeof(left_context));
vpx_memcpy(above_context,
initial_above_context_ptr,
@@ -769,9 +747,11 @@
/* Encode MBs in raster order within the SB */
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
+ const int mb_row = mb_row0 + y_idx;
+ const int mb_col = mb_col0 + x_idx;
MB_MODE_INFO *mbmi;
- if ((mb_row + y_idx >= cm->mb_rows) || (mb_col + x_idx >= cm->mb_cols)) {
+ if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
// MB lies outside frame, move on
continue;
}
@@ -778,8 +758,7 @@
// Index of the MB in the SB 0..3
xd->mb_index = i;
- set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
- &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row, mb_col, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
@@ -800,8 +779,8 @@
*totaldist += d;
// Dummy encode, do not do the tokenization
- encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
- mb_row + y_idx, mb_col + x_idx);
+ encode_macroblock(cpi, tp, 0, mb_row, mb_col);
+
// Note the encoder may have changed the segment_id
// Save the coding context
@@ -814,8 +793,7 @@
if (enc_debug)
printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
#endif
- vp9_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
- recon_uvoffset, &r, &d);
+ vp9_pick_mode_inter_macroblock(cpi, x, mb_row, mb_col, &r, &d);
*totalrate += r;
*totaldist += d;
@@ -822,8 +800,7 @@
splitmodes_used += (mbmi->mode == SPLITMV);
// Dummy encode, do not do the tokenization
- encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
- mb_row + y_idx, mb_col + x_idx);
+ encode_macroblock(cpi, tp, 0, mb_row, mb_col);
seg_id = mbmi->segment_id;
if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
@@ -846,7 +823,7 @@
}
/* Restore L & A coding context to those in place on entry */
- vpx_memcpy(cm->left_context + (mb_row & 2),
+ vpx_memcpy(cm->left_context + (mb_row0 & 2),
left_context,
sizeof(left_context));
vpx_memcpy(initial_above_context_ptr,
@@ -865,9 +842,8 @@
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- int recon_yoffset, recon_uvoffset;
- set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row, mb_col, 32);
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB32X32;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
@@ -883,11 +859,7 @@
vpx_memcpy(&x->sb32_context[xd->sb_index].mic, xd->mode_info_context,
sizeof(MODE_INFO));
} else {
- vp9_rd_pick_inter_mode_sb32(cpi, x,
- recon_yoffset,
- recon_uvoffset,
- totalrate,
- totaldist);
+ vp9_rd_pick_inter_mode_sb32(cpi, x, mb_row, mb_col, totalrate, totaldist);
}
}
@@ -900,9 +872,8 @@
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- int recon_yoffset, recon_uvoffset;
- set_offsets(cpi, mb_row, mb_col, 64, &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row, mb_col, 64);
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
@@ -918,11 +889,7 @@
vpx_memcpy(&x->sb64_context.mic, xd->mode_info_context,
sizeof(MODE_INFO));
} else {
- vp9_rd_pick_inter_mode_sb64(cpi, x,
- recon_yoffset,
- recon_uvoffset,
- totalrate,
- totaldist);
+ vp9_rd_pick_inter_mode_sb64(cpi, x, mb_row, mb_col, totalrate, totaldist);
}
}
@@ -990,14 +957,13 @@
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- int recon_yoffset, recon_uvoffset;
cpi->sb32_count[is_sb]++;
if (is_sb) {
- set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row, mb_col, 32);
update_state(cpi, &x->sb32_context[xd->sb_index], 32, output_enabled);
- encode_superblock32(cpi, tp, recon_yoffset, recon_uvoffset,
+ encode_superblock32(cpi, tp,
output_enabled, mb_row, mb_col);
if (output_enabled)
update_stats(cpi);
@@ -1019,8 +985,7 @@
continue;
}
- set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
- &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16);
xd->mb_index = i;
update_state(cpi, &x->mb_context[xd->sb_index][i], 16, output_enabled);
@@ -1027,7 +992,7 @@
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
- encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset,
+ encode_macroblock(cpi, tp,
output_enabled, mb_row + y_idx, mb_col + x_idx);
if (output_enabled)
update_stats(cpi);
@@ -1062,11 +1027,9 @@
cpi->sb64_count[is_sb[0] == 2]++;
if (is_sb[0] == 2) {
- int recon_yoffset, recon_uvoffset;
-
- set_offsets(cpi, mb_row, mb_col, 64, &recon_yoffset, &recon_uvoffset);
+ set_offsets(cpi, mb_row, mb_col, 64);
update_state(cpi, &x->sb64_context, 64, 1);
- encode_superblock64(cpi, tp, recon_yoffset, recon_uvoffset,
+ encode_superblock64(cpi, tp,
1, mb_row, mb_col);
update_stats(cpi);
@@ -2025,7 +1988,6 @@
}
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled,
int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
@@ -2122,9 +2084,9 @@
else
ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
- xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+ setup_pred_block(&xd->pre,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ mb_row, mb_col);
if (mbmi->second_ref_frame > 0) {
int second_ref_fb_idx;
@@ -2136,12 +2098,9 @@
else
second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
- recon_yoffset;
- xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
- recon_uvoffset;
- xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
- recon_uvoffset;
+ setup_pred_block(&xd->second_pre,
+ &cpi->common.yv12_fb[second_ref_fb_idx],
+ mb_row, mb_col);
}
if (!x->skip) {
@@ -2282,7 +2241,6 @@
}
static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
@@ -2361,9 +2319,9 @@
else
ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
- xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+ setup_pred_block(&xd->pre,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ mb_row, mb_col);
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
@@ -2375,12 +2333,9 @@
else
second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
- recon_yoffset;
- xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
- recon_uvoffset;
- xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
- recon_uvoffset;
+ setup_pred_block(&xd->second_pre,
+ &cpi->common.yv12_fb[second_ref_fb_idx],
+ mb_row, mb_col);
}
vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
@@ -2513,7 +2468,6 @@
}
static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
int output_enabled, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
@@ -2591,12 +2545,9 @@
else
ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->pre.y_buffer =
- cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
- xd->pre.u_buffer =
- cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
- xd->pre.v_buffer =
- cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+ setup_pred_block(&xd->pre,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ mb_row, mb_col);
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
@@ -2608,12 +2559,9 @@
else
second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
- xd->second_pre.y_buffer =
- cpi->common.yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
- xd->second_pre.u_buffer =
- cpi->common.yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
- xd->second_pre.v_buffer =
- cpi->common.yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
+ setup_pred_block(&xd->second_pre,
+ &cpi->common.yv12_fb[second_ref_fb_idx],
+ mb_row, mb_col);
}
vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer,
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -3170,20 +3170,16 @@
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
int idx, MV_REFERENCE_FRAME frame_type,
int block_size,
- int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col,
int_mv frame_nearest_mv[MAX_REF_FRAMES],
int_mv frame_near_mv[MAX_REF_FRAMES],
int frame_mdcounts[4][4],
- uint8_t *y_buffer[4],
- uint8_t *u_buffer[4],
- uint8_t *v_buffer[4]) {
+ YV12_BUFFER_CONFIG yv12_mb[4]) {
YV12_BUFFER_CONFIG *yv12 = &cpi->common.yv12_fb[idx];
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
- y_buffer[frame_type] = yv12->y_buffer + recon_yoffset;
- u_buffer[frame_type] = yv12->u_buffer + recon_uvoffset;
- v_buffer[frame_type] = yv12->v_buffer + recon_uvoffset;
+ setup_pred_block(&yv12_mb[frame_type], yv12, mb_row, mb_col);
// Gets an initial list of candidate vectors from neighbours and orders them
vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
@@ -3197,7 +3193,7 @@
vp9_find_best_ref_mvs(xd,
cpi->common.error_resilient_mode ||
cpi->common.frame_parallel_decoding_mode ?
- 0 : y_buffer[frame_type],
+ 0 : yv12_mb[frame_type].y_buffer,
yv12->y_stride,
mbmi->ref_mvs[frame_type],
&frame_nearest_mv[frame_type],
@@ -3205,7 +3201,7 @@
// Further refinement that is encode side only to test the top few candidates
// in full and choose the best as the centre point for subsequent searches.
- mv_pred(cpi, x, y_buffer[frame_type], yv12->y_stride,
+ mv_pred(cpi, x, yv12_mb[frame_type].y_buffer, yv12->y_stride,
frame_type, block_size);
}
@@ -3222,7 +3218,7 @@
int *rate_y, int *distortion_y,
int *rate_uv, int *distortion_uv,
int *mode_excluded, int *disable_skip,
- int recon_yoffset, int mode_index,
+ int mode_index,
int_mv frame_mv[MB_MODE_COUNT]
[MAX_REF_FRAMES]) {
VP9_COMMON *cm = &cpi->common;
@@ -3517,7 +3513,7 @@
}
static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col,
int *returnrate, int *returndistortion,
int64_t *returnintra) {
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
@@ -3568,7 +3564,7 @@
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
int frame_mdcounts[4][4];
- uint8_t *y_buffer[4], *u_buffer[4], *v_buffer[4];
+ YV12_BUFFER_CONFIG yv12_mb[4];
unsigned int ref_costs[MAX_REF_FRAMES];
int_mv seg_mvs[NB_PARTITIONINGS][16 /* n_blocks */][MAX_REF_FRAMES - 1];
@@ -3600,23 +3596,23 @@
if (cpi->ref_frame_flags & VP9_LAST_FLAG) {
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->lst_fb_idx],
- LAST_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+ LAST_FRAME, BLOCK_16X16, mb_row, mb_col,
frame_mv[NEARESTMV], frame_mv[NEARMV],
- frame_mdcounts, y_buffer, u_buffer, v_buffer);
+ frame_mdcounts, yv12_mb);
}
if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->gld_fb_idx],
- GOLDEN_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+ GOLDEN_FRAME, BLOCK_16X16, mb_row, mb_col,
frame_mv[NEARESTMV], frame_mv[NEARMV],
- frame_mdcounts, y_buffer, u_buffer, v_buffer);
+ frame_mdcounts, yv12_mb);
}
if (cpi->ref_frame_flags & VP9_ALT_FLAG) {
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->alt_fb_idx],
- ALTREF_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+ ALTREF_FRAME, BLOCK_16X16, mb_row, mb_col,
frame_mv[NEARESTMV], frame_mv[NEARMV],
- frame_mdcounts, y_buffer, u_buffer, v_buffer);
+ frame_mdcounts, yv12_mb);
}
*returnintra = INT64_MAX;
@@ -3734,9 +3730,7 @@
if (mbmi->ref_frame) {
int ref = mbmi->ref_frame;
- xd->pre.y_buffer = y_buffer[ref];
- xd->pre.u_buffer = u_buffer[ref];
- xd->pre.v_buffer = v_buffer[ref];
+ xd->pre = yv12_mb[ref];
best_ref_mv = mbmi->ref_mvs[ref][0];
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
}
@@ -3744,9 +3738,7 @@
if (mbmi->second_ref_frame > 0) {
int ref = mbmi->second_ref_frame;
- xd->second_pre.y_buffer = y_buffer[ref];
- xd->second_pre.u_buffer = u_buffer[ref];
- xd->second_pre.v_buffer = v_buffer[ref];
+ xd->second_pre = yv12_mb[ref];
second_best_ref_mv = mbmi->ref_mvs[ref][0];
}
@@ -3983,7 +3975,7 @@
#endif
&rate_y, &distortion,
&rate_uv, &distortion_uv,
- &mode_excluded, &disable_skip, recon_yoffset,
+ &mode_excluded, &disable_skip,
mode_index, frame_mv);
if (this_rd == INT64_MAX)
continue;
@@ -4467,7 +4459,7 @@
}
static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col,
int *returnrate,
int *returndistortion,
int block_size) {
@@ -4481,9 +4473,7 @@
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
int frame_mdcounts[4][4];
- uint8_t *y_buffer[4];
- uint8_t *u_buffer[4];
- uint8_t *v_buffer[4];
+ YV12_BUFFER_CONFIG yv12_mb[4];
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
VP9_ALT_FLAG };
int idx_list[4] = {0,
@@ -4529,9 +4519,9 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
- recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
+ mb_row, mb_col, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_mdcounts,
- y_buffer, u_buffer, v_buffer);
+ yv12_mb);
}
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
frame_mv[ZEROMV][ref_frame].as_int = 0;
@@ -4653,9 +4643,7 @@
continue;
mbmi->second_ref_frame = second_ref;
- xd->second_pre.y_buffer = y_buffer[second_ref];
- xd->second_pre.u_buffer = u_buffer[second_ref];
- xd->second_pre.v_buffer = v_buffer[second_ref];
+ xd->second_pre = yv12_mb[second_ref];
mode_excluded =
mode_excluded ?
mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
@@ -4673,9 +4661,7 @@
}
}
- xd->pre.y_buffer = y_buffer[ref_frame];
- xd->pre.u_buffer = u_buffer[ref_frame];
- xd->pre.v_buffer = v_buffer[ref_frame];
+ xd->pre = yv12_mb[ref_frame];
vpx_memcpy(mdcounts, frame_mdcounts[ref_frame], sizeof(mdcounts));
// If the segment reference frame feature is enabled....
@@ -4753,7 +4739,7 @@
#endif
&rate_y, &distortion_y,
&rate_uv, &distortion_uv,
- &mode_excluded, &disable_skip, recon_yoffset,
+ &mode_excluded, &disable_skip,
mode_index, frame_mv);
if (this_rd == INT64_MAX)
continue;
@@ -5022,24 +5008,23 @@
}
int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col,
int *returnrate,
int *returndistortion) {
- return vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
+ return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
returnrate, returndistortion, BLOCK_32X32);
}
int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset,
+ int mb_row, int mb_col,
int *returnrate,
int *returndistortion) {
- return vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
+ return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
returnrate, returndistortion, BLOCK_64X64);
}
void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset,
- int recon_uvoffset,
+ int mb_row, int mb_col,
int *totalrate, int *totaldist) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@@ -5057,7 +5042,7 @@
{
int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
- rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
+ rd_pick_inter_mode(cpi, x, mb_row, mb_col, &rate,
&distortion, &intra_error);
/* restore cpi->zbin_mode_boost_enabled */
--- a/vp9/encoder/vp9_rdopt.h
+++ b/vp9/encoder/vp9_rdopt.h
@@ -29,15 +29,15 @@
int *r, int *d);
extern void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
- int ref_yoffset, int ref_uvoffset,
+ int mb_row, int mb_col,
int *r, int *d);
extern int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
- int ref_yoffset, int ref_uvoffset,
+ int mb_row, int mb_col,
int *r, int *d);
extern int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
- int ref_yoffset, int ref_uvoffset,
+ int mb_row, int mb_col,
int *r, int *d);
extern void vp9_init_me_luts();
@@ -44,5 +44,19 @@
extern void vp9_set_mbmode_and_mvs(MACROBLOCK *x,
MB_PREDICTION_MODE mb, int_mv *mv);
+
+static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ const int recon_y_stride = src->y_stride;
+ const int recon_uv_stride = src->uv_stride;
+ const int recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
+ const int recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
+
+ *dst = *src;
+ dst->y_buffer += recon_yoffset;
+ dst->u_buffer += recon_uvoffset;
+ dst->v_buffer += recon_uvoffset;
+}
#endif // VP9_ENCODER_VP9_RDOPT_H_
--