ref: 82d7c6fb3ca4f15643320c38d1a41ad47ae71a6b
parent: 1492698ed35f346bd3a8fd20c4de1e23fd71f707
parent: 1c552e79bd2729e085167f683e56c0a698b3d297
author: Dmitry Kovalev <dkovalev@google.com>
date: Wed Aug 7 12:32:09 EDT 2013
Merge "Using only one scale function in scale_factors struct."
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -185,8 +185,7 @@
int (*scale_value_x)(int val, const struct scale_factors *scale);
int (*scale_value_y)(int val, const struct scale_factors *scale);
void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
- MV32 (*scale_mv_q3_to_q4)(const MV *mv, const struct scale_factors *scale);
- MV32 (*scale_mv_q4)(const MV *mv, const struct scale_factors *scale);
+ MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
};
--- a/vp9/common/vp9_filter.h
+++ b/vp9/common/vp9_filter.h
@@ -19,7 +19,9 @@
#define VP9_FILTER_WEIGHT 128
#define VP9_FILTER_SHIFT 7
-#define SUBPEL_SHIFTS 16
+#define SUBPEL_BITS 4
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define SUBPEL_SHIFTS (1 << SUBPEL_BITS)
extern const int16_t vp9_bilinear_filters[SUBPEL_SHIFTS][8];
extern const int16_t vp9_sub_pel_filters_6[SUBPEL_SHIFTS][8];
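For context, the new macros keep the old value: with SUBPEL_BITS == 4, SUBPEL_SHIFTS is 1 << 4 == 16 (the previous literal) and SUBPEL_MASK is 15. A q4 (sixteenth-pel) coordinate then splits into a full-pel part and a sub-pel filter index. A minimal standalone illustration; the variable names are invented for the example, not taken from the tree:

  #include <stdio.h>

  #define SUBPEL_BITS   4
  #define SUBPEL_MASK   ((1 << SUBPEL_BITS) - 1)   /* 15 */
  #define SUBPEL_SHIFTS (1 << SUBPEL_BITS)         /* 16, same as the old literal */

  int main(void) {
    const int mv_col_q4 = 37;                        /* 2 full pels + 5/16 pel */
    const int full_pel  = mv_col_q4 >> SUBPEL_BITS;  /* 2 */
    const int subpel    = mv_col_q4 & SUBPEL_MASK;   /* 5: index into a SUBPEL_SHIFTS-entry table */
    printf("%d + %d/%d pel\n", full_pel, subpel, SUBPEL_SHIFTS);
    return 0;
  }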
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -33,27 +33,7 @@
return val;
}
-static MV32 mv_q3_to_q4_with_scaling(const MV *mv,
- const struct scale_factors *scale) {
- const MV32 res = {
- ((mv->row << 1) * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
-      + scale->y_offset_q4,
- ((mv->col << 1) * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
-      + scale->x_offset_q4
- };
- return res;
-}
-
-static MV32 mv_q3_to_q4_without_scaling(const MV *mv,
- const struct scale_factors *scale) {
- const MV32 res = {
- mv->row << 1,
- mv->col << 1
- };
- return res;
-}
-
-static MV32 mv_q4_with_scaling(const MV *mv,
+static MV32 mv_with_scaling(const MV *mv,
const struct scale_factors *scale) {
const MV32 res = {
(mv->row * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->y_offset_q4,
@@ -62,8 +42,8 @@
return res;
}
-static MV32 mv_q4_without_scaling(const MV *mv,
- const struct scale_factors *scale) {
+static MV32 mv_without_scaling(const MV *mv,
+ const struct scale_factors *scale) {
const MV32 res = {
mv->row,
mv->col
@@ -109,14 +89,12 @@
scale->scale_value_x = unscaled_value;
scale->scale_value_y = unscaled_value;
scale->set_scaled_offsets = set_offsets_without_scaling;
- scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
- scale->scale_mv_q4 = mv_q4_without_scaling;
+ scale->scale_mv = mv_without_scaling;
} else {
scale->scale_value_x = scale_value_x_with_scaling;
scale->scale_value_y = scale_value_y_with_scaling;
scale->set_scaled_offsets = set_offsets_with_scaling;
- scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
- scale->scale_mv_q4 = mv_q4_with_scaling;
+ scale->scale_mv = mv_with_scaling;
}
// TODO(agrange): Investigate the best choice of functions to use here
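For context, the unscaled variant is only a fast path: if both fixed-point factors equal 1 << VP9_REF_SCALE_SHIFT (the shift is 14 in this tree, assumed here) and the q4 offsets are zero, mv_with_scaling already returns its input unchanged, which is what mv_without_scaling does directly. A self-contained sketch with simplified stand-in types; struct sf and REF_SCALE_SHIFT are placeholders, not the real libvpx definitions:

  #include <assert.h>
  #include <stdint.h>

  #define REF_SCALE_SHIFT 14              /* assumed stand-in for VP9_REF_SCALE_SHIFT */

  typedef struct { int16_t row, col; } MV;
  typedef struct { int32_t row, col; } MV32;

  /* Simplified stand-in for struct scale_factors. */
  struct sf {
    int x_scale_fp, y_scale_fp;           /* Q(REF_SCALE_SHIFT) fixed point */
    int x_offset_q4, y_offset_q4;
  };

  static MV32 mv_with_scaling(const MV *mv, const struct sf *s) {
    const MV32 res = {
      (mv->row * s->y_scale_fp >> REF_SCALE_SHIFT) + s->y_offset_q4,
      (mv->col * s->x_scale_fp >> REF_SCALE_SHIFT) + s->x_offset_q4
    };
    return res;
  }

  int main(void) {
    /* Identity scale: factor 1.0 in fixed point, zero offsets. */
    const struct sf id = { 1 << REF_SCALE_SHIFT, 1 << REF_SCALE_SHIFT, 0, 0 };
    const MV mv_q4 = { -7, 33 };
    const MV32 out = mv_with_scaling(&mv_q4, &id);
    /* Same result as the trivial copy in mv_without_scaling. */
    assert(out.row == mv_q4.row && out.col == mv_q4.col);
    return 0;
  }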
@@ -202,14 +180,15 @@
int w, int h, int weight,
const struct subpix_fn_table *subpix,
enum mv_precision precision) {
- const MV32 mv = precision == MV_PRECISION_Q4
- ? scale->scale_mv_q4(src_mv, scale)
- : scale->scale_mv_q3_to_q4(src_mv, scale);
- const int subpel_x = mv.col & 15;
- const int subpel_y = mv.row & 15;
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1,
+                     is_q4 ? src_mv->col : src_mv->col << 1 };
+ const MV32 mv = scale->scale_mv(&mv_q4, scale);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
- src += (mv.row >> 4) * src_stride + (mv.col >> 4);
- scale->predict[!!subpel_x][!!subpel_y][weight](
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+ scale->predict[subpel_x != 0][subpel_y != 0][weight](
src, src_stride, dst, dst_stride,
subpix->filter_x[subpel_x], scale->x_step_q4,
subpix->filter_y[subpel_y], scale->y_step_q4,
--
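A standalone sketch of the call-site pattern after this merge: the caller normalizes the motion vector to q4 precision (doubling q3 values), invokes the single scale_mv hook, and derives the full-pel source offset and sub-pel filter indices with SUBPEL_BITS/SUBPEL_MASK. Types and helpers below are simplified stand-ins for the libvpx ones, kept minimal so the example compiles on its own:

  #include <stdint.h>
  #include <stdio.h>

  #define SUBPEL_BITS 4
  #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

  typedef struct { int16_t row, col; } MV;
  typedef struct { int32_t row, col; } MV32;
  enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };

  /* Simplified scale_factors: a single function pointer for MV scaling. */
  struct sf {
    MV32 (*scale_mv)(const MV *mv, const struct sf *s);
  };

  /* Unscaled case: the q4 MV passes through unchanged. */
  static MV32 mv_without_scaling(const MV *mv, const struct sf *s) {
    const MV32 res = { mv->row, mv->col };
    (void)s;
    return res;
  }

  static void predict_offsets(const MV *src_mv, enum mv_precision precision,
                              const struct sf *s, int src_stride) {
    /* Normalize to q4 at the call site: q3 values are doubled. */
    const int is_q4 = precision == MV_PRECISION_Q4;
    const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1,
                       is_q4 ? src_mv->col : src_mv->col << 1 };
    const MV32 mv = s->scale_mv(&mv_q4, s);
    const int subpel_x = mv.col & SUBPEL_MASK;
    const int subpel_y = mv.row & SUBPEL_MASK;
    const int src_offset = (mv.row >> SUBPEL_BITS) * src_stride
                           + (mv.col >> SUBPEL_BITS);
    printf("offset=%d subpel_x=%d subpel_y=%d\n",
           src_offset, subpel_x, subpel_y);
  }

  int main(void) {
    const struct sf s = { mv_without_scaling };
    const MV mv = { 5, -3 };                 /* eighth-pel (q3) motion vector */
    predict_offsets(&mv, MV_PRECISION_Q3, &s, 64);
    return 0;
  }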