ref: e0cc52db3fc9b09c99d7bbee35153cf82964a860
parent: 3a04c9c9c4c4935925f4c00dcc70610100c5e9dd
author: clang-format <noreply@google.com>
date: Tue Jul 26 16:43:23 EDT 2016
vp9/encoder: apply clang-format

Change-Id: I45d9fb4013f50766b24363a86365e8063e8954c2
--- a/vp9/encoder/arm/neon/vp9_dct_neon.c
+++ b/vp9/encoder/arm/neon/vp9_dct_neon.c
@@ -18,14 +18,13 @@
#include "vpx_dsp/txfm_common.h"
void vp9_fdct8x8_quant_neon(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr,
- int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
- const int16_t* dequant_ptr, uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+ int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
int16_t temp_buffer[64];
(void)coeff_ptr;
--- a/vp9/encoder/arm/neon/vp9_quantize_neon.c
+++ b/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -26,8 +26,8 @@
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
// TODO(jingning) Decide the need of these arguments after the
// quantization process is completed.
(void)zbin_ptr;
@@ -54,12 +54,12 @@
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
- const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
- vget_low_s16(v_quant));
- const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
- vget_high_s16(v_quant));
- const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
- vshrn_n_s32(v_tmp_hi, 16));
+ const int32x4_t v_tmp_lo =
+ vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi =
+ vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 =
+ vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -79,12 +79,12 @@
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
- const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
- vget_low_s16(v_quant));
- const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
- vget_high_s16(v_quant));
- const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
- vshrn_n_s32(v_tmp_hi, 16));
+ const int32x4_t v_tmp_lo =
+ vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi =
+ vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 =
+ vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -96,9 +96,8 @@
vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
}
{
- const int16x4_t v_eobmax_3210 =
- vmax_s16(vget_low_s16(v_eobmax_76543210),
- vget_high_s16(v_eobmax_76543210));
+ const int16x4_t v_eobmax_3210 = vmax_s16(
+ vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
const int64x1_t v_eobmax_xx32 =
vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
const int16x4_t v_eobmax_tmp =
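
For reference, the vabaq_s16/vmull_s16/vshrn_n_s32 sequence in the hunks above is the VP9 fast-path quantizer in 16-bit fixed point: round + |coeff| is multiplied against the quantizer and only the top 16 bits of the 32-bit product are kept. A scalar sketch of one lane (illustrative names; in the real function the sign restore, the dequant multiply, and the eob reduction happen on the lines that follow):

/* Scalar sketch of the lane arithmetic vectorized above:
 * tmp = ((|coeff| + round) * quant) >> 16, with the sign restored after;
 * lanes with tmp != 0 contribute iscan + 1 to the horizontal eob max. */
static int16_t quantize_lane_sketch(int16_t coeff, int16_t round,
                                    int16_t quant) {
  const int sign = coeff >> 15;                 /* 0 or -1 */
  const int abs_coeff = (coeff ^ sign) - sign;  /* |coeff| */
  const int tmp = ((abs_coeff + round) * quant) >> 16;
  return (int16_t)((tmp ^ sign) - sign);        /* restore the sign */
}
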
--- a/vp9/encoder/mips/msa/vp9_error_msa.c
+++ b/vp9/encoder/mips/msa/vp9_error_msa.c
@@ -11,74 +11,73 @@
#include "./vp9_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
-#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \
-static int64_t block_error_##BSize##size_msa(const int16_t *coeff_ptr, \
- const int16_t *dq_coeff_ptr, \
- int64_t *ssz) { \
- int64_t err = 0; \
- uint32_t loop_cnt; \
- v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \
- v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \
- v2i64 sq_coeff_r, sq_coeff_l; \
- v2i64 err0, err_dup0, err1, err_dup1; \
- \
- coeff = LD_SH(coeff_ptr); \
- dq_coeff = LD_SH(dq_coeff_ptr); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, \
- sq_coeff_r, sq_coeff_l); \
- DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \
- \
- coeff = LD_SH(coeff_ptr + 8); \
- dq_coeff = LD_SH(dq_coeff_ptr + 8); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff_ptr += 16; \
- dq_coeff_ptr += 16; \
- \
- for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \
- coeff = LD_SH(coeff_ptr); \
- dq_coeff = LD_SH(dq_coeff_ptr); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff = LD_SH(coeff_ptr + 8); \
- dq_coeff = LD_SH(dq_coeff_ptr + 8); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff_ptr += 16; \
- dq_coeff_ptr += 16; \
- } \
- \
- err_dup0 = __msa_splati_d(sq_coeff_r, 1); \
- err_dup1 = __msa_splati_d(sq_coeff_l, 1); \
- sq_coeff_r += err_dup0; \
- sq_coeff_l += err_dup1; \
- *ssz = __msa_copy_s_d(sq_coeff_r, 0); \
- *ssz += __msa_copy_s_d(sq_coeff_l, 0); \
- \
- err_dup0 = __msa_splati_d(err0, 1); \
- err_dup1 = __msa_splati_d(err1, 1); \
- err0 += err_dup0; \
- err1 += err_dup1; \
- err = __msa_copy_s_d(err0, 0); \
- err += __msa_copy_s_d(err1, 0); \
- \
- return err; \
-}
+#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \
+ static int64_t block_error_##BSize##size_msa( \
+ const int16_t *coeff_ptr, const int16_t *dq_coeff_ptr, int64_t *ssz) { \
+ int64_t err = 0; \
+ uint32_t loop_cnt; \
+ v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \
+ v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \
+ v2i64 sq_coeff_r, sq_coeff_l; \
+ v2i64 err0, err_dup0, err1, err_dup1; \
+ \
+ coeff = LD_SH(coeff_ptr); \
+ dq_coeff = LD_SH(dq_coeff_ptr); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, sq_coeff_r, \
+ sq_coeff_l); \
+ DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \
+ \
+ coeff = LD_SH(coeff_ptr + 8); \
+ dq_coeff = LD_SH(dq_coeff_ptr + 8); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff_ptr += 16; \
+ dq_coeff_ptr += 16; \
+ \
+ for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \
+ coeff = LD_SH(coeff_ptr); \
+ dq_coeff = LD_SH(dq_coeff_ptr); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff = LD_SH(coeff_ptr + 8); \
+ dq_coeff = LD_SH(dq_coeff_ptr + 8); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff_ptr += 16; \
+ dq_coeff_ptr += 16; \
+ } \
+ \
+ err_dup0 = __msa_splati_d(sq_coeff_r, 1); \
+ err_dup1 = __msa_splati_d(sq_coeff_l, 1); \
+ sq_coeff_r += err_dup0; \
+ sq_coeff_l += err_dup1; \
+ *ssz = __msa_copy_s_d(sq_coeff_r, 0); \
+ *ssz += __msa_copy_s_d(sq_coeff_l, 0); \
+ \
+ err_dup0 = __msa_splati_d(err0, 1); \
+ err_dup1 = __msa_splati_d(err1, 1); \
+ err0 += err_dup0; \
+ err1 += err_dup1; \
+ err = __msa_copy_s_d(err0, 0); \
+ err += __msa_copy_s_d(err1, 0); \
+ \
+ return err; \
+ }
BLOCK_ERROR_BLOCKSIZE_MSA(16);
BLOCK_ERROR_BLOCKSIZE_MSA(64);
@@ -86,25 +85,17 @@
BLOCK_ERROR_BLOCKSIZE_MSA(1024);
int64_t vp9_block_error_msa(const tran_low_t *coeff_ptr,
- const tran_low_t *dq_coeff_ptr,
- intptr_t blk_size, int64_t *ssz) {
+ const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+ int64_t *ssz) {
int64_t err;
const int16_t *coeff = (const int16_t *)coeff_ptr;
const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
switch (blk_size) {
- case 16:
- err = block_error_16size_msa(coeff, dq_coeff, ssz);
- break;
- case 64:
- err = block_error_64size_msa(coeff, dq_coeff, ssz);
- break;
- case 256:
- err = block_error_256size_msa(coeff, dq_coeff, ssz);
- break;
- case 1024:
- err = block_error_1024size_msa(coeff, dq_coeff, ssz);
- break;
+ case 16: err = block_error_16size_msa(coeff, dq_coeff, ssz); break;
+ case 64: err = block_error_64size_msa(coeff, dq_coeff, ssz); break;
+ case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
+ case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
default:
err = vp9_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
break;
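
SIMD unrolling aside, the macro above computes the standard block-error pair: the distortion between coefficients and their dequantized values, plus the sum of squared source coefficients. A scalar sketch with the same semantics as the vp9_block_error_c fallback the switch above dispatches to:

/* err accumulates squared quantization error; *ssz the squared coeffs.
 * The HSUB/DOTP pairs above compute exactly these two dot products. */
static int64_t block_error_sketch(const int16_t *coeff, const int16_t *dqcoeff,
                                  intptr_t blk_size, int64_t *ssz) {
  int64_t err = 0, sqcoeff = 0;
  intptr_t i;
  for (i = 0; i < blk_size; ++i) {
    const int diff = coeff[i] - dqcoeff[i];
    err += (int64_t)diff * diff;
    sqcoeff += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqcoeff;
  return err;
}
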
--- a/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c
@@ -159,8 +159,8 @@
/* load input data */
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
FDCT_POSTPROC_2V_NEG_H(r0, r1);
FDCT_POSTPROC_2V_NEG_H(r2, r3);
FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -169,8 +169,8 @@
out += 64;
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
FDCT_POSTPROC_2V_NEG_H(r8, r9);
FDCT_POSTPROC_2V_NEG_H(r10, r11);
FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -181,8 +181,8 @@
/* load input data */
input += 128;
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
FDCT_POSTPROC_2V_NEG_H(r0, r1);
FDCT_POSTPROC_2V_NEG_H(r2, r3);
FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -191,8 +191,8 @@
out += 64;
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
FDCT_POSTPROC_2V_NEG_H(r8, r9);
FDCT_POSTPROC_2V_NEG_H(r10, r11);
FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -339,12 +339,12 @@
v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
/* load input data */
- LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
- l4, l12, l5, l13, l6, l14, l7, l15);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+ l7, l15);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
out += 16 * 8;
@@ -351,12 +351,12 @@
/* load input data */
input += 128;
- LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
- l4, l12, l5, l13, l6, l14, l7, l15);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+ l7, l15);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
}
@@ -371,10 +371,10 @@
LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);
temp = intermediate + 8;
LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
- in8, in9, in10, in11, in12, in13, in14, in15);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
FDCT_POSTPROC_2V_NEG_H(in0, in1);
FDCT_POSTPROC_2V_NEG_H(in2, in3);
FDCT_POSTPROC_2V_NEG_H(in4, in5);
@@ -383,29 +383,28 @@
FDCT_POSTPROC_2V_NEG_H(in10, in11);
FDCT_POSTPROC_2V_NEG_H(in12, in13);
FDCT_POSTPROC_2V_NEG_H(in14, in15);
- BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
- in8, in9, in10, in11, in12, in13, in14, in15,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- in8, in9, in10, in11, in12, in13, in14, in15);
+ BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+ in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
+ tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
temp = intermediate;
ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);
- FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
+ tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
temp = intermediate;
LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
- tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
+ FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
+ tmp1, in1, tmp2, in2, tmp3, in3);
ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
- TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7,
- tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7);
+ TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
+ tmp5, in5, tmp6, in6, tmp7, in7);
out = output + 8;
ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
}
-void vp9_fht16x16_msa(const int16_t *input, int16_t *output,
- int32_t stride, int32_t tx_type) {
+void vp9_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+ int32_t tx_type) {
DECLARE_ALIGNED(32, int16_t, tmp[256]);
DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
@@ -413,35 +412,31 @@
int16_t *ptmpbuf = &tmp_buf[0];
int16_t *trans = &trans_buf[0];
const int32_t const_arr[29 * 4] = {
- 52707308, 52707308, 52707308, 52707308,
- -1072430300, -1072430300, -1072430300, -1072430300,
- 795618043, 795618043, 795618043, 795618043,
- -721080468, -721080468, -721080468, -721080468,
- 459094491, 459094491, 459094491, 459094491,
- -970646691, -970646691, -970646691, -970646691,
- 1010963856, 1010963856, 1010963856, 1010963856,
- -361743294, -361743294, -361743294, -361743294,
- 209469125, 209469125, 209469125, 209469125,
- -1053094788, -1053094788, -1053094788, -1053094788,
- 1053160324, 1053160324, 1053160324, 1053160324,
- 639644520, 639644520, 639644520, 639644520,
- -862444000, -862444000, -862444000, -862444000,
- 1062144356, 1062144356, 1062144356, 1062144356,
- -157532337, -157532337, -157532337, -157532337,
- 260914709, 260914709, 260914709, 260914709,
- -1041559667, -1041559667, -1041559667, -1041559667,
- 920985831, 920985831, 920985831, 920985831,
- -551995675, -551995675, -551995675, -551995675,
- 596522295, 596522295, 596522295, 596522295,
- 892853362, 892853362, 892853362, 892853362,
- -892787826, -892787826, -892787826, -892787826,
- 410925857, 410925857, 410925857, 410925857,
- -992012162, -992012162, -992012162, -992012162,
- 992077698, 992077698, 992077698, 992077698,
- 759246145, 759246145, 759246145, 759246145,
- -759180609, -759180609, -759180609, -759180609,
- -759222975, -759222975, -759222975, -759222975,
- 759288511, 759288511, 759288511, 759288511 };
+ 52707308, 52707308, 52707308, 52707308, -1072430300,
+ -1072430300, -1072430300, -1072430300, 795618043, 795618043,
+ 795618043, 795618043, -721080468, -721080468, -721080468,
+ -721080468, 459094491, 459094491, 459094491, 459094491,
+ -970646691, -970646691, -970646691, -970646691, 1010963856,
+ 1010963856, 1010963856, 1010963856, -361743294, -361743294,
+ -361743294, -361743294, 209469125, 209469125, 209469125,
+ 209469125, -1053094788, -1053094788, -1053094788, -1053094788,
+ 1053160324, 1053160324, 1053160324, 1053160324, 639644520,
+ 639644520, 639644520, 639644520, -862444000, -862444000,
+ -862444000, -862444000, 1062144356, 1062144356, 1062144356,
+ 1062144356, -157532337, -157532337, -157532337, -157532337,
+ 260914709, 260914709, 260914709, 260914709, -1041559667,
+ -1041559667, -1041559667, -1041559667, 920985831, 920985831,
+ 920985831, 920985831, -551995675, -551995675, -551995675,
+ -551995675, 596522295, 596522295, 596522295, 596522295,
+ 892853362, 892853362, 892853362, 892853362, -892787826,
+ -892787826, -892787826, -892787826, 410925857, 410925857,
+ 410925857, 410925857, -992012162, -992012162, -992012162,
+ -992012162, 992077698, 992077698, 992077698, 992077698,
+ 759246145, 759246145, 759246145, 759246145, -759180609,
+ -759180609, -759180609, -759180609, -759222975, -759222975,
+ -759222975, -759222975, 759288511, 759288511, 759288511,
+ 759288511
+ };
switch (tx_type) {
case DCT_DCT:
@@ -500,8 +495,6 @@
fadst16_transpose_msa(tmp, output);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
--- a/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c
@@ -86,9 +86,7 @@
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
--- a/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c
@@ -23,44 +23,42 @@
switch (tx_type) {
case DCT_DCT:
- VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case ADST_DCT:
- VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case DCT_ADST:
- VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case ADST_ADST:
- VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
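
Each tx_type arm above has the same shape: a 1-D transform pass, a transpose, then a second 1-D pass, with the kernel pair (FDCT or ADST) chosen per direction. A minimal sketch of that separable structure (the real code performs one more transpose plus rounding via SRLI_AVE_S_4V_H before the store):

/* Rows -> transpose -> rows: the separable 2-D transform pattern used by
 * the switch above; row_txfm/col_txfm stand in for VP9_FDCT8/VP9_ADST8. */
static void fwd_txfm_8x8_sketch(const int16_t in[64], int16_t out[64],
                                void (*row_txfm)(const int16_t *, int16_t *),
                                void (*col_txfm)(const int16_t *, int16_t *)) {
  int16_t tmp[64], tr[64];
  int r, c;
  for (r = 0; r < 8; ++r) row_txfm(&in[8 * r], &tmp[8 * r]);
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) tr[8 * c + r] = tmp[8 * r + c]; /* transpose */
  for (r = 0; r < 8; ++r) col_txfm(&tr[8 * r], &out[8 * r]);
}
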
--- a/vp9/encoder/mips/msa/vp9_fdct_msa.h
+++ b/vp9/encoder/mips/msa/vp9_fdct_msa.h
@@ -15,103 +15,102 @@
#include "vpx_dsp/mips/txfm_macros_msa.h"
#include "vpx_ports/mem.h"
-#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7) { \
- v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
- v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
- v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
- cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
- v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
- cospi_24_64, -cospi_24_64, 0, 0 }; \
+#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+ out3, out4, out5, out6, out7) \
+ { \
+ v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
+ v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
+ v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
+ cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
+ v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
+ cospi_24_64, -cospi_24_64, 0, 0 }; \
+ \
+ SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
+ cnst2_m = -cnst0_m; \
+ ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
+ SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
+ cnst4_m = -cnst2_m; \
+ ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ \
+ ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
+ cnst2_m, cnst3_m, in7, in0, in4, in3); \
+ \
+ SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
+ cnst2_m = -cnst0_m; \
+ ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
+ SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
+ cnst4_m = -cnst2_m; \
+ ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ \
+ ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
+ \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
+ cnst2_m, cnst3_m, in5, in2, in6, in1); \
+ BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
+ out7 = -s0_m; \
+ out0 = s1_m; \
+ \
+ SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
+ \
+ ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
+ cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
+ cnst1_m = cnst0_m; \
+ \
+ ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m, \
+ cnst3_m, cnst1_m, out1, out6, s0_m, s1_m); \
+ \
+ SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
+ cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
+ \
+ ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
+ ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
+ out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
+ \
+ out1 = -out1; \
+ out3 = -out3; \
+ out5 = -out5; \
+ }
+
+#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
+ v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
\
- SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
- cnst2_m = -cnst0_m; \
- ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
- SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
- cnst4_m = -cnst2_m; \
- ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ UNPCK_R_SH_SW(in0, in0_r_m); \
+ UNPCK_R_SH_SW(in1, in1_r_m); \
+ UNPCK_R_SH_SW(in2, in2_r_m); \
+ UNPCK_R_SH_SW(in3, in3_r_m); \
\
- ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
- ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in7, in0, \
- in4, in3); \
+ constant_m = __msa_fill_w(sinpi_4_9); \
+ MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \
\
- SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
- cnst2_m = -cnst0_m; \
- ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
- SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
- cnst4_m = -cnst2_m; \
- ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ constant_m = __msa_fill_w(sinpi_1_9); \
+ s0_m += in0_r_m * constant_m; \
+ s1_m -= in1_r_m * constant_m; \
\
- ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
- ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
+ constant_m = __msa_fill_w(sinpi_2_9); \
+ s0_m += in1_r_m * constant_m; \
+ s1_m += in3_r_m * constant_m; \
\
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in5, in2, \
- in6, in1); \
- BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
- out7 = -s0_m; \
- out0 = s1_m; \
+ s2_m = in0_r_m + in1_r_m - in3_r_m; \
\
- SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
+ constant_m = __msa_fill_w(sinpi_3_9); \
+ MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \
\
- ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
- cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- cnst1_m = cnst0_m; \
+ in0_r_m = s0_m + s3_m; \
+ s2_m = s1_m - s3_m; \
+ s3_m = s1_m - s0_m + s3_m; \
\
- ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
- ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst2_m, cnst3_m, cnst1_m, out1, out6, \
- s0_m, s1_m); \
- \
- SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
- cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- \
- ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
- ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
- out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
- out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
- out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
- out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
- \
- out1 = -out1; \
- out3 = -out3; \
- out5 = -out5; \
-}
-
-#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \
- v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
- v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
- \
- UNPCK_R_SH_SW(in0, in0_r_m); \
- UNPCK_R_SH_SW(in1, in1_r_m); \
- UNPCK_R_SH_SW(in2, in2_r_m); \
- UNPCK_R_SH_SW(in3, in3_r_m); \
- \
- constant_m = __msa_fill_w(sinpi_4_9); \
- MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \
- \
- constant_m = __msa_fill_w(sinpi_1_9); \
- s0_m += in0_r_m * constant_m; \
- s1_m -= in1_r_m * constant_m; \
- \
- constant_m = __msa_fill_w(sinpi_2_9); \
- s0_m += in1_r_m * constant_m; \
- s1_m += in3_r_m * constant_m; \
- \
- s2_m = in0_r_m + in1_r_m - in3_r_m; \
- \
- constant_m = __msa_fill_w(sinpi_3_9); \
- MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \
- \
- in0_r_m = s0_m + s3_m; \
- s2_m = s1_m - s3_m; \
- s3_m = s1_m - s0_m + s3_m; \
- \
- SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
- PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, \
- s3_m, s3_m, out0, out1, out2, out3); \
-}
-#endif /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */
+ SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
+ PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
+ out0, out1, out2, out3); \
+ }
+#endif /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */
--- a/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c
+++ b/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c
@@ -11,12 +11,9 @@
#include "./vp9_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
-static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
- uint32_t stride,
- uint8_t *frm2_ptr,
- int32_t filt_sth,
- int32_t filt_wgt,
- uint32_t *acc,
+static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
+ uint8_t *frm2_ptr, int32_t filt_sth,
+ int32_t filt_wgt, uint32_t *acc,
uint16_t *cnt) {
uint32_t row;
uint64_t f0, f1, f2, f3;
@@ -54,10 +51,10 @@
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
- MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
- diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -65,8 +62,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -85,8 +82,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -101,10 +98,10 @@
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
- MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
- diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -112,8 +109,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -131,8 +128,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -141,13 +138,10 @@
}
}
-static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
- uint32_t stride,
+static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
uint8_t *frm2_ptr,
- int32_t filt_sth,
- int32_t filt_wgt,
- uint32_t *acc,
- uint16_t *cnt) {
+ int32_t filt_sth, int32_t filt_wgt,
+ uint32_t *acc, uint16_t *cnt) {
uint32_t row;
v16i8 frm1, frm2, frm3, frm4;
v16u8 frm_r, frm_l;
@@ -183,8 +177,8 @@
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -192,8 +186,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -212,8 +206,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -230,8 +224,8 @@
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -239,8 +233,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -259,8 +253,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
ST_SW2(mod2_w, mod3_w, acc, 4);
@@ -277,11 +271,11 @@
int32_t filt_wgt, uint32_t *accu,
uint16_t *cnt) {
if (8 == (blk_w * blk_h)) {
- temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr,
- strength, filt_wgt, accu, cnt);
+ temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
+ filt_wgt, accu, cnt);
} else if (16 == (blk_w * blk_h)) {
- temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr,
- strength, filt_wgt, accu, cnt);
+ temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
+ filt_wgt, accu, cnt);
} else {
vp9_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
strength, filt_wgt, accu, cnt);
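
The MUL4/SRAR_W4/SUB4 chains above vectorize the per-pixel weighting of the C reference (vp9_temporal_filter_apply_c): squared frame difference times 3, a rounding shift by the filter strength, a clamp at 16, then inversion, so small differences receive large blend weights. A scalar sketch of one pixel (names illustrative):

/* One pixel of the temporal filter: the (mod < 16) compare-and-mask above
 * is equivalent to clamping mod at 16 before taking 16 - mod. */
static void temporal_filter_pixel_sketch(int src, int pred, int strength,
                                         int filt_wgt, uint32_t *acc,
                                         uint16_t *cnt) {
  int mod = (src - pred) * (src - pred) * 3;
  mod = (mod + (strength > 0 ? 1 << (strength - 1) : 0)) >> strength;
  mod = (mod < 16) ? 16 - mod : 0; /* small diff -> large weight */
  mod *= filt_wgt;
  *cnt += (uint16_t)mod;
  *acc += (uint32_t)(mod * pred);
}
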
--- a/vp9/encoder/vp9_aq_360.c
+++ b/vp9/encoder/vp9_aq_360.c
@@ -22,8 +22,8 @@
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_segmentation.h"
-static const double rate_ratio[MAX_SEGMENTS] =
- {1.0, 0.75, 0.6, 0.5, 0.4, 0.3, 0.25};
+static const double rate_ratio[MAX_SEGMENTS] = { 1.0, 0.75, 0.6, 0.5,
+ 0.4, 0.3, 0.25 };
// Sets segment id 0 for the equatorial region, 1 for temperate region
// and 2 for the polar regions
--- a/vp9/encoder/vp9_aq_360.h
+++ b/vp9/encoder/vp9_aq_360.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_AQ_360_H_
#define VP9_ENCODER_VP9_AQ_360_H_
--- a/vp9/encoder/vp9_aq_complexity.c
+++ b/vp9/encoder/vp9_aq_complexity.c
@@ -19,21 +19,24 @@
#include "vp9/common/vp9_seg_common.h"
#include "vp9/encoder/vp9_segmentation.h"
-#define AQ_C_SEGMENTS 5
-#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
+#define AQ_C_SEGMENTS 5
+#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
#define AQ_C_STRENGTHS 3
-static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {1.75, 1.25, 1.05, 1.00, 0.90},
- {2.00, 1.50, 1.15, 1.00, 0.85},
- {2.50, 1.75, 1.25, 1.00, 0.80} };
-static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {0.15, 0.30, 0.55, 2.00, 100.0},
- {0.20, 0.40, 0.65, 2.00, 100.0},
- {0.25, 0.50, 0.75, 2.00, 100.0} };
-static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {-4.0, -3.0, -2.0, 100.00, 100.0},
- {-3.5, -2.5, -1.5, 100.00, 100.0},
- {-3.0, -2.0, -1.0, 100.00, 100.0} };
+static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { 1.75, 1.25, 1.05, 1.00, 0.90 },
+ { 2.00, 1.50, 1.15, 1.00, 0.85 },
+ { 2.50, 1.75, 1.25, 1.00, 0.80 }
+};
+static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { 0.15, 0.30, 0.55, 2.00, 100.0 },
+ { 0.20, 0.40, 0.65, 2.00, 100.0 },
+ { 0.25, 0.50, 0.75, 2.00, 100.0 }
+};
+static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { -4.0, -3.0, -2.0, 100.00, 100.0 },
+ { -3.5, -2.5, -1.5, 100.00, 100.0 },
+ { -3.0, -2.0, -1.0, 100.00, 100.0 }
+};
static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
// Approximate base quantizer (truncated to int)
@@ -78,15 +81,12 @@
for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
int qindex_delta;
- if (segment == DEFAULT_AQ2_SEG)
- continue;
+ if (segment == DEFAULT_AQ2_SEG) continue;
- qindex_delta =
- vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
- aq_c_q_adj_factor[aq_strength][segment],
- cm->bit_depth);
+ qindex_delta = vp9_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, cm->base_qindex,
+ aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
-
// For AQ complexity mode, we don't allow Q0 in a segment if the base
// Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
// Q delta is sometimes applied without going back around the rd loop.
@@ -125,26 +125,25 @@
} else {
// Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
// It is converted to bits * 256 units.
- const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
- (bw * bh);
+ const int target_rate =
+ (cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh);
double logvar;
double low_var_thresh;
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
vpx_clear_system_state();
- low_var_thresh = (cpi->oxcf.pass == 2)
- ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH)
- : DEFAULT_LV_THRESH;
+ low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+ MIN_DEFAULT_LV_THRESH)
+ : DEFAULT_LV_THRESH;
vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
logvar = vp9_log_block_var(cpi, mb, bs);
- segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
+ segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
for (i = 0; i < AQ_C_SEGMENTS; ++i) {
// Test rate against a threshold value and variance against a threshold.
// Increasing segment number (higher variance and complexity) = higher Q.
- if ((projected_rate <
- target_rate * aq_c_transitions[aq_strength][i]) &&
+ if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) &&
(logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) {
segment = i;
break;
--- a/vp9/encoder/vp9_aq_complexity.h
+++ b/vp9/encoder/vp9_aq_complexity.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_AQ_COMPLEXITY_H_
#define VP9_ENCODER_VP9_AQ_COMPLEXITY_H_
@@ -23,8 +22,8 @@
// Select a segment for the current Block.
void vp9_caq_select_segment(struct VP9_COMP *cpi, struct macroblock *,
- BLOCK_SIZE bs,
- int mi_row, int mi_col, int projected_rate);
+ BLOCK_SIZE bs, int mi_row, int mi_col,
+ int projected_rate);
// This function sets up a set of segments with delta Q values around
// the baseline frame quantizer.
--- a/vp9/encoder/vp9_aq_cyclicrefresh.c
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.c
@@ -24,8 +24,7 @@
CYCLIC_REFRESH *vp9_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
size_t last_coded_q_map_size;
CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
- if (cr == NULL)
- return NULL;
+ if (cr == NULL) return NULL;
cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
if (cr->map == NULL) {
@@ -53,11 +52,8 @@
// (lower-qp coding). Decision can be based on various factors, such as
// size of the coding block (i.e., below min_block size rejected), coding
// mode, and rate/distortion.
-static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
- const MODE_INFO *mi,
- int64_t rate,
- int64_t dist,
- int bsize) {
+static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, const MODE_INFO *mi,
+ int64_t rate, int64_t dist, int bsize) {
MV mv = mi->mv[0].as_mv;
// Reject the block for lower-qp coding if projected distortion
// is above the threshold, and any of the following is true:
@@ -69,11 +65,9 @@
mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh ||
!is_inter_block(mi)))
return CR_SEGMENT_ID_BASE;
- else if (bsize >= BLOCK_16X16 &&
- rate < cr->thresh_rate_sb &&
- is_inter_block(mi) &&
- mi->mv[0].as_int == 0 &&
- cr->rate_boost_fac > 10)
+ else if (bsize >= BLOCK_16X16 && rate < cr->thresh_rate_sb &&
+ is_inter_block(mi) && mi->mv[0].as_int == 0 &&
+ cr->rate_boost_fac > 10)
// More aggressive delta-q for bigger blocks with zero motion.
return CR_SEGMENT_ID_BOOST2;
else
@@ -84,9 +78,8 @@
static int compute_deltaq(const VP9_COMP *cpi, int q, double rate_factor) {
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const RATE_CONTROL *const rc = &cpi->rc;
- int deltaq = vp9_compute_qdelta_by_rate(rc, cpi->common.frame_type,
- q, rate_factor,
- cpi->common.bit_depth);
+ int deltaq = vp9_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+ rate_factor, cpi->common.bit_depth);
if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
deltaq = -cr->max_qdelta_perc * q / 100;
}
@@ -109,17 +102,18 @@
double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl;
double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl;
// Take segment weighted average for estimated bits.
- estimated_bits = (int)((1.0 - weight_segment1 - weight_segment2) *
- vp9_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
- correction_factor, cm->bit_depth) +
- weight_segment1 *
- vp9_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[1], mbs,
- correction_factor, cm->bit_depth) +
- weight_segment2 *
- vp9_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[2], mbs,
- correction_factor, cm->bit_depth));
+ estimated_bits =
+ (int)((1.0 - weight_segment1 - weight_segment2) *
+ vp9_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+ correction_factor, cm->bit_depth) +
+ weight_segment1 *
+ vp9_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[1],
+ mbs, correction_factor, cm->bit_depth) +
+ weight_segment2 *
+ vp9_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[2],
+ mbs, correction_factor, cm->bit_depth));
return estimated_bits;
}
@@ -137,17 +131,20 @@
// Weight for segment prior to encoding: take the average of the target
// number for the frame to be encoded and the actual from the previous frame.
int target_refresh = cr->percent_refresh * cm->mi_rows * cm->mi_cols / 100;
- double weight_segment = (double)((target_refresh +
- cr->actual_num_seg1_blocks + cr->actual_num_seg2_blocks) >> 1) /
+ double weight_segment =
+ (double)((target_refresh + cr->actual_num_seg1_blocks +
+ cr->actual_num_seg2_blocks) >>
+ 1) /
num8x8bl;
// Compute delta-q corresponding to qindex i.
int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
// Take segment weighted average for bits per mb.
bits_per_mb = (int)((1.0 - weight_segment) *
- vp9_rc_bits_per_mb(cm->frame_type, i, correction_factor, cm->bit_depth) +
- weight_segment *
- vp9_rc_bits_per_mb(cm->frame_type, i + deltaq, correction_factor,
- cm->bit_depth));
+ vp9_rc_bits_per_mb(cm->frame_type, i,
+ correction_factor, cm->bit_depth) +
+ weight_segment *
+ vp9_rc_bits_per_mb(cm->frame_type, i + deltaq,
+ correction_factor, cm->bit_depth));
return bits_per_mb;
}
@@ -154,13 +151,9 @@
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi,
- MODE_INFO *const mi,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- int64_t rate,
- int64_t dist,
- int skip,
+void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, MODE_INFO *const mi,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist, int skip,
struct macroblock_plane *const p) {
const VP9_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
@@ -172,22 +165,16 @@
int refresh_this_block = candidate_refresh_aq(cr, mi, rate, dist, bsize);
// Default is to not update the refresh map.
int new_map_value = cr->map[block_index];
- int x = 0; int y = 0;
+ int x = 0;
+ int y = 0;
int is_skin = 0;
- if (refresh_this_block == 0 &&
- bsize <= BLOCK_16X16 &&
+ if (refresh_this_block == 0 && bsize <= BLOCK_16X16 &&
cpi->use_skin_detection) {
- is_skin = vp9_compute_skin_block(p[0].src.buf,
- p[1].src.buf,
- p[2].src.buf,
- p[0].src.stride,
- p[1].src.stride,
- bsize,
- 0,
- 0);
- if (is_skin)
- refresh_this_block = 1;
+ is_skin =
+ vp9_compute_skin_block(p[0].src.buf, p[1].src.buf, p[2].src.buf,
+ p[0].src.stride, p[1].src.stride, bsize, 0, 0);
+ if (is_skin) refresh_this_block = 1;
}
if (cpi->oxcf.rc_mode == VPX_VBR && mi->ref_frame[0] == GOLDEN_FRAME)
@@ -198,8 +185,7 @@
if (cyclic_refresh_segment_id_boosted(mi->segment_id)) {
mi->segment_id = refresh_this_block;
// Reset segment_id if it will be skipped.
- if (skip)
- mi->segment_id = CR_SEGMENT_ID_BASE;
+ if (skip) mi->segment_id = CR_SEGMENT_ID_BASE;
}
// Update the cyclic refresh map, to be used for setting segmentation map
@@ -212,8 +198,7 @@
// Else if it is accepted as candidate for refresh, and has not already
// been refreshed (marked as 1) then mark it as a candidate for cleanup
// for future time (marked as 0), otherwise don't update it.
- if (cr->map[block_index] == 1)
- new_map_value = 0;
+ if (cr->map[block_index] == 1) new_map_value = 0;
} else {
// Leave it marked as block that is not candidate for refresh.
new_map_value = 1;
@@ -250,13 +235,12 @@
// the map for this spatial location is not entirely correct.
if ((!is_inter_block(mi) || !mi->skip) &&
mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
- cr->last_coded_q_map[map_offset] = clamp(
- cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ);
+ cr->last_coded_q_map[map_offset] =
+ clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ);
} else if (is_inter_block(mi) && mi->skip &&
mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
cr->last_coded_q_map[map_offset] = VPXMIN(
- clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id],
- 0, MAXQ),
+ clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ),
cr->last_coded_q_map[map_offset]);
}
}
@@ -272,11 +256,12 @@
cr->actual_num_seg2_blocks = 0;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row++)
for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
- if (cyclic_refresh_segment_id(
- seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST1)
+ if (cyclic_refresh_segment_id(seg_map[mi_row * cm->mi_cols + mi_col]) ==
+ CR_SEGMENT_ID_BOOST1)
cr->actual_num_seg1_blocks++;
else if (cyclic_refresh_segment_id(
- seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST2)
+ seg_map[mi_row * cm->mi_cols + mi_col]) ==
+ CR_SEGMENT_ID_BOOST2)
cr->actual_num_seg2_blocks++;
}
}
@@ -292,8 +277,7 @@
rc->baseline_gf_interval = VPXMIN(4 * (100 / cr->percent_refresh), 40);
else
rc->baseline_gf_interval = 40;
- if (cpi->oxcf.rc_mode == VPX_VBR)
- rc->baseline_gf_interval = 20;
+ if (cpi->oxcf.rc_mode == VPX_VBR) rc->baseline_gf_interval = 20;
}
// Update some encoding stats (from the just encoded frame). If this frame's
@@ -315,21 +299,21 @@
for (mi_row = 0; mi_row < rows; mi_row++) {
for (mi_col = 0; mi_col < cols; mi_col++) {
if (flag_force_gf_high_motion == 1) {
- int16_t abs_mvr = mi[0]->mv[0].as_mv.row >= 0 ?
- mi[0]->mv[0].as_mv.row : -1 * mi[0]->mv[0].as_mv.row;
- int16_t abs_mvc = mi[0]->mv[0].as_mv.col >= 0 ?
- mi[0]->mv[0].as_mv.col : -1 * mi[0]->mv[0].as_mv.col;
+ int16_t abs_mvr = mi[0]->mv[0].as_mv.row >= 0
+ ? mi[0]->mv[0].as_mv.row
+ : -1 * mi[0]->mv[0].as_mv.row;
+ int16_t abs_mvc = mi[0]->mv[0].as_mv.col >= 0
+ ? mi[0]->mv[0].as_mv.col
+ : -1 * mi[0]->mv[0].as_mv.col;
// Calculate the motion of the background.
if (abs_mvr <= 16 && abs_mvc <= 16) {
cnt1++;
- if (abs_mvr == 0 && abs_mvc == 0)
- cnt2++;
+ if (abs_mvr == 0 && abs_mvc == 0) cnt2++;
}
}
mi++;
// Accumulate low_content_frame.
- if (cr->map[mi_row * cols + mi_col] < 1)
- low_content_frame++;
+ if (cr->map[mi_row * cols + mi_col] < 1) low_content_frame++;
}
mi += 8;
}
@@ -339,7 +323,7 @@
// Also, force this frame as a golden update frame if this frame will change
// the resolution (resize_pending != 0).
if (cpi->resize_pending != 0 ||
- (cnt1 * 100 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
+ (cnt1 * 100 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
vp9_cyclic_refresh_set_golden_update(cpi);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
@@ -348,8 +332,7 @@
cpi->refresh_golden_frame = 1;
force_gf_refresh = 1;
}
- fraction_low =
- (double)low_content_frame / (rows * cols);
+ fraction_low = (double)low_content_frame / (rows * cols);
// Update average.
cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4;
if (!force_gf_refresh && cpi->refresh_golden_frame == 1) {
@@ -396,14 +379,14 @@
}
qindex_thresh =
cpi->oxcf.content == VP9E_CONTENT_SCREEN
- ? vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
- : vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex);
+ ? vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+ : vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex);
// More aggressive settings for noisy content.
if (cpi->noise_estimate.enabled && cpi->noise_estimate.level >= kMedium) {
consec_zero_mv_thresh = 80;
qindex_thresh =
VPXMAX(vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex),
- 7 * cm->base_qindex >> 3);
+ 7 * cm->base_qindex >> 3);
}
do {
int sum_map = 0;
@@ -454,8 +437,7 @@
} while (cr->target_num_seg_blocks < block_count && i != cr->sb_index);
cr->sb_index = i;
cr->reduce_refresh = 0;
- if (count_sel < (3 * count_tot) >> 2)
- cr->reduce_refresh = 1;
+ if (count_sel < (3 * count_tot) >> 2) cr->reduce_refresh = 1;
}
// Set cyclic refresh parameters.
@@ -464,8 +446,7 @@
const VP9_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
cr->percent_refresh = 10;
- if (cr->reduce_refresh)
- cr->percent_refresh = 5;
+ if (cr->reduce_refresh) cr->percent_refresh = 5;
cr->max_qdelta_perc = 50;
cr->time_for_refresh = 0;
cr->motion_thresh = 32;
@@ -474,8 +455,8 @@
// periods of the refresh cycle, after a key frame.
// Account for larger interval on base layer for temporal layers.
if (cr->percent_refresh > 0 &&
- rc->frames_since_key < (4 * cpi->svc.number_temporal_layers) *
- (100 / cr->percent_refresh)) {
+ rc->frames_since_key <
+ (4 * cpi->svc.number_temporal_layers) * (100 / cr->percent_refresh)) {
cr->rate_ratio_qdelta = 3.0;
} else {
cr->rate_ratio_qdelta = 2.0;
@@ -486,9 +467,7 @@
}
}
// Adjust some parameters for low resolutions at low bitrates.
- if (cm->width <= 352 &&
- cm->height <= 288 &&
- rc->avg_frame_bandwidth < 3400) {
+ if (cm->width <= 352 && cm->height <= 288 && rc->avg_frame_bandwidth < 3400) {
cr->motion_thresh = 4;
cr->rate_boost_fac = 10;
}
@@ -520,11 +499,9 @@
// instead of completely shutting off at low bitrates. For now keep it on.
// const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
const int apply_cyclic_refresh = 1;
- if (cm->current_video_frame == 0)
- cr->low_content_avg = 0.0;
+ if (cm->current_video_frame == 0) cr->low_content_avg = 0.0;
// Don't apply refresh on key frame or temporal enhancement layer frames.
- if (!apply_cyclic_refresh ||
- (cm->frame_type == KEY_FRAME) ||
+ if (!apply_cyclic_refresh || (cm->frame_type == KEY_FRAME) ||
(cpi->svc.temporal_layer_id > 0)) {
// Set segmentation map to 0 and disable.
unsigned char *const seg_map = cpi->segmentation_map;
@@ -590,8 +567,7 @@
vp9_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
// Reset if resolution change has occurred.
- if (cpi->resize_pending != 0)
- vp9_cyclic_refresh_reset_resize(cpi);
+ if (cpi->resize_pending != 0) vp9_cyclic_refresh_reset_resize(cpi);
// Update the segmentation and refresh map.
cyclic_refresh_update_map(cpi);
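
The reflowed estimated_bits/bits_per_mb expressions in this file are segment-weighted averages over the refresh segments. A compact sketch of the pattern (bits_at stands in for vp9_estimate_bits_at_q or vp9_rc_bits_per_mb at a given qindex; w1/w2 are the fractions of 8x8 blocks in the two boost segments):

/* Weighted average across the base segment and the two boost segments. */
static int weighted_bits_sketch(double w1, double w2, int q, int dq1, int dq2,
                                int (*bits_at)(int qindex)) {
  return (int)((1.0 - w1 - w2) * bits_at(q) + w1 * bits_at(q + dq1) +
               w2 * bits_at(q + dq2));
}
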
--- a/vp9/encoder/vp9_aq_cyclicrefresh.h
+++ b/vp9/encoder/vp9_aq_cyclicrefresh.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_
#define VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_
@@ -23,9 +22,9 @@
// The segment ids used in cyclic refresh: from base (no boost) to increasing
// boost (higher delta-qp).
-#define CR_SEGMENT_ID_BASE 0
-#define CR_SEGMENT_ID_BOOST1 1
-#define CR_SEGMENT_ID_BOOST2 2
+#define CR_SEGMENT_ID_BASE 0
+#define CR_SEGMENT_ID_BOOST1 1
+#define CR_SEGMENT_ID_BOOST2 2
// Maximum rate target ratio for setting segment delta-qp.
#define CR_MAX_RATE_TARGET_RATIO 4.0
@@ -91,8 +90,8 @@
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void vp9_cyclic_refresh_update_segment(struct VP9_COMP *const cpi,
- MODE_INFO *const mi,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
+ MODE_INFO *const mi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip,
struct macroblock_plane *const p);
--- a/vp9/encoder/vp9_aq_variance.c
+++ b/vp9/encoder/vp9_aq_variance.c
@@ -23,19 +23,19 @@
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
-#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
-#define ENERGY_IN_BOUNDS(energy)\
+#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
+#define ENERGY_IN_BOUNDS(energy) \
assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
-static const double rate_ratio[MAX_SEGMENTS] =
- {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0};
-static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4};
+static const double rate_ratio[MAX_SEGMENTS] = { 2.5, 2.0, 1.5, 1.0,
+ 0.75, 1.0, 1.0, 1.0 };
+static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
-#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN]
+#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
-DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
#if CONFIG_VP9_HIGHBITDEPTH
-DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = { 0 };
#endif
unsigned int vp9_vaq_segment_id(int energy) {
@@ -85,9 +85,9 @@
/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
* of variance() and highbd_8_variance(). It should not.
*/
-static void aq_variance(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int w, int h, unsigned int *sse,
+ int *sum) {
int i, j;
*sum = 0;
@@ -106,9 +106,9 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
-static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h, uint64_t *sse, uint64_t *sum) {
+static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w, int h,
+ uint64_t *sse, uint64_t *sum) {
int i, j;
uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -127,9 +127,9 @@
}
}
-static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w, int h,
+ unsigned int *sse, int *sum) {
uint64_t sse_long = 0;
uint64_t sum_long = 0;
aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -142,10 +142,10 @@
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
- int right_overflow = (xd->mb_to_right_edge < 0) ?
- ((-xd->mb_to_right_edge) >> 3) : 0;
- int bottom_overflow = (xd->mb_to_bottom_edge < 0) ?
- ((-xd->mb_to_bottom_edge) >> 3) : 0;
+ int right_overflow =
+ (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
+ int bottom_overflow =
+ (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
if (right_overflow || bottom_overflow) {
const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
@@ -159,12 +159,12 @@
sse >>= 2 * (xd->bd - 8);
avg >>= (xd->bd - 8);
} else {
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- vp9_64_zeros, 0, bw, bh, &sse, &avg);
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
+ bw, bh, &sse, &avg);
}
#else
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- vp9_64_zeros, 0, bw, bh, &sse, &avg);
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
+ bw, bh, &sse, &avg);
#endif // CONFIG_VP9_HIGHBITDEPTH
var = sse - (((int64_t)avg * avg) / (bw * bh));
return (unsigned int)(((uint64_t)256 * var) / (bw * bh));
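/* Sketch of the arithmetic above: with sse the sum of squared pixels and
   avg their plain sum over n = bw * bh, var = sse - avg * avg / n is n
   times the usual Var(X) = E[X^2] - E[X]^2, and the return rescales it to
   a per-256-pixel figure so clipped edge blocks compare with full ones. */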
@@ -171,18 +171,15 @@
} else {
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, &sse);
} else {
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
+ var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, &sse);
}
#else
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
+ var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, &sse);
#endif // CONFIG_VP9_HIGHBITDEPTH
return (unsigned int)(((uint64_t)256 * var) >> num_pels_log2_lookup[bs]);
@@ -201,7 +198,7 @@
double energy_midpoint;
vpx_clear_system_state();
energy_midpoint =
- (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
+ (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint;
return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}
--- a/vp9/encoder/vp9_aq_variance.h
+++ b/vp9/encoder/vp9_aq_variance.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_AQ_VARIANCE_H_
#define VP9_ENCODER_VP9_AQ_VARIANCE_H_
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -36,14 +36,17 @@
#include "vp9/encoder/vp9_tokenize.h"
static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
- {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
- {62, 6}, {2, 2}};
+ { 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
+ { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
+};
static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
- {{0, 1}, {2, 2}, {3, 2}};
-static const struct vp9_token partition_encodings[PARTITION_TYPES] =
- {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
-static const struct vp9_token inter_mode_encodings[INTER_MODES] =
- {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+ { { 0, 1 }, { 2, 2 }, { 3, 2 } };
+static const struct vp9_token partition_encodings[PARTITION_TYPES] = {
+ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+static const struct vp9_token inter_mode_encodings[INTER_MODES] = {
+ { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
+};
static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
const vpx_prob *probs) {
@@ -57,15 +60,15 @@
&inter_mode_encodings[INTER_OFFSET(mode)]);
}
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
- int data, int max) {
+static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+ int max) {
vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}
static void prob_diff_update(const vpx_tree_index *tree,
vpx_prob probs[/*n - 1*/],
- const unsigned int counts[/*n - 1*/],
- int n, vpx_writer *w) {
+ const unsigned int counts[/*n - 1*/], int n,
+ vpx_writer *w) {
int i;
unsigned int branch_ct[32][2];
@@ -77,13 +80,13 @@
vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
-static void write_selected_tx_size(const VP9_COMMON *cm,
- const MACROBLOCKD *xd, vpx_writer *w) {
+static void write_selected_tx_size(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+ vpx_writer *w) {
TX_SIZE tx_size = xd->mi[0]->tx_size;
BLOCK_SIZE bsize = xd->mi[0]->sb_type;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
- const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
- &cm->fc->tx_probs);
+ const vpx_prob *const tx_probs =
+ get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -120,18 +123,18 @@
counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
-static void pack_mb_tokens(vpx_writer *w,
- TOKENEXTRA **tp, const TOKENEXTRA *const stop,
+static void pack_mb_tokens(vpx_writer *w, TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop,
vpx_bit_depth_t bit_depth) {
const TOKENEXTRA *p;
const vp9_extra_bit *const extra_bits =
#if CONFIG_VP9_HIGHBITDEPTH
- (bit_depth == VPX_BITS_12) ? vp9_extra_bits_high12 :
- (bit_depth == VPX_BITS_10) ? vp9_extra_bits_high10 :
- vp9_extra_bits;
+ (bit_depth == VPX_BITS_12)
+ ? vp9_extra_bits_high12
+ : (bit_depth == VPX_BITS_10) ? vp9_extra_bits_high10 : vp9_extra_bits;
#else
- vp9_extra_bits;
- (void) bit_depth;
+ vp9_extra_bits;
+ (void)bit_depth;
#endif // CONFIG_VP9_HIGHBITDEPTH
for (p = *tp; p < stop && p->token != EOSB_TOKEN; ++p) {
@@ -144,7 +147,7 @@
vpx_write(w, 0, p->context_tree[1]);
++p;
if (p == stop || p->token == EOSB_TOKEN) {
- *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN);
+ *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN);
return;
}
}
@@ -182,7 +185,7 @@
}
}
}
- *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN);
+ *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN);
}
static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
@@ -203,7 +206,7 @@
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
assert(!is_compound);
assert(mi->ref_frame[0] ==
- get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
+ get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
} else {
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
@@ -248,8 +251,7 @@
const int pred_flag = mi->seg_id_predicted;
vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
vpx_write(w, pred_flag, pred_prob);
- if (!pred_flag)
- write_segment_id(w, seg, segment_id);
+ if (!pred_flag) write_segment_id(w, seg, segment_id);
} else {
write_segment_id(w, seg, segment_id);
}
@@ -338,8 +340,7 @@
const MODE_INFO *const left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mi->sb_type;
- if (seg->update_map)
- write_segment_id(w, seg, mi->segment_id);
+ if (seg->update_map) write_segment_id(w, seg, mi->segment_id);
write_skip(cm, xd, mi->segment_id, mi, w);
@@ -367,8 +368,8 @@
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
vpx_writer *w, TOKENEXTRA **tok,
- const TOKENEXTRA *const tok_end,
- int mi_row, int mi_col) {
+ const TOKENEXTRA *const tok_end, int mi_row,
+ int mi_col) {
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
@@ -376,13 +377,12 @@
xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
m = xd->mi[0];
- cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
- (mi_row * cm->mi_cols + mi_col);
+ cpi->td.mb.mbmi_ext =
+ cpi->td.mb.mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
- set_mi_row_col(xd, tile,
- mi_row, num_8x8_blocks_high_lookup[m->sb_type],
- mi_col, num_8x8_blocks_wide_lookup[m->sb_type],
- cm->mi_rows, cm->mi_cols);
+ set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[m->sb_type],
+ mi_col, num_8x8_blocks_wide_lookup[m->sb_type], cm->mi_rows,
+ cm->mi_cols);
if (frame_is_intra_only(cm)) {
write_mb_modes_kf(cm, xd, xd->mi, w);
} else {
@@ -394,9 +394,9 @@
}
static void write_partition(const VP9_COMMON *const cm,
- const MACROBLOCKD *const xd,
- int hbs, int mi_row, int mi_col,
- PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
+ const MACROBLOCKD *const xd, int hbs, int mi_row,
+ int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
+ vpx_writer *w) {
const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
const vpx_prob *const probs = xd->partition_probs[ctx];
const int has_rows = (mi_row + hbs) < cm->mi_rows;
@@ -415,10 +415,10 @@
}
}
-static void write_modes_sb(VP9_COMP *cpi,
- const TileInfo *const tile, vpx_writer *w,
- TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
- int mi_row, int mi_col, BLOCK_SIZE bsize) {
+static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile,
+ vpx_writer *w, TOKENEXTRA **tok,
+ const TOKENEXTRA *const tok_end, int mi_row,
+ int mi_col, BLOCK_SIZE bsize) {
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
@@ -428,8 +428,7 @@
BLOCK_SIZE subsize;
const MODE_INFO *m = NULL;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
@@ -462,8 +461,7 @@
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
subsize);
break;
- default:
- assert(0);
+ default: assert(0);
}
}
@@ -473,9 +471,9 @@
update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
-static void write_modes(VP9_COMP *cpi,
- const TileInfo *const tile, vpx_writer *w,
- TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
+static void write_modes(VP9_COMP *cpi, const TileInfo *const tile,
+ vpx_writer *w, TOKENEXTRA **tok,
+ const TOKENEXTRA *const tok_end) {
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
int mi_row, mi_col;
@@ -487,8 +485,7 @@
vp9_zero(xd->left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE)
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
- BLOCK_64X64);
+ write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
}
}
@@ -496,7 +493,7 @@
vp9_coeff_stats *coef_branch_ct,
vp9_coeff_probs_model *coef_probs) {
vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
- unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
+ unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cpi->common.counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -507,12 +504,12 @@
vp9_tree_probs_from_distribution(vp9_coef_tree,
coef_branch_ct[i][j][k][l],
coef_counts[i][j][k][l]);
- coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
- coef_branch_ct[i][j][k][l][0][0];
+ coef_branch_ct[i][j][k][l][0][1] =
+ eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
- coef_probs[i][j][k][l][m] = get_binary_prob(
- coef_branch_ct[i][j][k][l][m][0],
- coef_branch_ct[i][j][k][l][m][1]);
+ coef_probs[i][j][k][l][m] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
+ coef_branch_ct[i][j][k][l][m][1]);
}
}
}
@@ -519,7 +516,7 @@
}
}
-static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi,
+static void update_coef_probs_common(vpx_writer *const bc, VP9_COMP *cpi,
TX_SIZE tx_size,
vp9_coeff_stats *frame_branch_ct,
vp9_coeff_probs_model *new_coef_probs) {
@@ -533,7 +530,7 @@
case TWO_LOOP: {
/* dry run to see if there is any update at all needed */
int savings = 0;
- int update[2] = {0, 0};
+ int update[2] = { 0, 0 };
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
@@ -550,8 +547,7 @@
else
s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
- if (s > 0 && newp != oldp)
- u = 1;
+ if (s > 0 && newp != oldp) u = 1;
if (u)
savings += s - (int)(vp9_cost_zero(upd));
else
@@ -583,14 +579,12 @@
int u = 0;
if (t == PIVOT_NODE)
s = vp9_prob_diff_update_savings_search_model(
- frame_branch_ct[i][j][k][l][0],
- *oldp, &newp, upd, stepsize);
+ frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
+ stepsize);
else
s = vp9_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t],
- *oldp, &newp, upd);
- if (s > 0 && newp != *oldp)
- u = 1;
+ frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp) u = 1;
vpx_write(bc, u, upd);
if (u) {
/* send/use new probability */
@@ -621,16 +615,14 @@
if (t == PIVOT_NODE) {
s = vp9_prob_diff_update_savings_search_model(
- frame_branch_ct[i][j][k][l][0],
- *oldp, &newp, upd, stepsize);
+ frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
+ stepsize);
} else {
s = vp9_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t],
- *oldp, &newp, upd);
+ frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
- if (s > 0 && newp != *oldp)
- u = 1;
+ if (s > 0 && newp != *oldp) u = 1;
updates += u;
if (u == 0 && updates == 0) {
noupdates_before_first++;
@@ -659,12 +651,11 @@
}
return;
}
- default:
- assert(0);
+ default: assert(0);
}
}
-static void update_coef_probs(VP9_COMP *cpi, vpx_writer* w) {
+static void update_coef_probs(VP9_COMP *cpi, vpx_writer *w) {
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
@@ -675,8 +666,7 @@
(tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
vpx_write_bit(w, 0);
} else {
- build_tree_distribution(cpi, tx_size, frame_branch_ct,
- frame_coef_probs);
+ build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs);
update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
frame_coef_probs);
}
@@ -748,8 +738,7 @@
const struct segmentation *seg = &cm->seg;
vpx_wb_write_bit(wb, seg->enabled);
- if (!seg->enabled)
- return;
+ if (!seg->enabled) return;
// Segmentation map
vpx_wb_write_bit(wb, seg->update_map);
@@ -761,8 +750,7 @@
const int prob = seg->tree_probs[i];
const int update = prob != MAX_PROB;
vpx_wb_write_bit(wb, update);
- if (update)
- vpx_wb_write_literal(wb, prob, 8);
+ if (update) vpx_wb_write_literal(wb, prob, 8);
}
// Write out the chosen coding method.
@@ -772,8 +760,7 @@
const int prob = seg->pred_probs[i];
const int update = prob != MAX_PROB;
vpx_wb_write_bit(wb, update);
- if (update)
- vpx_wb_write_literal(wb, prob, 8);
+ if (update) vpx_wb_write_literal(wb, prob, 8);
}
}
}
@@ -817,7 +804,6 @@
unsigned int ct_16x16p[TX_SIZES - 2][2];
unsigned int ct_32x32p[TX_SIZES - 1][2];
-
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
for (j = 0; j < TX_SIZES - 3; j++)
@@ -879,16 +865,13 @@
// columns
ones = cm->log2_tile_cols - min_log2_tile_cols;
- while (ones--)
- vpx_wb_write_bit(wb, 1);
+ while (ones--) vpx_wb_write_bit(wb, 1);
- if (cm->log2_tile_cols < max_log2_tile_cols)
- vpx_wb_write_bit(wb, 0);
+ if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
// rows
vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
- if (cm->log2_tile_rows != 0)
- vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+ if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
int vp9_get_refresh_mask(VP9_COMP *cpi) {
@@ -935,7 +918,7 @@
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
tok_end = cpi->tile_tok[tile_row][tile_col] +
- cpi->tok_count[tile_row][tile_col];
+ cpi->tok_count[tile_row][tile_col];
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
@@ -942,8 +925,8 @@
else
vpx_start_encode(&residual_bc, data_ptr + total_size);
- write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
- &residual_bc, &tok, tok_end);
+ write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &residual_bc, &tok,
+ tok_end);
assert(tok == tok_end);
vpx_stop_encode(&residual_bc);
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
@@ -961,8 +944,8 @@
static void write_render_size(const VP9_COMMON *cm,
struct vpx_write_bit_buffer *wb) {
- const int scaling_active = cm->width != cm->render_width ||
- cm->height != cm->render_height;
+ const int scaling_active =
+ cm->width != cm->render_width || cm->height != cm->render_height;
vpx_wb_write_bit(wb, scaling_active);
if (scaling_active) {
vpx_wb_write_literal(wb, cm->render_width - 1, 16);
@@ -990,17 +973,17 @@
// Set "found" to 0 for temporal svc and for spatial svc key frame
if (cpi->use_svc &&
((cpi->svc.number_temporal_layers > 1 &&
- cpi->oxcf.rc_mode == VPX_CBR) ||
- (cpi->svc.number_spatial_layers > 1 &&
- cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
- (is_two_pass_svc(cpi) &&
- cpi->svc.encode_empty_frame_state == ENCODING &&
- cpi->svc.layer_context[0].frames_from_key_frame <
- cpi->svc.number_temporal_layers + 1))) {
+ cpi->oxcf.rc_mode == VPX_CBR) ||
+ (cpi->svc.number_spatial_layers > 1 &&
+ cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
+ (is_two_pass_svc(cpi) &&
+ cpi->svc.encode_empty_frame_state == ENCODING &&
+ cpi->svc.layer_context[0].frames_from_key_frame <
+ cpi->svc.number_temporal_layers + 1))) {
found = 0;
} else if (cfg != NULL) {
- found = cm->width == cfg->y_crop_width &&
- cm->height == cfg->y_crop_height;
+ found =
+ cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
}
vpx_wb_write_bit(wb, found);
if (found) {
@@ -1025,20 +1008,11 @@
static void write_profile(BITSTREAM_PROFILE profile,
struct vpx_write_bit_buffer *wb) {
switch (profile) {
- case PROFILE_0:
- vpx_wb_write_literal(wb, 0, 2);
- break;
- case PROFILE_1:
- vpx_wb_write_literal(wb, 2, 2);
- break;
- case PROFILE_2:
- vpx_wb_write_literal(wb, 1, 2);
- break;
- case PROFILE_3:
- vpx_wb_write_literal(wb, 6, 3);
- break;
- default:
- assert(0);
+ case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
+ case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
+ case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
+ case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+ default: assert(0);
}
}
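/* A sketch of the mapping behind the literals above: vpx_wb_write_literal()
   emits MSB-first, so the profile low bit lands first on the wire;
   PROFILE_1 writes "10" (low=1, high=0), PROFILE_2 writes "01", and
   PROFILE_3's 3-bit "110" sets both profile bits plus a reserved zero. */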
@@ -1093,8 +1067,7 @@
// will change the show_frame flag to 0, then add a one-byte frame with
// show_existing_frame flag which tells the decoder which frame we want to
// show.
- if (!cm->show_frame)
- vpx_wb_write_bit(wb, cm->intra_only);
+ if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
if (!cm->error_resilient_mode)
vpx_wb_write_literal(wb, cm->reset_frame_context, 2);
@@ -1222,7 +1195,7 @@
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
uint8_t *data = dest;
size_t first_part_size, uncompressed_hdr_size;
- struct vpx_write_bit_buffer wb = {data, 0};
+ struct vpx_write_bit_buffer wb = { data, 0 };
struct vpx_write_bit_buffer saved_wb;
write_uncompressed_header(cpi, &wb);
--- a/vp9/encoder/vp9_bitstream.h
+++ b/vp9/encoder/vp9_bitstream.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_BITSTREAM_H_
#define VP9_ENCODER_VP9_BITSTREAM_H_
@@ -25,10 +24,9 @@
static INLINE int vp9_preserve_existing_gf(VP9_COMP *cpi) {
return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
cpi->rc.is_src_frame_alt_ref &&
- (!cpi->use_svc || // Add spatial svc base layer case here
- (is_two_pass_svc(cpi) &&
- cpi->svc.spatial_layer_id == 0 &&
- cpi->svc.layer_context[0].gold_ref_idx >=0 &&
+ (!cpi->use_svc || // Add spatial svc base layer case here
+ (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id == 0 &&
+ cpi->svc.layer_context[0].gold_ref_idx >= 0 &&
cpi->oxcf.ss_enable_auto_arf[0]));
}
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -77,8 +77,8 @@
int rddiv;
int rdmult;
int mb_energy;
- int * m_search_count_ptr;
- int * ex_search_count_ptr;
+ int *m_search_count_ptr;
+ int *ex_search_count_ptr;
// These are set to their default values at the beginning, and then adjusted
// further in the encoding process.
@@ -130,9 +130,9 @@
// skip forward transform and quantization
uint8_t skip_txfm[MAX_MB_PLANE << 2];
- #define SKIP_TXFM_NONE 0
- #define SKIP_TXFM_AC_DC 1
- #define SKIP_TXFM_AC_ONLY 2
+#define SKIP_TXFM_NONE 0
+#define SKIP_TXFM_AC_DC 1
+#define SKIP_TXFM_AC_ONLY 2
int64_t bsse[MAX_MB_PLANE << 2];
--- a/vp9/encoder/vp9_blockiness.c
+++ b/vp9/encoder/vp9_blockiness.c
@@ -63,9 +63,9 @@
s_blockiness += horizontal_filter(s);
r_blockiness += horizontal_filter(r);
sum_0 += s[0];
- sum_sq_0 += s[0]*s[0];
+ sum_sq_0 += s[0] * s[0];
sum_1 += s[-1];
- sum_sq_1 += s[-1]*s[-1];
+ sum_sq_1 += s[-1] * s[-1];
}
var_0 = variance(sum_0, sum_sq_0, size);
var_1 = variance(sum_1, sum_sq_1, size);
@@ -113,19 +113,19 @@
// This function currently returns the blockiness for the entire frame by
// looking at all borders in steps of 4.
double vp9_get_blockiness(const uint8_t *img1, int img1_pitch,
- const uint8_t *img2, int img2_pitch,
- int width, int height) {
+ const uint8_t *img2, int img2_pitch, int width,
+ int height) {
double blockiness = 0;
int i, j;
vpx_clear_system_state();
- for (i = 0; i < height; i += 4, img1 += img1_pitch * 4,
- img2 += img2_pitch * 4) {
+ for (i = 0; i < height;
+ i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
for (j = 0; j < width; j += 4) {
if (i > 0 && i < height && j > 0 && j < width) {
- blockiness += blockiness_vertical(img1 + j, img1_pitch,
- img2 + j, img2_pitch, 4);
- blockiness += blockiness_horizontal(img1 + j, img1_pitch,
- img2 + j, img2_pitch, 4);
+ blockiness +=
+ blockiness_vertical(img1 + j, img1_pitch, img2 + j, img2_pitch, 4);
+ blockiness += blockiness_horizontal(img1 + j, img1_pitch, img2 + j,
+ img2_pitch, 4);
}
}
}
--- a/vp9/encoder/vp9_context_tree.c
+++ b/vp9/encoder/vp9_context_tree.c
@@ -12,10 +12,7 @@
#include "vp9/encoder/vp9_encoder.h"
static const BLOCK_SIZE square[] = {
- BLOCK_8X8,
- BLOCK_16X16,
- BLOCK_32X32,
- BLOCK_64X64,
+ BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
};
static void alloc_mode_context(VP9_COMMON *cm, int num_4x4_blk,
@@ -25,8 +22,7 @@
int i, k;
ctx->num_4x4_blk = num_blk;
- CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
- vpx_calloc(num_blk, sizeof(uint8_t)));
+ CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, vpx_calloc(num_blk, sizeof(uint8_t)));
for (i = 0; i < MAX_MB_PLANE; ++i) {
for (k = 0; k < 3; ++k) {
CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
@@ -37,10 +33,10 @@
vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
- ctx->coeff_pbuf[i][k] = ctx->coeff[i][k];
- ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k];
+ ctx->coeff_pbuf[i][k] = ctx->coeff[i][k];
+ ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k];
ctx->dqcoeff_pbuf[i][k] = ctx->dqcoeff[i][k];
- ctx->eobs_pbuf[i][k] = ctx->eobs[i][k];
+ ctx->eobs_pbuf[i][k] = ctx->eobs[i][k];
}
}
}
@@ -66,12 +62,12 @@
static void alloc_tree_contexts(VP9_COMMON *cm, PC_TREE *tree,
int num_4x4_blk) {
alloc_mode_context(cm, num_4x4_blk, &tree->none);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[0]);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[0]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[0]);
if (num_4x4_blk > 4) {
- alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[1]);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[1]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[1]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[1]);
} else {
memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1]));
memset(&tree->vertical[1], 0, sizeof(tree->vertical[1]));
@@ -101,11 +97,11 @@
int nodes;
vpx_free(td->leaf_tree);
- CHECK_MEM_ERROR(cm, td->leaf_tree, vpx_calloc(leaf_nodes,
- sizeof(*td->leaf_tree)));
+ CHECK_MEM_ERROR(cm, td->leaf_tree,
+ vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
vpx_free(td->pc_tree);
- CHECK_MEM_ERROR(cm, td->pc_tree, vpx_calloc(tree_nodes,
- sizeof(*td->pc_tree)));
+ CHECK_MEM_ERROR(cm, td->pc_tree,
+ vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
this_pc = &td->pc_tree[0];
this_leaf = &td->leaf_tree[0];
@@ -112,8 +108,7 @@
// 4x4 blocks smaller than 8x8 but in the same 8x8 block share the same
// context so we only need to allocate 1 for each 8x8 block.
- for (i = 0; i < leaf_nodes; ++i)
- alloc_mode_context(cm, 1, &td->leaf_tree[i]);
+ for (i = 0; i < leaf_nodes; ++i) alloc_mode_context(cm, 1, &td->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) {
@@ -121,8 +116,7 @@
tree->block_size = square[0];
alloc_tree_contexts(cm, tree, 4);
tree->leaf_split[0] = this_leaf++;
- for (j = 1; j < 4; j++)
- tree->leaf_split[j] = tree->leaf_split[0];
+ for (j = 1; j < 4; j++) tree->leaf_split[j] = tree->leaf_split[0];
}
// Each node has 4 leaf nodes, fill each block_size level of the tree
@@ -132,8 +126,7 @@
PC_TREE *const tree = &td->pc_tree[pc_tree_index];
alloc_tree_contexts(cm, tree, 4 << (2 * square_index));
tree->block_size = square[square_index];
- for (j = 0; j < 4; j++)
- tree->split[j] = this_pc++;
+ for (j = 0; j < 4; j++) tree->split[j] = this_pc++;
++pc_tree_index;
}
++square_index;
@@ -147,12 +140,10 @@
int i;
// Set up all 4x4 mode contexts
- for (i = 0; i < 64; ++i)
- free_mode_context(&td->leaf_tree[i]);
+ for (i = 0; i < 64; ++i) free_mode_context(&td->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
- for (i = 0; i < tree_nodes; ++i)
- free_tree_contexts(&td->pc_tree[i]);
+ for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
vpx_free(td->pc_tree);
td->pc_tree = NULL;
--- a/vp9/encoder/vp9_cost.c
+++ b/vp9/encoder/vp9_cost.c
@@ -14,31 +14,30 @@
/* round(-log2(i/256.) * (1 << VP9_PROB_COST_SHIFT))
Begins with a bogus entry for simpler addressing. */
const uint16_t vp9_prob_cost[256] = {
- 4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325,
- 2260, 2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780,
- 1748, 1718, 1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470,
- 1449, 1429, 1409, 1390, 1371, 1353, 1335, 1318, 1301, 1284, 1268, 1252,
- 1236, 1221, 1206, 1192, 1177, 1163, 1149, 1136, 1123, 1110, 1097, 1084,
- 1072, 1059, 1047, 1036, 1024, 1013, 1001, 990, 979, 968, 958, 947,
- 937, 927, 917, 907, 897, 887, 878, 868, 859, 850, 841, 832,
- 823, 814, 806, 797, 789, 780, 772, 764, 756, 748, 740, 732,
- 724, 717, 709, 702, 694, 687, 680, 673, 665, 658, 651, 644,
- 637, 631, 624, 617, 611, 604, 598, 591, 585, 578, 572, 566,
- 560, 554, 547, 541, 535, 530, 524, 518, 512, 506, 501, 495,
- 489, 484, 478, 473, 467, 462, 456, 451, 446, 441, 435, 430,
- 425, 420, 415, 410, 405, 400, 395, 390, 385, 380, 375, 371,
- 366, 361, 356, 352, 347, 343, 338, 333, 329, 324, 320, 316,
- 311, 307, 302, 298, 294, 289, 285, 281, 277, 273, 268, 264,
- 260, 256, 252, 248, 244, 240, 236, 232, 228, 224, 220, 216,
- 212, 209, 205, 201, 197, 194, 190, 186, 182, 179, 175, 171,
- 168, 164, 161, 157, 153, 150, 146, 143, 139, 136, 132, 129,
- 125, 122, 119, 115, 112, 109, 105, 102, 99, 95, 92, 89,
- 86, 82, 79, 76, 73, 70, 66, 63, 60, 57, 54, 51,
- 48, 45, 42, 38, 35, 32, 29, 26, 23, 20, 18, 15,
- 12, 9, 6, 3};
+ 4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
+ 2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
+ 1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
+ 1390, 1371, 1353, 1335, 1318, 1301, 1284, 1268, 1252, 1236, 1221, 1206, 1192,
+ 1177, 1163, 1149, 1136, 1123, 1110, 1097, 1084, 1072, 1059, 1047, 1036, 1024,
+ 1013, 1001, 990, 979, 968, 958, 947, 937, 927, 917, 907, 897, 887,
+ 878, 868, 859, 850, 841, 832, 823, 814, 806, 797, 789, 780, 772,
+ 764, 756, 748, 740, 732, 724, 717, 709, 702, 694, 687, 680, 673,
+ 665, 658, 651, 644, 637, 631, 624, 617, 611, 604, 598, 591, 585,
+ 578, 572, 566, 560, 554, 547, 541, 535, 530, 524, 518, 512, 506,
+ 501, 495, 489, 484, 478, 473, 467, 462, 456, 451, 446, 441, 435,
+ 430, 425, 420, 415, 410, 405, 400, 395, 390, 385, 380, 375, 371,
+ 366, 361, 356, 352, 347, 343, 338, 333, 329, 324, 320, 316, 311,
+ 307, 302, 298, 294, 289, 285, 281, 277, 273, 268, 264, 260, 256,
+ 252, 248, 244, 240, 236, 232, 228, 224, 220, 216, 212, 209, 205,
+ 201, 197, 194, 190, 186, 182, 179, 175, 171, 168, 164, 161, 157,
+ 153, 150, 146, 143, 139, 136, 132, 129, 125, 122, 119, 115, 112,
+ 109, 105, 102, 99, 95, 92, 89, 86, 82, 79, 76, 73, 70,
+ 66, 63, 60, 57, 54, 51, 48, 45, 42, 38, 35, 32, 29,
+ 26, 23, 20, 18, 15, 12, 9, 6, 3
+};
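/* A regeneration sketch for the table above, assuming VP9_PROB_COST_SHIFT
 * is 9 (so a probability of 128 costs exactly one bit, 512); entry 0
 * repeats entry 1 as the "bogus entry" mentioned in the comment. */
#if 0
#include <math.h>
static void regen_prob_cost(uint16_t table[256]) {
  int i;
  for (i = 1; i < 256; ++i)
    table[i] = (uint16_t)round(-log2(i / 256.) * (1 << VP9_PROB_COST_SHIFT));
  table[0] = table[1];  // 4096, duplicating the prob == 1 entry
}
#endif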
-static void cost(int *costs, vpx_tree tree, const vpx_prob *probs,
- int i, int c) {
+static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+ int c) {
const vpx_prob prob = probs[i / 2];
int b;
--- a/vp9/encoder/vp9_cost.h
+++ b/vp9/encoder/vp9_cost.h
@@ -27,8 +27,7 @@
#define vp9_cost_one(prob) vp9_cost_zero(256 - (prob))
-#define vp9_cost_bit(prob, bit) vp9_cost_zero((bit) ? 256 - (prob) \
- : (prob))
+#define vp9_cost_bit(prob, bit) vp9_cost_zero((bit) ? 256 - (prob) : (prob))
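/* e.g. at a flat probability this macro costs exactly one bit on the
   fixed-point scale: vp9_cost_bit(128, 0) == vp9_prob_cost[128] == 512. */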
static INLINE unsigned int cost_branch256(const unsigned int ct[2],
vpx_prob p) {
@@ -35,8 +34,8 @@
return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p);
}
-static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs,
- int bits, int len) {
+static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+ int len) {
int cost = 0;
vpx_tree_index i = 0;
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -61,8 +61,8 @@
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
- t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
- t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
output[0] = (tran_low_t)fdct_round_shift(t0);
output[2] = (tran_low_t)fdct_round_shift(t2);
output[4] = (tran_low_t)fdct_round_shift(t1);
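/* The cospi_k_64 constants used throughout are 14-bit fixed-point twiddle
   factors, approximately round(cos(k * PI / 64) * (1 << 14)); e.g.
   cospi_8_64 == 15137, and fdct_round_shift() rounds away those 14
   fractional bits. */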
@@ -81,10 +81,10 @@
x3 = s7 + t3;
// Stage 4
- t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
- t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
- t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
output[1] = (tran_low_t)fdct_round_shift(t0);
output[3] = (tran_low_t)fdct_round_shift(t2);
output[5] = (tran_low_t)fdct_round_shift(t1);
@@ -105,11 +105,11 @@
input[3] = in[3] + in[12];
input[4] = in[4] + in[11];
input[5] = in[5] + in[10];
- input[6] = in[6] + in[ 9];
- input[7] = in[7] + in[ 8];
+ input[6] = in[6] + in[9];
+ input[7] = in[7] + in[8];
- step1[0] = in[7] - in[ 8];
- step1[1] = in[6] - in[ 9];
+ step1[0] = in[7] - in[8];
+ step1[1] = in[6] - in[9];
step1[2] = in[5] - in[10];
step1[3] = in[4] - in[11];
step1[4] = in[3] - in[12];
@@ -140,7 +140,7 @@
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
- t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+ t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
out[0] = (tran_low_t)fdct_round_shift(t0);
out[4] = (tran_low_t)fdct_round_shift(t2);
@@ -160,10 +160,10 @@
x3 = s7 + t3;
// Stage 4
- t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
- t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
- t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
out[2] = (tran_low_t)fdct_round_shift(t0);
out[6] = (tran_low_t)fdct_round_shift(t2);
out[10] = (tran_low_t)fdct_round_shift(t1);
@@ -191,12 +191,12 @@
step3[7] = step1[7] + step2[4];
// step 4
- temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
- temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
+ temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+ temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
step2[1] = fdct_round_shift(temp1);
step2[2] = fdct_round_shift(temp2);
temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
- temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+ temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
step2[5] = fdct_round_shift(temp1);
step2[6] = fdct_round_shift(temp2);
@@ -211,23 +211,23 @@
step1[7] = step3[7] + step2[6];
// step 6
- temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+ temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
out[1] = (tran_low_t)fdct_round_shift(temp1);
out[9] = (tran_low_t)fdct_round_shift(temp2);
temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
- temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+ temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
out[5] = (tran_low_t)fdct_round_shift(temp1);
out[13] = (tran_low_t)fdct_round_shift(temp2);
- temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+ temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
out[3] = (tran_low_t)fdct_round_shift(temp1);
out[11] = (tran_low_t)fdct_round_shift(temp2);
temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
- temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+ temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
out[7] = (tran_low_t)fdct_round_shift(temp1);
out[15] = (tran_low_t)fdct_round_shift(temp2);
}
@@ -285,14 +285,14 @@
tran_high_t x7 = input[6];
// stage 1
- s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
- s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
- s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
- s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
x0 = fdct_round_shift(s0 + s4);
x1 = fdct_round_shift(s1 + s5);
@@ -308,10 +308,10 @@
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
- s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
- s6 = - cospi_24_64 * x6 + cospi_8_64 * x7;
- s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
x0 = s0 + s2;
x1 = s1 + s3;
@@ -365,11 +365,11 @@
tran_high_t x15 = input[14];
// stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -378,9 +378,9 @@
s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
x0 = fdct_round_shift(s0 + s8);
x1 = fdct_round_shift(s1 + s9);
@@ -390,8 +390,8 @@
x5 = fdct_round_shift(s5 + s13);
x6 = fdct_round_shift(s6 + s14);
x7 = fdct_round_shift(s7 + s15);
- x8 = fdct_round_shift(s0 - s8);
- x9 = fdct_round_shift(s1 - s9);
+ x8 = fdct_round_shift(s0 - s8);
+ x9 = fdct_round_shift(s1 - s9);
x10 = fdct_round_shift(s2 - s10);
x11 = fdct_round_shift(s3 - s11);
x12 = fdct_round_shift(s4 - s12);
@@ -408,14 +408,14 @@
s5 = x5;
s6 = x6;
s7 = x7;
- s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
- s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
- s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
- s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
- s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
- s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
- s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
- s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
x0 = s0 + s4;
x1 = s1 + s5;
@@ -439,18 +439,18 @@
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
- s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
- s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
s8 = x8;
s9 = x9;
s10 = x10;
s11 = x11;
- s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
- s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
- s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+ s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
x0 = s0 + s2;
x1 = s1 + s3;
@@ -470,13 +470,13 @@
x15 = fdct_round_shift(s13 - s15);
// stage 4
- s2 = (- cospi_16_64) * (x2 + x3);
+ s2 = (-cospi_16_64) * (x2 + x3);
s3 = cospi_16_64 * (x2 - x3);
s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (- x6 + x7);
+ s7 = cospi_16_64 * (-x6 + x7);
s10 = cospi_16_64 * (x10 + x11);
- s11 = cospi_16_64 * (- x10 + x11);
- s14 = (- cospi_16_64) * (x14 + x15);
+ s11 = cospi_16_64 * (-x10 + x11);
+ s14 = (-cospi_16_64) * (x14 + x15);
s15 = cospi_16_64 * (x14 - x15);
x2 = fdct_round_shift(s2);
@@ -507,28 +507,28 @@
}
static const transform_2d FHT_4[] = {
- { fdct4, fdct4 }, // DCT_DCT = 0
- { fadst4, fdct4 }, // ADST_DCT = 1
- { fdct4, fadst4 }, // DCT_ADST = 2
- { fadst4, fadst4 } // ADST_ADST = 3
+ { fdct4, fdct4 }, // DCT_DCT = 0
+ { fadst4, fdct4 }, // ADST_DCT = 1
+ { fdct4, fadst4 }, // DCT_ADST = 2
+ { fadst4, fadst4 } // ADST_ADST = 3
};
static const transform_2d FHT_8[] = {
- { fdct8, fdct8 }, // DCT_DCT = 0
- { fadst8, fdct8 }, // ADST_DCT = 1
- { fdct8, fadst8 }, // DCT_ADST = 2
- { fadst8, fadst8 } // ADST_ADST = 3
+ { fdct8, fdct8 }, // DCT_DCT = 0
+ { fadst8, fdct8 }, // ADST_DCT = 1
+ { fdct8, fadst8 }, // DCT_ADST = 2
+ { fadst8, fadst8 } // ADST_ADST = 3
};
static const transform_2d FHT_16[] = {
- { fdct16, fdct16 }, // DCT_DCT = 0
- { fadst16, fdct16 }, // ADST_DCT = 1
- { fdct16, fadst16 }, // DCT_ADST = 2
- { fadst16, fadst16 } // ADST_ADST = 3
+ { fdct16, fdct16 }, // DCT_DCT = 0
+ { fadst16, fdct16 }, // ADST_DCT = 1
+ { fdct16, fadst16 }, // DCT_ADST = 2
+ { fadst16, fadst16 } // ADST_ADST = 3
};
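/* In each pair the first 1-D transform runs down the columns and the
   second across the rows (ht.cols/ht.rows in the functions below), so
   e.g. ADST_DCT applies fadst* vertically and fdct* horizontally. */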
-void vp9_fht4x4_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct4x4_c(input, output, stride);
} else {
@@ -539,22 +539,17 @@
// Columns
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = input[j * stride + i] * 16;
- if (i == 0 && temp_in[0])
- temp_in[0] += 1;
+ for (j = 0; j < 4; ++j) temp_in[j] = input[j * stride + i] * 16;
+ if (i == 0 && temp_in[0]) temp_in[0] += 1;
ht.cols(temp_in, temp_out);
- for (j = 0; j < 4; ++j)
- out[j * 4 + i] = temp_out[j];
+ for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
}
// Rows
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = out[j + i * 4];
+ for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
ht.rows(temp_in, temp_out);
- for (j = 0; j < 4; ++j)
- output[j + i * 4] = (temp_out[j] + 1) >> 2;
+ for (j = 0; j < 4; ++j) output[j + i * 4] = (temp_out[j] + 1) >> 2;
}
}
}
@@ -561,14 +556,12 @@
void vp9_fdct8x8_quant_c(const int16_t *input, int stride,
tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
int eob = -1;
int i, j;
@@ -600,8 +593,8 @@
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
- t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
- t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
output[0 * 8] = (tran_low_t)fdct_round_shift(t0);
output[2 * 8] = (tran_low_t)fdct_round_shift(t2);
output[4 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -620,10 +613,10 @@
x3 = s7 + t3;
// Stage 4
- t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
- t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
- t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
output[1 * 8] = (tran_low_t)fdct_round_shift(t0);
output[3 * 8] = (tran_low_t)fdct_round_shift(t2);
output[5 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -636,8 +629,7 @@
// Rows
for (i = 0; i < 8; ++i) {
fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]);
- for (j = 0; j < 8; ++j)
- coeff_ptr[j + i * 8] /= 2;
+ for (j = 0; j < 8; ++j) coeff_ptr[j + i * 8] /= 2;
}
// TODO(jingning) Decide the need of these arguments after the
@@ -664,15 +656,14 @@
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
}
-void vp9_fht8x8_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct8x8_c(input, output, stride);
} else {
@@ -683,17 +674,14 @@
// Columns
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = input[j * stride + i] * 4;
+ for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
- for (j = 0; j < 8; ++j)
- out[j * 8 + i] = temp_out[j];
+ for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
}
// Rows
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j + i * 8];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
ht.rows(temp_in, temp_out);
for (j = 0; j < 8; ++j)
output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -757,8 +745,8 @@
}
}
-void vp9_fht16x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct16x16_c(input, output, stride);
} else {
@@ -769,8 +757,7 @@
// Columns
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = input[j * stride + i] * 4;
+ for (j = 0; j < 16; ++j) temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
for (j = 0; j < 16; ++j)
out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
@@ -778,23 +765,21 @@
// Rows
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j + i * 16];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j + i * 16];
ht.rows(temp_in, temp_out);
- for (j = 0; j < 16; ++j)
- output[j + i * 16] = temp_out[j];
+ for (j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j];
}
}
}
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_fht4x4_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
vp9_fht4x4_c(input, output, stride, tx_type);
}
-void vp9_highbd_fht8x8_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
vp9_fht8x8_c(input, output, stride, tx_type);
}
@@ -803,8 +788,8 @@
vp9_fwht4x4_c(input, output, stride);
}
-void vp9_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
vp9_fht16x16_c(input, output, stride, tx_type);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
--- a/vp9/encoder/vp9_denoiser.c
+++ b/vp9/encoder/vp9_denoiser.c
@@ -48,8 +48,7 @@
static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
int motion_magnitude) {
- if (motion_magnitude >
- noise_motion_thresh(bs, increase_denoising)) {
+ if (motion_magnitude > noise_motion_thresh(bs, increase_denoising)) {
if (increase_denoising)
return (1 << num_pels_log2_lookup[bs]) << 2;
else
@@ -67,18 +66,15 @@
// we might need to update the code for calculating 'total_adj' in
// case the C code is not bit-exact with corresponding sse2 code.
int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride,
- const uint8_t *mc_avg,
- int mc_avg_stride,
- uint8_t *avg, int avg_stride,
- int increase_denoising,
- BLOCK_SIZE bs,
- int motion_magnitude) {
+ const uint8_t *mc_avg, int mc_avg_stride,
+ uint8_t *avg, int avg_stride, int increase_denoising,
+ BLOCK_SIZE bs, int motion_magnitude) {
int r, c;
const uint8_t *sig_start = sig;
const uint8_t *mc_avg_start = mc_avg;
uint8_t *avg_start = avg;
int diff, adj, absdiff, delta;
- int adj_val[] = {3, 4, 6};
+ int adj_val[] = { 3, 4, 6 };
int total_adj = 0;
int shift_inc = 1;
@@ -105,15 +101,19 @@
total_adj += diff;
} else {
switch (absdiff) {
- case 4: case 5: case 6: case 7:
- adj = adj_val[0];
- break;
- case 8: case 9: case 10: case 11:
- case 12: case 13: case 14: case 15:
- adj = adj_val[1];
- break;
- default:
- adj = adj_val[2];
+ case 4:
+ case 5:
+ case 6:
+ case 7: adj = adj_val[0]; break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15: adj = adj_val[1]; break;
+ default: adj = adj_val[2];
}
if (diff > 0) {
avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj);
@@ -135,14 +135,15 @@
}
// Otherwise, we try to dampen the filter if the delta is not too high.
- delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising))
- >> num_pels_log2_lookup[bs]) + 1;
+ delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) >>
+ num_pels_log2_lookup[bs]) +
+ 1;
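/* i.e. delta approximates the per-pixel average by which |total_adj|
   overshoots the strong-filter threshold, plus one; a large overshoot
   bails out to COPY_BLOCK just below. */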
if (delta >= delta_thresh(bs, increase_denoising)) {
return COPY_BLOCK;
}
- mc_avg = mc_avg_start;
+ mc_avg = mc_avg_start;
avg = avg_start;
sig = sig_start;
for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
@@ -178,22 +179,15 @@
return COPY_BLOCK;
}
-static uint8_t *block_start(uint8_t *framebuf, int stride,
- int mi_row, int mi_col) {
- return framebuf + (stride * mi_row << 3) + (mi_col << 3);
+static uint8_t *block_start(uint8_t *framebuf, int stride, int mi_row,
+ int mi_col) {
+ return framebuf + (stride * mi_row << 3) + (mi_col << 3);
}
-static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser,
- MACROBLOCK *mb,
- BLOCK_SIZE bs,
- int increase_denoising,
- int mi_row,
- int mi_col,
- PICK_MODE_CONTEXT *ctx,
- int motion_magnitude,
- int is_skin,
- int *zeromv_filter,
- int consec_zeromv) {
+static VP9_DENOISER_DECISION perform_motion_compensation(
+ VP9_DENOISER *denoiser, MACROBLOCK *mb, BLOCK_SIZE bs,
+ int increase_denoising, int mi_row, int mi_col, PICK_MODE_CONTEXT *ctx,
+ int motion_magnitude, int is_skin, int *zeromv_filter, int consec_zeromv) {
int sse_diff = ctx->zeromv_sse - ctx->newmv_sse;
MV_REFERENCE_FRAME frame;
MACROBLOCKD *filter_mbd = &mb->e_mbd;
@@ -206,20 +200,18 @@
frame = ctx->best_reference_frame;
saved_mi = *mi;
- if (is_skin && (motion_magnitude > 0 || consec_zeromv < 4))
- return COPY_BLOCK;
+ if (is_skin && (motion_magnitude > 0 || consec_zeromv < 4)) return COPY_BLOCK;
// Avoid denoising for small block (unless motion is small).
// Small blocks are selected in variance partition (before encoding) and
// will typically lie on moving areas.
- if (denoiser->denoising_level < kDenHigh &&
- motion_magnitude > 16 && bs <= BLOCK_8X8)
+ if (denoiser->denoising_level < kDenHigh && motion_magnitude > 16 &&
+ bs <= BLOCK_8X8)
return COPY_BLOCK;
// If the best reference frame uses inter-prediction and there is enough of a
// difference in sum-squared-error, use it.
- if (frame != INTRA_FRAME &&
- ctx->newmv_sse != UINT_MAX &&
+ if (frame != INTRA_FRAME && ctx->newmv_sse != UINT_MAX &&
sse_diff > sse_diff_thresh(bs, increase_denoising, motion_magnitude)) {
mi->ref_frame[0] = ctx->best_reference_frame;
mi->mode = ctx->best_sse_inter_mode;
@@ -230,7 +222,7 @@
ctx->newmv_sse = ctx->zeromv_sse;
// Bias to last reference.
if (frame != LAST_FRAME &&
- ((ctx->zeromv_lastref_sse < (5 * ctx->zeromv_sse) >> 2) ||
+ ((ctx->zeromv_lastref_sse < ((5 * ctx->zeromv_sse) >> 2)) ||
denoiser->denoising_level >= kDenHigh)) {
frame = LAST_FRAME;
ctx->newmv_sse = ctx->zeromv_lastref_sse;
@@ -251,8 +243,7 @@
*mi = saved_mi;
return COPY_BLOCK;
}
- if (motion_magnitude >
- (noise_motion_thresh(bs, increase_denoising) << 3)) {
+ if (motion_magnitude > (noise_motion_thresh(bs, increase_denoising) << 3)) {
// Restore everything to its original state
*mi = saved_mi;
return COPY_BLOCK;
@@ -268,37 +259,28 @@
// struct.
filter_mbd->plane[0].pre[0].buf =
block_start(denoiser->running_avg_y[frame].y_buffer,
- denoiser->running_avg_y[frame].y_stride,
- mi_row, mi_col);
- filter_mbd->plane[0].pre[0].stride =
- denoiser->running_avg_y[frame].y_stride;
+ denoiser->running_avg_y[frame].y_stride, mi_row, mi_col);
+ filter_mbd->plane[0].pre[0].stride = denoiser->running_avg_y[frame].y_stride;
filter_mbd->plane[1].pre[0].buf =
- block_start(denoiser->running_avg_y[frame].u_buffer,
- denoiser->running_avg_y[frame].uv_stride,
- mi_row, mi_col);
- filter_mbd->plane[1].pre[0].stride =
- denoiser->running_avg_y[frame].uv_stride;
+ block_start(denoiser->running_avg_y[frame].u_buffer,
+ denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
+ filter_mbd->plane[1].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;
filter_mbd->plane[2].pre[0].buf =
block_start(denoiser->running_avg_y[frame].v_buffer,
- denoiser->running_avg_y[frame].uv_stride,
- mi_row, mi_col);
- filter_mbd->plane[2].pre[0].stride =
- denoiser->running_avg_y[frame].uv_stride;
+ denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
+ filter_mbd->plane[2].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;
filter_mbd->plane[0].dst.buf =
block_start(denoiser->mc_running_avg_y.y_buffer,
- denoiser->mc_running_avg_y.y_stride,
- mi_row, mi_col);
+ denoiser->mc_running_avg_y.y_stride, mi_row, mi_col);
filter_mbd->plane[0].dst.stride = denoiser->mc_running_avg_y.y_stride;
filter_mbd->plane[1].dst.buf =
block_start(denoiser->mc_running_avg_y.u_buffer,
- denoiser->mc_running_avg_y.uv_stride,
- mi_row, mi_col);
+ denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride;
filter_mbd->plane[2].dst.buf =
block_start(denoiser->mc_running_avg_y.v_buffer,
- denoiser->mc_running_avg_y.uv_stride,
- mi_row, mi_col);
+ denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride;
vp9_build_inter_predictors_sby(filter_mbd, mi_row, mi_col, bs);
@@ -313,9 +295,8 @@
return FILTER_BLOCK;
}
-void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb,
- int mi_row, int mi_col, BLOCK_SIZE bs,
- PICK_MODE_CONTEXT *ctx,
+void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
+ BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx,
VP9_DENOISER_DECISION *denoiser_decision) {
int mv_col, mv_row;
int motion_magnitude = 0;
@@ -325,8 +306,8 @@
YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME];
YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y;
uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col);
- uint8_t *mc_avg_start = block_start(mc_avg.y_buffer, mc_avg.y_stride,
- mi_row, mi_col);
+ uint8_t *mc_avg_start =
+ block_start(mc_avg.y_buffer, mc_avg.y_stride, mi_row, mi_col);
struct buf_2d src = mb->plane[0].src;
int is_skin = 0;
int consec_zeromv = 0;
@@ -334,8 +315,7 @@
mv_row = ctx->best_sse_mv.as_mv.row;
motion_magnitude = mv_row * mv_row + mv_col * mv_col;
- if (cpi->use_skin_detection &&
- bs <= BLOCK_32X32 &&
+ if (cpi->use_skin_detection && bs <= BLOCK_32X32 &&
denoiser->denoising_level < kDenHigh) {
int motion_level = (motion_magnitude < 16) ? 0 : 1;
// If motion for current block is small/zero, compute consec_zeromv for
@@ -343,7 +323,7 @@
// consec_zeromv when current block has small/zero motion).
consec_zeromv = 0;
if (motion_level == 0) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
int j, i;
// Loop through the 8x8 sub-blocks.
const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
@@ -367,17 +347,12 @@
}
}
// TODO(marpan): Compute skin detection over sub-blocks.
- is_skin = vp9_compute_skin_block(mb->plane[0].src.buf,
- mb->plane[1].src.buf,
- mb->plane[2].src.buf,
- mb->plane[0].src.stride,
- mb->plane[1].src.stride,
- bs,
- consec_zeromv,
- motion_level);
+ is_skin = vp9_compute_skin_block(
+ mb->plane[0].src.buf, mb->plane[1].src.buf, mb->plane[2].src.buf,
+ mb->plane[0].src.stride, mb->plane[1].src.stride, bs, consec_zeromv,
+ motion_level);
}
- if (!is_skin &&
- denoiser->denoising_level == kDenHigh) {
+ if (!is_skin && denoiser->denoising_level == kDenHigh) {
denoiser->increase_denoising = 1;
} else {
denoiser->increase_denoising = 0;
@@ -384,31 +359,23 @@
}
if (denoiser->denoising_level >= kDenLow)
- decision = perform_motion_compensation(denoiser, mb, bs,
- denoiser->increase_denoising,
- mi_row, mi_col, ctx,
- motion_magnitude,
- is_skin,
- &zeromv_filter,
- consec_zeromv);
+ decision = perform_motion_compensation(
+ denoiser, mb, bs, denoiser->increase_denoising, mi_row, mi_col, ctx,
+ motion_magnitude, is_skin, &zeromv_filter, consec_zeromv);
if (decision == FILTER_BLOCK) {
- decision = vp9_denoiser_filter(src.buf, src.stride,
- mc_avg_start, mc_avg.y_stride,
- avg_start, avg.y_stride,
- denoiser->increase_denoising,
- bs, motion_magnitude);
+ decision = vp9_denoiser_filter(
+ src.buf, src.stride, mc_avg_start, mc_avg.y_stride, avg_start,
+ avg.y_stride, denoiser->increase_denoising, bs, motion_magnitude);
}
if (decision == FILTER_BLOCK) {
- vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride,
- NULL, 0, NULL, 0,
- num_4x4_blocks_wide_lookup[bs] << 2,
+ vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, NULL, 0,
+ NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2,
num_4x4_blocks_high_lookup[bs] << 2);
} else { // COPY_BLOCK
- vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride,
- NULL, 0, NULL, 0,
- num_4x4_blocks_wide_lookup[bs] << 2,
+ vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, NULL, 0,
+ NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2,
num_4x4_blocks_high_lookup[bs] << 2);
}
*denoiser_decision = decision;
@@ -416,8 +383,8 @@
*denoiser_decision = FILTER_ZEROMV_BLOCK;
}
-static void copy_frame(YV12_BUFFER_CONFIG * const dest,
- const YV12_BUFFER_CONFIG * const src) {
+static void copy_frame(YV12_BUFFER_CONFIG *const dest,
+ const YV12_BUFFER_CONFIG *const src) {
int r;
const uint8_t *srcbuf = src->y_buffer;
uint8_t *destbuf = dest->y_buffer;
@@ -432,8 +399,8 @@
}
}
-static void swap_frame_buffer(YV12_BUFFER_CONFIG * const dest,
- YV12_BUFFER_CONFIG * const src) {
+static void swap_frame_buffer(YV12_BUFFER_CONFIG *const dest,
+ YV12_BUFFER_CONFIG *const src) {
uint8_t *tmp_buf = dest->y_buffer;
assert(dest->y_width == src->y_width);
assert(dest->y_height == src->y_height);
@@ -446,8 +413,7 @@
FRAME_TYPE frame_type,
int refresh_alt_ref_frame,
int refresh_golden_frame,
- int refresh_last_frame,
- int resized) {
+ int refresh_last_frame, int resized) {
// Copy source into denoised reference buffers on KEY_FRAME or
// if the just encoded frame was resized.
if (frame_type == KEY_FRAME || resized != 0 || denoiser->reset) {
@@ -460,8 +426,7 @@
}
// If more than one refresh occurs, must copy frame buffer.
- if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame)
- > 1) {
+ if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame) > 1) {
if (refresh_alt_ref_frame) {
copy_frame(&denoiser->running_avg_y[ALTREF_FRAME],
&denoiser->running_avg_y[INTRA_FRAME]);
@@ -503,8 +468,7 @@
if (mi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) {
ctx->zeromv_sse = sse;
ctx->best_zeromv_reference_frame = mi->ref_frame[0];
- if (mi->ref_frame[0] == LAST_FRAME)
- ctx->zeromv_lastref_sse = sse;
+ if (mi->ref_frame[0] == LAST_FRAME) ctx->zeromv_lastref_sse = sse;
}
if (mi->mv[0].as_int != 0 && sse < ctx->newmv_sse) {
@@ -515,8 +479,8 @@
}
}
-int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height,
- int ssx, int ssy,
+int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, int ssx,
+ int ssy,
#if CONFIG_VP9_HIGHBITDEPTH
int use_highbitdepth,
#endif
@@ -541,8 +505,8 @@
#endif
}
- fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height,
- ssx, ssy,
+ fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, ssx,
+ ssy,
#if CONFIG_VP9_HIGHBITDEPTH
use_highbitdepth,
#endif
@@ -552,8 +516,7 @@
return 1;
}
- fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height,
- ssx, ssy,
+ fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy,
#if CONFIG_VP9_HIGHBITDEPTH
use_highbitdepth,
#endif
@@ -586,8 +549,7 @@
vpx_free_frame_buffer(&denoiser->last_source);
}
-void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser,
- int noise_level) {
+void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level) {
denoiser->denoising_level = noise_level;
if (denoiser->denoising_level > kDenLowLow &&
denoiser->prev_denoising_level == kDenLowLow)
--- a/vp9/encoder/vp9_denoiser.h
+++ b/vp9/encoder/vp9_denoiser.h
@@ -64,22 +64,20 @@
FRAME_TYPE frame_type,
int refresh_alt_ref_frame,
int refresh_golden_frame,
- int refresh_last_frame,
- int resized);
+ int refresh_last_frame, int resized);
-void vp9_denoiser_denoise(struct VP9_COMP *cpi, MACROBLOCK *mb,
- int mi_row, int mi_col, BLOCK_SIZE bs,
- PICK_MODE_CONTEXT *ctx ,
+void vp9_denoiser_denoise(struct VP9_COMP *cpi, MACROBLOCK *mb, int mi_row,
+ int mi_col, BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx,
VP9_DENOISER_DECISION *denoiser_decision);
void vp9_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx);
-void vp9_denoiser_update_frame_stats(MODE_INFO *mi,
- unsigned int sse, PREDICTION_MODE mode,
+void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse,
+ PREDICTION_MODE mode,
PICK_MODE_CONTEXT *ctx);
-int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height,
- int ssx, int ssy,
+int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, int ssx,
+ int ssy,
#if CONFIG_VP9_HIGHBITDEPTH
int use_highbitdepth,
#endif
@@ -97,8 +95,7 @@
void vp9_denoiser_free(VP9_DENOISER *denoiser);
-void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser,
- int noise_level);
+void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level);
#ifdef __cplusplus
} // extern "C"
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -48,10 +48,9 @@
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
-static void encode_superblock(VP9_COMP *cpi, ThreadData * td,
- TOKENEXTRA **t, int output_enabled,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx);
+static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
// This is used as a reference when computing the source variance for the
// purpose of activity masking.
@@ -58,48 +57,44 @@
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16
};
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -107,31 +102,32 @@
const struct buf_2d *ref,
BLOCK_SIZE bs) {
unsigned int sse;
- const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- VP9_VAR_OFFS, 0, &sse);
+ const unsigned int var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#if CONFIG_VP9_HIGHBITDEPTH
-unsigned int vp9_high_get_sby_perpixel_variance(
- VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
+unsigned int vp9_high_get_sby_perpixel_variance(VP9_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs, int bd) {
unsigned int var, sse;
switch (bd) {
case 10:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse);
break;
case 12:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse);
break;
case 8:
default:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse);
break;
}
return ROUND_POWER_OF_TWO((int64_t)var, num_pels_log2_lookup[bs]);
@@ -154,11 +150,9 @@
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
- int mi_row,
- int mi_col) {
- unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
- mi_row, mi_col,
- BLOCK_64X64);
+ int mi_row, int mi_col) {
+ unsigned int var = get_sby_perpixel_diff_variance(
+ cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
if (var < 8)
return BLOCK_64X64;
else if (var < 128)
@@ -173,8 +167,7 @@
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- int mi_row,
+ MACROBLOCKD *const xd, int mi_row,
int mi_col) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
@@ -210,8 +203,8 @@
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
- set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
- cm->mi_rows, cm->mi_cols);
+ set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+ cm->mi_cols);
// Set up source buffers.
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
@@ -224,8 +217,8 @@
if (seg->enabled) {
if (cpi->oxcf.aq_mode != VARIANCE_AQ &&
cpi->oxcf.aq_mode != EQUATOR360_AQ) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
vp9_init_plane_quantizers(cpi, x);
@@ -243,23 +236,20 @@
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- const int block_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
- cm->mi_cols - mi_col);
- const int block_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
- cm->mi_rows - mi_row);
+ const int block_width =
+ VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+ const int block_height =
+ VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
const int mi_stride = xd->mi_stride;
MODE_INFO *const src_mi = xd->mi[0];
int i, j;
for (j = 0; j < block_height; ++j)
- for (i = 0; i < block_width; ++i)
- xd->mi[j * mi_stride + i] = src_mi;
+ for (i = 0; i < block_width; ++i) xd->mi[j * mi_stride + i] = src_mi;
}
-static void set_block_size(VP9_COMP * const cpi,
- MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col,
+static void set_block_size(VP9_COMP *const cpi, MACROBLOCK *const x,
+ MACROBLOCKD *const xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
@@ -321,7 +311,7 @@
node->part_variances = NULL;
switch (bsize) {
case BLOCK_64X64: {
- v64x64 *vt = (v64x64 *) data;
+ v64x64 *vt = (v64x64 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
@@ -328,7 +318,7 @@
break;
}
case BLOCK_32X32: {
- v32x32 *vt = (v32x32 *) data;
+ v32x32 *vt = (v32x32 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
@@ -335,7 +325,7 @@
break;
}
case BLOCK_16X16: {
- v16x16 *vt = (v16x16 *) data;
+ v16x16 *vt = (v16x16 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
@@ -342,7 +332,7 @@
break;
}
case BLOCK_8X8: {
- v8x8 *vt = (v8x8 *) data;
+ v8x8 *vt = (v8x8 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
@@ -349,10 +339,9 @@
break;
}
case BLOCK_4X4: {
- v4x4 *vt = (v4x4 *) data;
+ v4x4 *vt = (v4x4 *)data;
node->part_variances = &vt->part_variances;
- for (i = 0; i < 4; i++)
- node->split[i] = &vt->split[i];
+ for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
break;
}
default: {
@@ -370,8 +359,10 @@
}
static void get_variance(var *v) {
- v->variance = (int)(256 * (v->sum_square_error -
- ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
+ v->variance =
+ (int)(256 * (v->sum_square_error -
+ ((v->sum_error * v->sum_error) >> v->log2_count)) >>
+ v->log2_count);
}
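
The reformatted expression above is a fixed-point population variance scaled by 256: with N = 1 << log2_count samples it computes 256 * (SSE - sum^2 / N) / N, using right shifts for both divisions. A standalone check with toy values (the eight samples are invented):

#include <stdio.h>

int main(void) {
  const int log2_count = 3; /* N = 8 samples */
  const int samples[8] = { 3, 5, 4, 6, 2, 7, 5, 4 }; /* hypothetical */
  int i, sum_error = 0, sum_square_error = 0;
  int variance;
  for (i = 0; i < 8; ++i) {
    sum_error += samples[i];
    sum_square_error += samples[i] * samples[i];
  }
  /* Same shape as get_variance(): 256 * (SSE - sum^2/N) / N via shifts. */
  variance = (int)(256 * (sum_square_error -
                          ((sum_error * sum_error) >> log2_count)) >>
                   log2_count);
  printf("%d\n", variance); /* prints 576, i.e. 2.25 * 256 */
  return 0;
}
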
static void sum_2_variances(const var *a, const var *b, var *r) {
@@ -392,17 +383,12 @@
&node.part_variances->none);
}
-static int set_vt_partitioning(VP9_COMP *cpi,
- MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- void *data,
- BLOCK_SIZE bsize,
- int mi_row,
- int mi_col,
- int64_t threshold,
- BLOCK_SIZE bsize_min,
+static int set_vt_partitioning(VP9_COMP *cpi, MACROBLOCK *const x,
+ MACROBLOCKD *const xd, void *data,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int64_t threshold, BLOCK_SIZE bsize_min,
int force_split) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
@@ -410,8 +396,7 @@
assert(block_height == block_width);
tree_to_node(data, bsize, &vt);
- if (force_split == 1)
- return 0;
+ if (force_split == 1) return 0;
// For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
// variance is below threshold, otherwise split will be selected.
@@ -418,8 +403,7 @@
// No check for vert/horiz split as too few samples for variance.
if (bsize == bsize_min) {
// Variance already computed to set the force_split.
- if (cm->frame_type == KEY_FRAME)
- get_variance(&vt.part_variances->none);
+ if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none);
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
@@ -429,12 +413,11 @@
return 0;
} else if (bsize > bsize_min) {
// Variance already computed to set the force_split.
- if (cm->frame_type == KEY_FRAME)
- get_variance(&vt.part_variances->none);
+ if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none);
// For key frame: take split for bsize above 32X32 or very high variance.
if (cm->frame_type == KEY_FRAME &&
(bsize > BLOCK_32X32 ||
- vt.part_variances->none.variance > (threshold << 4))) {
+ vt.part_variances->none.variance > (threshold << 4))) {
return 0;
}
// If variance is low, take the bsize (no split).
@@ -485,8 +468,8 @@
VP9_COMMON *const cm = &cpi->common;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
const int threshold_multiplier = is_key_frame ? 20 : 1;
- int64_t threshold_base = (int64_t)(threshold_multiplier *
- cpi->y_dequant[q][1]);
+ int64_t threshold_base =
+ (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);
if (is_key_frame) {
thresholds[0] = threshold_base;
thresholds[1] = threshold_base >> 2;
@@ -495,8 +478,8 @@
} else {
// Increase base variance threshold based on estimated noise level.
if (cpi->noise_estimate.enabled) {
- NOISE_LEVEL noise_level = vp9_noise_estimate_extract_level(
- &cpi->noise_estimate);
+ NOISE_LEVEL noise_level =
+ vp9_noise_estimate_extract_level(&cpi->noise_estimate);
if (noise_level == kHigh)
threshold_base = 3 * threshold_base;
else if (noise_level == kMedium)
@@ -537,8 +520,9 @@
if (cm->width <= 352 && cm->height <= 288)
cpi->vbp_threshold_sad = 10;
else
- cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
- (cpi->y_dequant[q][1] << 1) : 1000;
+ cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
+ ? (cpi->y_dequant[q][1] << 1)
+ : 1000;
cpi->vbp_bsize_min = BLOCK_16X16;
}
cpi->vbp_threshold_minmax = 15 + (q >> 3);
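
For context on the hunks above: the variance-based-partition thresholds start from a base proportional to the luma dequant step (scaled 20x on key frames), get boosted on noisy inter frames (3x for kHigh), and the per-level thresholds are then derived from that base by shifts. A toy version of the shape of this computation (the per-level shift amounts below are assumptions, not the encoder's tuned constants):

/* Hypothetical sketch of deriving per-level split thresholds from a base
 * value; the level shifts are illustrative, not libvpx's tuned numbers. */
static void toy_vbp_thresholds(long dequant, int is_key_frame, int noisy,
                               long thresholds[4]) {
  long base = (is_key_frame ? 20 : 1) * dequant;
  if (!is_key_frame && noisy) base *= 3; /* mirrors the kHigh branch */
  thresholds[0] = base;      /* 64x64 */
  thresholds[1] = base >> 2; /* 32x32 (shift assumed) */
  thresholds[2] = base >> 4; /* 16x16 (shift assumed) */
  thresholds[3] = base >> 6; /* 8x8   (shift assumed) */
}
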
@@ -551,8 +535,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high) {
+ int pixels_wide, int pixels_high) {
int k;
int minmax_max = 0;
int minmax_min = 255;
@@ -566,22 +549,17 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
- &min, &max);
+ d + y8_idx * dp + x8_idx, dp, &min, &max);
} else {
- vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
- &min, &max);
+ vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
+ dp, &min, &max);
}
#else
- vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
+ vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
&min, &max);
#endif
- if ((max - min) > minmax_max)
- minmax_max = (max - min);
- if ((max - min) < minmax_min)
- minmax_min = (max - min);
+ if ((max - min) > minmax_max) minmax_max = (max - min);
+ if ((max - min) < minmax_min) minmax_min = (max - min);
}
}
return (minmax_max - minmax_min);
@@ -592,8 +570,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high,
+ int pixels_wide, int pixels_high,
int is_key_frame) {
int k;
for (k = 0; k < 4; k++) {
@@ -611,13 +588,11 @@
d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
} else {
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
}
#else
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
sum = s_avg - d_avg;
sse = sum * sum;
@@ -631,8 +606,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high,
+ int pixels_wide, int pixels_high,
int is_key_frame) {
int k;
for (k = 0; k < 4; k++) {
@@ -650,13 +624,11 @@
d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
} else {
s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
}
#else
s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
sum = s_avg - d_avg;
sse = sum * sum;
@@ -670,12 +642,12 @@
// 32x32, and set x->sb_is_skin for use in mode selection.
static int skin_sb_split(VP9_COMP *cpi, MACROBLOCK *x, const int low_res,
int mi_row, int mi_col, int *force_split) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
// Avoid checking superblocks on/near boundary and avoid low resolutions.
// Note superblock may still pick 64X64 if y_sad is very small
// (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is.
if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 &&
- mi_row + 8 < cm->mi_rows)) {
+ mi_row + 8 < cm->mi_rows)) {
int num_16x16_skin = 0;
int num_16x16_nonskin = 0;
uint8_t *ysignal = x->plane[0].src.buf;
@@ -690,24 +662,19 @@
const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
// Loop through the 16x16 sub-blocks.
int i, j;
- for (i = 0; i < ymis; i+=2) {
- for (j = 0; j < xmis; j+=2) {
+ for (i = 0; i < ymis; i += 2) {
+ for (j = 0; j < xmis; j += 2) {
int bl_index = block_index + i * cm->mi_cols + j;
int bl_index1 = bl_index + 1;
int bl_index2 = bl_index + cm->mi_cols;
int bl_index3 = bl_index2 + 1;
- int consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index],
- VPXMIN(cpi->consec_zero_mv[bl_index1],
- VPXMIN(cpi->consec_zero_mv[bl_index2],
- cpi->consec_zero_mv[bl_index3])));
- int is_skin = vp9_compute_skin_block(ysignal,
- usignal,
- vsignal,
- sp,
- spuv,
- BLOCK_16X16,
- consec_zeromv,
- 0);
+ int consec_zeromv =
+ VPXMIN(cpi->consec_zero_mv[bl_index],
+ VPXMIN(cpi->consec_zero_mv[bl_index1],
+ VPXMIN(cpi->consec_zero_mv[bl_index2],
+ cpi->consec_zero_mv[bl_index3])));
+ int is_skin = vp9_compute_skin_block(
+ ysignal, usignal, vsignal, sp, spuv, BLOCK_16X16, consec_zeromv, 0);
num_16x16_skin += is_skin;
num_16x16_nonskin += (1 - is_skin);
if (num_16x16_nonskin > 3) {
@@ -732,13 +699,13 @@
}
#endif
-static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x,
- MACROBLOCKD *xd, v64x64 *vt,
- int force_split[], int64_t thresholds[],
+static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
+ v64x64 *vt, int force_split[],
+ int64_t thresholds[],
MV_REFERENCE_FRAME ref_frame_partition,
int mi_col, int mi_row) {
int i, j;
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
const int mv_thr = cm->width > 640 ? 8 : 4;
// Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and
// int_pro mv is small. If the temporal variance is small set the flag
@@ -767,11 +734,10 @@
for (i = 0; i < 4; i++) {
if (!force_split[i + 1]) {
// 32x32
- if (vt->split[i].part_variances.none.variance <
- (thresholds[1] >> 1))
+ if (vt->split[i].part_variances.none.variance < (thresholds[1] >> 1))
x->variance_low[i + 5] = 1;
} else if (cpi->sf.short_circuit_low_temp_var == 2) {
- int idx[4] = {0, 4, xd->mi_stride << 2, (xd->mi_stride << 2) + 4};
+ int idx[4] = { 0, 4, xd->mi_stride << 2, (xd->mi_stride << 2) + 4 };
const int idx_str = cm->mi_stride * mi_row + mi_col + idx[i];
MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;
// For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
@@ -795,18 +761,17 @@
unsigned int y_sad, int is_key_frame) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
- if (is_key_frame)
- return;
+ if (is_key_frame) return;
for (i = 1; i <= 2; ++i) {
unsigned int uv_sad = UINT_MAX;
- struct macroblock_plane *p = &x->plane[i];
+ struct macroblock_plane *p = &x->plane[i];
struct macroblockd_plane *pd = &xd->plane[i];
const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
if (bs != BLOCK_INVALID)
- uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride);
+ uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
+ pd->dst.stride);
// TODO(marpan): Investigate if we should lower this threshold if
// superblock is detected as skin.
@@ -816,11 +781,9 @@
// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for down-sampled inputs.
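
As that comment says, the function below walks a 64x64 variance tree and keeps a block whole only while its source-vs-last-reconstructed variance stays under the threshold for its level; a forced split from a lower level propagates upward. A stripped-down sketch of the per-node rule applied by set_vt_partitioning() (names and types invented for illustration, not the libvpx API):

typedef enum { KEEP_WHOLE, SPLIT } vbp_choice;

/* Hypothetical per-node decision mirroring set_vt_partitioning(). */
static vbp_choice vbp_node_choice(long variance, long threshold,
                                  int force_split) {
  if (force_split) return SPLIT;           /* a child already exceeded */
  return variance < threshold ? KEEP_WHOLE /* take this block size */
                              : SPLIT;     /* recurse into children */
}
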
-static int choose_partitioning(VP9_COMP *cpi,
- const TileInfo *const tile,
- MACROBLOCK *x,
- int mi_row, int mi_col) {
- VP9_COMMON * const cm = &cpi->common;
+static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
+ MACROBLOCK *x, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
int i, j, k, m;
v64x64 vt;
@@ -837,14 +800,15 @@
// Ref frame used in partitioning.
MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME;
int pixels_wide = 64, pixels_high = 64;
- int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
- cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
+ int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
+ cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };
// For the variance computation under SVC mode, we treat the frame as key if
// the reference (base layer frame) is a key frame (i.e., is_key_frame == 1).
- const int is_key_frame = (cm->frame_type == KEY_FRAME ||
- (is_one_pass_cbr_svc(cpi) &&
- cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
+ const int is_key_frame =
+ (cm->frame_type == KEY_FRAME ||
+ (is_one_pass_cbr_svc(cpi) &&
+ cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
// Always use 4x4 partition for key frame.
const int use_4x4_partition = cm->frame_type == KEY_FRAME;
const int low_res = (cm->width <= 352 && cm->height <= 288);
@@ -862,10 +826,8 @@
memset(x->variance_low, 0, sizeof(x->variance_low));
- if (xd->mb_to_right_edge < 0)
- pixels_wide += (xd->mb_to_right_edge >> 3);
- if (xd->mb_to_bottom_edge < 0)
- pixels_high += (xd->mb_to_bottom_edge >> 3);
+ if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
+ if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
@@ -883,8 +845,8 @@
const YV12_BUFFER_CONFIG *yv12_g = NULL;
unsigned int y_sad_g, y_sad_thr;
- bsize = BLOCK_32X32
- + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
+ bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
+ (mi_row + 4 < cm->mi_rows);
assert(yv12 != NULL);
@@ -894,14 +856,12 @@
yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
}
- if (yv12_g && yv12_g != yv12 &&
- (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
+ if (yv12_g && yv12_g != yv12 && (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
&cm->frame_refs[GOLDEN_FRAME - 1].sf);
- y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- xd->plane[0].pre[0].buf,
- xd->plane[0].pre[0].stride);
+ y_sad_g = cpi->fn_ptr[bsize].sdf(
+ x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
+ xd->plane[0].pre[0].stride);
} else {
y_sad_g = UINT_MAX;
}
@@ -936,8 +896,8 @@
x->sb_is_skin = 0;
#if !CONFIG_VP9_HIGHBITDEPTH
if (cpi->use_skin_detection)
- x->sb_is_skin = skin_sb_split(cpi, x, low_res, mi_row, mi_col,
- force_split);
+ x->sb_is_skin =
+ skin_sb_split(cpi, x, low_res, mi_row, mi_col, force_split);
#endif
d = xd->plane[0].dst.buf;
@@ -945,8 +905,7 @@
// If the y_sad is very small, take 64x64 as partition and exit.
// Don't check on boosted segment for now, as 64x64 is suppressed there.
- if (segment_id == CR_SEGMENT_ID_BASE &&
- y_sad < cpi->vbp_threshold_sad) {
+ if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
if (mi_col + block_width / 2 < cm->mi_cols &&
@@ -962,16 +921,10 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (xd->bd) {
- case 10:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
- break;
- case 12:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
- break;
+ case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
+ case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
case 8:
- default:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
- break;
+ default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -995,16 +948,13 @@
if (!is_key_frame) {
fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
- xd->cur_buf->flags,
+ xd->cur_buf->flags,
#endif
- pixels_wide,
- pixels_high,
- is_key_frame);
+ pixels_wide, pixels_high, is_key_frame);
fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
get_variance(&vt.split[i].split[j].part_variances.none);
avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
- if (vt.split[i].split[j].part_variances.none.variance >
- thresholds[2]) {
+ if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) {
// 16X16 variance is above threshold for split, so force split to 8x8
// for this 16x16 block (this also forces splits for upper levels).
force_split[split_index] = 1;
@@ -1012,7 +962,7 @@
force_split[0] = 1;
} else if (cpi->oxcf.speed < 8 &&
vt.split[i].split[j].part_variances.none.variance >
- thresholds[1] &&
+ thresholds[1] &&
!cyclic_refresh_segment_id_boosted(segment_id)) {
// We have some nominal amount of 16x16 variance (based on average),
// compute the minmax over the 8x8 sub-blocks, and if above threshold,
@@ -1030,8 +980,8 @@
}
}
if (is_key_frame || (low_res &&
- vt.split[i].split[j].part_variances.none.variance >
- (thresholds[1] << 1))) {
+ vt.split[i].split[j].part_variances.none.variance >
+ (thresholds[1] << 1))) {
force_split[split_index] = 0;
// Go down to 4x4 down-sampling for variance.
variance4x4downsample[i2 + j] = 1;
@@ -1038,15 +988,12 @@
for (k = 0; k < 4; k++) {
int x8_idx = x16_idx + ((k & 1) << 3);
int y8_idx = y16_idx + ((k >> 1) << 3);
- v8x8 *vst2 = is_key_frame ? &vst->split[k] :
- &vt2[i2 + j].split[k];
+ v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
xd->cur_buf->flags,
#endif
- pixels_wide,
- pixels_high,
- is_key_frame);
+ pixels_wide, pixels_high, is_key_frame);
}
}
}
@@ -1057,10 +1004,8 @@
const int i2 = i << 2;
for (j = 0; j < 4; j++) {
if (variance4x4downsample[i2 + j] == 1) {
- v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
- &vt.split[i].split[j];
- for (m = 0; m < 4; m++)
- fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
+ v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
+ for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
fill_variance_tree(vtemp, BLOCK_16X16);
// If variance of this 16x16 block is above the threshold, force block
// to split. This also forces a split on the upper levels.
@@ -1081,8 +1026,8 @@
get_variance(&vt.split[i].part_variances.none);
if (vt.split[i].part_variances.none.variance > thresholds[1] ||
(!is_key_frame &&
- vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
- vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
+ vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
+ vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
force_split[i + 1] = 1;
force_split[0] = 1;
}
@@ -1094,14 +1039,13 @@
get_variance(&vt.part_variances.none);
// If variance of this 64x64 block is above (some threshold of) the average
// variance over the sub-32x32 blocks, then force this block to split.
- if (!is_key_frame &&
- vt.part_variances.none.variance > (5 * avg_32x32) >> 4)
+ if (!is_key_frame && vt.part_variances.none.variance > (5 * avg_32x32) >> 4)
force_split[0] = 1;
}
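
The rewrapped condition keeps its meaning: on inter frames, the 64x64 is forced to split when its own variance exceeds (5 * avg_32x32) >> 4, i.e. 5/16 of the accumulated sub-block figure. A quick numeric check (values invented):

#include <stdio.h>

int main(void) {
  const long variance_64x64 = 900; /* hypothetical */
  const long avg_32x32 = 2400;     /* hypothetical accumulated value */
  printf("force_split[0] = %d\n",
         variance_64x64 > ((5 * avg_32x32) >> 4)); /* 900 > 750 -> 1 */
  return 0;
}
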
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold.
- if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
+ if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
thresholds[0], BLOCK_16X16, force_split[0])) {
for (i = 0; i < 4; ++i) {
@@ -1118,15 +1062,13 @@
// For inter frames: if variance4x4downsample[] == 1 for this 16x16
// block, then the variance is based on 4x4 down-sampling, so use vt2
// in set_vt_partitioning(), otherwise use vt.
- v16x16 *vtemp = (!is_key_frame &&
- variance4x4downsample[i2 + j] == 1) ?
- &vt2[i2 + j] : &vt.split[i].split[j];
- if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
- mi_row + y32_idx + y16_idx,
- mi_col + x32_idx + x16_idx,
- thresholds[2],
- cpi->vbp_bsize_min,
- force_split[5 + i2 + j])) {
+ v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
+ ? &vt2[i2 + j]
+ : &vt.split[i].split[j];
+ if (!set_vt_partitioning(
+ cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
+ mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
+ force_split[5 + i2 + j])) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
@@ -1136,16 +1078,14 @@
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
thresholds[3], BLOCK_8X8, 0)) {
- set_block_size(cpi, x, xd,
- (mi_row + y32_idx + y16_idx + y8_idx),
- (mi_col + x32_idx + x16_idx + x8_idx),
- BLOCK_4X4);
+ set_block_size(
+ cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
}
} else {
- set_block_size(cpi, x, xd,
- (mi_row + y32_idx + y16_idx + y8_idx),
- (mi_col + x32_idx + x16_idx + x8_idx),
- BLOCK_8X8);
+ set_block_size(
+ cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
}
}
}
@@ -1163,8 +1103,7 @@
return 0;
}
-static void update_state(VP9_COMP *cpi, ThreadData *td,
- PICK_MODE_CONTEXT *ctx,
+static void update_state(VP9_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, BLOCK_SIZE bsize,
int output_enabled) {
int i, x_idx, y;
@@ -1182,8 +1121,7 @@
const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
- MV_REF *const frame_mvs =
- cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
const int mis = cm->mi_stride;
@@ -1200,17 +1138,15 @@
if (seg->enabled) {
// For in-frame complexity AQ, copy the segment id from the segment map.
if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
- mi_addr->segment_id =
- get_segment_id(cm, map, bsize, mi_row, mi_col);
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+ mi_addr->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row,
- mi_col, bsize, ctx->rate, ctx->dist,
- x->skip, p);
+ vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, mi_col, bsize,
+ ctx->rate, ctx->dist, x->skip, p);
}
}
@@ -1233,13 +1169,12 @@
// when the mode was picked for it
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
- if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
- && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+ (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
xd->mi[x_idx + y * mis] = mi_addr;
}
- if (cpi->oxcf.aq_mode != NO_AQ)
- vp9_init_plane_quantizers(cpi, x);
+ if (cpi->oxcf.aq_mode != NO_AQ) vp9_init_plane_quantizers(cpi, x);
if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) {
xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -1250,22 +1185,16 @@
memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk,
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
- if (!output_enabled)
- return;
+ if (!output_enabled) return;
#if CONFIG_INTERNAL_STATS
if (frame_is_intra_only(cm)) {
static const int kf_mode_index[] = {
- THR_DC /*DC_PRED*/,
- THR_V_PRED /*V_PRED*/,
- THR_H_PRED /*H_PRED*/,
- THR_D45_PRED /*D45_PRED*/,
- THR_D135_PRED /*D135_PRED*/,
- THR_D117_PRED /*D117_PRED*/,
- THR_D153_PRED /*D153_PRED*/,
- THR_D207_PRED /*D207_PRED*/,
- THR_D63_PRED /*D63_PRED*/,
- THR_TM /*TM_PRED*/,
+ THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+ THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/,
};
++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]];
} else {
@@ -1305,8 +1234,8 @@
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
- uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
- const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
+ uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
+ const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
int i;
// Set current frame pointer.
@@ -1325,8 +1254,7 @@
INTERP_FILTER filter_ref;
filter_ref = get_pred_context_switchable_interp(xd);
- if (filter_ref == SWITCHABLE_FILTERS)
- filter_ref = EIGHTTAP;
+ if (filter_ref == SWITCHABLE_FILTERS) filter_ref = EIGHTTAP;
mi->sb_type = bsize;
mi->mode = ZEROMV;
@@ -1345,9 +1273,8 @@
vp9_rd_cost_init(rd_cost);
}
-static int set_segment_rdmult(VP9_COMP *const cpi,
- MACROBLOCK *const x,
- int8_t segment_id) {
+static int set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
+ int8_t segment_id) {
int segment_qindex;
VP9_COMMON *const cm = &cpi->common;
vp9_init_plane_quantizers(cpi, x);
@@ -1356,12 +1283,10 @@
return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
-static void rd_pick_sb_modes(VP9_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *const x,
- int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd) {
+static void rd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *const x, int mi_row, int mi_col,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1396,16 +1321,15 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->source_variance =
- vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
- bsize, xd->bd);
+ x->source_variance = vp9_high_get_sby_perpixel_variance(
+ cpi, &x->plane[0].src, bsize, xd->bd);
} else {
x->source_variance =
- vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
}
#else
x->source_variance =
- vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
#endif // CONFIG_VP9_HIGHBITDEPTH
// Save rdmult before it might be changed, so it can be restored later.
@@ -1412,15 +1336,14 @@
orig_rdmult = x->rdmult;
if (aq_mode == VARIANCE_AQ) {
- const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
- : vp9_block_energy(cpi, x, bsize);
- if (cm->frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame ||
+ const int energy =
+ bsize <= BLOCK_16X16 ? x->mb_energy : vp9_block_energy(cpi, x, bsize);
+ if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
mi->segment_id = vp9_vaq_segment_id(energy);
} else {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
@@ -1428,8 +1351,8 @@
if (cm->frame_type == KEY_FRAME) {
mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows);
} else {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
@@ -1436,8 +1359,8 @@
} else if (aq_mode == COMPLEXITY_AQ) {
x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
} else if (aq_mode == CYCLIC_REFRESH_AQ) {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
// If segment is boosted, use rdmult for that segment.
if (cyclic_refresh_segment_id_boosted(
get_segment_id(cm, map, bsize, mi_row, mi_col)))
@@ -1454,20 +1377,18 @@
vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
ctx, best_rd);
else
- vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx, best_rd);
+ vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ bsize, ctx, best_rd);
} else {
- vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx, best_rd);
+ vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ bsize, ctx, best_rd);
}
}
-
// Examine the resulting rate and for AQ mode 2 make a segment choice.
- if ((rd_cost->rate != INT_MAX) &&
- (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
- (cm->frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame ||
+ if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
+ (bsize >= BLOCK_16X16) &&
+ (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
}
@@ -1476,8 +1397,7 @@
// TODO(jingning) The rate-distortion optimization flow needs to be
// refactored to provide proper exit/return handling.
- if (rd_cost->rate == INT_MAX)
- rd_cost->rdcost = INT64_MAX;
+ if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX;
ctx->rate = rd_cost->rate;
ctx->dist = rd_cost->dist;
@@ -1493,8 +1413,8 @@
if (!frame_is_intra_only(cm)) {
FRAME_COUNTS *const counts = td->counts;
const int inter_block = is_inter_block(mi);
- const int seg_ref_active = segfeature_active(&cm->seg, mi->segment_id,
- SEG_LVL_REF_FRAME);
+ const int seg_ref_active =
+ segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_REF_FRAME);
if (!seg_ref_active) {
counts->intra_inter[get_intra_inter_context(xd)][inter_block]++;
// If the segment reference feature is enabled we have only a single
@@ -1503,18 +1423,18 @@
if (inter_block) {
const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0];
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
- [has_second_ref(mi)]++;
+ counts->comp_inter[vp9_get_reference_mode_context(
+ cm, xd)][has_second_ref(mi)]++;
if (has_second_ref(mi)) {
- counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
- [ref0 == GOLDEN_FRAME]++;
+ counts->comp_ref[vp9_get_pred_context_comp_ref_p(
+ cm, xd)][ref0 == GOLDEN_FRAME]++;
} else {
- counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
- [ref0 != LAST_FRAME]++;
+ counts->single_ref[vp9_get_pred_context_single_ref_p1(
+ xd)][0][ref0 != LAST_FRAME]++;
if (ref0 != LAST_FRAME)
- counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
- [ref0 != GOLDEN_FRAME]++;
+ counts->single_ref[vp9_get_pred_context_single_ref_p2(
+ xd)][1][ref0 != GOLDEN_FRAME]++;
}
}
}
@@ -1552,17 +1472,15 @@
int mi_width = num_8x8_blocks_wide_lookup[bsize];
int mi_height = num_8x8_blocks_high_lookup[bsize];
for (p = 0; p < MAX_MB_PLANE; p++) {
- memcpy(
- xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
- a + num_4x4_blocks_wide * p,
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
- xd->plane[p].subsampling_x);
- memcpy(
- xd->left_context[p]
- + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
- l + num_4x4_blocks_high * p,
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
- xd->plane[p].subsampling_y);
+ memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
+ a + num_4x4_blocks_wide * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ memcpy(xd->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ l + num_4x4_blocks_high * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
}
memcpy(xd->above_seg_context + mi_col, sa,
sizeof(*xd->above_seg_context) * mi_width);
@@ -1584,17 +1502,15 @@
// buffer the above/left context information of the block in search.
for (p = 0; p < MAX_MB_PLANE; ++p) {
- memcpy(
- a + num_4x4_blocks_wide * p,
- xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
- xd->plane[p].subsampling_x);
- memcpy(
- l + num_4x4_blocks_high * p,
- xd->left_context[p]
- + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
- xd->plane[p].subsampling_y);
+ memcpy(a + num_4x4_blocks_wide * p,
+ xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ memcpy(l + num_4x4_blocks_high * p,
+ xd->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
}
memcpy(sa, xd->above_seg_context + mi_col,
sizeof(*xd->above_seg_context) * mi_width);
@@ -1602,8 +1518,7 @@
sizeof(xd->left_seg_context[0]) * mi_height);
}
-static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
- ThreadData *td,
+static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, ThreadData *td,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx) {
@@ -1620,11 +1535,9 @@
}
}
-static void encode_sb(VP9_COMP *cpi, ThreadData *td,
- const TileInfo *const tile,
+static void encode_sb(VP9_COMP *cpi, ThreadData *td, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
- int output_enabled, BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
+ int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1634,8 +1547,7 @@
PARTITION_TYPE partition;
BLOCK_SIZE subsize = bsize;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
if (bsize >= BLOCK_8X8) {
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
@@ -1685,9 +1597,7 @@
subsize, pc_tree->split[3]);
}
break;
- default:
- assert(0 && "Invalid partition type.");
- break;
+ default: assert(0 && "Invalid partition type."); break;
}
if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
@@ -1697,9 +1607,8 @@
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not then return the largest allowed partition size
-static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
- int rows_left, int cols_left,
- int *bh, int *bw) {
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
+ int cols_left, int *bh, int *bw) {
if (rows_left <= 0 || cols_left <= 0) {
return VPXMIN(bsize, BLOCK_8X8);
} else {
@@ -1714,9 +1623,10 @@
return bsize;
}
-static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
- int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
- BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
+static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
+ int bw_in, int row8x8_remaining,
+ int col8x8_remaining, BLOCK_SIZE bsize,
+ MODE_INFO **mi_8x8) {
int bh = bh_in;
int r, c;
for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
@@ -1724,8 +1634,8 @@
for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
const int index = r * mis + c;
mi_8x8[index] = mi + index;
- mi_8x8[index]->sb_type = find_partition_size(bsize,
- row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
+ mi_8x8[index]->sb_type = find_partition_size(
+ bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
}
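
The two hunks above rewrap the edge-of-frame fallback: find_partition_size() shrinks the requested square partition until it fits the remaining 8x8-unit rows and columns, and set_partial_b64x64_partition() tiles the partial SB64 with the sizes it returns. A hedged sketch of the shrink-to-fit step (plain unit counts here; the real function works on BLOCK_SIZE enums and lookup tables):

/* Hypothetical shrink-to-fit in 8x8 units: 8 -> 64x64, 4 -> 32x32, etc. */
static int fit_square_partition(int want_units, int rows_left,
                                int cols_left) {
  int size = want_units;
  while (size > 1 && (size > rows_left || size > cols_left)) size >>= 1;
  return size;
}
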
@@ -1762,7 +1672,7 @@
} else {
// Else this is a partial SB64.
set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
- col8x8_remaining, bsize, mi_8x8);
+ col8x8_remaining, bsize, mi_8x8);
}
}
@@ -1770,21 +1680,33 @@
int row;
int col;
} coord_lookup[16] = {
- // 32x32 index = 0
- {0, 0}, {0, 2}, {2, 0}, {2, 2},
- // 32x32 index = 1
- {0, 4}, {0, 6}, {2, 4}, {2, 6},
- // 32x32 index = 2
- {4, 0}, {4, 2}, {6, 0}, {6, 2},
- // 32x32 index = 3
- {4, 4}, {4, 6}, {6, 4}, {6, 6},
+ // 32x32 index = 0
+ { 0, 0 },
+ { 0, 2 },
+ { 2, 0 },
+ { 2, 2 },
+ // 32x32 index = 1
+ { 0, 4 },
+ { 0, 6 },
+ { 2, 4 },
+ { 2, 6 },
+ // 32x32 index = 2
+ { 4, 0 },
+ { 4, 2 },
+ { 6, 0 },
+ { 6, 2 },
+ // 32x32 index = 3
+ { 4, 4 },
+ { 4, 6 },
+ { 6, 4 },
+ { 6, 6 },
};
static void set_source_var_based_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MACROBLOCK *const x,
- MODE_INFO **mi_8x8,
- int mi_row, int mi_col) {
+ MODE_INFO **mi_8x8, int mi_row,
+ int mi_col) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
@@ -1814,8 +1736,7 @@
for (j = 0; j < 4; j++) {
int b_mi_row = coord_lookup[i * 4 + j].row;
int b_mi_col = coord_lookup[i * 4 + j].col;
- int boffset = b_mi_row / 2 * cm->mb_cols +
- b_mi_col / 2;
+ int boffset = b_mi_row / 2 * cm->mb_cols + b_mi_col / 2;
d16[j] = cpi->source_diff_var + offset + boffset;
@@ -1828,7 +1749,7 @@
}
is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
- (d16[2]->var < thr) && (d16[3]->var < thr);
+ (d16[2]->var < thr) && (d16[3]->var < thr);
// Use 32x32 partition
if (is_larger_better) {
@@ -1841,7 +1762,7 @@
d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
- index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
+ index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->sb_type = BLOCK_32X32;
}
@@ -1850,7 +1771,7 @@
if (use32x32 == 4) {
thr <<= 1;
is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
- (d32[2].var < thr) && (d32[3].var < thr);
+ (d32[2].var < thr) && (d32[3].var < thr);
// Use 64x64 partition
if (is_larger_better) {
@@ -1858,17 +1779,17 @@
mi_8x8[0]->sb_type = BLOCK_64X64;
}
}
- } else { // partial in-image SB64
+ } else { // partial in-image SB64
int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
- set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
- row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
+ set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
+ col8x8_remaining, BLOCK_16X16, mi_8x8);
}
}
static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
- PICK_MODE_CONTEXT *ctx,
- int mi_row, int mi_col, int bsize) {
+ PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
+ int bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1887,11 +1808,11 @@
// For in-frame complexity AQ or variance AQ, copy segment_id from
// segmentation_map.
if (cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
} else {
- // Setting segmentation map for cyclic_refresh.
+ // Setting segmentation map for cyclic_refresh.
vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize,
ctx->rate, ctx->dist, x->skip, p);
}
@@ -1912,8 +1833,8 @@
}
if (cm->use_prev_frame_mvs ||
- (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1
- && cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
+ (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 &&
+ cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
MV_REF *const frame_mvs =
cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
@@ -1935,9 +1856,8 @@
}
static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
- const TileInfo *const tile,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- int output_enabled, BLOCK_SIZE bsize,
+ const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+ int mi_col, int output_enabled, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx) {
MACROBLOCK *const x = &td->mb;
set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
@@ -1951,10 +1871,9 @@
}
static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
- const TileInfo *const tile,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- int output_enabled, BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
+ const TileInfo *const tile, TOKENEXTRA **tp,
+ int mi_row, int mi_col, int output_enabled,
+ BLOCK_SIZE bsize, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1964,12 +1883,11 @@
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
if (bsize >= BLOCK_8X8) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
- MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
subsize = mi_8x8[0]->sb_type;
} else {
@@ -2013,9 +1931,7 @@
encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
output_enabled, subsize, pc_tree->split[3]);
break;
- default:
- assert(0 && "Invalid partition type.");
- break;
+ default: assert(0 && "Invalid partition type."); break;
}
if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
@@ -2022,13 +1938,10 @@
update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
-static void rd_use_partition(VP9_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- MODE_INFO **mi_8x8, TOKENEXTRA **tp,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- int *rate, int64_t *dist,
+static void rd_use_partition(VP9_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, MODE_INFO **mi_8x8,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
@@ -2050,8 +1963,7 @@
int do_partition_search = 1;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
assert(num_4x4_blocks_wide_lookup[bsize] ==
num_4x4_blocks_high_lookup[bsize]);
@@ -2093,15 +2005,15 @@
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
pc_tree->partitioning = PARTITION_NONE;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
- ctx, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
+ INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rdc.rate < INT_MAX) {
none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
- none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
- none_rdc.dist);
+ none_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
}
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
@@ -2112,23 +2024,21 @@
switch (partition) {
case PARTITION_NONE:
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
- bsize, ctx, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
+ ctx, INT64_MAX);
break;
case PARTITION_HORZ:
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
- subsize, &pc_tree->horizontal[0],
- INT64_MAX);
- if (last_part_rdc.rate != INT_MAX &&
- bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
+ subsize, &pc_tree->horizontal[0], INT64_MAX);
+ if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+ mi_row + (mi_step >> 1) < cm->mi_rows) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
vp9_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
- subsize, &pc_tree->horizontal[1], INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
+ &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
break;
@@ -2141,17 +2051,16 @@
case PARTITION_VERT:
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
subsize, &pc_tree->vertical[0], INT64_MAX);
- if (last_part_rdc.rate != INT_MAX &&
- bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
+ if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+ mi_col + (mi_step >> 1) < cm->mi_cols) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
vp9_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
- subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
- INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
+ &tmp_rdc, subsize,
+ &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
break;
@@ -2179,11 +2088,10 @@
continue;
vp9_rd_cost_init(&tmp_rdc);
- rd_use_partition(cpi, td, tile_data,
- mi_8x8 + jj * bss * mis + ii * bss, tp,
- mi_row + y_idx, mi_col + x_idx, subsize,
- &tmp_rdc.rate, &tmp_rdc.dist,
- i != 3, pc_tree->split[i]);
+ rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
+ tp, mi_row + y_idx, mi_col + x_idx, subsize,
+ &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
+ pc_tree->split[i]);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
break;
@@ -2192,26 +2100,23 @@
last_part_rdc.dist += tmp_rdc.dist;
}
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rdc.rate < INT_MAX) {
last_part_rdc.rate += cpi->partition_cost[pl][partition];
- last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- last_part_rdc.rate, last_part_rdc.dist);
+ last_part_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
}
- if (do_partition_search
- && cpi->sf.adjust_partitioning_from_last_frame
- && cpi->sf.partition_search_type == SEARCH_PARTITION
- && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
- && (mi_row + mi_step < cm->mi_rows ||
- mi_row + (mi_step >> 1) == cm->mi_rows)
- && (mi_col + mi_step < cm->mi_cols ||
- mi_col + (mi_step >> 1) == cm->mi_cols)) {
+ if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
+ cpi->sf.partition_search_type == SEARCH_PARTITION &&
+ partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
+ (mi_row + mi_step < cm->mi_rows ||
+ mi_row + (mi_step >> 1) == cm->mi_rows) &&
+ (mi_col + mi_step < cm->mi_cols ||
+ mi_col + (mi_step >> 1) == cm->mi_cols)) {
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rdc.rate = 0;
chosen_rdc.dist = 0;
@@ -2231,9 +2136,9 @@
save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->split[i]->partitioning = PARTITION_NONE;
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
- split_subsize, &pc_tree->split[i]->none, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
+ &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
+ INT64_MAX);
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
@@ -2246,7 +2151,7 @@
chosen_rdc.dist += tmp_rdc.dist;
if (i != 3)
- encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
+ encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
split_subsize, pc_tree->split[i]);
pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
@@ -2256,8 +2161,8 @@
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rdc.rate < INT_MAX) {
chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
- chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- chosen_rdc.rate, chosen_rdc.dist);
+ chosen_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
}
}
@@ -2264,14 +2169,12 @@
// If last_part is better set the partitioning to that.
if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
mi_8x8[0]->sb_type = bsize;
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = partition;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
chosen_rdc = last_part_rdc;
}
// If none was better set the partitioning to that.
if (none_rdc.rdcost < chosen_rdc.rdcost) {
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = PARTITION_NONE;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
chosen_rdc = none_rdc;
}
@@ -2293,22 +2196,17 @@
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
- BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
- BLOCK_16X16
+ BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
+ BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16,
+ BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
};
static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
- BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
- BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
- BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
- BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
- BLOCK_64X64
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+ BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
+ BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
};
-
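The two tables above appear to widen the smallest and largest sb_type observed in a 64x64 neighborhood into a search floor and ceiling. A hedged illustration of the indexing, using stand-in enum values that mirror the 13-entry BLOCK_SIZES order the tables assume:

#include <stdio.h>

/* Stand-ins mirroring the BLOCK_SIZES order (4x4 ... 64x64); the real
 * encoder uses the libvpx BLOCK_SIZE enum. */
enum { B4X4, B4X8, B8X4, B8X8, B8X16, B16X8, B16X16, B16X32, B32X16,
       B32X32, B32X64, B64X32, B64X64, N_BSIZES };

static const int kMinPart[N_BSIZES] = { B4X4, B4X4, B4X4, B4X4, B4X4,
                                        B4X4, B8X8, B8X8, B8X8, B16X16,
                                        B16X16, B16X16, B16X16 };
static const int kMaxPart[N_BSIZES] = { B8X8, B16X16, B16X16, B16X16,
                                        B32X32, B32X32, B32X32, B64X64,
                                        B64X64, B64X64, B64X64, B64X64,
                                        B64X64 };

int main(void) {
  /* A neighborhood whose smallest observed block was 32x32 searches no
   * partition below 16x16 and, per the second table, none above 64x64. */
  printf("floor=%d ceiling=%d\n", kMinPart[B32X32], kMaxPart[B32X32]);
  return 0;
}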
// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
@@ -2321,7 +2219,7 @@
BLOCK_SIZE *max_block_size,
int bs_hist[BLOCK_SIZES]) {
int sb_width_in_blocks = MI_BLOCK_SIZE;
- int sb_height_in_blocks = MI_BLOCK_SIZE;
+ int sb_height_in_blocks = MI_BLOCK_SIZE;
int i, j;
int index = 0;
@@ -2328,7 +2226,7 @@
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- MODE_INFO *mi = mi_8x8[index+j];
+ MODE_INFO *mi = mi_8x8[index + j];
BLOCK_SIZE sb_type = mi ? mi->sb_type : 0;
bs_hist[sb_type]++;
*min_block_size = VPXMIN(*min_block_size, sb_type);
@@ -2340,19 +2238,16 @@
// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
- BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
- BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
- BLOCK_64X64
+ BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+ BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
};
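next_square_size maps any (possibly rectangular) size down to the largest square size not exceeding it; rd_auto_partition_range uses it below so that, when only square partitions are searched, min_size never exceeds a square size reachable from max_size. A hedged restatement of that clamp:

/* Hedged restatement of the clamp applied later in
 * rd_auto_partition_range(); indices follow the BLOCK_SIZES order. */
static int clamp_min_for_square_search(const int *next_square_size,
                                       int min_size, int max_size) {
  return next_square_size[max_size] < min_size ? next_square_size[max_size]
                                               : min_size;
}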
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col,
- BLOCK_SIZE *min_block_size,
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
MODE_INFO **mi = xd->mi;
@@ -2363,7 +2258,7 @@
int bh, bw;
BLOCK_SIZE min_size = BLOCK_4X4;
BLOCK_SIZE max_size = BLOCK_64X64;
- int bs_hist[BLOCK_SIZES] = {0};
+ int bs_hist[BLOCK_SIZES] = { 0 };
// Trap case where we do not have a prediction.
if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
@@ -2400,8 +2295,7 @@
}
// Check border cases where max and min from neighbors may not be legal.
- max_size = find_partition_size(max_size,
- row8x8_remaining, col8x8_remaining,
+ max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
&bh, &bw);
// Test for blocks at the edge of the active image.
// This may be the actual edge of the image or where there are formatting
@@ -2418,7 +2312,7 @@
// *min_block_size.
if (cpi->sf.use_square_partition_only &&
next_square_size[max_size] < min_size) {
- min_size = next_square_size[max_size];
+ min_size = next_square_size[max_size];
}
*min_block_size = min_size;
@@ -2426,10 +2320,10 @@
}
// TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
+static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
- int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ int mi_width = num_8x8_blocks_wide_lookup[bsize];
int mi_height = num_8x8_blocks_high_lookup[bsize];
int idx, idy;
@@ -2488,16 +2382,19 @@
}
#if CONFIG_FP_MB_STATS
-const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
-const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
-const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
- {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
-const int qindex_split_threshold_lookup[BLOCK_SIZES] =
- {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
-const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
+const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 4, 4 };
+const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+ 2, 1, 2, 4, 2, 4 };
+const int qindex_skip_threshold_lookup[BLOCK_SIZES] = {
+ 0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120
+};
+const int qindex_split_threshold_lookup[BLOCK_SIZES] = {
+ 0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120
+};
+const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6
+};
typedef enum {
MV_ZERO = 0,
@@ -2536,10 +2433,10 @@
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
- TileDataEnc *tile_data,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- BLOCK_SIZE bsize, RD_COST *rd_cost,
- int64_t best_rd, PC_TREE *pc_tree) {
+ TileDataEnc *tile_data, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ RD_COST *rd_cost, int64_t best_rd,
+ PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
@@ -2572,10 +2469,10 @@
#endif
int partition_none_allowed = !force_horz_split && !force_vert_split;
- int partition_horz_allowed = !force_vert_split && yss <= xss &&
- bsize >= BLOCK_8X8;
- int partition_vert_allowed = !force_horz_split && xss <= yss &&
- bsize >= BLOCK_8X8;
+ int partition_horz_allowed =
+ !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+ int partition_vert_allowed =
+ !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
@@ -2583,11 +2480,11 @@
(void)*tp_orig;
assert(num_8x8_blocks_wide_lookup[bsize] ==
- num_8x8_blocks_high_lookup[bsize]);
+ num_8x8_blocks_high_lookup[bsize]);
// Adjust dist breakout threshold according to the partition size.
- dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
+ dist_breakout_thr >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
rate_breakout_thr *= num_pels_log2_lookup[bsize];
vp9_rd_cost_init(&this_rdc);
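The reflowed shift normalizes a distortion breakout threshold tuned at 64x64 down to the current block: the two log2 terms (block widths in 4-pel units) sum to 8 for 64x64 (shift 0) and to 2 for 8x8 (shift 6, a divide by 64), so the threshold tracks pixel count. A hedged restatement of the arithmetic:

/* Hedged restatement: thr is tuned for a 64x64 block, where bwl + bhl
 * is 8; an 8x8 block gives bwl + bhl = 2, so thr is divided by
 * 2^6 = 64, matching its 64x-smaller pixel count. */
static int64_t scale_breakout_thr(int64_t thr, int bwl, int bhl) {
  return thr >> (8 - (bwl + bhl));
}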
@@ -2601,8 +2498,10 @@
x->mb_energy = vp9_block_energy(cpi, x, bsize);
if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
- int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
- + get_chessboard_index(cm->current_video_frame)) & 0x1;
+ int cb_partition_search_ctrl =
+ ((pc_tree->index == 0 || pc_tree->index == 3) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1;
if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
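The cb_partition_search_ctrl expression reformatted above gates the 16x16 range refinement in a checkerboard pattern: pc_tree->index values 0 and 3 are the diagonal children of a split, get_chessboard_index() supplies a per-frame 0/1 phase, and masking the sum with 0x1 alternates which blocks get the refinement from frame to frame. A hedged reduction of the expression:

/* Hedged reduction: diag is 1 for the two diagonal children (index 0 or
 * 3), frame_phase is the per-frame chessboard bit; the low bit of their
 * sum toggles the refinement on alternating blocks and frames. */
static int cb_partition_ctrl(int pc_index, int frame_phase) {
  const int diag = (pc_index == 0 || pc_index == 3);
  return (diag + frame_phase) & 0x1;
}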
@@ -2612,10 +2511,10 @@
// The threshold set here has to be of square block size.
if (cpi->sf.auto_min_max_partition_size) {
partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
- partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
- force_horz_split);
- partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
- force_vert_split);
+ partition_horz_allowed &=
+ ((bsize <= max_size && bsize > min_size) || force_horz_split);
+ partition_vert_allowed &=
+ ((bsize <= max_size && bsize > min_size) || force_vert_split);
do_split &= bsize > min_size;
}
@@ -2637,8 +2536,8 @@
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
- mi_row, mi_col, bsize);
+ src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
+ mi_col, bsize);
}
#endif
@@ -2658,7 +2557,7 @@
  // compute a complexity measure, basically measuring the inconsistency of
  // motion vectors obtained from the first pass in the current block
- for (r = mb_row; r < mb_row_end ; r++) {
+ for (r = mb_row; r < mb_row_end; r++) {
for (c = mb_col; c < mb_col_end; c++) {
const int mb_index = r * cm->mb_cols + c;
@@ -2695,24 +2594,23 @@
// PARTITION_NONE
if (partition_none_allowed) {
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
- &this_rdc, bsize, ctx, best_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
+ best_rdc.rdcost);
if (this_rdc.rate != INT_MAX) {
if (bsize >= BLOCK_8X8) {
this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
}
if (this_rdc.rdcost < best_rdc.rdcost) {
best_rdc = this_rdc;
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = PARTITION_NONE;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
// If all y, u, v transform blocks in this partition are skippable, and
// the dist & rate are within the thresholds, the partition search is
        // terminated for the current branch of the partition search tree.
- if (!x->e_mbd.lossless && ctx->skippable &&
+ if (!x->e_mbd.lossless && ctx->skippable &&
((best_rdc.dist < (dist_breakout_thr >> 2)) ||
(best_rdc.dist < dist_breakout_thr &&
best_rdc.rate < rate_breakout_thr))) {
@@ -2772,8 +2670,7 @@
}
// store estimated motion vector
- if (cpi->sf.adaptive_motion_search)
- store_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx);
// If the interp_filter is marked as SWITCHABLE_FILTERS, it was for an
// intra block and used for context purposes.
@@ -2795,23 +2692,20 @@
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
pc_tree->leaf_split[0], best_rdc.rdcost);
- if (sum_rdc.rate == INT_MAX)
- sum_rdc.rdcost = INT64_MAX;
+ if (sum_rdc.rate == INT_MAX) sum_rdc.rdcost = INT64_MAX;
} else {
for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
- const int x_idx = (i & 1) * mi_step;
+ const int x_idx = (i & 1) * mi_step;
const int y_idx = (i >> 1) * mi_step;
if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
continue;
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
pc_tree->split[i]->index = i;
- rd_pick_partition(cpi, td, tile_data, tp,
- mi_row + y_idx, mi_col + x_idx,
- subsize, &this_rdc,
+ rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &this_rdc,
best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
if (this_rdc.rate == INT_MAX) {
@@ -2827,8 +2721,7 @@
if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
@@ -2835,10 +2728,9 @@
pc_tree->partitioning = PARTITION_SPLIT;
// Rate and distortion based partition search termination clause.
- if (!x->e_mbd.lossless &&
- ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
- (best_rdc.dist < dist_breakout_thr &&
- best_rdc.rate < rate_breakout_thr))) {
+ if (!x->e_mbd.lossless && ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
+ (best_rdc.dist < dist_breakout_thr &&
+ best_rdc.rate < rate_breakout_thr))) {
do_rect = 0;
}
}
@@ -2857,8 +2749,7 @@
if (partition_horz_allowed &&
(do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) {
subsize = get_subsize(bsize, PARTITION_HORZ);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
pc_tree->horizontal[0].pred_interp_filter = pred_interp_filter;
@@ -2871,13 +2762,12 @@
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
pc_tree->horizontal[1].pred_interp_filter = pred_interp_filter;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
- &this_rdc, subsize, &pc_tree->horizontal[1],
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
+ subsize, &pc_tree->horizontal[1],
best_rdc.rdcost - sum_rdc.rdcost);
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
@@ -2908,8 +2798,7 @@
(do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) {
subsize = get_subsize(bsize, PARTITION_VERT);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
pc_tree->vertical[0].pred_interp_filter = pred_interp_filter;
@@ -2921,14 +2810,13 @@
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
&pc_tree->vertical[0]);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
pc_tree->vertical[1].pred_interp_filter = pred_interp_filter;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
- &this_rdc, subsize,
- &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
+ subsize, &pc_tree->vertical[1],
+ best_rdc.rdcost - sum_rdc.rdcost);
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
} else {
@@ -2940,8 +2828,7 @@
if (sum_rdc.rdcost < best_rdc.rdcost) {
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
pc_tree->partitioning = PARTITION_VERT;
@@ -2954,14 +2841,14 @@
// warning related to the fact that best_rd isn't used after this
// point. This code should be refactored so that the duplicate
// checks occur in some sub function and thus are used...
- (void) best_rd;
+ (void)best_rd;
*rd_cost = best_rdc;
if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
pc_tree->index != 3) {
int output_enabled = (bsize == BLOCK_64X64);
- encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
- bsize, pc_tree);
+ encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+ pc_tree);
}
if (bsize == BLOCK_64X64) {
@@ -2973,10 +2860,8 @@
}
}
-static void encode_rd_sb_row(VP9_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- int mi_row,
+static void encode_rd_sb_row(VP9_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, int mi_row,
TOKENEXTRA **tp) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
@@ -3004,8 +2889,7 @@
MODE_INFO **mi = cm->mi_grid_visible + idx_str;
if (sf->adaptive_pred_interp_filter) {
- for (i = 0; i < 64; ++i)
- td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
+ for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
for (i = 0; i < 64; ++i) {
td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
@@ -3019,8 +2903,8 @@
td->pc_root->index = 0;
if (seg->enabled) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
}
@@ -3031,27 +2915,26 @@
seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else if (cpi->partition_search_skippable_frame) {
BLOCK_SIZE bsize;
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else if (sf->partition_search_type == VAR_BASED_PARTITION &&
cm->frame_type != KEY_FRAME) {
choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else {
// If required set upper and lower partition size limits
if (sf->auto_min_max_partition_size) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
- &x->min_partition_size,
- &x->max_partition_size);
+ &x->min_partition_size, &x->max_partition_size);
}
rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rdc, INT64_MAX, td->pc_root);
@@ -3073,8 +2956,7 @@
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
memset(xd->above_context[0], 0,
- sizeof(*xd->above_context[0]) *
- 2 * aligned_mi_cols * MAX_MB_PLANE);
+ sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
memset(xd->above_seg_context, 0,
sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
@@ -3085,8 +2967,8 @@
if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
return 0;
} else {
- return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
- + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
+ return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) +
+ !!(ref_flags & VP9_ALT_FLAG)) >= 2;
}
}
@@ -3115,14 +2997,12 @@
}
static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
- if (xd->lossless)
- return ONLY_4X4;
- if (cpi->common.frame_type == KEY_FRAME &&
- cpi->sf.use_nonrd_pick_mode)
+ if (xd->lossless) return ONLY_4X4;
+ if (cpi->common.frame_type == KEY_FRAME && cpi->sf.use_nonrd_pick_mode)
return ALLOW_16X16;
if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
return ALLOW_32X32;
- else if (cpi->sf.tx_size_search_method == USE_FULL_RD||
+ else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
cpi->sf.tx_size_search_method == USE_TX_8X8)
return TX_MODE_SELECT;
else
@@ -3138,10 +3018,10 @@
vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
}
-static void nonrd_pick_sb_modes(VP9_COMP *cpi,
- TileDataEnc *tile_data, MACROBLOCK *const x,
- int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
+static void nonrd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *const x, int mi_row, int mi_col,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3173,11 +3053,9 @@
else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
else if (bsize >= BLOCK_8X8)
- vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
- rd_cost, bsize, ctx);
+ vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize, ctx);
else
- vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col,
- rd_cost, bsize, ctx);
+ vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
@@ -3189,17 +3067,14 @@
(sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
}
- if (rd_cost->rate == INT_MAX)
- vp9_rd_cost_reset(rd_cost);
+ if (rd_cost->rate == INT_MAX) vp9_rd_cost_reset(rd_cost);
ctx->rate = rd_cost->rate;
ctx->dist = rd_cost->dist;
}
-static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
+static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
+ int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
MACROBLOCKD *xd = &x->e_mbd;
int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
PARTITION_TYPE partition = pc_tree->partitioning;
@@ -3207,8 +3082,7 @@
assert(bsize >= BLOCK_8X8);
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
switch (partition) {
case PARTITION_NONE:
@@ -3252,8 +3126,7 @@
pc_tree->split[3]);
break;
}
- default:
- break;
+ default: break;
}
}
@@ -3268,17 +3141,15 @@
if (bsize > BLOCK_8X8) {
BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
int i;
- for (i = 0; i < 4; ++i)
- pred_pixel_ready_reset(pc_tree->split[i], subsize);
+ for (i = 0; i < 4; ++i) pred_pixel_ready_reset(pc_tree->split[i], subsize);
}
}
static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
- TileDataEnc *tile_data,
- TOKENEXTRA **tp, int mi_row,
- int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
- int do_recon, int64_t best_rd,
- PC_TREE *pc_tree) {
+ TileDataEnc *tile_data, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ RD_COST *rd_cost, int do_recon,
+ int64_t best_rd, PC_TREE *pc_tree) {
const SPEED_FEATURES *const sf = &cpi->sf;
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
@@ -3299,14 +3170,14 @@
const int yss = x->e_mbd.plane[1].subsampling_y;
int partition_none_allowed = !force_horz_split && !force_vert_split;
- int partition_horz_allowed = !force_vert_split && yss <= xss &&
- bsize >= BLOCK_8X8;
- int partition_vert_allowed = !force_horz_split && xss <= yss &&
- bsize >= BLOCK_8X8;
- (void) *tp_orig;
+ int partition_horz_allowed =
+ !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+ int partition_vert_allowed =
+ !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+ (void)*tp_orig;
assert(num_8x8_blocks_wide_lookup[bsize] ==
- num_8x8_blocks_high_lookup[bsize]);
+ num_8x8_blocks_high_lookup[bsize]);
vp9_rd_cost_init(&sum_rdc);
vp9_rd_cost_reset(&best_rdc);
@@ -3315,14 +3186,14 @@
// Determine partition types in search according to the speed features.
// The threshold set here has to be of square block size.
if (sf->auto_min_max_partition_size) {
- partition_none_allowed &= (bsize <= x->max_partition_size &&
- bsize >= x->min_partition_size);
- partition_horz_allowed &= ((bsize <= x->max_partition_size &&
- bsize > x->min_partition_size) ||
- force_horz_split);
- partition_vert_allowed &= ((bsize <= x->max_partition_size &&
- bsize > x->min_partition_size) ||
- force_vert_split);
+ partition_none_allowed &=
+ (bsize <= x->max_partition_size && bsize >= x->min_partition_size);
+ partition_horz_allowed &=
+ ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
+ force_horz_split);
+ partition_vert_allowed &=
+ ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
+ force_vert_split);
do_split &= bsize > x->min_partition_size;
}
if (sf->use_square_partition_only) {
@@ -3330,14 +3201,13 @@
partition_vert_allowed &= force_vert_split;
}
- ctx->pred_pixel_ready = !(partition_vert_allowed ||
- partition_horz_allowed ||
- do_split);
+ ctx->pred_pixel_ready =
+ !(partition_vert_allowed || partition_horz_allowed || do_split);
// PARTITION_NONE
if (partition_none_allowed) {
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
- &this_rdc, bsize, ctx);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize,
+ ctx);
ctx->mic = *xd->mi[0];
ctx->mbmi_ext = *x->mbmi_ext;
ctx->skip_txfm[0] = x->skip_txfm[0];
@@ -3346,23 +3216,21 @@
if (this_rdc.rate != INT_MAX) {
int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
if (this_rdc.rdcost < best_rdc.rdcost) {
int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
- dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
+ dist_breakout_thr >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
rate_breakout_thr *= num_pels_log2_lookup[bsize];
best_rdc = this_rdc;
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = PARTITION_NONE;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
- if (!x->e_mbd.lossless &&
- this_rdc.rate < rate_breakout_thr &&
+ if (!x->e_mbd.lossless && this_rdc.rate < rate_breakout_thr &&
this_rdc.dist < dist_breakout_thr) {
do_split = 0;
do_rect = 0;
@@ -3387,9 +3255,8 @@
if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
continue;
load_pred_mv(x, ctx);
- nonrd_pick_partition(cpi, td, tile_data, tp,
- mi_row + y_idx, mi_col + x_idx,
- subsize, &this_rdc, 0,
+ nonrd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &this_rdc, 0,
best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
if (this_rdc.rate == INT_MAX) {
@@ -3407,8 +3274,7 @@
} else {
// skip rectangular partition test when larger block size
// gives better rd cost
- if (sf->less_rectangular_check)
- do_rect &= !partition_none_allowed;
+ if (sf->less_rectangular_check) do_rect &= !partition_none_allowed;
}
}
@@ -3415,8 +3281,7 @@
// PARTITION_HORZ
if (partition_horz_allowed && do_rect) {
subsize = get_subsize(bsize, PARTITION_HORZ);
- if (sf->adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (sf->adaptive_motion_search) load_pred_mv(x, ctx);
pc_tree->horizontal[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->horizontal[0]);
@@ -3429,9 +3294,8 @@
if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
load_pred_mv(x, ctx);
pc_tree->horizontal[1].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
- &this_rdc, subsize,
- &pc_tree->horizontal[1]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, &this_rdc,
+ subsize, &pc_tree->horizontal[1]);
pc_tree->horizontal[1].mic = *xd->mi[0];
pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
@@ -3445,8 +3309,8 @@
this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
sum_rdc.rate += this_rdc.rate;
sum_rdc.dist += this_rdc.dist;
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
}
}
@@ -3461,8 +3325,7 @@
// PARTITION_VERT
if (partition_vert_allowed && do_rect) {
subsize = get_subsize(bsize, PARTITION_VERT);
- if (sf->adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (sf->adaptive_motion_search) load_pred_mv(x, ctx);
pc_tree->vertical[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->vertical[0]);
@@ -3474,9 +3337,8 @@
if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
load_pred_mv(x, ctx);
pc_tree->vertical[1].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
- &this_rdc, subsize,
- &pc_tree->vertical[1]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, &this_rdc,
+ subsize, &pc_tree->vertical[1]);
pc_tree->vertical[1].mic = *xd->mi[0];
pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
@@ -3489,8 +3351,8 @@
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
sum_rdc.rate += this_rdc.rate;
sum_rdc.dist += this_rdc.dist;
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
}
}
@@ -3514,8 +3376,8 @@
if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
int output_enabled = (bsize == BLOCK_64X64);
- encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
- bsize, pc_tree);
+ encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+ pc_tree);
}
if (bsize == BLOCK_64X64 && do_recon) {
@@ -3527,12 +3389,9 @@
}
}
-static void nonrd_select_partition(VP9_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- MODE_INFO **mi,
- TOKENEXTRA **tp,
- int mi_row, int mi_col,
+static void nonrd_select_partition(VP9_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, MODE_INFO **mi,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int output_enabled,
RD_COST *rd_cost, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
@@ -3546,8 +3405,7 @@
RD_COST this_rdc;
vp9_rd_cost_reset(&this_rdc);
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
partition = partition_lookup[bsl][subsize];
@@ -3555,25 +3413,25 @@
if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) {
x->max_partition_size = BLOCK_32X32;
x->min_partition_size = BLOCK_16X16;
- nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
- rd_cost, 0, INT64_MAX, pc_tree);
+ nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
+ 0, INT64_MAX, pc_tree);
} else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
subsize >= BLOCK_16X16) {
x->max_partition_size = BLOCK_32X32;
x->min_partition_size = BLOCK_8X8;
- nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
- rd_cost, 0, INT64_MAX, pc_tree);
+ nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
+ 0, INT64_MAX, pc_tree);
} else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
x->max_partition_size = BLOCK_16X16;
x->min_partition_size = BLOCK_8X8;
- nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
- rd_cost, 0, INT64_MAX, pc_tree);
+ nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
+ 0, INT64_MAX, pc_tree);
} else {
switch (partition) {
case PARTITION_NONE:
pc_tree->none.pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
- subsize, &pc_tree->none);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
+ &pc_tree->none);
pc_tree->none.mic = *xd->mi[0];
pc_tree->none.mbmi_ext = *x->mbmi_ext;
pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
@@ -3581,8 +3439,8 @@
break;
case PARTITION_VERT:
pc_tree->vertical[0].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
- subsize, &pc_tree->vertical[0]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
+ &pc_tree->vertical[0]);
pc_tree->vertical[0].mic = *xd->mi[0];
pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
@@ -3604,8 +3462,8 @@
break;
case PARTITION_HORZ:
pc_tree->horizontal[0].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
- subsize, &pc_tree->horizontal[0]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
+ &pc_tree->horizontal[0]);
pc_tree->horizontal[0].mic = *xd->mi[0];
pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
@@ -3630,9 +3488,9 @@
nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
subsize, output_enabled, rd_cost,
pc_tree->split[0]);
- nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
- mi_row, mi_col + hbs, subsize, output_enabled,
- &this_rdc, pc_tree->split[1]);
+ nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
+ mi_col + hbs, subsize, output_enabled, &this_rdc,
+ pc_tree->split[1]);
if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
rd_cost->rate += this_rdc.rate;
@@ -3655,9 +3513,7 @@
rd_cost->dist += this_rdc.dist;
}
break;
- default:
- assert(0 && "Invalid partition type.");
- break;
+ default: assert(0 && "Invalid partition type."); break;
}
}
@@ -3665,13 +3521,9 @@
encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
}
-
-static void nonrd_use_partition(VP9_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- MODE_INFO **mi,
- TOKENEXTRA **tp,
- int mi_row, int mi_col,
+static void nonrd_use_partition(VP9_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, MODE_INFO **mi,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int output_enabled,
RD_COST *dummy_cost, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
@@ -3683,8 +3535,7 @@
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
partition = partition_lookup[bsl][subsize];
@@ -3718,8 +3569,8 @@
subsize, &pc_tree->vertical[0]);
if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
pc_tree->vertical[1].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
- dummy_cost, subsize, &pc_tree->vertical[1]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, dummy_cost,
+ subsize, &pc_tree->vertical[1]);
pc_tree->vertical[1].mic = *xd->mi[0];
pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
@@ -3741,8 +3592,8 @@
if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
pc_tree->horizontal[1].pred_pixel_ready = 1;
- nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
- dummy_cost, subsize, &pc_tree->horizontal[1]);
+ nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, dummy_cost,
+ subsize, &pc_tree->horizontal[1]);
pc_tree->horizontal[1].mic = *xd->mi[0];
pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
@@ -3756,15 +3607,14 @@
if (bsize == BLOCK_8X8) {
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, pc_tree->leaf_split[0]);
- encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
- output_enabled, subsize, pc_tree->leaf_split[0]);
+ encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
+ subsize, pc_tree->leaf_split[0]);
} else {
- nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- subsize, output_enabled, dummy_cost,
- pc_tree->split[0]);
- nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
- mi_row, mi_col + hbs, subsize, output_enabled,
- dummy_cost, pc_tree->split[1]);
+ nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize,
+ output_enabled, dummy_cost, pc_tree->split[0]);
+ nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
+ mi_col + hbs, subsize, output_enabled, dummy_cost,
+ pc_tree->split[1]);
nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
mi_row + hbs, mi_col, subsize, output_enabled,
dummy_cost, pc_tree->split[2]);
@@ -3773,9 +3623,7 @@
dummy_cost, pc_tree->split[3]);
}
break;
- default:
- assert(0 && "Invalid partition type.");
- break;
+ default: assert(0 && "Invalid partition type."); break;
}
if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
@@ -3782,10 +3630,8 @@
update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
-static void encode_nonrd_sb_row(VP9_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- int mi_row,
+static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, int mi_row,
TOKENEXTRA **tp) {
SPEED_FEATURES *const sf = &cpi->sf;
VP9_COMMON *const cm = &cpi->common;
@@ -3817,8 +3663,8 @@
x->sb_is_skin = 0;
if (seg->enabled) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
if (seg_skip) {
@@ -3843,8 +3689,7 @@
BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
break;
case FIXED_PARTITION:
- if (!seg_skip)
- bsize = sf->always_this_block_size;
+ if (!seg_skip) bsize = sf->always_this_block_size;
set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
@@ -3856,9 +3701,9 @@
// nonrd_pick_partition does not support 4x4 partition, so avoid it
// on key frame for now.
if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad &&
- cm->frame_type != KEY_FRAME) ||
+ cm->frame_type != KEY_FRAME) ||
(cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
- xd->mi[0]->segment_id)) {
+ xd->mi[0]->segment_id)) {
          // Use lower max_partition_size for low resolutions.
if (cm->width <= 352 && cm->height <= 288)
x->max_partition_size = BLOCK_32X32;
@@ -3866,8 +3711,8 @@
x->max_partition_size = BLOCK_64X64;
x->min_partition_size = BLOCK_8X8;
nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rdc, 1,
- INT64_MAX, td->pc_root);
+ BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
+ td->pc_root);
} else {
choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
// TODO(marpan): Seems like nonrd_select_partition does not support
@@ -3882,9 +3727,7 @@
}
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
}
@@ -3900,9 +3743,9 @@
const int last_stride = cpi->Last_Source->y_stride;
// Pick cutoff threshold
- const int cutoff = (VPXMIN(cm->width, cm->height) >= 720) ?
- (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
- (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
+ const int cutoff = (VPXMIN(cm->width, cm->height) >= 720)
+ ? (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100)
+ : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
diff *var16 = cpi->source_diff_var;
@@ -3918,11 +3761,11 @@
switch (cm->bit_depth) {
case VPX_BITS_8:
vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
- &var16->sse, &var16->sum);
+ &var16->sse, &var16->sum);
break;
case VPX_BITS_10:
vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
- &var16->sse, &var16->sum);
+ &var16->sse, &var16->sum);
break;
case VPX_BITS_12:
vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
@@ -3929,20 +3772,20 @@
&var16->sse, &var16->sum);
break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
" or VPX_BITS_12");
return -1;
}
} else {
- vpx_get16x16var(src, src_stride, last_src, last_stride,
- &var16->sse, &var16->sum);
+ vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
+ &var16->sum);
}
#else
- vpx_get16x16var(src, src_stride, last_src, last_stride,
- &var16->sse, &var16->sum);
+ vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
+ &var16->sum);
#endif // CONFIG_VP9_HIGHBITDEPTH
- var16->var = var16->sse -
- (((uint32_t)var16->sum * var16->sum) >> 8);
+ var16->var = var16->sse - (((uint32_t)var16->sum * var16->sum) >> 8);
if (var16->var >= VAR_HIST_MAX_BG_VAR)
hist[VAR_HIST_BINS - 1]++;
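The var16->var line reformatted above is the usual sum-of-squared-deviations identity for a 16x16 block: with N = 256 samples, SSD = sse - sum^2 / N, and the >> 8 is the division by N. A hedged standalone version:

#include <stdint.h>

/* N = 256 pixels in a 16x16 block, so sum^2 / N is sum^2 >> 8; sse is
 * the accumulated squared difference, sum the plain difference sum. */
static uint32_t block16_var(uint32_t sse, int32_t sum) {
  return sse - (uint32_t)(((uint32_t)sum * sum) >> 8);
}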
@@ -3985,8 +3828,7 @@
sf->partition_search_type = FIXED_PARTITION;
} else {
if (cm->last_width != cm->width || cm->last_height != cm->height) {
- if (cpi->source_diff_var)
- vpx_free(cpi->source_diff_var);
+ if (cpi->source_diff_var) vpx_free(cpi->source_diff_var);
CHECK_MEM_ERROR(cm, cpi->source_diff_var,
vpx_calloc(cm->MBs, sizeof(diff)));
@@ -4011,8 +3853,8 @@
inter_count += td->counts->intra_inter[j][1];
}
- return (intra_count << 2) < inter_count &&
- cm->frame_type != KEY_FRAME && cm->show_frame;
+ return (intra_count << 2) < inter_count && cm->frame_type != KEY_FRAME &&
+ cm->show_frame;
}
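The return expression reads as a 4:1 majority test: intra_count << 2 is intra_count * 4, so skip-encode is considered only when intra blocks make up less than a fifth of the intra+inter total, and only on a shown non-key frame. Restated plainly (a hedged paraphrase, not a library API):

/* intra * 4 < inter  <=>  intra / (intra + inter) < 1/5. */
static int mostly_inter(int intra_count, int inter_count) {
  return (intra_count << 2) < inter_count;
}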
void vp9_init_tile_data(VP9_COMP *cpi) {
@@ -4024,10 +3866,9 @@
int tile_tok = 0;
if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
- if (cpi->tile_data != NULL)
- vpx_free(cpi->tile_data);
- CHECK_MEM_ERROR(cm, cpi->tile_data,
- vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
+ if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
+ CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+ sizeof(*cpi->tile_data)));
cpi->allocated_tiles = tile_cols * tile_rows;
for (tile_row = 0; tile_row < tile_rows; ++tile_row)
@@ -4057,13 +3898,12 @@
}
}
-void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
- int tile_row, int tile_col) {
+void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, int tile_row,
+ int tile_col) {
VP9_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
- TileDataEnc *this_tile =
- &cpi->tile_data[tile_row * tile_cols + tile_col];
- const TileInfo * const tile_info = &this_tile->tile_info;
+ TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
+ const TileInfo *const tile_info = &this_tile->tile_info;
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
const int mi_row_start = tile_info->mi_row_start;
const int mi_row_end = tile_info->mi_row_end;
@@ -4082,7 +3922,7 @@
cpi->tok_count[tile_row][tile_col] =
(unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
assert(tok - cpi->tile_tok[tile_row][tile_col] <=
- allocated_tokens(*tile_info));
+ allocated_tokens(*tile_info));
}
static void encode_tiles(VP9_COMP *cpi) {
@@ -4102,10 +3942,9 @@
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
- cm->current_video_frame * cm->MBs * sizeof(uint8_t);
+ cm->current_video_frame * cm->MBs * sizeof(uint8_t);
- if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
- return EOF;
+ if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;
*this_frame_mb_stats = mb_stats_in;
@@ -4131,10 +3970,8 @@
rdc->m_search_count = 0; // Count of motion search hits.
rdc->ex_search_count = 0; // Exhaustive mesh search hits.
- xd->lossless = cm->base_qindex == 0 &&
- cm->y_dc_delta_q == 0 &&
- cm->uv_dc_delta_q == 0 &&
- cm->uv_ac_delta_q == 0;
+ xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth)
@@ -4141,15 +3978,14 @@
x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
else
x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
- x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
- vp9_highbd_idct4x4_add;
+ x->highbd_itxm_add =
+ xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
#else
x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
#endif // CONFIG_VP9_HIGHBITDEPTH
x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
- if (xd->lossless)
- x->optimize = 0;
+ if (xd->lossless) x->optimize = 0;
cm->tx_mode = select_tx_mode(cpi, xd);
@@ -4158,15 +3994,13 @@
vp9_initialize_rd_consts(cpi);
vp9_initialize_me_consts(cpi, x, cm->base_qindex);
init_encode_frame_mb_context(cpi);
- cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
- cm->width == cm->last_width &&
- cm->height == cm->last_height &&
- !cm->intra_only &&
- cm->last_show_frame;
+ cm->use_prev_frame_mvs =
+ !cm->error_resilient_mode && cm->width == cm->last_width &&
+ cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
// Special case: set prev_mi to NULL when the previous mode info
// context cannot be used.
- cm->prev_mi = cm->use_prev_frame_mvs ?
- cm->prev_mip + cm->mi_stride + 1 : NULL;
+ cm->prev_mi =
+ cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
x->quant_fp = cpi->sf.use_quant_fp;
vp9_zero(x->skip_txfm);
@@ -4186,8 +4020,7 @@
}
vp9_zero(x->zcoeff_blk);
- if (cm->frame_type != KEY_FRAME &&
- cpi->rc.frames_since_golden == 0 &&
+ if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0 &&
!cpi->use_svc)
cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);
@@ -4216,8 +4049,8 @@
cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
}
- sf->skip_encode_frame = sf->skip_encode_sb ?
- get_skip_encode_frame(cm, td) : 0;
+ sf->skip_encode_frame =
+ sf->skip_encode_sb ? get_skip_encode_frame(cm, td) : 0;
#if 0
// Keep record of the total distortion this time around for future use
@@ -4227,8 +4060,7 @@
static INTERP_FILTER get_interp_filter(
const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
- if (!is_alt_ref &&
- threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
+ if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
return EIGHTTAP_SMOOTH;
@@ -4278,9 +4110,9 @@
// the other two.
if (!frame_is_intra_only(cm)) {
if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
- cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
(cm->ref_frame_sign_bias[ALTREF_FRAME] ==
- cm->ref_frame_sign_bias[LAST_FRAME])) {
+ cm->ref_frame_sign_bias[LAST_FRAME])) {
cpi->allow_comp_inter_inter = 0;
} else {
cpi->allow_comp_inter_inter = 1;
@@ -4312,10 +4144,8 @@
if (is_alt_ref || !cpi->allow_comp_inter_inter)
cm->reference_mode = SINGLE_REFERENCE;
else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
- mode_thrs[COMPOUND_REFERENCE] >
- mode_thrs[REFERENCE_MODE_SELECT] &&
- check_dual_ref_flags(cpi) &&
- cpi->static_mb_pct == 100)
+ mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
+ check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
cm->reference_mode = COMPOUND_REFERENCE;
else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
cm->reference_mode = SINGLE_REFERENCE;
@@ -4416,10 +4246,8 @@
++counts->uv_mode[y_mode][uv_mode];
}
-static void update_zeromv_cnt(VP9_COMP *const cpi,
- const MODE_INFO *const mi,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+static void update_zeromv_cnt(VP9_COMP *const cpi, const MODE_INFO *const mi,
+ int mi_row, int mi_col, BLOCK_SIZE bsize) {
const VP9_COMMON *const cm = &cpi->common;
MV mv = mi->mv[0].as_mv;
const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -4434,7 +4262,7 @@
if (is_inter_block(mi) && mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
if (abs(mv.row) < 8 && abs(mv.col) < 8) {
if (cpi->consec_zero_mv[map_offset] < 255)
- cpi->consec_zero_mv[map_offset]++;
+ cpi->consec_zero_mv[map_offset]++;
} else {
cpi->consec_zero_mv[map_offset] = 0;
}
@@ -4442,16 +4270,15 @@
}
}
-static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
- TOKENEXTRA **t, int output_enabled,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx) {
+static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
- const int seg_skip = segfeature_active(&cm->seg, mi->segment_id,
- SEG_LVL_SKIP);
+ const int seg_skip =
+ segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP);
x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 &&
cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
@@ -4466,8 +4293,7 @@
x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
x->q_index < QIDX_SKIP_THRESH);
- if (x->skip_encode)
- return;
+ if (x->skip_encode) return;
if (!is_inter_block(mi)) {
int plane;
@@ -4480,8 +4306,7 @@
mi->skip = 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
- if (output_enabled)
- sum_intra_stats(td->counts, mi);
+ if (output_enabled) sum_intra_stats(td->counts, mi);
vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
VPXMAX(bsize, BLOCK_8X8));
} else {
@@ -4489,8 +4314,7 @@
const int is_compound = has_second_ref(mi);
set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
for (ref = 0; ref < 1 + is_compound; ++ref) {
- YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
- mi->ref_frame[ref]);
+ YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mi->ref_frame[ref]);
assert(cfg != NULL);
vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
&xd->block_refs[ref]->sf);
@@ -4512,8 +4336,7 @@
}
if (output_enabled) {
- if (cm->tx_mode == TX_MODE_SELECT &&
- mi->sb_type >= BLOCK_8X8 &&
+ if (cm->tx_mode == TX_MODE_SELECT && mi->sb_type >= BLOCK_8X8 &&
!(is_inter_block(mi) && mi->skip)) {
++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
&td->counts->tx)[mi->tx_size];
--- a/vp9/encoder/vp9_encodeframe.h
+++ b/vp9/encoder/vp9_encodeframe.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_
#define VP9_ENCODER_VP9_ENCODEFRAME_H_
@@ -31,14 +30,14 @@
#define VAR_HIST_SMALL_CUT_OFF 45
void vp9_setup_src_planes(struct macroblock *x,
- const struct yv12_buffer_config *src,
- int mi_row, int mi_col);
+ const struct yv12_buffer_config *src, int mi_row,
+ int mi_col);
void vp9_encode_frame(struct VP9_COMP *cpi);
void vp9_init_tile_data(struct VP9_COMP *cpi);
-void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td,
- int tile_row, int tile_col);
+void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td, int tile_row,
+ int tile_col);
void vp9_set_variance_partition_thresholds(struct VP9_COMP *cpi, int q);
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
@@ -51,29 +50,29 @@
}
typedef struct vp9_token_state {
- int64_t error;
- int rate;
- int16_t next;
- int16_t token;
- tran_low_t qc;
- tran_low_t dqc;
- uint8_t best_index;
+ int64_t error;
+ int rate;
+ int16_t next;
+ int16_t token;
+ tran_low_t qc;
+ tran_low_t dqc;
+ uint8_t best_index;
} vp9_token_state;
-static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] ={ {10, 6}, {8, 5}, };
+static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
+ { 10, 6 }, { 8, 5 },
+};
-#define UPDATE_RD_COST()\
-{\
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
-}
+#define UPDATE_RD_COST() \
+ { \
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
+ }
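UPDATE_RD_COST() refreshes both trellis branch costs from the enclosing scope's rate0/error0 and rate1/error1; as a statement macro it assigns rd_cost0 and rd_cost1 in place. A hedged function equivalent, reusing the same fixed-point combination sketched for RDCOST earlier:

#include <stdint.h>

/* Function form of the macro above; the real code uses the macro so it
 * can write rd_cost0/rd_cost1 without passing pointers. */
static void update_rd_cost(int rdmult, int rddiv, int rate0, int64_t error0,
                           int rate1, int64_t error1, int64_t *rd_cost0,
                           int64_t *rd_cost1) {
  *rd_cost0 = ((128 + (int64_t)rate0 * rdmult) >> 8) + (error0 << rddiv);
  *rd_cost1 = ((128 + (int64_t)rate1 * rdmult) >> 8) + (error1 << rddiv);
}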
// This function is a placeholder for now but may ultimately need
// to scan previous tokens to work out the correct context.
-static int trellis_get_coeff_context(const int16_t *scan,
- const int16_t *nb,
- int idx, int token,
- uint8_t *token_cache) {
+static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
+ int idx, int token, uint8_t *token_cache) {
int bak = token_cache[scan[idx]], pt;
token_cache[scan[idx]] = vp9_pt_energy_class[token];
pt = get_coef_context(nb, token_cache, idx + 1);
@@ -81,8 +80,8 @@
return pt;
}
-int vp9_optimize_b(MACROBLOCK *mb, int plane, int block,
- TX_SIZE tx_size, int ctx) {
+int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+ int ctx) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -96,8 +95,8 @@
const PLANE_TYPE type = get_plane_type(plane);
const int default_eob = 16 << (tx_size << 1);
const int shift = (tx_size == TX_32X32);
- const int16_t* const dequant_ptr = pd->dequant;
- const uint8_t* const band_translate = get_band_translate(tx_size);
+ const int16_t *const dequant_ptr = pd->dequant;
+ const uint8_t *const band_translate = get_band_translate(tx_size);
const scan_order *const so = get_scan(xd, tx_size, type, block);
const int16_t *const scan = so->scan;
const int16_t *const nb = so->neighbors;
@@ -130,8 +129,7 @@
tokens[eob][1] = tokens[eob][0];
for (i = 0; i < eob; i++)
- token_cache[scan[i]] =
- vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])];
+ token_cache[scan[i]] = vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])];
for (i = eob; i-- > 0;) {
int base_bits, d2, dx;
@@ -150,10 +148,12 @@
if (next < default_eob) {
band = band_translate[i + 1];
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
- rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
- [tokens[next][0].token];
- rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
- [tokens[next][1].token];
+ rate0 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][0]
+ .token];
+ rate1 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][1]
+ .token];
}
UPDATE_RD_COST();
/* And pick the best. */
@@ -179,8 +179,8 @@
rate1 = tokens[next][1].rate;
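// Trying the reduced-magnitude candidate (|x| - 1) only pays off when the
// current dequantized value overshoots the (scaled) original coefficient
// by less than one full quantization step: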
if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
- (abs(x) * dequant_ptr[rc != 0] < (abs(coeff[rc]) << shift) +
- dequant_ptr[rc != 0]))
+ (abs(x) * dequant_ptr[rc != 0] <
+ (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
shortcut = 1;
else
shortcut = 0;
@@ -210,13 +210,15 @@
band = band_translate[i + 1];
if (t0 != EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
- rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
- [tokens[next][0].token];
+ rate0 +=
+ mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][0]
+ .token];
}
if (t1 != EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
- rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
- [tokens[next][1].token];
+ rate1 +=
+ mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][1]
+ .token];
}
}
@@ -249,8 +251,7 @@
// Account for the rounding difference in the dequantized coefficient
// value when the quantization index is dropped from an even number
// to an odd number.
- if (shift & x)
- offset += (dequant_ptr[rc != 0] & 0x01);
+ if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);
if (sz == 0)
tokens[i][1].dqc = dqcoeff[rc] - offset;
@@ -316,9 +317,8 @@
return final_eob;
}
-static INLINE void fdct32x32(int rd_transform,
- const int16_t *src, tran_low_t *dst,
- int src_stride) {
+static INLINE void fdct32x32(int rd_transform, const int16_t *src,
+ tran_low_t *dst, int src_stride) {
if (rd_transform)
vpx_fdct32x32_rd(src, dst, src_stride);
else
@@ -356,33 +356,31 @@
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round_fp, p->quant_fp, p->quant_shift,
- qcoeff, dqcoeff, pd->dequant,
- eob, scan_order->scan,
- scan_order->iscan);
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
break;
case TX_16X16:
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_8X8:
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -399,27 +397,22 @@
case TX_16X16:
vpx_fdct16x16(src_diff, coeff, diff_stride);
vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant,
+ eob, scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
- vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64,
- x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
+ p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant,
+ eob, scan_order->scan, scan_order->iscan);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
@@ -447,23 +440,22 @@
case TX_16X16:
vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride);
vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
case TX_8X8:
vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride);
vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -472,31 +464,25 @@
switch (tx_size) {
case TX_32X32:
vpx_fdct32x32_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc_32x32(coeff, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_16X16:
vpx_fdct16x16_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc(coeff, 256, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 256, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_8X8:
vpx_fdct8x8_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc(coeff, 64, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 64, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vpx_quantize_dc(coeff, 16, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 16, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
@@ -516,37 +502,36 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- switch (tx_size) {
+ switch (tx_size) {
case TX_32X32:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
- dqcoeff, pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ dqcoeff, pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_16X16:
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_8X8:
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -562,34 +547,28 @@
break;
case TX_16X16:
vpx_fdct16x16(src_diff, coeff, diff_stride);
- vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vpx_fdct8x8(src_diff, coeff, diff_stride);
- vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
- vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
static void encode_block(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct encode_b_args *const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -649,35 +628,32 @@
*a = *l = p->eobs[block] > 0;
}
- if (p->eobs[block])
- *(args->skip) = 0;
+ if (p->eobs[block]) *(args->skip) = 0;
- if (x->skip_encode || p->eobs[block] == 0)
- return;
+ if (x->skip_encode || p->eobs[block] == 0) return;
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
- vp9_highbd_idct32x32_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ vp9_highbd_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
break;
case TX_16X16:
- vp9_highbd_idct16x16_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ vp9_highbd_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
break;
case TX_8X8:
- vp9_highbd_idct8x8_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ vp9_highbd_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
break;
case TX_4X4:
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
break;
- default:
- assert(0 && "Invalid transform size");
+ default: assert(0 && "Invalid transform size");
}
return;
}
@@ -699,15 +675,13 @@
// case.
x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
break;
- default:
- assert(0 && "Invalid transform size");
- break;
+ default: assert(0 && "Invalid transform size"); break;
}
}
static void encode_block_pass1(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
MACROBLOCK *const x = (MACROBLOCK *)arg;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = &x->plane[plane];
@@ -721,8 +695,8 @@
if (p->eobs[block] > 0) {
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], xd->bd);
- return;
+ x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], xd->bd);
+ return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
@@ -739,23 +713,21 @@
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MODE_INFO *mi = xd->mi[0];
- struct encode_b_args arg = {x, 1, NULL, NULL, &mi->skip};
+ struct encode_b_args arg = { x, 1, NULL, NULL, &mi->skip };
int plane;
mi->skip = 1;
- if (x->skip)
- return;
+ if (x->skip) return;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- if (!x->skip_recode)
- vp9_subtract_plane(x, bsize, plane);
+ if (!x->skip_recode) vp9_subtract_plane(x, bsize, plane);
if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
- const struct macroblockd_plane* const pd = &xd->plane[plane];
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
- vp9_get_entropy_contexts(bsize, tx_size, pd,
- ctx.ta[plane], ctx.tl[plane]);
+ vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
+ ctx.tl[plane]);
arg.enable_coeff_opt = 1;
} else {
arg.enable_coeff_opt = 0;
@@ -769,9 +741,9 @@
}
void vp9_encode_block_intra(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct encode_b_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
+ struct encode_b_args *const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
@@ -817,8 +789,8 @@
}
vp9_predict_intra_block(xd, bwl, tx_size, mode, x->skip_encode ? src : dst,
- x->skip_encode ? src_stride : dst_stride,
- dst, dst_stride, col, row, plane);
+ x->skip_encode ? src_stride : dst_stride, dst,
+ dst_stride, col, row, plane);
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -825,8 +797,8 @@
switch (tx_size) {
case TX_32X32:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(32, 32, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift,
@@ -839,8 +811,8 @@
break;
case TX_16X16:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(16, 16, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
if (tx_type == DCT_DCT)
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
else
@@ -847,18 +819,18 @@
vp9_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (!x->skip_encode && *eob) {
- vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, dst_stride,
- *eob, xd->bd);
+ vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob,
+ xd->bd);
}
break;
case TX_8X8:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(8, 8, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
if (tx_type == DCT_DCT)
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
else
@@ -865,8 +837,8 @@
vp9_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (!x->skip_encode && *eob) {
vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob,
@@ -875,8 +847,8 @@
break;
case TX_4X4:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(4, 4, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
if (tx_type != DCT_DCT)
vp9_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
else
@@ -883,8 +855,8 @@
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (!x->skip_encode && *eob) {
@@ -898,12 +870,9 @@
}
}
break;
- default:
- assert(0);
- return;
+ default: assert(0); return;
}
- if (*eob)
- *(args->skip) = 0;
+ if (*eob) *(args->skip) = 0;
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -911,8 +880,8 @@
switch (tx_size) {
case TX_32X32:
if (!x->skip_recode) {
- vpx_subtract_block(32, 32, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(32, 32, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
@@ -920,7 +889,7 @@
scan_order->iscan);
}
if (args->enable_coeff_opt && !x->skip_recode) {
- *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
+ *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
}
if (!x->skip_encode && *eob)
vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob);
@@ -927,13 +896,12 @@
break;
case TX_16X16:
if (!x->skip_recode) {
- vpx_subtract_block(16, 16, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(16, 16, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
- vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
if (args->enable_coeff_opt && !x->skip_recode) {
*a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
@@ -943,13 +911,12 @@
break;
case TX_8X8:
if (!x->skip_recode) {
- vpx_subtract_block(8, 8, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(8, 8, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
- p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
if (args->enable_coeff_opt && !x->skip_recode) {
*a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
@@ -959,16 +926,15 @@
break;
case TX_4X4:
if (!x->skip_recode) {
- vpx_subtract_block(4, 4, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
if (tx_type != DCT_DCT)
vp9_fht4x4(src_diff, coeff, diff_stride, tx_type);
else
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
- p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
if (args->enable_coeff_opt && !x->skip_recode) {
*a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
@@ -983,12 +949,9 @@
vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type);
}
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
- if (*eob)
- *(args->skip) = 0;
+ if (*eob) *(args->skip) = 0;
}
void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
@@ -995,15 +958,14 @@
int enable_optimize_b) {
const MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
- struct encode_b_args arg = {x, enable_optimize_b,
- ctx.ta[plane], ctx.tl[plane],
- &xd->mi[0]->skip};
+ struct encode_b_args arg = { x, enable_optimize_b, ctx.ta[plane],
+ ctx.tl[plane], &xd->mi[0]->skip };
if (enable_optimize_b && x->optimize &&
(!x->skip_recode || !x->skip_optimize)) {
- const struct macroblockd_plane* const pd = &xd->plane[plane];
- const TX_SIZE tx_size = plane ? get_uv_tx_size(xd->mi[0], pd) :
- xd->mi[0]->tx_size;
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const TX_SIZE tx_size =
+ plane ? get_uv_tx_size(xd->mi[0], pd) : xd->mi[0]->tx_size;
vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
} else {
arg.enable_coeff_opt = 0;
--- a/vp9/encoder/vp9_encodemb.h
+++ b/vp9/encoder/vp9_encodemb.h
@@ -25,8 +25,8 @@
ENTROPY_CONTEXT *tl;
int8_t *skip;
};
-int vp9_optimize_b(MACROBLOCK *mb, int plane, int block,
- TX_SIZE tx_size, int ctx);
+int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+ int ctx);
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, int row, int col,
@@ -39,8 +39,7 @@
void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
void vp9_encode_block_intra(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg);
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg);
void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
int enable_optimize_b);
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -30,15 +30,15 @@
vp9_tokens_from_tree(mv_fp_encodings, vp9_mv_fp_tree);
}
-static void encode_mv_component(vpx_writer* w, int comp,
- const nmv_component* mvcomp, int usehp) {
+static void encode_mv_component(vpx_writer *w, int comp,
+ const nmv_component *mvcomp, int usehp) {
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
const int mv_class = vp9_get_mv_class(mag - 1, &offset);
- const int d = offset >> 3; // int mv data
- const int fr = (offset >> 1) & 3; // fractional mv data
- const int hp = offset & 1; // high precision mv data
+ const int d = offset >> 3; // int mv data
+ const int fr = (offset >> 1) & 3; // fractional mv data
+ const int hp = offset & 1; // high precision mv data
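+  // Example: offset = 13 (0b1101) -> d = 1, fr = 2, hp = 1.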
assert(comp != 0);
@@ -56,24 +56,21 @@
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
- for (i = 0; i < n; ++i)
- vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
+ for (i = 0; i < n; ++i) vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
}
// Fractional bits
vp9_write_token(w, vp9_mv_fp_tree,
- mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
&mv_fp_encodings[fr]);
// High precision bit
if (usehp)
- vpx_write(w, hp,
- mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+ vpx_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
}
-
static void build_nmv_component_cost_table(int *mvcost,
- const nmv_component* const mvcomp,
+ const nmv_component *const mvcomp,
int usehp) {
int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
int bits_cost[MV_OFFSET_BITS][2];
@@ -107,12 +104,12 @@
int d, e, f;
int cost = class_cost[MV_CLASS_0];
int v = o + 1;
- d = (o >> 3); /* int mv data */
- f = (o >> 1) & 3; /* fractional pel mv data */
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
cost += class0_cost[d];
cost += class0_fp_cost[d][f];
if (usehp) {
- e = (o & 1); /* high precision mv data */
+ e = (o & 1); /* high precision mv data */
cost += class0_hp_cost[e];
}
mvcost[v] = cost + sign_cost[0];
@@ -123,9 +120,8 @@
for (d = 0; d < (1 << c); ++d) {
int f;
int whole_cost = class_cost[c];
- int b = c + CLASS0_BITS - 1; /* number of bits */
- for (i = 0; i < b; ++i)
- whole_cost += bits_cost[i][((d >> i) & 1)];
+ int b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i) whole_cost += bits_cost[i][((d >> i) & 1)];
for (f = 0; f < 4; ++f) {
int cost = whole_cost + fp_cost[f];
int v = (CLASS0_SIZE << (c + 2)) + d * 8 + f * 2 /* + e */ + 1;
@@ -163,8 +159,8 @@
static void write_mv_update(const vpx_tree_index *tree,
vpx_prob probs[/*n - 1*/],
- const unsigned int counts[/*n - 1*/],
- int n, vpx_writer *w) {
+ const unsigned int counts[/*n - 1*/], int n,
+ vpx_writer *w) {
int i;
unsigned int branch_ct[32][2];
@@ -214,11 +210,9 @@
}
}
-void vp9_encode_mv(VP9_COMP* cpi, vpx_writer* w,
- const MV* mv, const MV* ref,
- const nmv_context* mvctx, int usehp) {
- const MV diff = {mv->row - ref->row,
- mv->col - ref->col};
+void vp9_encode_mv(VP9_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp) {
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
usehp = usehp && use_mv_hp(ref);
@@ -238,7 +232,7 @@
}
void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context* ctx, int usehp) {
+ const nmv_context *ctx, int usehp) {
vp9_cost_tokens(mvjoint, ctx->joints, vp9_mv_joint_tree);
build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
@@ -245,14 +239,13 @@
}
static void inc_mvs(const MODE_INFO *mi, const MB_MODE_INFO_EXT *mbmi_ext,
- const int_mv mvs[2],
- nmv_context_counts *counts) {
+ const int_mv mvs[2], nmv_context_counts *counts) {
int i;
for (i = 0; i < 1 + has_second_ref(mi); ++i) {
const MV *ref = &mbmi_ext->ref_mvs[mi->ref_frame[i]][0].as_mv;
- const MV diff = {mvs[i].as_mv.row - ref->row,
- mvs[i].as_mv.col - ref->col};
+ const MV diff = { mvs[i].as_mv.row - ref->row,
+ mvs[i].as_mv.col - ref->col };
vp9_inc_mv(&diff, counts);
}
}
@@ -275,8 +268,6 @@
}
}
} else {
- if (mi->mode == NEWMV)
- inc_mvs(mi, mbmi_ext, mi->mv, &td->counts->mv);
+ if (mi->mode == NEWMV) inc_mvs(mi, mbmi_ext, mi->mv, &td->counts->mv);
}
}
-
--- a/vp9/encoder/vp9_encodemv.h
+++ b/vp9/encoder/vp9_encodemv.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_ENCODEMV_H_
#define VP9_ENCODER_VP9_ENCODEMV_H_
@@ -23,11 +22,11 @@
void vp9_write_nmv_probs(VP9_COMMON *cm, int usehp, vpx_writer *w,
nmv_context_counts *const counts);
-void vp9_encode_mv(VP9_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref,
- const nmv_context* mvctx, int usehp);
+void vp9_encode_mv(VP9_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp);
void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context* mvctx, int usehp);
+ const nmv_context *mvctx, int usehp);
void vp9_update_mv_count(ThreadData *td);
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -63,12 +63,12 @@
#define AM_SEGMENT_ID_INACTIVE 7
#define AM_SEGMENT_ID_ACTIVE 0
-#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv
- // for altref computation.
-#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision
- // mv. Choose a very high value for
- // now so that HIGH_PRECISION is always
- // chosen.
+#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv
+ // for altref computation.
+#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision
+ // mv. Choose a very high value for
+ // now so that HIGH_PRECISION is always
+ // chosen.
// #define OUTPUT_YUV_REC
#ifdef OUTPUT_YUV_DENOISED
@@ -106,24 +106,26 @@
return cpi->b_calculate_psnr && (oxcf->pass != 1) && cm->show_frame;
}
+/* clang-format off */
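+// Each row: level, max luma sample rate, max luma picture size, average
+// bitrate, max CPB size, and the remaining Vp9LevelSpec limits (see the
+// struct definition for the exact field order).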
static const Vp9LevelSpec vp9_level_defs[VP9_LEVELS] = {
- {LEVEL_1, 829440, 36864, 200, 400, 2, 1, 4, 8},
- {LEVEL_1_1, 2764800, 73728, 800, 1000, 2, 1, 4, 8},
- {LEVEL_2, 4608000, 122880, 1800, 1500, 2, 1, 4, 8},
- {LEVEL_2_1, 9216000, 245760, 3600, 2800, 2, 2, 4, 8},
- {LEVEL_3, 20736000, 552960, 7200, 6000, 2, 4, 4, 8},
- {LEVEL_3_1, 36864000, 983040, 12000, 10000, 2, 4, 4, 8},
- {LEVEL_4, 83558400, 2228224, 18000, 16000, 4, 4, 4, 8},
- {LEVEL_4_1, 160432128, 2228224, 30000, 18000, 4, 4, 5, 6},
- {LEVEL_5, 311951360, 8912896, 60000, 36000, 6, 8, 6, 4},
- {LEVEL_5_1, 588251136, 8912896, 120000, 46000, 8, 8, 10, 4},
+ { LEVEL_1, 829440, 36864, 200, 400, 2, 1, 4, 8 },
+ { LEVEL_1_1, 2764800, 73728, 800, 1000, 2, 1, 4, 8 },
+ { LEVEL_2, 4608000, 122880, 1800, 1500, 2, 1, 4, 8 },
+ { LEVEL_2_1, 9216000, 245760, 3600, 2800, 2, 2, 4, 8 },
+ { LEVEL_3, 20736000, 552960, 7200, 6000, 2, 4, 4, 8 },
+ { LEVEL_3_1, 36864000, 983040, 12000, 10000, 2, 4, 4, 8 },
+ { LEVEL_4, 83558400, 2228224, 18000, 16000, 4, 4, 4, 8 },
+ { LEVEL_4_1, 160432128, 2228224, 30000, 18000, 4, 4, 5, 6 },
+ { LEVEL_5, 311951360, 8912896, 60000, 36000, 6, 8, 6, 4 },
+ { LEVEL_5_1, 588251136, 8912896, 120000, 46000, 8, 8, 10, 4 },
// TODO(huisu): update max_cpb_size for level 5_2 ~ 6_2 when
// they are finalized (currently TBD).
- {LEVEL_5_2, 1176502272, 8912896, 180000, 0, 8, 8, 10, 4},
- {LEVEL_6, 1176502272, 35651584, 180000, 0, 8, 16, 10, 4},
- {LEVEL_6_1, 2353004544u, 35651584, 240000, 0, 8, 16, 10, 4},
- {LEVEL_6_2, 4706009088u, 35651584, 480000, 0, 8, 16, 10, 4},
+ { LEVEL_5_2, 1176502272, 8912896, 180000, 0, 8, 8, 10, 4 },
+ { LEVEL_6, 1176502272, 35651584, 180000, 0, 8, 16, 10, 4 },
+ { LEVEL_6_1, 2353004544u, 35651584, 240000, 0, 8, 16, 10, 4 },
+ { LEVEL_6_2, 4706009088u, 35651584, 480000, 0, 8, 16, 10, 4 },
};
+/* clang-format on */
static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
switch (mode) {
@@ -138,15 +140,15 @@
case THREEFIVE:
*hr = 3;
*hs = 5;
- break;
+ break;
case ONETWO:
*hr = 1;
*hs = 2;
- break;
+ break;
default:
*hr = 1;
*hs = 1;
- assert(0);
+ assert(0);
break;
}
}
@@ -189,8 +191,8 @@
vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
// Setting the data to -MAX_LOOP_FILTER will result in the computed loop
// filter level being zero regardless of the value of seg->abs_delta.
- vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE,
- SEG_LVL_ALT_LF, -MAX_LOOP_FILTER);
+ vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+ -MAX_LOOP_FILTER);
} else {
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
@@ -213,7 +215,7 @@
level_spec->min_altref_distance = INT_MAX;
}
-VP9_LEVEL vp9_get_level(const Vp9LevelSpec * const level_spec) {
+VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec) {
int i;
const Vp9LevelSpec *this_level;
@@ -222,7 +224,7 @@
for (i = 0; i < VP9_LEVELS; ++i) {
this_level = &vp9_level_defs[i];
if ((double)level_spec->max_luma_sample_rate * (1 + SAMPLE_RATE_GRACE_P) >
- (double)this_level->max_luma_sample_rate ||
+ (double)this_level->max_luma_sample_rate ||
level_spec->max_luma_picture_size > this_level->max_luma_picture_size ||
level_spec->average_bitrate > this_level->average_bitrate ||
level_spec->max_cpb_size > this_level->max_cpb_size ||
@@ -236,9 +238,7 @@
return (i == VP9_LEVELS) ? LEVEL_UNKNOWN : vp9_level_defs[i].level;
}
-int vp9_set_active_map(VP9_COMP* cpi,
- unsigned char* new_map_16x16,
- int rows,
+int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
unsigned char *const active_map_8x8 = cpi->active_map.map;
@@ -265,13 +265,11 @@
}
}
-int vp9_get_active_map(VP9_COMP* cpi,
- unsigned char* new_map_16x16,
- int rows,
+int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
new_map_16x16) {
- unsigned char* const seg_map_8x8 = cpi->segmentation_map;
+ unsigned char *const seg_map_8x8 = cpi->segmentation_map;
const int mi_rows = cpi->common.mi_rows;
const int mi_cols = cpi->common.mi_cols;
memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
@@ -314,13 +312,11 @@
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
vp9_setup_past_independence(cm);
} else {
- if (!cpi->use_svc)
- cm->frame_context_idx = cpi->refresh_alt_ref_frame;
+ if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame;
}
if (cm->frame_type == KEY_FRAME) {
- if (!is_two_pass_svc(cpi))
- cpi->refresh_golden_frame = 1;
+ if (!is_two_pass_svc(cpi)) cpi->refresh_golden_frame = 1;
cpi->refresh_alt_ref_frame = 1;
vp9_zero(cpi->interp_filter_selected);
} else {
@@ -349,19 +345,16 @@
static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
- if (!cm->mip)
- return 1;
+ if (!cm->mip) return 1;
cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
- if (!cm->prev_mip)
- return 1;
+ if (!cm->prev_mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->mi_grid_base)
- return 1;
- cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->prev_mi_grid_base)
- return 1;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->mi_grid_base) return 1;
+ cm->prev_mi_grid_base =
+ (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->prev_mi_grid_base) return 1;
return 0;
}
@@ -511,7 +504,7 @@
// restored with a call to vp9_restore_coding_context. These functions are
// intended for use in a re-code loop in vp9_compress_frame where the
// quantizer value is adjusted between loop iterations.
- vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+ vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
@@ -524,8 +517,8 @@
vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);
- memcpy(cpi->coding_context.last_frame_seg_map_copy,
- cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+ memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
+ (cm->mi_rows * cm->mi_cols));
vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
@@ -550,8 +543,7 @@
vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);
- memcpy(cm->last_frame_seg_map,
- cpi->coding_context.last_frame_seg_map_copy,
+ memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
@@ -603,8 +595,8 @@
seg->update_map = 1;
seg->update_data = 1;
- qi_delta = vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875,
- cm->bit_depth);
+ qi_delta =
+ vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
@@ -625,8 +617,8 @@
seg->update_data = 1;
seg->abs_delta = SEGMENT_DELTADATA;
- qi_delta = vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
- cm->bit_depth);
+ qi_delta =
+ vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth);
vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
@@ -708,16 +700,15 @@
cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
- cm->use_highbitdepth,
+ cm->use_highbitdepth,
#endif
- oxcf->lag_in_frames);
+ oxcf->lag_in_frames);
if (!cpi->lookahead)
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate lag buffers");
// TODO(agrange) Check if ARF is enabled and skip allocation if not.
- if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer,
- oxcf->width, oxcf->height,
+ if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -730,8 +721,7 @@
static void alloc_util_frame_buffers(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
- if (vpx_realloc_frame_buffer(&cpi->last_frame_uf,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -741,8 +731,7 @@
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
- if (vpx_realloc_frame_buffer(&cpi->scaled_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -757,23 +746,18 @@
// target of 1/4x1/4.
if (is_one_pass_cbr_svc(cpi) && !cpi->svc.scaled_temp_is_alloc) {
cpi->svc.scaled_temp_is_alloc = 1;
- if (vpx_realloc_frame_buffer(&cpi->svc.scaled_temp,
- cm->width >> 1,
- cm->height >> 1,
- cm->subsampling_x,
- cm->subsampling_y,
+ if (vpx_realloc_frame_buffer(
+ &cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1,
+ cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
- cm->use_highbitdepth,
+ cm->use_highbitdepth,
#endif
- VP9_ENC_BORDER_IN_PIXELS,
- cm->byte_alignment,
- NULL, NULL, NULL))
+ VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL))
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled_frame for svc ");
}
- if (vpx_realloc_frame_buffer(&cpi->scaled_last_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -783,8 +767,7 @@
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled last source buffer");
#ifdef ENABLE_KF_DENOISE
- if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -794,8 +777,7 @@
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate unscaled raw source frame buffer");
- if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -807,14 +789,12 @@
#endif
}
-
static int alloc_context_buffers_ext(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
- if (!cpi->mbmi_ext_base)
- return 1;
+ if (!cpi->mbmi_ext_base) return 1;
return 0;
}
@@ -831,7 +811,7 @@
{
unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
- vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+ vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
}
vp9_setup_pc_tree(&cpi->common, &cpi->td);
@@ -848,14 +828,13 @@
int min_log2_tile_cols, max_log2_tile_cols;
vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
- if (is_two_pass_svc(cpi) &&
- (cpi->svc.encode_empty_frame_state == ENCODING ||
- cpi->svc.number_spatial_layers > 1)) {
+ if (is_two_pass_svc(cpi) && (cpi->svc.encode_empty_frame_state == ENCODING ||
+ cpi->svc.number_spatial_layers > 1)) {
cm->log2_tile_cols = 0;
cm->log2_tile_rows = 0;
} else {
- cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns,
- min_log2_tile_cols, max_log2_tile_cols);
+ cm->log2_tile_cols =
+ clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
cm->log2_tile_rows = cpi->oxcf.tile_rows;
}
}
@@ -874,8 +853,7 @@
set_tile_limits(cpi);
if (is_two_pass_svc(cpi)) {
- if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -949,153 +927,120 @@
const int64_t maximum = oxcf->maximum_buffer_size_ms;
rc->starting_buffer_level = starting * bandwidth / 1000;
- rc->optimal_buffer_level = (optimal == 0) ? bandwidth / 8
- : optimal * bandwidth / 1000;
- rc->maximum_buffer_size = (maximum == 0) ? bandwidth / 8
- : maximum * bandwidth / 1000;
+ rc->optimal_buffer_level =
+ (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
+ rc->maximum_buffer_size =
+ (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
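+  // The *_ms inputs are in milliseconds, so ms * (bits/s) / 1000 yields a
+  // level in bits; an unset (0) value falls back to an eighth of a second
+  // of data (bandwidth / 8).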
}
#if CONFIG_VP9_HIGHBITDEPTH
#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
- cpi->fn_ptr[BT].sdf = SDF; \
- cpi->fn_ptr[BT].sdaf = SDAF; \
- cpi->fn_ptr[BT].vf = VF; \
- cpi->fn_ptr[BT].svf = SVF; \
- cpi->fn_ptr[BT].svaf = SVAF; \
- cpi->fn_ptr[BT].sdx3f = SDX3F; \
- cpi->fn_ptr[BT].sdx8f = SDX8F; \
- cpi->fn_ptr[BT].sdx4df = SDX4DF;
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].sdaf = SDAF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
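+// HIGHBD_BFP binds the per-block-size function pointers (SAD, averaging
+// SAD, variance, and sub-pixel variants) used by the motion search code.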
-#define MAKE_BFP_SAD_WRAPPER(fnname) \
-static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
-}
+#define MAKE_BFP_SAD_WRAPPER(fnname) \
+ static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
+ int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
+ } \
+ static unsigned int fnname##_bits10( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
+ } \
+ static unsigned int fnname##_bits12( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
+ }
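+// The _bits10/_bits12 wrappers shift the SAD down by 2 and 4 so that
+// higher-bit-depth sums stay on the same scale as the 8-bit metrics.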
-#define MAKE_BFP_SADAVG_WRAPPER(fnname) static unsigned int \
-fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
- second_pred) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
- second_pred) >> 4; \
-}
+#define MAKE_BFP_SADAVG_WRAPPER(fnname) \
+ static unsigned int fnname##_bits8( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
+ } \
+ static unsigned int fnname##_bits10( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+ 2; \
+ } \
+ static unsigned int fnname##_bits12( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+ 4; \
+ }
-#define MAKE_BFP_SAD3_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 3; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 3; i++) \
- sad_array[i] >>= 4; \
-}
+#define MAKE_BFP_SAD3_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 3; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 3; i++) sad_array[i] >>= 4; \
+ }
-#define MAKE_BFP_SAD8_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 8; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 8; i++) \
- sad_array[i] >>= 4; \
-}
-#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 4; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 4; i++) \
- sad_array[i] >>= 4; \
-}
+#define MAKE_BFP_SAD8_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 8; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 8; i++) sad_array[i] >>= 4; \
+ }
+#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 4; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 4; i++) sad_array[i] >>= 4; \
+ }
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
@@ -1153,409 +1098,267 @@
MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
-static void highbd_set_var_fns(VP9_COMP *const cpi) {
+static void highbd_set_var_fns(VP9_COMP *const cpi) {
VP9_COMMON *const cm = &cpi->common;
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
case VPX_BITS_8:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits8,
- vpx_highbd_sad32x16_avg_bits8,
- vpx_highbd_8_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
+ vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
vpx_highbd_8_sub_pixel_variance32x16,
- vpx_highbd_8_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits8)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits8,
- vpx_highbd_sad16x32_avg_bits8,
- vpx_highbd_8_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
+ vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
vpx_highbd_8_sub_pixel_variance16x32,
- vpx_highbd_8_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits8,
- vpx_highbd_sad64x32_avg_bits8,
- vpx_highbd_8_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
+ vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
vpx_highbd_8_sub_pixel_variance64x32,
- vpx_highbd_8_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits8)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits8,
- vpx_highbd_sad32x64_avg_bits8,
- vpx_highbd_8_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
+ vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
vpx_highbd_8_sub_pixel_variance32x64,
- vpx_highbd_8_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits8)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits8,
- vpx_highbd_sad32x32_avg_bits8,
- vpx_highbd_8_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
+ vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
vpx_highbd_8_sub_pixel_variance32x32,
vpx_highbd_8_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits8,
- vpx_highbd_sad32x32x8_bits8,
+ vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
vpx_highbd_sad32x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits8,
- vpx_highbd_sad64x64_avg_bits8,
- vpx_highbd_8_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
+ vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
vpx_highbd_8_sub_pixel_variance64x64,
vpx_highbd_8_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits8,
- vpx_highbd_sad64x64x8_bits8,
+ vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
vpx_highbd_sad64x64x4d_bits8)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits8,
- vpx_highbd_sad16x16_avg_bits8,
- vpx_highbd_8_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
+ vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
vpx_highbd_8_sub_pixel_variance16x16,
vpx_highbd_8_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits8,
- vpx_highbd_sad16x16x8_bits8,
+ vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
vpx_highbd_sad16x16x4d_bits8)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits8,
- vpx_highbd_sad16x8_avg_bits8,
- vpx_highbd_8_variance16x8,
- vpx_highbd_8_sub_pixel_variance16x8,
- vpx_highbd_8_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits8,
- vpx_highbd_sad16x8x8_bits8,
- vpx_highbd_sad16x8x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
+ vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
+ vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
+ vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits8,
- vpx_highbd_sad8x16_avg_bits8,
- vpx_highbd_8_variance8x16,
- vpx_highbd_8_sub_pixel_variance8x16,
- vpx_highbd_8_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits8,
- vpx_highbd_sad8x16x8_bits8,
- vpx_highbd_sad8x16x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
+ vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
+ vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
+ vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits8,
- vpx_highbd_sad8x8_avg_bits8,
- vpx_highbd_8_variance8x8,
- vpx_highbd_8_sub_pixel_variance8x8,
- vpx_highbd_8_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits8,
- vpx_highbd_sad8x8x8_bits8,
- vpx_highbd_sad8x8x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
+ vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
+ vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
+ vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits8,
- vpx_highbd_sad8x4_avg_bits8,
- vpx_highbd_8_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
+ vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
vpx_highbd_8_sub_pixel_variance8x4,
- vpx_highbd_8_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits8,
- vpx_highbd_sad8x4x4d_bits8)
+ vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits8,
- vpx_highbd_sad4x8_avg_bits8,
- vpx_highbd_8_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
+ vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
vpx_highbd_8_sub_pixel_variance4x8,
- vpx_highbd_8_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits8,
- vpx_highbd_sad4x8x4d_bits8)
+ vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits8,
- vpx_highbd_sad4x4_avg_bits8,
- vpx_highbd_8_variance4x4,
- vpx_highbd_8_sub_pixel_variance4x4,
- vpx_highbd_8_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits8,
- vpx_highbd_sad4x4x8_bits8,
- vpx_highbd_sad4x4x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
+ vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
+ vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
+ vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
break;
case VPX_BITS_10:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits10,
- vpx_highbd_sad32x16_avg_bits10,
- vpx_highbd_10_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
+ vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
vpx_highbd_10_sub_pixel_variance32x16,
- vpx_highbd_10_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits10,
- vpx_highbd_sad16x32_avg_bits10,
- vpx_highbd_10_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
+ vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
vpx_highbd_10_sub_pixel_variance16x32,
- vpx_highbd_10_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits10,
- vpx_highbd_sad64x32_avg_bits10,
- vpx_highbd_10_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
+ vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
vpx_highbd_10_sub_pixel_variance64x32,
- vpx_highbd_10_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits10)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits10,
- vpx_highbd_sad32x64_avg_bits10,
- vpx_highbd_10_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
+ vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
vpx_highbd_10_sub_pixel_variance32x64,
- vpx_highbd_10_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits10)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits10,
- vpx_highbd_sad32x32_avg_bits10,
- vpx_highbd_10_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
+ vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
vpx_highbd_10_sub_pixel_variance32x32,
vpx_highbd_10_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits10,
- vpx_highbd_sad32x32x8_bits10,
+ vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
vpx_highbd_sad32x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits10,
- vpx_highbd_sad64x64_avg_bits10,
- vpx_highbd_10_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
+ vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
vpx_highbd_10_sub_pixel_variance64x64,
vpx_highbd_10_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits10,
- vpx_highbd_sad64x64x8_bits10,
+ vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
vpx_highbd_sad64x64x4d_bits10)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits10,
- vpx_highbd_sad16x16_avg_bits10,
- vpx_highbd_10_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
+ vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
vpx_highbd_10_sub_pixel_variance16x16,
vpx_highbd_10_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits10,
- vpx_highbd_sad16x16x8_bits10,
+ vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
vpx_highbd_sad16x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits10,
- vpx_highbd_sad16x8_avg_bits10,
- vpx_highbd_10_variance16x8,
+ HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
+ vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
vpx_highbd_10_sub_pixel_variance16x8,
vpx_highbd_10_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits10,
- vpx_highbd_sad16x8x8_bits10,
+ vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
vpx_highbd_sad16x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits10,
- vpx_highbd_sad8x16_avg_bits10,
- vpx_highbd_10_variance8x16,
+ HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
+ vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
vpx_highbd_10_sub_pixel_variance8x16,
vpx_highbd_10_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits10,
- vpx_highbd_sad8x16x8_bits10,
+ vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
vpx_highbd_sad8x16x4d_bits10)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits10,
- vpx_highbd_sad8x8_avg_bits10,
- vpx_highbd_10_variance8x8,
- vpx_highbd_10_sub_pixel_variance8x8,
- vpx_highbd_10_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits10,
- vpx_highbd_sad8x8x8_bits10,
- vpx_highbd_sad8x8x4d_bits10)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
+ vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
+ vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
+ vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits10,
- vpx_highbd_sad8x4_avg_bits10,
- vpx_highbd_10_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
+ vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
vpx_highbd_10_sub_pixel_variance8x4,
- vpx_highbd_10_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits10,
- vpx_highbd_sad8x4x4d_bits10)
+ vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits10,
- vpx_highbd_sad4x8_avg_bits10,
- vpx_highbd_10_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
+ vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
vpx_highbd_10_sub_pixel_variance4x8,
- vpx_highbd_10_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits10,
- vpx_highbd_sad4x8x4d_bits10)
+ vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits10,
- vpx_highbd_sad4x4_avg_bits10,
- vpx_highbd_10_variance4x4,
- vpx_highbd_10_sub_pixel_variance4x4,
- vpx_highbd_10_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits10,
- vpx_highbd_sad4x4x8_bits10,
- vpx_highbd_sad4x4x4d_bits10)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
+ vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
+ vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
+ vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
break;
case VPX_BITS_12:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits12,
- vpx_highbd_sad32x16_avg_bits12,
- vpx_highbd_12_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
+ vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
vpx_highbd_12_sub_pixel_variance32x16,
- vpx_highbd_12_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits12,
- vpx_highbd_sad16x32_avg_bits12,
- vpx_highbd_12_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
+ vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
vpx_highbd_12_sub_pixel_variance16x32,
- vpx_highbd_12_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits12,
- vpx_highbd_sad64x32_avg_bits12,
- vpx_highbd_12_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
+ vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
vpx_highbd_12_sub_pixel_variance64x32,
- vpx_highbd_12_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits12)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits12,
- vpx_highbd_sad32x64_avg_bits12,
- vpx_highbd_12_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
+ vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
vpx_highbd_12_sub_pixel_variance32x64,
- vpx_highbd_12_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits12)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits12,
- vpx_highbd_sad32x32_avg_bits12,
- vpx_highbd_12_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
+ vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
vpx_highbd_12_sub_pixel_variance32x32,
vpx_highbd_12_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits12,
- vpx_highbd_sad32x32x8_bits12,
+ vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
vpx_highbd_sad32x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits12,
- vpx_highbd_sad64x64_avg_bits12,
- vpx_highbd_12_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
+ vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
vpx_highbd_12_sub_pixel_variance64x64,
vpx_highbd_12_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits12,
- vpx_highbd_sad64x64x8_bits12,
+ vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
vpx_highbd_sad64x64x4d_bits12)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits12,
- vpx_highbd_sad16x16_avg_bits12,
- vpx_highbd_12_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
+ vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
vpx_highbd_12_sub_pixel_variance16x16,
vpx_highbd_12_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits12,
- vpx_highbd_sad16x16x8_bits12,
+ vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
vpx_highbd_sad16x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits12,
- vpx_highbd_sad16x8_avg_bits12,
- vpx_highbd_12_variance16x8,
+ HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
+ vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
vpx_highbd_12_sub_pixel_variance16x8,
vpx_highbd_12_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits12,
- vpx_highbd_sad16x8x8_bits12,
+ vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
vpx_highbd_sad16x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits12,
- vpx_highbd_sad8x16_avg_bits12,
- vpx_highbd_12_variance8x16,
+ HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
+ vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
vpx_highbd_12_sub_pixel_variance8x16,
vpx_highbd_12_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits12,
- vpx_highbd_sad8x16x8_bits12,
+ vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
vpx_highbd_sad8x16x4d_bits12)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits12,
- vpx_highbd_sad8x8_avg_bits12,
- vpx_highbd_12_variance8x8,
- vpx_highbd_12_sub_pixel_variance8x8,
- vpx_highbd_12_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits12,
- vpx_highbd_sad8x8x8_bits12,
- vpx_highbd_sad8x8x4d_bits12)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
+ vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
+ vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
+ vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits12,
- vpx_highbd_sad8x4_avg_bits12,
- vpx_highbd_12_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
+ vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
vpx_highbd_12_sub_pixel_variance8x4,
- vpx_highbd_12_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits12,
- vpx_highbd_sad8x4x4d_bits12)
+ vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits12,
- vpx_highbd_sad4x8_avg_bits12,
- vpx_highbd_12_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
+ vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
vpx_highbd_12_sub_pixel_variance4x8,
- vpx_highbd_12_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits12,
- vpx_highbd_sad4x8x4d_bits12)
+ vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits12,
- vpx_highbd_sad4x4_avg_bits12,
- vpx_highbd_12_variance4x4,
- vpx_highbd_12_sub_pixel_variance4x4,
- vpx_highbd_12_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits12,
- vpx_highbd_sad4x4x8_bits12,
- vpx_highbd_sad4x4x4d_bits12)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
+ vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
+ vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
+ vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
}
}
}
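// A note on the sad*_bits8/_bits10/_bits12 wrappers wired up above: they
// are generated by wrapper macros that bring raw SAD values to a common
// precision across bit depths. A sketch of the assumed pattern, with the
// shifts used for 8-, 10- and 12-bit input:
//
//   static unsigned int vpx_highbd_sad8x8_bits10(
//       const uint8_t *src, int src_stride, const uint8_t *ref,
//       int ref_stride) {
//     return vpx_highbd_sad8x8(src, src_stride, ref, ref_stride) >> 2;
//   }
//
// i.e. no shift for 8-bit, >> 2 for 10-bit and >> 4 for 12-bit, so that
// distortion magnitudes stay comparable whatever the coded bit depth.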
@@ -1570,8 +1373,7 @@
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
// Create a map used for cyclic background refresh.
- if (cpi->cyclic_refresh)
- vp9_cyclic_refresh_free(cpi->cyclic_refresh);
+ if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh);
CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
vp9_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
@@ -1593,8 +1395,7 @@
int last_w = cpi->oxcf.width;
int last_h = cpi->oxcf.height;
- if (cm->profile != oxcf->profile)
- cm->profile = oxcf->profile;
+ if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
cm->color_space = oxcf->color_space;
cm->color_range = oxcf->color_range;
@@ -1674,8 +1475,8 @@
cpi->initial_width = cpi->initial_height = 0;
cpi->external_resize = 0;
} else if (cm->mi_alloc_size == new_mi_size &&
- (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) {
- vp9_alloc_loop_filter(cm);
+ (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) {
+ vp9_alloc_loop_filter(cm);
}
}
@@ -1683,13 +1484,12 @@
if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) {
memset(cpi->consec_zero_mv, 0,
- cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
+ cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv));
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
vp9_cyclic_refresh_reset_resize(cpi);
}
- if ((cpi->svc.number_temporal_layers > 1 &&
- cpi->oxcf.rc_mode == VPX_CBR) ||
+ if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) ||
((cpi->svc.number_temporal_layers > 1 ||
cpi->svc.number_spatial_layers > 1) &&
cpi->oxcf.pass != 1)) {
@@ -1719,7 +1519,7 @@
#ifndef M_LOG2_E
#define M_LOG2_E 0.693147180559945309417
#endif
-#define log2f(x) (log (x) / (float) M_LOG2_E)
+#define log2f(x) (log(x) / (float)M_LOG2_E)
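// (The fallback above is correct because the constant defined as M_LOG2_E
// here is ln(2) ~= 0.6931, not log2(e) as its name suggests, so
// log(x) / M_LOG2_E evaluates log2(x).)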
/***********************************************************************
* Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts' *
@@ -1790,8 +1590,7 @@
VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
- if (!cm)
- return NULL;
+ if (!cm) return NULL;
vp9_zero(*cpi);
@@ -1806,11 +1605,10 @@
cm->free_mi = vp9_enc_free_mi;
cm->setup_mi = vp9_enc_setup_mi;
- CHECK_MEM_ERROR(cm, cm->fc,
- (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
- CHECK_MEM_ERROR(cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
- sizeof(*cm->frame_contexts)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(
+ cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
cpi->use_svc = 0;
cpi->resize_state = 0;
@@ -1829,9 +1627,9 @@
realloc_segmentation_maps(cpi);
- CHECK_MEM_ERROR(cm, cpi->consec_zero_mv,
- vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cpi->consec_zero_mv)));
+ CHECK_MEM_ERROR(
+ cm, cpi->consec_zero_mv,
+ vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(*cpi->consec_zero_mv)));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
@@ -1850,11 +1648,11 @@
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
- for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
- sizeof(cpi->mbgraph_stats[0])); i++) {
- CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats,
- vpx_calloc(cm->MBs *
- sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+ for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
+ i++) {
+ CHECK_MEM_ERROR(
+ cm, cpi->mbgraph_stats[i].mb_stats,
+ vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
}
#if CONFIG_FP_MB_STATS
@@ -1961,10 +1759,10 @@
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
- if (cpi->svc.number_spatial_layers > 1
- || cpi->svc.number_temporal_layers > 1) {
+ if (cpi->svc.number_spatial_layers > 1 ||
+ cpi->svc.number_temporal_layers > 1) {
FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf;
- FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = {0};
+ FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = { 0 };
int i;
for (i = 0; i < oxcf->ss_number_layers; ++i) {
@@ -1982,8 +1780,8 @@
vpx_malloc(lc->rc_twopass_stats_in.sz));
lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
lc->twopass.stats_in = lc->twopass.stats_in_start;
- lc->twopass.stats_in_end = lc->twopass.stats_in_start
- + packets_in_layer - 1;
+ lc->twopass.stats_in_end =
+ lc->twopass.stats_in_start + packets_in_layer - 1;
stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
}
}
@@ -1990,8 +1788,8 @@
for (i = 0; i < packets; ++i) {
const int layer_id = (int)stats[i].spatial_layer_id;
- if (layer_id >= 0 && layer_id < oxcf->ss_number_layers
- && stats_copy[layer_id] != NULL) {
+ if (layer_id >= 0 && layer_id < oxcf->ss_number_layers &&
+ stats_copy[layer_id] != NULL) {
*stats_copy[layer_id] = stats[i];
++stats_copy[layer_id];
}
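      // (Net effect of the two loops above: the interleaved first-pass
      // packet stream is demultiplexed by spatial_layer_id into each layer
      // context's own stats buffer, so every layer's second pass reads
      // only its own packets; a summary of the control flow shown.)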
@@ -2024,79 +1822,71 @@
vp9_set_speed_features_framesize_dependent(cpi);
// Allocate memory to store variances for a frame.
- CHECK_MEM_ERROR(cm, cpi->source_diff_var,
- vpx_calloc(cm->MBs, sizeof(diff)));
+ CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
cpi->source_var_thresh = 0;
cpi->frames_till_next_var_check = 0;
-#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF)\
- cpi->fn_ptr[BT].sdf = SDF; \
- cpi->fn_ptr[BT].sdaf = SDAF; \
- cpi->fn_ptr[BT].vf = VF; \
- cpi->fn_ptr[BT].svf = SVF; \
- cpi->fn_ptr[BT].svaf = SVAF; \
- cpi->fn_ptr[BT].sdx3f = SDX3F; \
- cpi->fn_ptr[BT].sdx8f = SDX8F; \
- cpi->fn_ptr[BT].sdx4df = SDX4DF;
+#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].sdaf = SDAF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
- BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg,
- vpx_variance32x16, vpx_sub_pixel_variance32x16,
- vpx_sub_pixel_avg_variance32x16, NULL, NULL, vpx_sad32x16x4d)
+ BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
+ vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
+ vpx_sad32x16x4d)
- BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg,
- vpx_variance16x32, vpx_sub_pixel_variance16x32,
- vpx_sub_pixel_avg_variance16x32, NULL, NULL, vpx_sad16x32x4d)
+ BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
+ vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
+ vpx_sad16x32x4d)
- BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg,
- vpx_variance64x32, vpx_sub_pixel_variance64x32,
- vpx_sub_pixel_avg_variance64x32, NULL, NULL, vpx_sad64x32x4d)
+ BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
+ vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
+ vpx_sad64x32x4d)
- BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg,
- vpx_variance32x64, vpx_sub_pixel_variance32x64,
- vpx_sub_pixel_avg_variance32x64, NULL, NULL, vpx_sad32x64x4d)
+ BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
+ vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
+ vpx_sad32x64x4d)
- BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg,
- vpx_variance32x32, vpx_sub_pixel_variance32x32,
- vpx_sub_pixel_avg_variance32x32, vpx_sad32x32x3, vpx_sad32x32x8,
- vpx_sad32x32x4d)
+ BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
+ vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
+ vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
- BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg,
- vpx_variance64x64, vpx_sub_pixel_variance64x64,
- vpx_sub_pixel_avg_variance64x64, vpx_sad64x64x3, vpx_sad64x64x8,
- vpx_sad64x64x4d)
+ BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
+ vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
+ vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
- BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg,
- vpx_variance16x16, vpx_sub_pixel_variance16x16,
- vpx_sub_pixel_avg_variance16x16, vpx_sad16x16x3, vpx_sad16x16x8,
- vpx_sad16x16x4d)
+ BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
+ vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
+ vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
- BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg,
- vpx_variance16x8, vpx_sub_pixel_variance16x8,
- vpx_sub_pixel_avg_variance16x8,
- vpx_sad16x8x3, vpx_sad16x8x8, vpx_sad16x8x4d)
+ BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
+ vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
+ vpx_sad16x8x8, vpx_sad16x8x4d)
- BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg,
- vpx_variance8x16, vpx_sub_pixel_variance8x16,
- vpx_sub_pixel_avg_variance8x16,
- vpx_sad8x16x3, vpx_sad8x16x8, vpx_sad8x16x4d)
+ BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
+ vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
+ vpx_sad8x16x8, vpx_sad8x16x4d)
- BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg,
- vpx_variance8x8, vpx_sub_pixel_variance8x8,
- vpx_sub_pixel_avg_variance8x8,
- vpx_sad8x8x3, vpx_sad8x8x8, vpx_sad8x8x4d)
+ BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
+ vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
+ vpx_sad8x8x8, vpx_sad8x8x4d)
- BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg,
- vpx_variance8x4, vpx_sub_pixel_variance8x4,
- vpx_sub_pixel_avg_variance8x4, NULL, vpx_sad8x4x8, vpx_sad8x4x4d)
+ BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
+ vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
+ vpx_sad8x4x8, vpx_sad8x4x4d)
- BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg,
- vpx_variance4x8, vpx_sub_pixel_variance4x8,
- vpx_sub_pixel_avg_variance4x8, NULL, vpx_sad4x8x8, vpx_sad4x8x4d)
+ BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
+ vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
+ vpx_sad4x8x8, vpx_sad4x8x4d)
- BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg,
- vpx_variance4x4, vpx_sub_pixel_variance4x4,
- vpx_sub_pixel_avg_variance4x4,
- vpx_sad4x4x3, vpx_sad4x4x8, vpx_sad4x4x4d)
+ BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
+ vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
+ vpx_sad4x4x8, vpx_sad4x4x4d)
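  // Each BFP invocation above expands to eight assignments filling one
  // entry of the per-block-size function-pointer table, e.g. for BLOCK_4X4:
  //
  //   cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
  //   cpi->fn_ptr[BLOCK_4X4].sdaf = vpx_sad4x4_avg;
  //   cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
  //   ...and so on through sdx4df.
  //
  // Motion search then reaches the right SAD/variance kernel for any block
  // size through cpi->fn_ptr[bsize]; a NULL slot (e.g. sdx3f for BLOCK_8X4)
  // simply means no x3 multi-SAD kernel exists for that size.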
#if CONFIG_VP9_HIGHBITDEPTH
highbd_set_var_fns(cpi);
@@ -2117,8 +1907,7 @@
}
#if CONFIG_INTERNAL_STATS
-#define SNPRINT(H, T) \
- snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
+#define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
#define SNPRINT2(H, T, V) \
snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
@@ -2129,8 +1918,7 @@
unsigned int i;
int t;
- if (!cpi)
- return;
+ if (!cpi) return;
cm = &cpi->common;
if (cm->current_video_frame > 0) {
@@ -2138,30 +1926,29 @@
vpx_clear_system_state();
if (cpi->oxcf.pass != 1) {
- char headings[512] = {0};
- char results[512] = {0};
+ char headings[512] = { 0 };
+ char results[512] = { 0 };
FILE *f = fopen("opsnr.stt", "a");
- double time_encoded = (cpi->last_end_time_stamp_seen
- - cpi->first_time_stamp_ever) / 10000000.000;
- double total_encode_time = (cpi->time_receive_data +
- cpi->time_compress_data) / 1000.000;
+ double time_encoded =
+ (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
+ 10000000.000;
+ double total_encode_time =
+ (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
const double dr =
- (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
+ (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
if (cpi->b_calculate_psnr) {
- const double total_psnr =
- vpx_sse_to_psnr((double)cpi->total_samples, peak,
- (double)cpi->total_sq_error);
- const double totalp_psnr =
- vpx_sse_to_psnr((double)cpi->totalp_samples, peak,
- (double)cpi->totalp_sq_error);
- const double total_ssim = 100 * pow(cpi->summed_quality /
- cpi->summed_weights, 8.0);
- const double totalp_ssim = 100 * pow(cpi->summedp_quality /
- cpi->summedp_weights, 8.0);
+ const double total_psnr = vpx_sse_to_psnr(
+ (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
+ const double totalp_psnr = vpx_sse_to_psnr(
+ (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error);
+ const double total_ssim =
+ 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+ const double totalp_ssim =
+ 100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0);
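      // (For reference: vpx_sse_to_psnr() used above implements the usual
      // definition, 10 * log10(samples * peak * peak / sse), clamped to a
      // library-defined maximum; a sketch of the helper's behavior as
      // assumed here.)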
snprintf(headings, sizeof(headings),
"Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
@@ -2172,12 +1959,10 @@
"%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
"%7.3f\t%7.3f\t%7.3f\t%7.3f",
dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
- cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr,
- total_ssim, totalp_ssim,
- cpi->fastssim.stat[ALL] / cpi->count,
- cpi->psnrhvs.stat[ALL] / cpi->count,
- cpi->psnr.worst, cpi->worst_ssim, cpi->fastssim.worst,
- cpi->psnrhvs.worst);
+ cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim,
+ totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count,
+ cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
+ cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst);
if (cpi->b_calculate_blockiness) {
SNPRINT(headings, "\t Block\tWstBlck");
@@ -2195,8 +1980,8 @@
SNPRINT2(results, "\t%7.3f", cpi->worst_consistency);
}
fprintf(f, "%s\t Time Rc-Err Abs Err\n", headings);
- fprintf(f, "%s\t%8.0f %7.2f %7.2f\n", results,
- total_encode_time, rate_err, fabs(rate_err));
+ fprintf(f, "%s\t%8.0f %7.2f %7.2f\n", results, total_encode_time,
+ rate_err, fabs(rate_err));
}
fclose(f);
@@ -2237,13 +2022,12 @@
vpx_free(cpi->tile_thr_data);
vpx_free(cpi->workers);
- if (cpi->num_workers > 1)
- vp9_loop_filter_dealloc(&cpi->lf_row_sync);
+ if (cpi->num_workers > 1) vp9_loop_filter_dealloc(&cpi->lf_row_sync);
dealloc_compressor_data(cpi);
- for (i = 0; i < sizeof(cpi->mbgraph_stats) /
- sizeof(cpi->mbgraph_stats[0]); ++i) {
+ for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
+ ++i) {
vpx_free(cpi->mbgraph_stats[i].mb_stats);
}
@@ -2306,14 +2090,14 @@
pkt.kind = VPX_CODEC_PSNR_PKT;
if (cpi->use_svc)
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers].psnr_pkt = pkt.data.psnr;
+ cpi->svc.number_temporal_layers]
+ .psnr_pkt = pkt.data.psnr;
else
vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) {
- if (ref_frame_flags > 7)
- return -1;
+ if (ref_frame_flags > 7) return -1;
cpi->ref_frame_flags = ref_frame_flags;
return 0;
@@ -2326,8 +2110,8 @@
cpi->ext_refresh_frame_flags_pending = 1;
}
-static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(VP9_COMP *cpi,
- VP9_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(
+ VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag) {
MV_REFERENCE_FRAME ref_frame = NONE;
if (ref_frame_flag == VP9_LAST_FLAG)
ref_frame = LAST_FRAME;
@@ -2361,7 +2145,7 @@
}
}
-int vp9_update_entropy(VP9_COMP * cpi, int update) {
+int vp9_update_entropy(VP9_COMP *cpi, int update) {
cpi->ext_refresh_frame_context = update;
cpi->ext_refresh_frame_context_pending = 1;
return 0;
@@ -2410,7 +2194,7 @@
uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
do {
- fwrite(src16, s->y_width, 2, yuv_rec_file);
+ fwrite(src16, s->y_width, 2, yuv_rec_file);
src16 += s->y_stride;
} while (--h);
@@ -2418,7 +2202,7 @@
h = s->uv_height;
do {
- fwrite(src16, s->uv_width, 2, yuv_rec_file);
+ fwrite(src16, s->uv_width, 2, yuv_rec_file);
src16 += s->uv_stride;
} while (--h);
@@ -2436,7 +2220,7 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
do {
- fwrite(src, s->y_width, 1, yuv_rec_file);
+ fwrite(src, s->y_width, 1, yuv_rec_file);
src += s->y_stride;
} while (--h);
@@ -2444,7 +2228,7 @@
h = s->uv_height;
do {
- fwrite(src, s->uv_width, 1, yuv_rec_file);
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
src += s->uv_stride;
} while (--h);
@@ -2470,18 +2254,19 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
// TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
int i;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- const int src_widths[3] = {src->y_crop_width, src->uv_crop_width,
- src->uv_crop_width };
- const int src_heights[3] = {src->y_crop_height, src->uv_crop_height,
- src->uv_crop_height};
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
- const int dst_widths[3] = {dst->y_crop_width, dst->uv_crop_width,
- dst->uv_crop_width};
- const int dst_heights[3] = {dst->y_crop_height, dst->uv_crop_height,
- dst->uv_crop_height};
+ const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+ src->v_buffer };
+ const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+ const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
+ src->uv_crop_width };
+ const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
+ src->uv_crop_height };
+ uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+ const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
+ const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
+ dst->uv_crop_width };
+ const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
+ dst->uv_crop_height };
for (i = 0; i < MAX_MB_PLANE; ++i) {
#if CONFIG_VP9_HIGHBITDEPTH
@@ -2508,10 +2293,11 @@
const int src_h = src->y_crop_height;
const int dst_w = dst->y_crop_width;
const int dst_h = dst->y_crop_height;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+ const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+ src->v_buffer };
+ const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+ uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+ const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP];
int x, y, i;
@@ -2523,8 +2309,9 @@
const int y_q4 = y * (16 / factor) * src_h / dst_h;
for (x = 0; x < dst_w; x += 16) {
const int x_q4 = x * (16 / factor) * src_w / dst_w;
- const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h *
- src_stride + (x / factor) * src_w / dst_w;
+ const uint8_t *src_ptr = srcs[i] +
+ (y / factor) * src_h / dst_h * src_stride +
+ (x / factor) * src_w / dst_w;
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -2535,8 +2322,8 @@
} else {
vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
- kernel[y_q4 & 0xf], 16 * src_h / dst_h,
- 16 / factor, 16 / factor);
+ kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+ 16 / factor);
}
}
}
@@ -2551,10 +2338,11 @@
const int src_h = src->y_crop_height;
const int dst_w = dst->y_crop_width;
const int dst_h = dst->y_crop_height;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+ const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+ src->v_buffer };
+ const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+ uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+ const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP];
int x, y, i;
@@ -2566,14 +2354,15 @@
const int y_q4 = y * (16 / factor) * src_h / dst_h;
for (x = 0; x < dst_w; x += 16) {
const int x_q4 = x * (16 / factor) * src_w / dst_w;
- const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h *
- src_stride + (x / factor) * src_w / dst_w;
+ const uint8_t *src_ptr = srcs[i] +
+ (y / factor) * src_h / dst_h * src_stride +
+ (x / factor) * src_w / dst_w;
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
- kernel[y_q4 & 0xf], 16 * src_h / dst_h,
- 16 / factor, 16 / factor);
+ kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+ 16 / factor);
}
}
}
@@ -2590,8 +2379,9 @@
if (rc->frame_size_selector == UNSCALED &&
q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
- const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1]
- * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+ const int max_size_thresh =
+ (int)(rate_thresh_mult[SCALE_STEP1] *
+ VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
}
return scale;
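  // (In words: once q has reached the per-rf-level maximum, a frame whose
  // projected size still exceeds rate_thresh_mult[SCALE_STEP1] times the
  // larger of its own target and the average frame budget votes to scale
  // down, and recode_loop_test() below turns that vote into
  // resize_pending = 1.)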
@@ -2606,9 +2396,8 @@
// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static int recode_loop_test(VP9_COMP *cpi,
- int high_limit, int low_limit,
- int q, int maxq, int minq) {
+static int recode_loop_test(VP9_COMP *cpi, int high_limit, int low_limit, int q,
+ int maxq, int minq) {
const RATE_CONTROL *const rc = &cpi->rc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
@@ -2617,14 +2406,12 @@
if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
big_rate_miss(cpi, high_limit, low_limit) ||
(cpi->sf.recode_loop == ALLOW_RECODE) ||
- (frame_is_kfgfarf &&
- (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
- if (frame_is_kfgfarf &&
- (oxcf->resize_mode == RESIZE_DYNAMIC) &&
+ (frame_is_kfgfarf && (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
+ if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
scale_down(cpi, q)) {
- // Code this group at a lower resolution.
- cpi->resize_pending = 1;
- return 1;
+ // Code this group at a lower resolution.
+ cpi->resize_pending = 1;
+ return 1;
}
// TODO(agrange) high_limit could be greater than the scale-down threshold.
@@ -2644,16 +2431,16 @@
}
void vp9_update_reference_frames(VP9_COMP *cpi) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
// At this point the new frame has been encoded.
// If any buffer copy / swapping is signaled it should be done here.
if (cm->frame_type == KEY_FRAME) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+ cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+ cm->new_fb_idx);
} else if (vp9_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term in function
@@ -2665,8 +2452,8 @@
// slot and, if we're updating the GF, the current frame becomes the new GF.
int tmp;
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+ cm->new_fb_idx);
tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
@@ -2684,8 +2471,7 @@
arf_idx = gf_group->arf_update_idx[gf_group->index];
}
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
cpi->interp_filter_selected[0],
sizeof(cpi->interp_filter_selected[0]));
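    // (ref_cnt_fb() retargets a reference slot at the newly coded frame:
    // it decrements the previously mapped buffer's reference count and
    // increments the new buffer's; a sketch of the helper's bookkeeping as
    // inferred from its uses here.)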
@@ -2692,8 +2478,8 @@
}
if (cpi->refresh_golden_frame) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+ cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
cpi->interp_filter_selected[0],
@@ -2706,8 +2492,8 @@
}
if (cpi->refresh_last_frame) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
+ cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
memcpy(cpi->interp_filter_selected[LAST_FRAME],
cpi->interp_filter_selected[0],
@@ -2716,13 +2502,10 @@
#if CONFIG_VP9_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0 &&
cpi->denoiser.denoising_level > kDenLowLow) {
- vp9_denoiser_update_frame_info(&cpi->denoiser,
- *cpi->Source,
- cpi->common.frame_type,
- cpi->refresh_alt_ref_frame,
- cpi->refresh_golden_frame,
- cpi->refresh_last_frame,
- cpi->resize_pending);
+ vp9_denoiser_update_frame_info(
+ &cpi->denoiser, *cpi->Source, cpi->common.frame_type,
+ cpi->refresh_alt_ref_frame, cpi->refresh_golden_frame,
+ cpi->refresh_last_frame, cpi->resize_pending);
}
#endif
if (is_one_pass_cbr_svc(cpi)) {
@@ -2748,8 +2531,8 @@
struct loopfilter *lf = &cm->lf;
if (xd->lossless) {
- lf->filter_level = 0;
- lf->last_filt_level = 0;
+ lf->filter_level = 0;
+ lf->last_filt_level = 0;
} else {
struct vpx_usec_timer timer;
@@ -2777,9 +2560,8 @@
if (cpi->num_workers > 1)
vp9_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
- lf->filter_level, 0, 0,
- cpi->workers, cpi->num_workers,
- &cpi->lf_row_sync);
+ lf->filter_level, 0, 0, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
}
@@ -2787,11 +2569,9 @@
vpx_extend_frame_inner_borders(cm->frame_to_show);
}
-static INLINE void alloc_frame_mvs(VP9_COMMON *const cm,
- int buffer_idx) {
+static INLINE void alloc_frame_mvs(VP9_COMMON *const cm, int buffer_idx) {
RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
- if (new_fb_ptr->mvs == NULL ||
- new_fb_ptr->mi_rows < cm->mi_rows ||
+ if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
new_fb_ptr->mi_cols < cm->mi_cols) {
vpx_free(new_fb_ptr->mvs);
CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
@@ -2805,14 +2585,15 @@
void vp9_scale_references(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
- const VP9_REFFRAME ref_mask[3] = {VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG};
+ const VP9_REFFRAME ref_mask[3] = { VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
// Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1).
if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
BufferPool *const pool = cm->buffer_pool;
- const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi,
- ref_frame);
+ const YV12_BUFFER_CONFIG *const ref =
+ get_ref_frame_buffer(cpi, ref_frame);
if (ref == NULL) {
cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
@@ -2828,11 +2609,9 @@
new_fb = get_free_fb(cm);
force_scaling = 1;
}
- if (new_fb == INVALID_IDX)
- return;
+ if (new_fb == INVALID_IDX) return;
new_fb_ptr = &pool->frame_bufs[new_fb];
- if (force_scaling ||
- new_fb_ptr->buf.y_crop_width != cm->width ||
+ if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
@@ -2854,11 +2633,9 @@
new_fb = get_free_fb(cm);
force_scaling = 1;
}
- if (new_fb == INVALID_IDX)
- return;
+ if (new_fb == INVALID_IDX) return;
new_fb_ptr = &pool->frame_bufs[new_fb];
- if (force_scaling ||
- new_fb_ptr->buf.y_crop_width != cm->width ||
+ if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
@@ -2909,22 +2686,21 @@
refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
const int idx = cpi->scaled_ref_idx[i - 1];
- RefCntBuffer *const buf = idx != INVALID_IDX ?
- &cm->buffer_pool->frame_bufs[idx] : NULL;
+ RefCntBuffer *const buf =
+ idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
if (buf != NULL &&
- (refresh[i - 1] ||
- (buf->buf.y_crop_width == ref->y_crop_width &&
- buf->buf.y_crop_height == ref->y_crop_height))) {
+ (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
+ buf->buf.y_crop_height == ref->y_crop_height))) {
--buf->ref_count;
- cpi->scaled_ref_idx[i -1] = INVALID_IDX;
+ cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
}
}
} else {
for (i = 0; i < MAX_REF_FRAMES; ++i) {
const int idx = cpi->scaled_ref_idx[i];
- RefCntBuffer *const buf = idx != INVALID_IDX ?
- &cm->buffer_pool->frame_bufs[idx] : NULL;
+ RefCntBuffer *const buf =
+ idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
if (buf != NULL) {
--buf->ref_count;
cpi->scaled_ref_idx[i] = INVALID_IDX;
@@ -3090,8 +2866,8 @@
cpi->common.interp_filter = cpi->sf.default_interp_filter;
}
-static void set_size_dependent_vars(VP9_COMP *cpi, int *q,
- int *bottom_index, int *top_index) {
+static void set_size_dependent_vars(VP9_COMP *cpi, int *q, int *bottom_index,
+ int *top_index) {
VP9_COMMON *const cm = &cpi->common;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
@@ -3116,22 +2892,12 @@
if (oxcf->noise_sensitivity > 0) {
int l = 0;
switch (oxcf->noise_sensitivity) {
- case 1:
- l = 20;
- break;
- case 2:
- l = 40;
- break;
- case 3:
- l = 60;
- break;
+ case 1: l = 20; break;
+ case 2: l = 40; break;
+ case 3: l = 60; break;
case 4:
- case 5:
- l = 100;
- break;
- case 6:
- l = 150;
- break;
+ case 5: l = 100; break;
+ case 6: l = 150; break;
}
if (!cpi->common.postproc_state.limits) {
cpi->common.postproc_state.limits = vpx_calloc(
@@ -3175,12 +2941,11 @@
VP9EncoderConfig *const oxcf = &cpi->oxcf;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- if (oxcf->pass == 2 &&
- oxcf->rc_mode == VPX_VBR &&
+ if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
- (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
- calculate_coded_size(
- cpi, &oxcf->scaled_frame_width, &oxcf->scaled_frame_height);
+ (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
+ calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+ &oxcf->scaled_frame_height);
// There has been a change in frame size.
vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
@@ -3187,18 +2952,14 @@
oxcf->scaled_frame_height);
}
- if (oxcf->pass == 0 &&
- oxcf->rc_mode == VPX_CBR &&
- !cpi->use_svc &&
- oxcf->resize_mode == RESIZE_DYNAMIC &&
- cpi->resize_pending != 0) {
+ if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR && !cpi->use_svc &&
+ oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending != 0) {
oxcf->scaled_frame_width =
(oxcf->width * cpi->resize_scale_num) / cpi->resize_scale_den;
oxcf->scaled_frame_height =
- (oxcf->height * cpi->resize_scale_num) /cpi->resize_scale_den;
+ (oxcf->height * cpi->resize_scale_num) / cpi->resize_scale_den;
// There has been a change in frame size.
- vp9_set_size_literal(cpi,
- oxcf->scaled_frame_width,
+ vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
oxcf->scaled_frame_height);
// TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
@@ -3218,9 +2979,8 @@
}
if ((oxcf->pass == 2) &&
- (!cpi->use_svc ||
- (is_two_pass_svc(cpi) &&
- cpi->svc.encode_empty_frame_state != ENCODING))) {
+ (!cpi->use_svc || (is_two_pass_svc(cpi) &&
+ cpi->svc.encode_empty_frame_state != ENCODING))) {
vp9_set_target_rate(cpi);
}
@@ -3250,18 +3010,15 @@
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
ref_buf->buf = buf;
#if CONFIG_VP9_HIGHBITDEPTH
- vp9_setup_scale_factors_for_frame(&ref_buf->sf,
- buf->y_crop_width, buf->y_crop_height,
- cm->width, cm->height,
- (buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
- 1 : 0);
+ vp9_setup_scale_factors_for_frame(
+ &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
+ cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
#else
- vp9_setup_scale_factors_for_frame(&ref_buf->sf,
- buf->y_crop_width, buf->y_crop_height,
- cm->width, cm->height);
+ vp9_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+ buf->y_crop_height, cm->width,
+ cm->height);
#endif // CONFIG_VP9_HIGHBITDEPTH
- if (vp9_is_scaled(&ref_buf->sf))
- vpx_extend_frame_borders(buf);
+ if (vp9_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
} else {
ref_buf->buf = NULL;
}
@@ -3270,8 +3027,7 @@
set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
}
-static void encode_without_recode_loop(VP9_COMP *cpi,
- size_t *size,
+static void encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
uint8_t *dest) {
VP9_COMMON *const cm = &cpi->common;
int q = 0, bottom_index = 0, top_index = 0; // Dummy variables.
@@ -3288,10 +3044,8 @@
// For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take
// advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2
// result will be saved in scaled_temp and might be used later.
- cpi->Source = vp9_svc_twostage_scale(cm,
- cpi->un_scaled_source,
- &cpi->scaled_source,
- &cpi->svc.scaled_temp);
+ cpi->Source = vp9_svc_twostage_scale(
+ cm, cpi->un_scaled_source, &cpi->scaled_source, &cpi->svc.scaled_temp);
cpi->svc.scaled_one_half = 1;
} else if (is_one_pass_cbr_svc(cpi) &&
cpi->un_scaled_source->y_width == cm->width << 1 &&
@@ -3302,10 +3056,8 @@
cpi->Source = &cpi->svc.scaled_temp;
cpi->svc.scaled_one_half = 0;
} else {
- cpi->Source = vp9_scale_if_required(cm,
- cpi->un_scaled_source,
- &cpi->scaled_source,
- (cpi->oxcf.pass == 0));
+ cpi->Source = vp9_scale_if_required(
+ cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0));
}
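  // (Concretely, for the two-stage case above: a 1/4x1/4 downscale such as
  // 1280x720 -> 320x180 runs as 1280x720 -> 640x360 -> 320x180, and the
  // intermediate 640x360 frame kept in svc.scaled_temp is what the middle
  // branch reuses when only the 1/2x1/2 size is needed; the resolutions
  // are illustrative, not taken from the source.)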
// Unfiltered raw source used in metrics calculation if the source
// has been filtered.
@@ -3314,8 +3066,7 @@
if (is_spatial_denoise_enabled(cpi)) {
cpi->raw_source_frame =
vp9_scale_if_required(cm, &cpi->raw_unscaled_source,
- &cpi->raw_scaled_source,
- (cpi->oxcf.pass == 0));
+ &cpi->raw_scaled_source, (cpi->oxcf.pass == 0));
} else {
cpi->raw_source_frame = cpi->Source;
}
@@ -3330,14 +3081,13 @@
// estimation is enabled.
if (cpi->unscaled_last_source != NULL &&
(cpi->oxcf.content == VP9E_CONTENT_SCREEN ||
- (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_VBR &&
- cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5) ||
- cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION ||
- cpi->noise_estimate.enabled))
- cpi->Last_Source = vp9_scale_if_required(cm,
- cpi->unscaled_last_source,
- &cpi->scaled_last_source,
- (cpi->oxcf.pass == 0));
+ (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_VBR &&
+ cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5) ||
+ cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION ||
+ cpi->noise_estimate.enabled))
+ cpi->Last_Source =
+ vp9_scale_if_required(cm, cpi->unscaled_last_source,
+ &cpi->scaled_last_source, (cpi->oxcf.pass == 0));
if (cm->frame_type == KEY_FRAME || cpi->resize_pending != 0) {
memset(cpi->consec_zero_mv, 0,
@@ -3346,10 +3096,8 @@
vp9_update_noise_estimate(cpi);
- if (cpi->oxcf.pass == 0 &&
- cpi->oxcf.mode == REALTIME &&
- cpi->oxcf.speed >= 5 &&
- cpi->resize_state == 0 &&
+ if (cpi->oxcf.pass == 0 && cpi->oxcf.mode == REALTIME &&
+ cpi->oxcf.speed >= 5 && cpi->resize_state == 0 &&
(cpi->oxcf.content == VP9E_CONTENT_SCREEN ||
cpi->oxcf.rc_mode == VPX_VBR))
vp9_avg_source_sad(cpi);
@@ -3364,8 +3112,7 @@
set_size_independent_vars(cpi);
set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
- if (cpi->oxcf.speed >= 5 &&
- cpi->oxcf.pass == 0 &&
+ if (cpi->oxcf.speed >= 5 && cpi->oxcf.pass == 0 &&
cpi->oxcf.rc_mode == VPX_CBR &&
cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
@@ -3395,10 +3142,8 @@
// Check if we should drop this frame because of high overshoot.
// Only for frames where high temporal-source SAD is detected.
- if (cpi->oxcf.pass == 0 &&
- cpi->oxcf.rc_mode == VPX_CBR &&
- cpi->resize_state == 0 &&
- cm->frame_type != KEY_FRAME &&
+ if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
+ cpi->resize_state == 0 && cm->frame_type != KEY_FRAME &&
cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
cpi->rc.high_source_sad == 1) {
int frame_size = 0;
@@ -3427,10 +3172,8 @@
// Update some stats from cyclic refresh, and check if we should not update
// golden reference, for non-SVC 1 pass CBR.
- if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cm->frame_type != KEY_FRAME &&
- !cpi->use_svc &&
- cpi->ext_refresh_frame_flags_pending == 0 &&
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
+ !cpi->use_svc && cpi->ext_refresh_frame_flags_pending == 0 &&
(cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
vp9_cyclic_refresh_check_golden_update(cpi);
@@ -3440,8 +3183,7 @@
vpx_clear_system_state();
}
-static void encode_with_recode_loop(VP9_COMP *cpi,
- size_t *size,
+static void encode_with_recode_loop(VP9_COMP *cpi, size_t *size,
uint8_t *dest) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
@@ -3488,9 +3230,8 @@
&frame_over_shoot_limit);
}
- cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source,
- &cpi->scaled_source,
- (cpi->oxcf.pass == 0));
+ cpi->Source = vp9_scale_if_required(
+ cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0));
// Unfiltered raw source used in metrics calculation if the source
// has been filtered.
@@ -3497,10 +3238,9 @@
if (is_psnr_calc_enabled(cpi)) {
#ifdef ENABLE_KF_DENOISE
if (is_spatial_denoise_enabled(cpi)) {
- cpi->raw_source_frame =
- vp9_scale_if_required(cm, &cpi->raw_unscaled_source,
- &cpi->raw_scaled_source,
- (cpi->oxcf.pass == 0));
+ cpi->raw_source_frame = vp9_scale_if_required(
+ cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
+ (cpi->oxcf.pass == 0));
} else {
cpi->raw_source_frame = cpi->Source;
}
@@ -3523,8 +3263,7 @@
vp9_set_quantizer(cm, q);
- if (loop_count == 0)
- setup_frame(cpi);
+ if (loop_count == 0) setup_frame(cpi);
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
@@ -3549,22 +3288,19 @@
// to recode.
if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
save_coding_context(cpi);
- if (!cpi->sf.use_nonrd_pick_mode)
- vp9_pack_bitstream(cpi, dest, size);
+ if (!cpi->sf.use_nonrd_pick_mode) vp9_pack_bitstream(cpi, dest, size);
rc->projected_frame_size = (int)(*size) << 3;
restore_coding_context(cpi);
- if (frame_over_shoot_limit == 0)
- frame_over_shoot_limit = 1;
+ if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
}
if (cpi->oxcf.rc_mode == VPX_Q) {
loop = 0;
} else {
- if ((cm->frame_type == KEY_FRAME) &&
- rc->this_key_frame_forced &&
- (rc->projected_frame_size < rc->max_frame_bandwidth)) {
+ if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
+ (rc->projected_frame_size < rc->max_frame_bandwidth)) {
int last_q = q;
int64_t kf_err;
@@ -3611,9 +3347,9 @@
q = clamp(q, q_low, q_high);
loop = q != last_q;
- } else if (recode_loop_test(
- cpi, frame_over_shoot_limit, frame_under_shoot_limit,
- q, VPXMAX(q_high, top_index), bottom_index)) {
+ } else if (recode_loop_test(cpi, frame_over_shoot_limit,
+ frame_under_shoot_limit, q,
+ VPXMAX(q_high, top_index), bottom_index)) {
// Is the projected frame size out of range and are we allowed
// to attempt to recode.
int last_q = q;
@@ -3654,13 +3390,13 @@
// Update rate_correction_factor unless
vp9_rc_update_rate_correction_factors(cpi);
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, VPXMAX(q_high, top_index));
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ VPXMAX(q_high, top_index));
while (q < q_low && retries < 10) {
vp9_rc_update_rate_correction_factors(cpi);
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, VPXMAX(q_high, top_index));
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ VPXMAX(q_high, top_index));
retries++;
}
}
@@ -3675,21 +3411,20 @@
q = (q_high + q_low) / 2;
} else {
vp9_rc_update_rate_correction_factors(cpi);
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, top_index);
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
// Special case reset for qlow for constrained quality.
// This should only trigger where there is very substantial
// undershoot on a frame and the auto cq level is above
      // the user passed-in value.
- if (cpi->oxcf.rc_mode == VPX_CQ &&
- q < q_low) {
+ if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
q_low = q;
}
while (q > q_high && retries < 10) {
vp9_rc_update_rate_correction_factors(cpi);
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, top_index);
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
retries++;
}
}
@@ -3729,8 +3464,7 @@
const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
int flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
- if (gold_is_last)
- flags &= ~VP9_GOLD_FLAG;
+ if (gold_is_last) flags &= ~VP9_GOLD_FLAG;
if (cpi->rc.frames_till_gf_update_due == INT_MAX &&
(cpi->svc.number_temporal_layers == 1 &&
@@ -3737,11 +3471,9 @@
cpi->svc.number_spatial_layers == 1))
flags &= ~VP9_GOLD_FLAG;
- if (alt_is_last)
- flags &= ~VP9_ALT_FLAG;
+ if (alt_is_last) flags &= ~VP9_ALT_FLAG;
- if (gold_is_alt)
- flags &= ~VP9_ALT_FLAG;
+ if (gold_is_alt) flags &= ~VP9_ALT_FLAG;
return flags;
}
@@ -3788,15 +3520,13 @@
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
#if CONFIG_VP9_HIGHBITDEPTH
- if (use_normative_scaler &&
- unscaled->y_width <= (scaled->y_width << 1) &&
+ if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
unscaled->y_height <= (scaled->y_height << 1))
scale_and_extend_frame(unscaled, scaled, (int)cm->bit_depth);
else
scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
#else
- if (use_normative_scaler &&
- unscaled->y_width <= (scaled->y_width << 1) &&
+ if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) &&
unscaled->y_height <= (scaled->y_height << 1))
vp9_scale_and_extend_frame(unscaled, scaled);
else
@@ -3819,7 +3549,7 @@
(gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
} else {
arf_sign_bias =
- (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
+ (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
}
cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
}
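  // (Background assumption: ref_frame_sign_bias marks a reference as lying
  // on the far temporal side of the current frame, so motion vectors taken
  // from references with the opposite bias get negated when used as
  // candidates; the code above only chooses the flag's value for ALTREF.)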
@@ -3826,11 +3556,10 @@
static int setup_interp_filter_search_mask(VP9_COMP *cpi) {
INTERP_FILTER ifilter;
- int ref_total[MAX_REF_FRAMES] = {0};
+ int ref_total[MAX_REF_FRAMES] = { 0 };
MV_REFERENCE_FRAME ref;
int mask = 0;
- if (cpi->common.last_frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame)
+ if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
return mask;
for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter)
@@ -3838,13 +3567,13 @@
for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) {
if ((ref_total[LAST_FRAME] &&
- cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
+ cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
(ref_total[GOLDEN_FRAME] == 0 ||
- cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50
- < ref_total[GOLDEN_FRAME]) &&
+ cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
+ ref_total[GOLDEN_FRAME]) &&
(ref_total[ALTREF_FRAME] == 0 ||
- cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50
- < ref_total[ALTREF_FRAME]))
+ cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 <
+ ref_total[ALTREF_FRAME]))
mask |= 1 << ifilter;
}
return mask;
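  // (Reading the thresholds above: "selected[ifilter] * 50 < ref_total" is
  // true when the filter was chosen for fewer than 2% (1 in 50) of the
  // blocks coded against that reference, so a filter lands in the skip
  // mask when LAST never picked it and GOLDEN/ALTREF all but never did.)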
@@ -3852,20 +3581,13 @@
#ifdef ENABLE_KF_DENOISE
// Baseline kernel weights for denoise
-static uint8_t dn_kernal_3[9] = {
- 1, 2, 1,
- 2, 4, 2,
- 1, 2, 1};
-static uint8_t dn_kernal_5[25] = {
- 1, 1, 1, 1, 1,
- 1, 1, 2, 1, 1,
- 1, 2, 4, 2, 1,
- 1, 1, 2, 1, 1,
- 1, 1, 1, 1, 1};
+static uint8_t dn_kernal_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };
+static uint8_t dn_kernal_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4,
+ 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1 };
-static INLINE void add_denoise_point(int centre_val, int data_val,
- int thresh, uint8_t point_weight,
- int *sum_val, int *sum_weight) {
+static INLINE void add_denoise_point(int centre_val, int data_val, int thresh,
+ uint8_t point_weight, int *sum_val,
+ int *sum_weight) {
if (abs(centre_val - data_val) <= thresh) {
*sum_weight += point_weight;
*sum_val += (int)data_val * (int)point_weight;
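  // (Each kernel tap whose value lies within thresh of the centre pixel
  // adds weight * value into the running sums; the caller then writes back
  // approximately sum_val / sum_weight as the denoised pixel, rounding
  // details aside.)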
@@ -3885,7 +3607,7 @@
uint8_t *kernal_ptr;
  // Find the maximum deviation from the source point in its local neighborhood.
- tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
+ tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1);
for (i = 0; i < kernal_size + 2; ++i) {
for (j = 0; j < kernal_size + 2; ++j) {
max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
@@ -3905,8 +3627,8 @@
tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
for (i = 0; i < kernal_size; ++i) {
for (j = 0; j < kernal_size; ++j) {
- add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh,
- *kernal_ptr, &sum_val, &sum_weight);
+ add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
+ &sum_val, &sum_weight);
++kernal_ptr;
}
tmp_ptr += stride;
@@ -3918,7 +3640,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride,
- const int strength) {
+ const int strength) {
int sum_weight = 0;
int sum_val = 0;
int thresh = strength;
@@ -3950,8 +3672,8 @@
tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size;
for (i = 0; i < kernal_size; ++i) {
for (j = 0; j < kernal_size; ++j) {
- add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh,
- *kernal_ptr, &sum_val, &sum_weight);
+ add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr,
+ &sum_val, &sum_weight);
++kernal_ptr;
}
tmp_ptr += stride;
@@ -3963,12 +3685,11 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
// Apply thresholded spatial noise suppression to a given buffer.
-static void spatial_denoise_buffer(VP9_COMP *cpi,
- uint8_t * buffer, const int stride,
- const int width, const int height,
- const int strength) {
+static void spatial_denoise_buffer(VP9_COMP *cpi, uint8_t *buffer,
+ const int stride, const int width,
+ const int height, const int strength) {
VP9_COMMON *const cm = &cpi->common;
- uint8_t * src_ptr = buffer;
+ uint8_t *src_ptr = buffer;
int row;
int col;
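Throughout this hunk the "*" moves from the type to the declared name (PointerAlignment: Right), the single most common change in this commit. Sketch with made-up names:

#include <stdint.h>
#include <stdio.h>

/* the "*" binds to the declared name, not the type */
static void fill(uint8_t *buf, int len) {
  int i;
  for (i = 0; i < len; ++i) buf[i] = (uint8_t)i;
}

int main(void) {
  uint8_t data[4] = { 0 };
  fill(data, 4);
  printf("%d\n", data[3]); /* prints 3 */
  return 0;
}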
@@ -3976,8 +3697,8 @@
for (col = 0; col < width; ++col) {
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth)
- highbd_spatial_denoise_point(
- CONVERT_TO_SHORTPTR(&src_ptr[col]), stride, strength);
+ highbd_spatial_denoise_point(CONVERT_TO_SHORTPTR(&src_ptr[col]), stride,
+ strength);
else
spatial_denoise_point(&src_ptr[col], stride, strength);
#else
@@ -4002,20 +3723,19 @@
VPXMAX(oxcf->arnr_strength >> 2, VPXMIN(oxcf->arnr_strength, (q >> 4)));
// Denoise each of the Y, U and V buffers.
- spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride,
- src->y_width, src->y_height, strength);
+ spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride, src->y_width,
+ src->y_height, strength);
strength += (strength >> 1);
- spatial_denoise_buffer(cpi, src->u_buffer, src->uv_stride,
- src->uv_width, src->uv_height, strength << 1);
+ spatial_denoise_buffer(cpi, src->u_buffer, src->uv_stride, src->uv_width,
+ src->uv_height, strength << 1);
- spatial_denoise_buffer(cpi, src->v_buffer, src->uv_stride,
- src->uv_width, src->uv_height, strength << 1);
+ spatial_denoise_buffer(cpi, src->v_buffer, src->uv_stride, src->uv_width,
+ src->uv_height, strength << 1);
}
#endif // ENABLE_KF_DENOISE
-static void encode_frame_to_data_rate(VP9_COMP *cpi,
- size_t *size,
+static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
uint8_t *dest,
unsigned int *frame_flags) {
VP9_COMMON *const cm = &cpi->common;
@@ -4028,8 +3748,7 @@
#ifdef ENABLE_KF_DENOISE
// Spatial denoise of key frame.
- if (is_spatial_denoise_enabled(cpi))
- spatial_denoise_frame(cpi);
+ if (is_spatial_denoise_enabled(cpi)) spatial_denoise_frame(cpi);
#endif
// Set the arf sign bias for this frame.
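Guards whose body is one short statement are joined onto a single line, as with spatial_denoise_frame above, presumably via AllowShortIfStatementsOnASingleLine. Sketch:

#include <stdio.h>

static int enabled(void) { return 1; } /* illustrative stand-in */

int main(void) {
  /* a short guard and its single-statement body share one line */
  if (enabled()) printf("denoise\n");
  return 0;
}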
@@ -4038,10 +3757,8 @@
// Set default state for segment based loop filter update flags.
cm->lf.mode_ref_delta_update = 0;
- if (cpi->oxcf.pass == 2 &&
- cpi->sf.adaptive_interp_filter_search)
- cpi->sf.interp_filter_search_mask =
- setup_interp_filter_search_mask(cpi);
+ if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
+ cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
// Set various flags etc to special state if it is a key frame.
if (frame_is_intra_only(cm)) {
@@ -4079,9 +3796,9 @@
else
cm->frame_context_idx = FRAME_CONTEXTS - 1;
} else {
- cm->frame_context_idx =
- cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers +
- cpi->svc.temporal_layer_id;
+ cm->frame_context_idx =
+ cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers +
+ cpi->svc.temporal_layer_id;
}
cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode;
@@ -4112,8 +3829,7 @@
// For 1 pass CBR, check if we are dropping this frame.
// For spatial layers, for now only check for frame-dropping on first spatial
// layer, and if decision is to drop, we drop whole super-frame.
- if (oxcf->pass == 0 &&
- oxcf->rc_mode == VPX_CBR &&
+ if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
cm->frame_type != KEY_FRAME) {
if (vp9_rc_drop_frame(cpi) ||
(is_one_pass_cbr_svc(cpi) && cpi->svc.rc_drop_superframe == 1)) {
@@ -4164,8 +3880,8 @@
if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- cpi->ambient_err = vpx_highbd_get_y_sse(cpi->Source,
- get_frame_new_buffer(cm));
+ cpi->ambient_err =
+ vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
@@ -4175,13 +3891,12 @@
}
// If the encoder forced a KEY_FRAME decision
- if (cm->frame_type == KEY_FRAME)
- cpi->refresh_last_frame = 1;
+ if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1;
cm->frame_to_show = get_frame_new_buffer(cm);
cm->frame_to_show->color_space = cm->color_space;
cm->frame_to_show->color_range = cm->color_range;
- cm->frame_to_show->render_width = cm->render_width;
+ cm->frame_to_show->render_width = cm->render_width;
cm->frame_to_show->render_height = cm->render_height;
// Pick the loop filter level for the frame.
@@ -4190,8 +3905,7 @@
// build the bitstream
vp9_pack_bitstream(cpi, dest, size);
- if (cm->seg.update_map)
- update_reference_segmentation_map(cpi);
+ if (cm->seg.update_map) update_reference_segmentation_map(cpi);
if (frame_is_intra_only(cm) == 0) {
release_scaled_references(cpi);
@@ -4253,8 +3967,7 @@
cm->last_height = cm->height;
// reset to normal state now that we are done.
- if (!cm->show_existing_frame)
- cm->last_show_frame = cm->show_frame;
+ if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
if (cm->show_frame) {
vp9_swap_mi_and_prev_mi(cm);
@@ -4261,16 +3974,15 @@
// Don't increment frame counters if this was an altref buffer
// update, not a real frame.
++cm->current_video_frame;
- if (cpi->use_svc)
- vp9_inc_frame_in_layer(cpi);
+ if (cpi->use_svc) vp9_inc_frame_in_layer(cpi);
}
cm->prev_frame = cm->cur_frame;
if (cpi->use_svc)
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers +
- cpi->svc.temporal_layer_id].last_frame_type =
- cm->frame_type;
+ cpi->svc.number_temporal_layers +
+ cpi->svc.temporal_layer_id]
+ .last_frame_type = cm->frame_type;
}
static void SvcEncode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
@@ -4289,8 +4001,8 @@
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
}
-static void Pass2Encode(VP9_COMP *cpi, size_t *size,
- uint8_t *dest, unsigned int *frame_flags) {
+static void Pass2Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
+ unsigned int *frame_flags) {
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
@@ -4387,18 +4099,13 @@
return res;
}
-
static int frame_is_reference(const VP9_COMP *cpi) {
const VP9_COMMON *cm = &cpi->common;
- return cm->frame_type == KEY_FRAME ||
- cpi->refresh_last_frame ||
- cpi->refresh_golden_frame ||
- cpi->refresh_alt_ref_frame ||
- cm->refresh_frame_context ||
- cm->lf.mode_ref_delta_update ||
- cm->seg.update_map ||
- cm->seg.update_data;
+ return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
+ cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
+ cm->refresh_frame_context || cm->lf.mode_ref_delta_update ||
+ cm->seg.update_map || cm->seg.update_data;
}
static void adjust_frame_rate(VP9_COMP *cpi,
@@ -4410,8 +4117,8 @@
this_duration = source->ts_end - source->ts_start;
step = 1;
} else {
- int64_t last_duration = cpi->last_end_time_stamp_seen
- - cpi->last_time_stamp_seen;
+ int64_t last_duration =
+ cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
this_duration = source->ts_end - cpi->last_end_time_stamp_seen;
@@ -4465,10 +4172,10 @@
if (cpi->oxcf.pass == 2) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
rc->is_src_frame_alt_ref =
- (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
+ (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
} else {
- rc->is_src_frame_alt_ref = cpi->alt_ref_source &&
- (source == cpi->alt_ref_source);
+ rc->is_src_frame_alt_ref =
+ cpi->alt_ref_source && (source == cpi->alt_ref_source);
}
if (rc->is_src_frame_alt_ref) {
@@ -4483,8 +4190,8 @@
#if CONFIG_INTERNAL_STATS
extern double vp9_get_blockiness(const uint8_t *img1, int img1_pitch,
- const uint8_t *img2, int img2_pitch,
- int width, int height);
+ const uint8_t *img2, int img2_pitch, int width,
+ int height);
static void adjust_image_stat(double y, double u, double v, double all,
ImageStat *s) {
@@ -4523,7 +4230,7 @@
if (!level_stats->seen_first_altref) {
level_stats->seen_first_altref = 1;
} else if (level_stats->frames_since_last_altref <
- level_spec->min_altref_distance) {
+ level_spec->min_altref_distance) {
level_spec->min_altref_distance = level_stats->frames_since_last_altref;
}
level_stats->frames_since_last_altref = 0;
@@ -4533,7 +4240,8 @@
if (level_stats->frame_window_buffer.len < FRAME_WINDOW_SIZE - 1) {
idx = (level_stats->frame_window_buffer.start +
- level_stats->frame_window_buffer.len++) % FRAME_WINDOW_SIZE;
+ level_stats->frame_window_buffer.len++) %
+ FRAME_WINDOW_SIZE;
} else {
idx = level_stats->frame_window_buffer.start;
level_stats->frame_window_buffer.start = (idx + 1) % FRAME_WINDOW_SIZE;
@@ -4563,15 +4271,15 @@
}
// update average_bitrate
- level_spec->average_bitrate =
- (double)level_stats->total_compressed_size / 125.0 /
- level_stats->time_encoded;
+ level_spec->average_bitrate = (double)level_stats->total_compressed_size /
+ 125.0 / level_stats->time_encoded;
// update max_luma_sample_rate
luma_samples = 0;
for (i = 0; i < level_stats->frame_window_buffer.len; ++i) {
idx = (level_stats->frame_window_buffer.start +
- level_stats->frame_window_buffer.len - 1 - i) % FRAME_WINDOW_SIZE;
+ level_stats->frame_window_buffer.len - 1 - i) %
+ FRAME_WINDOW_SIZE;
if (i == 0) {
dur_end = level_stats->frame_window_buffer.buf[idx].ts;
}
@@ -4590,7 +4298,8 @@
for (i = 0; i < CPB_WINDOW_SIZE; ++i) {
if (i >= level_stats->frame_window_buffer.len) break;
idx = (level_stats->frame_window_buffer.start +
- level_stats->frame_window_buffer.len - 1 - i) % FRAME_WINDOW_SIZE;
+ level_stats->frame_window_buffer.len - 1 - i) %
+ FRAME_WINDOW_SIZE;
cpb_data_size += level_stats->frame_window_buffer.buf[idx].size;
}
cpb_data_size = cpb_data_size / 125.0;
@@ -4604,9 +4313,9 @@
}
// update compression_ratio
- level_spec->compression_ratio =
- (double)level_stats->total_uncompressed_size * cm->bit_depth /
- level_stats->total_compressed_size / 8.0;
+ level_spec->compression_ratio = (double)level_stats->total_uncompressed_size *
+ cm->bit_depth /
+ level_stats->total_compressed_size / 8.0;
// update max_col_tiles
if (level_spec->max_col_tiles < (1 << cm->log2_tile_cols)) {
@@ -4615,13 +4324,13 @@
}
int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest,
- int64_t *time_stamp, int64_t *time_end, int flush) {
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
VP9_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
- struct vpx_usec_timer cmptimer;
+ struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
struct lookahead_entry *last_source = NULL;
struct lookahead_entry *source = NULL;
@@ -4635,8 +4344,7 @@
if (cpi->svc.encode_empty_frame_state == ENCODING)
source = &cpi->svc.empty_frame;
#endif
- if (oxcf->pass == 2)
- vp9_restore_layer_context(cpi);
+ if (oxcf->pass == 2) vp9_restore_layer_context(cpi);
} else if (is_one_pass_cbr_svc(cpi)) {
vp9_one_pass_cbr_svc_start_layer(cpi);
}
@@ -4648,8 +4356,7 @@
// Is multi-arf enabled?
// Note that at the moment multi_arf is only configured for 2 pass VBR and
// will not work properly with svc.
- if ((oxcf->pass == 2) && !cpi->use_svc &&
- (cpi->oxcf.enable_auto_arf > 1))
+ if ((oxcf->pass == 2) && !cpi->use_svc && (cpi->oxcf.enable_auto_arf > 1))
cpi->multi_arf_allowed = 1;
else
cpi->multi_arf_allowed = 0;
@@ -4667,8 +4374,7 @@
arf_src_index = get_arf_src_index(cpi);
// Skip alt frame if we encode the empty frame
- if (is_two_pass_svc(cpi) && source != NULL)
- arf_src_index = 0;
+ if (is_two_pass_svc(cpi) && source != NULL) arf_src_index = 0;
if (arf_src_index) {
for (i = 0; i <= arf_src_index; ++i) {
@@ -4754,8 +4460,8 @@
}
if (source) {
- cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer
- : &source->img;
+ cpi->un_scaled_source = cpi->Source =
+ force_src_buffer ? force_src_buffer : &source->img;
#ifdef ENABLE_KF_DENOISE
// Copy of raw source for metrics calculation.
@@ -4772,7 +4478,7 @@
} else {
*size = 0;
if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
- vp9_end_first_pass(cpi); /* get last stats packet */
+ vp9_end_first_pass(cpi); /* get last stats packet */
cpi->twopass.first_pass_done = 1;
}
return -1;
@@ -4803,8 +4509,7 @@
}
cm->new_fb_idx = get_free_fb(cm);
- if (cm->new_fb_idx == INVALID_IDX)
- return -1;
+ if (cm->new_fb_idx == INVALID_IDX) return -1;
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
@@ -4823,39 +4528,33 @@
cpi->frame_flags = *frame_flags;
if ((oxcf->pass == 2) &&
- (!cpi->use_svc ||
- (is_two_pass_svc(cpi) &&
- cpi->svc.encode_empty_frame_state != ENCODING))) {
+ (!cpi->use_svc || (is_two_pass_svc(cpi) &&
+ cpi->svc.encode_empty_frame_state != ENCODING))) {
vp9_rc_get_second_pass_params(cpi);
} else if (oxcf->pass == 1) {
set_frame_size(cpi);
}
- if (cpi->oxcf.pass != 0 ||
- cpi->use_svc ||
- frame_is_intra_only(cm) == 1) {
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- cpi->scaled_ref_idx[i] = INVALID_IDX;
+ if (cpi->oxcf.pass != 0 || cpi->use_svc || frame_is_intra_only(cm) == 1) {
+ for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
}
- if (oxcf->pass == 1 &&
- (!cpi->use_svc || is_two_pass_svc(cpi))) {
+ if (oxcf->pass == 1 && (!cpi->use_svc || is_two_pass_svc(cpi))) {
const int lossless = is_lossless_requested(oxcf);
#if CONFIG_VP9_HIGHBITDEPTH
if (cpi->oxcf.use_highbitdepth)
- cpi->td.mb.fwd_txm4x4 = lossless ?
- vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
+ cpi->td.mb.fwd_txm4x4 =
+ lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
else
cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4;
- cpi->td.mb.highbd_itxm_add = lossless ? vp9_highbd_iwht4x4_add :
- vp9_highbd_idct4x4_add;
+ cpi->td.mb.highbd_itxm_add =
+ lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
#else
cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4;
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->td.mb.itxm_add = lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
vp9_first_pass(cpi, source);
- } else if (oxcf->pass == 2 &&
- (!cpi->use_svc || is_two_pass_svc(cpi))) {
+ } else if (oxcf->pass == 2 && (!cpi->use_svc || is_two_pass_svc(cpi))) {
Pass2Encode(cpi, size, dest, frame_flags);
} else if (cpi->use_svc) {
SvcEncode(cpi, size, dest, frame_flags);
@@ -4877,10 +4576,9 @@
}
// Save layer specific state.
- if (is_one_pass_cbr_svc(cpi) ||
- ((cpi->svc.number_temporal_layers > 1 ||
- cpi->svc.number_spatial_layers > 1) &&
- oxcf->pass == 2)) {
+ if (is_one_pass_cbr_svc(cpi) || ((cpi->svc.number_temporal_layers > 1 ||
+ cpi->svc.number_spatial_layers > 1) &&
+ oxcf->pass == 2)) {
vp9_save_layer_context(cpi);
}
@@ -4888,8 +4586,7 @@
cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
// Should we calculate metrics for the frame.
- if (is_psnr_calc_enabled(cpi))
- generate_psnr_packet(cpi);
+ if (is_psnr_calc_enabled(cpi)) generate_psnr_packet(cpi);
if (cpi->keep_level_stats && oxcf->pass != 1)
update_level_info(cpi, size, arf_src_index);
@@ -4905,10 +4602,10 @@
uint32_t in_bit_depth = 8;
cpi->count++;
#if CONFIG_VP9_HIGHBITDEPTH
- if (cm->use_highbitdepth) {
- in_bit_depth = cpi->oxcf.input_bit_depth;
- bit_depth = cm->bit_depth;
- }
+ if (cm->use_highbitdepth) {
+ in_bit_depth = cpi->oxcf.input_bit_depth;
+ bit_depth = cm->bit_depth;
+ }
#endif
if (cpi->b_calculate_psnr) {
@@ -4933,14 +4630,13 @@
PSNR_STATS psnr2;
double frame_ssim2 = 0, weight = 0;
#if CONFIG_VP9_POSTPROC
- if (vpx_alloc_frame_buffer(pp,
- recon->y_crop_width, recon->y_crop_height,
- cm->subsampling_x, cm->subsampling_y,
+ if (vpx_alloc_frame_buffer(
+ pp, recon->y_crop_width, recon->y_crop_height,
+ cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
- cm->use_highbitdepth,
+ cm->use_highbitdepth,
#endif
- VP9_ENC_BORDER_IN_PIXELS,
- cm->byte_alignment) < 0) {
+ VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment) < 0) {
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate post processing buffer");
}
@@ -4956,7 +4652,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
vpx_calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd,
- cpi->oxcf.input_bit_depth);
+ cpi->oxcf.input_bit_depth);
#else
vpx_calc_psnr(orig, pp, &psnr2);
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -4968,8 +4664,8 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight,
- bit_depth, in_bit_depth);
+ frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth,
+ in_bit_depth);
} else {
frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
}
@@ -4983,8 +4679,8 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vpx_highbd_calc_ssim(
- orig, pp, &weight, bit_depth, in_bit_depth);
+ frame_ssim2 = vpx_highbd_calc_ssim(orig, pp, &weight, bit_depth,
+ in_bit_depth);
} else {
frame_ssim2 = vpx_calc_ssim(orig, pp, &weight);
}
@@ -5032,8 +4728,8 @@
&cpi->metrics, 1);
const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
- double consistency = vpx_sse_to_psnr(samples, peak,
- (double)cpi->total_inconsistency);
+ double consistency =
+ vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency);
if (consistency > 0.0)
cpi->worst_consistency =
VPXMIN(cpi->worst_consistency, consistency);
@@ -5113,13 +4809,12 @@
}
}
-int vp9_set_internal_size(VP9_COMP *cpi,
- VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode,
+ VPX_SCALING vert_mode) {
VP9_COMMON *cm = &cpi->common;
int hr = 0, hs = 0, vr = 0, vs = 0;
- if (horiz_mode > ONETWO || vert_mode > ONETWO)
- return -1;
+ if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
Scale2Ratio(horiz_mode, &hr, &hs);
Scale2Ratio(vert_mode, &vr, &vs);
@@ -5178,40 +4873,32 @@
return;
}
-int vp9_get_quantizer(VP9_COMP *cpi) {
- return cpi->common.base_qindex;
-}
+int vp9_get_quantizer(VP9_COMP *cpi) { return cpi->common.base_qindex; }
void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags) {
- if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF)) {
+ if (flags &
+ (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
int ref = 7;
- if (flags & VP8_EFLAG_NO_REF_LAST)
- ref ^= VP9_LAST_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP9_LAST_FLAG;
- if (flags & VP8_EFLAG_NO_REF_GF)
- ref ^= VP9_GOLD_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VP9_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_REF_ARF)
- ref ^= VP9_ALT_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VP9_ALT_FLAG;
vp9_use_as_reference(cpi, ref);
}
- if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_FORCE_ARF)) {
+ if (flags &
+ (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
int upd = 7;
- if (flags & VP8_EFLAG_NO_UPD_LAST)
- upd ^= VP9_LAST_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VP9_LAST_FLAG;
- if (flags & VP8_EFLAG_NO_UPD_GF)
- upd ^= VP9_GOLD_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VP9_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_UPD_ARF)
- upd ^= VP9_ALT_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VP9_ALT_FLAG;
vp9_update_reference(cpi, upd);
}
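vp9_get_quantizer above collapses to one line: trivial function bodies are folded when they fit, per AllowShortFunctionsOnASingleLine. Sketch with a stand-in struct:

#include <stdio.h>

struct state { int base_qindex; }; /* stand-in for the encoder context */

/* a one-statement accessor folds onto a single line */
static int get_quantizer(const struct state *s) { return s->base_qindex; }

int main(void) {
  struct state s = { 42 };
  printf("%d\n", get_quantizer(&s));
  return 0;
}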
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -72,7 +72,6 @@
FRAME_CONTEXT fc;
} CODING_CONTEXT;
-
typedef enum {
// encode_breakout is disabled.
ENCODE_BREAKOUT_DISABLED = 0,
@@ -83,10 +82,10 @@
} ENCODE_BREAKOUT_TYPE;
typedef enum {
- NORMAL = 0,
- FOURFIVE = 1,
- THREEFIVE = 2,
- ONETWO = 3
+ NORMAL = 0,
+ FOURFIVE = 1,
+ THREEFIVE = 2,
+ ONETWO = 3
} VPX_SCALING;
typedef enum {
@@ -106,7 +105,7 @@
} MODE;
typedef enum {
- FRAMEFLAGS_KEY = 1 << 0,
+ FRAMEFLAGS_KEY = 1 << 0,
FRAMEFLAGS_GOLDEN = 1 << 1,
FRAMEFLAGS_ALTREF = 1 << 2,
} FRAMETYPE_FLAGS;
@@ -129,14 +128,14 @@
typedef struct VP9EncoderConfig {
BITSTREAM_PROFILE profile;
vpx_bit_depth_t bit_depth; // Codec bit-depth.
- int width; // width of data passed to the compressor
- int height; // height of data passed to the compressor
+ int width; // width of data passed to the compressor
+ int height; // height of data passed to the compressor
unsigned int input_bit_depth; // Input bit depth.
- double init_framerate; // set to passed in framerate
- int64_t target_bandwidth; // bandwidth to be used in bits per second
+ double init_framerate; // set to passed in framerate
+ int64_t target_bandwidth; // bandwidth to be used in bits per second
int noise_sensitivity; // pre-processing blur: recommendation 0
- int sharpness; // sharpening output: recommendation 0:
+ int sharpness; // sharpening output: recommendation 0:
int speed;
// maximum allowed bitrate for any intra frame in % of bitrate target.
unsigned int rc_max_intra_bitrate_pct;
@@ -188,7 +187,7 @@
int frame_periodic_boost;
// two pass datarate control
- int two_pass_vbrbias; // two pass datarate control tweaks
+ int two_pass_vbrbias; // two pass datarate control tweaks
int two_pass_vbrmin_section;
int two_pass_vbrmax_section;
// END DATARATE CONTROL OPTIONS
@@ -289,15 +288,10 @@
unsigned char *map;
} ActiveMap;
-typedef enum {
- Y,
- U,
- V,
- ALL
-} STAT_TYPE;
+typedef enum { Y, U, V, ALL } STAT_TYPE;
typedef struct IMAGE_STAT {
- double stat[ALL+1];
+ double stat[ALL + 1];
double worst;
} ImageStat;
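STAT_TYPE above shows the same folding applied to enums, plus the spacing added around the "+" in the ALL+1 subscript. Sketch quoting the same names:

#include <stdio.h>

typedef enum { Y, U, V, ALL } STAT_TYPE; /* short enum folded onto one line */

int main(void) {
  double stat[ALL + 1] = { 0 }; /* spaces around "+" in the subscript */
  stat[ALL] = 1.0;
  printf("%.1f\n", stat[ALL]);
  return 0;
}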
@@ -333,7 +327,7 @@
uint64_t max_luma_sample_rate;
uint32_t max_luma_picture_size;
double average_bitrate; // in kilobits per second
- double max_cpb_size; // in kilobits
+ double max_cpb_size; // in kilobits
double compression_ratio;
uint8_t max_col_tiles;
uint32_t min_altref_distance;
@@ -375,8 +369,8 @@
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
VP9_COMMON common;
VP9EncoderConfig oxcf;
- struct lookahead_ctx *lookahead;
- struct lookahead_entry *alt_ref_source;
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *alt_ref_source;
YV12_BUFFER_CONFIG *Source;
YV12_BUFFER_CONFIG *Last_Source; // NULL for first frame and alt_ref frames
@@ -439,11 +433,11 @@
int interp_filter_selected[MAX_REF_FRAMES][SWITCHABLE];
- struct vpx_codec_pkt_list *output_pkt_list;
+ struct vpx_codec_pkt_list *output_pkt_list;
MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
- int mbgraph_n_frames; // number of frames filled in the above
- int static_mb_pct; // % forced skip mbs by segmentation
+ int mbgraph_n_frames; // number of frames filled in the above
+ int static_mb_pct; // % forced skip mbs by segmentation
int ref_frame_flags;
SPEED_FEATURES sf;
@@ -500,7 +494,7 @@
double total_blockiness;
double worst_blockiness;
- int bytes;
+ int bytes;
double summed_quality;
double summed_weights;
double summedp_quality;
@@ -606,15 +600,15 @@
void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf);
- // receive a frame's worth of data. Caller can assume that a copy of this
- // frame is made and not just a copy of the pointer.
+// receive a frame's worth of data. Caller can assume that a copy of this
+// frame is made and not just a copy of the pointer.
int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
int64_t end_time_stamp);
int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest,
- int64_t *time_stamp, int64_t *time_end, int flush);
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush);
int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest,
vp9_ppflags_t *flags);
@@ -635,8 +629,8 @@
int vp9_get_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp9_set_internal_size(VP9_COMP *cpi,
- VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode,
+ VPX_SCALING vert_mode);
int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width,
unsigned int height);
@@ -646,8 +640,7 @@
int vp9_get_quantizer(struct VP9_COMP *cpi);
static INLINE int frame_is_kf_gf_arf(const VP9_COMP *cpi) {
- return frame_is_intra_only(&cpi->common) ||
- cpi->refresh_alt_ref_frame ||
+ return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
}
@@ -673,8 +666,8 @@
VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
VP9_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
- return
- buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf : NULL;
+ return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
+ : NULL;
}
static INLINE int get_token_alloc(int mb_rows, int mb_cols) {
@@ -737,10 +730,10 @@
static INLINE void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
- xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME
- : 0];
- xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME
- : 0];
+ xd->block_refs[0] =
+ &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME : 0];
+ xd->block_refs[1] =
+ &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME : 0];
}
static INLINE int get_chessboard_index(const int frame_index) {
--- a/vp9/encoder/vp9_ethread.c
+++ b/vp9/encoder/vp9_ethread.c
@@ -43,10 +43,10 @@
const int tile_rows = 1 << cm->log2_tile_rows;
int t;
- (void) unused;
+ (void)unused;
for (t = thread_data->start; t < tile_rows * tile_cols;
- t += cpi->num_workers) {
+ t += cpi->num_workers) {
int tile_row = t / tile_cols;
int tile_col = t % tile_cols;
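"(void) unused" becomes "(void)unused": no space after a C-style cast, including the void cast used to silence unused-parameter warnings. Sketch with made-up names:

#include <stddef.h>
#include <stdio.h>

static void hook(void *data, void *unused) {
  (void)unused; /* no space between a cast and its operand */
  printf("%p\n", data);
}

int main(void) {
  int x = 0;
  hook(&x, NULL);
  return 0;
}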
@@ -63,8 +63,8 @@
int log2_tile_cols;
vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
- log2_tile_cols = clamp(cpi->oxcf.tile_columns,
- min_log2_tile_cols, max_log2_tile_cols);
+ log2_tile_cols =
+ clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
return (1 << log2_tile_cols);
}
@@ -92,8 +92,7 @@
vpx_malloc(allocated_workers * sizeof(*cpi->workers)));
CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
- vpx_calloc(allocated_workers,
- sizeof(*cpi->tile_thr_data)));
+ vpx_calloc(allocated_workers, sizeof(*cpi->tile_thr_data)));
for (i = 0; i < allocated_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
@@ -140,7 +139,7 @@
worker->hook = (VPxWorkerHook)enc_worker_hook;
worker->data1 = &cpi->tile_thr_data[i];
worker->data2 = NULL;
- thread_data = (EncWorkerData*)worker->data1;
+ thread_data = (EncWorkerData *)worker->data1;
// Before encoding a frame, copy the thread data from cpi.
if (thread_data->td != &cpi->td) {
@@ -173,7 +172,7 @@
// Encode a frame
for (i = 0; i < num_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Set the starting tile for each thread.
thread_data->start = i;
@@ -192,7 +191,7 @@
for (i = 0; i < num_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Accumulate counters.
if (i < cpi->num_workers - 1) {
--- a/vp9/encoder/vp9_extend.c
+++ b/vp9/encoder/vp9_extend.c
@@ -16,8 +16,7 @@
#include "vp9/encoder/vp9_extend.h"
static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
- uint8_t *dst, int dst_pitch,
- int w, int h,
+ uint8_t *dst, int dst_pitch, int w, int h,
int extend_top, int extend_left,
int extend_bottom, int extend_right) {
int i, linesize;
@@ -43,7 +42,7 @@
src_ptr1 = dst - extend_left;
src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
- dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h)-extend_left;
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
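The "(h)-extend_left" output above is a known clang-format quirk, not a semantic change: a parenthesized lone identifier is indistinguishable from a cast at format time, so the tool drops the spaces around the minus. The expression still parses as subtraction, as this self-check (with made-up values) confirms:

#include <assert.h>

int main(void) {
  int dst_pitch = 8, h = 10, extend_left = 3; /* made-up values */
  /* "(h)-extend_left" is ordinary subtraction, just oddly spaced */
  assert(dst_pitch * (h)-extend_left == dst_pitch * h - extend_left);
  return 0;
}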
@@ -59,9 +58,8 @@
#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
- uint8_t *dst8, int dst_pitch,
- int w, int h,
- int extend_top, int extend_left,
+ uint8_t *dst8, int dst_pitch, int w,
+ int h, int extend_top, int extend_left,
int extend_bottom, int extend_right) {
int i, linesize;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -88,7 +86,7 @@
src_ptr1 = dst - extend_left;
src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
- dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h)-extend_left;
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
@@ -127,51 +125,46 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_copy_and_extend_plane(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride,
- src->y_crop_width, src->y_crop_height,
- et_y, el_y, eb_y, er_y);
+ highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, src->y_crop_width,
+ src->y_crop_height, et_y, el_y, eb_y, er_y);
- highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
- et_uv, el_uv, eb_uv, er_uv);
+ highbd_copy_and_extend_plane(
+ src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+ src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
- highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
- et_uv, el_uv, eb_uv, er_uv);
+ highbd_copy_and_extend_plane(
+ src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+ src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
- copy_and_extend_plane(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride,
- src->y_crop_width, src->y_crop_height,
+ copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, src->y_crop_width, src->y_crop_height,
et_y, el_y, eb_y, er_y);
- copy_and_extend_plane(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
+ copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
et_uv, el_uv, eb_uv, er_uv);
- copy_and_extend_plane(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
+ copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
et_uv, el_uv, eb_uv, er_uv);
}
void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst,
- int srcy, int srcx,
- int srch, int srcw) {
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw) {
// If the side is not touching the boundary then don't extend.
const int et_y = srcy ? 0 : dst->border;
const int el_y = srcx ? 0 : dst->border;
- const int eb_y = srcy + srch != src->y_height ? 0 :
- dst->border + dst->y_height - src->y_height;
- const int er_y = srcx + srcw != src->y_width ? 0 :
- dst->border + dst->y_width - src->y_width;
+ const int eb_y = srcy + srch != src->y_height
+ ? 0
+ : dst->border + dst->y_height - src->y_height;
+ const int er_y = srcx + srcw != src->y_width
+ ? 0
+ : dst->border + dst->y_width - src->y_width;
const int src_y_offset = srcy * src->y_stride + srcx;
const int dst_y_offset = srcy * dst->y_stride + srcx;
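Long conditional expressions are rewrapped with "?" and ":" leading the continuation lines, aligned under the condition (BreakBeforeTernaryOperators: true). Sketch of the shape with illustrative values:

#include <stdio.h>

int main(void) {
  int srcy = 0, srch = 16, src_height = 16; /* made-up values */
  int border = 32, dst_height = 16;
  /* "?" and ":" lead the continuation lines, aligned under the condition */
  const int eb = srcy + srch != src_height
                     ? 0
                     : border + dst_height - src_height;
  printf("%d\n", eb); /* prints 32 */
  return 0;
}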
@@ -185,17 +178,14 @@
const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
- dst->y_buffer + dst_y_offset, dst->y_stride,
- srcw, srch,
+ dst->y_buffer + dst_y_offset, dst->y_stride, srcw, srch,
et_y, el_y, eb_y, er_y);
copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
- dst->u_buffer + dst_uv_offset, dst->uv_stride,
- srcw_uv, srch_uv,
- et_uv, el_uv, eb_uv, er_uv);
+ dst->u_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+ srch_uv, et_uv, el_uv, eb_uv, er_uv);
copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
- dst->v_buffer + dst_uv_offset, dst->uv_stride,
- srcw_uv, srch_uv,
- et_uv, el_uv, eb_uv, er_uv);
+ dst->v_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+ srch_uv, et_uv, el_uv, eb_uv, er_uv);
}
--- a/vp9/encoder/vp9_extend.h
+++ b/vp9/encoder/vp9_extend.h
@@ -18,14 +18,12 @@
extern "C" {
#endif
-
void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst);
void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst,
- int srcy, int srcx,
- int srch, int srcw);
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw);
#ifdef __cplusplus
} // extern "C"
#endif
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -38,35 +38,34 @@
#include "vp9/encoder/vp9_rd.h"
#include "vpx_dsp/variance.h"
-#define OUTPUT_FPF 0
-#define ARF_STATS_OUTPUT 0
+#define OUTPUT_FPF 0
+#define ARF_STATS_OUTPUT 0
-#define BOOST_BREAKOUT 12.5
-#define BOOST_FACTOR 12.5
-#define FACTOR_PT_LOW 0.70
-#define FACTOR_PT_HIGH 0.90
-#define FIRST_PASS_Q 10.0
-#define GF_MAX_BOOST 96.0
-#define INTRA_MODE_PENALTY 1024
-#define KF_MAX_BOOST 128.0
-#define MIN_ARF_GF_BOOST 240
-#define MIN_DECAY_FACTOR 0.01
-#define MIN_KF_BOOST 300
+#define BOOST_BREAKOUT 12.5
+#define BOOST_FACTOR 12.5
+#define FACTOR_PT_LOW 0.70
+#define FACTOR_PT_HIGH 0.90
+#define FIRST_PASS_Q 10.0
+#define GF_MAX_BOOST 96.0
+#define INTRA_MODE_PENALTY 1024
+#define KF_MAX_BOOST 128.0
+#define MIN_ARF_GF_BOOST 240
+#define MIN_DECAY_FACTOR 0.01
+#define MIN_KF_BOOST 300
#define NEW_MV_MODE_PENALTY 32
-#define SVC_FACTOR_PT_LOW 0.45
-#define DARK_THRESH 64
-#define DEFAULT_GRP_WEIGHT 1.0
-#define RC_FACTOR_MIN 0.75
-#define RC_FACTOR_MAX 1.75
-#define SECTION_NOISE_DEF 250.0
-#define LOW_I_THRESH 24000
+#define SVC_FACTOR_PT_LOW 0.45
+#define DARK_THRESH 64
+#define DEFAULT_GRP_WEIGHT 1.0
+#define RC_FACTOR_MIN 0.75
+#define RC_FACTOR_MAX 1.75
+#define SECTION_NOISE_DEF 250.0
+#define LOW_I_THRESH 24000
#define NCOUNT_INTRA_THRESH 8192
#define NCOUNT_INTRA_FACTOR 3
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x)-0.000001 : (x) + 0.000001)
-#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
-
#if ARF_STATS_OUTPUT
unsigned int arf_count = 0;
#endif
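Two effects show in the macro block above: the values are re-padded into a column, and DOUBLE_DIVIDE_CHECK picks up the same cast-ambiguity spacing seen in vp9_extend.c, so "(x)-0.000001" is plain subtraction despite its look. A quick check:

#include <stdio.h>

/* "(x)-0.000001" reads like a cast but is plain subtraction */
#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x)-0.000001 : (x) + 0.000001)

int main(void) {
  printf("%f\n", DOUBLE_DIVIDE_CHECK(-2.0)); /* -2.000001 */
  printf("%f\n", DOUBLE_DIVIDE_CHECK(2.0));  /* 2.000001 */
  return 0;
}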
@@ -73,8 +72,7 @@
// Resets the first pass file to the given position using a relative seek from
// the current position.
-static void reset_fpf_position(TWO_PASS *p,
- const FIRSTPASS_STATS *position) {
+static void reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) {
p->stats_in = position;
}
@@ -89,8 +87,7 @@
}
static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) {
- if (p->stats_in >= p->stats_in_end)
- return EOF;
+ if (p->stats_in >= p->stats_in_end) return EOF;
*fps = *p->stats_in;
++p->stats_in;
@@ -111,34 +108,19 @@
FILE *fpfile;
fpfile = fopen("firstpass.stt", "a");
- fprintf(fpfile, "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.0lf %12.4lf"
+ fprintf(fpfile,
+ "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.0lf %12.4lf"
"%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf"
"%12.4lf %12.4lf %12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf"
"\n",
- stats->frame,
- stats->weight,
- stats->intra_error,
- stats->coded_error,
- stats->sr_coded_error,
- stats->frame_noise_energy,
- stats->pcnt_inter,
- stats->pcnt_motion,
- stats->pcnt_second_ref,
- stats->pcnt_neutral,
- stats->intra_skip_pct,
- stats->intra_smooth_pct,
- stats->inactive_zone_rows,
- stats->inactive_zone_cols,
- stats->MVr,
- stats->mvr_abs,
- stats->MVc,
- stats->mvc_abs,
- stats->MVrv,
- stats->MVcv,
- stats->mv_in_out_count,
- stats->new_mv_count,
- stats->count,
- stats->duration);
+ stats->frame, stats->weight, stats->intra_error, stats->coded_error,
+ stats->sr_coded_error, stats->frame_noise_energy, stats->pcnt_inter,
+ stats->pcnt_motion, stats->pcnt_second_ref, stats->pcnt_neutral,
+ stats->intra_skip_pct, stats->intra_smooth_pct,
+ stats->inactive_zone_rows, stats->inactive_zone_cols, stats->MVr,
+ stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv,
+ stats->MVcv, stats->mv_in_out_count, stats->new_mv_count,
+ stats->count, stats->duration);
fclose(fpfile);
}
#endif
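The fprintf rewrite above shows the call-wrapping rule for long argument lists: break after the opening parenthesis, keep the concatenated string literals stacked, and bin-pack the remaining arguments rather than giving each its own line. Sketch:

#include <stdio.h>

int main(void) {
  double frame = 1.0, weight = 0.5, err = 12.25; /* made-up stats */
  /* break after "(", keep adjacent string literals stacked, pack the
     remaining arguments */
  fprintf(stdout,
          "%12.0lf %12.4lf"
          "%12.4lf\n",
          frame, weight, err);
  return 0;
}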
@@ -146,7 +128,7 @@
#if CONFIG_FP_MB_STATS
static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP9_COMMON *cm,
- struct vpx_codec_pkt_list *pktlist) {
+ struct vpx_codec_pkt_list *pktlist) {
struct vpx_codec_cx_pkt pkt;
pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats;
@@ -156,88 +138,88 @@
#endif
static void zero_stats(FIRSTPASS_STATS *section) {
- section->frame = 0.0;
- section->weight = 0.0;
- section->intra_error = 0.0;
- section->coded_error = 0.0;
- section->sr_coded_error = 0.0;
+ section->frame = 0.0;
+ section->weight = 0.0;
+ section->intra_error = 0.0;
+ section->coded_error = 0.0;
+ section->sr_coded_error = 0.0;
section->frame_noise_energy = 0.0;
- section->pcnt_inter = 0.0;
- section->pcnt_motion = 0.0;
- section->pcnt_second_ref = 0.0;
- section->pcnt_neutral = 0.0;
- section->intra_skip_pct = 0.0;
- section->intra_smooth_pct = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
+ section->pcnt_second_ref = 0.0;
+ section->pcnt_neutral = 0.0;
+ section->intra_skip_pct = 0.0;
+ section->intra_smooth_pct = 0.0;
section->inactive_zone_rows = 0.0;
section->inactive_zone_cols = 0.0;
- section->MVr = 0.0;
- section->mvr_abs = 0.0;
- section->MVc = 0.0;
- section->mvc_abs = 0.0;
- section->MVrv = 0.0;
- section->MVcv = 0.0;
- section->mv_in_out_count = 0.0;
- section->new_mv_count = 0.0;
- section->count = 0.0;
- section->duration = 1.0;
- section->spatial_layer_id = 0;
+ section->MVr = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
+ section->new_mv_count = 0.0;
+ section->count = 0.0;
+ section->duration = 1.0;
+ section->spatial_layer_id = 0;
}
static void accumulate_stats(FIRSTPASS_STATS *section,
const FIRSTPASS_STATS *frame) {
- section->frame += frame->frame;
- section->weight += frame->weight;
- section->spatial_layer_id = frame->spatial_layer_id;
- section->intra_error += frame->intra_error;
- section->coded_error += frame->coded_error;
- section->sr_coded_error += frame->sr_coded_error;
+ section->frame += frame->frame;
+ section->weight += frame->weight;
+ section->spatial_layer_id = frame->spatial_layer_id;
+ section->intra_error += frame->intra_error;
+ section->coded_error += frame->coded_error;
+ section->sr_coded_error += frame->sr_coded_error;
section->frame_noise_energy += frame->frame_noise_energy;
- section->pcnt_inter += frame->pcnt_inter;
- section->pcnt_motion += frame->pcnt_motion;
- section->pcnt_second_ref += frame->pcnt_second_ref;
- section->pcnt_neutral += frame->pcnt_neutral;
- section->intra_skip_pct += frame->intra_skip_pct;
- section->intra_smooth_pct += frame->intra_smooth_pct;
+ section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_motion += frame->pcnt_motion;
+ section->pcnt_second_ref += frame->pcnt_second_ref;
+ section->pcnt_neutral += frame->pcnt_neutral;
+ section->intra_skip_pct += frame->intra_skip_pct;
+ section->intra_smooth_pct += frame->intra_smooth_pct;
section->inactive_zone_rows += frame->inactive_zone_rows;
section->inactive_zone_cols += frame->inactive_zone_cols;
- section->MVr += frame->MVr;
- section->mvr_abs += frame->mvr_abs;
- section->MVc += frame->MVc;
- section->mvc_abs += frame->mvc_abs;
- section->MVrv += frame->MVrv;
- section->MVcv += frame->MVcv;
- section->mv_in_out_count += frame->mv_in_out_count;
- section->new_mv_count += frame->new_mv_count;
- section->count += frame->count;
- section->duration += frame->duration;
+ section->MVr += frame->MVr;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
+ section->new_mv_count += frame->new_mv_count;
+ section->count += frame->count;
+ section->duration += frame->duration;
}
static void subtract_stats(FIRSTPASS_STATS *section,
const FIRSTPASS_STATS *frame) {
- section->frame -= frame->frame;
- section->weight -= frame->weight;
- section->intra_error -= frame->intra_error;
- section->coded_error -= frame->coded_error;
- section->sr_coded_error -= frame->sr_coded_error;
+ section->frame -= frame->frame;
+ section->weight -= frame->weight;
+ section->intra_error -= frame->intra_error;
+ section->coded_error -= frame->coded_error;
+ section->sr_coded_error -= frame->sr_coded_error;
section->frame_noise_energy -= frame->frame_noise_energy;
- section->pcnt_inter -= frame->pcnt_inter;
- section->pcnt_motion -= frame->pcnt_motion;
- section->pcnt_second_ref -= frame->pcnt_second_ref;
- section->pcnt_neutral -= frame->pcnt_neutral;
- section->intra_skip_pct -= frame->intra_skip_pct;
- section->intra_smooth_pct -= frame->intra_smooth_pct;
+ section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_motion -= frame->pcnt_motion;
+ section->pcnt_second_ref -= frame->pcnt_second_ref;
+ section->pcnt_neutral -= frame->pcnt_neutral;
+ section->intra_skip_pct -= frame->intra_skip_pct;
+ section->intra_smooth_pct -= frame->intra_smooth_pct;
section->inactive_zone_rows -= frame->inactive_zone_rows;
section->inactive_zone_cols -= frame->inactive_zone_cols;
- section->MVr -= frame->MVr;
- section->mvr_abs -= frame->mvr_abs;
- section->MVc -= frame->MVc;
- section->mvc_abs -= frame->mvc_abs;
- section->MVrv -= frame->MVrv;
- section->MVcv -= frame->MVcv;
- section->mv_in_out_count -= frame->mv_in_out_count;
- section->new_mv_count -= frame->new_mv_count;
- section->count -= frame->count;
- section->duration -= frame->duration;
+ section->MVr -= frame->MVr;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
+ section->new_mv_count -= frame->new_mv_count;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
}
// Calculate an active area of the image that discounts formatting
@@ -248,9 +230,10 @@
const FIRSTPASS_STATS *this_frame) {
double active_pct;
- active_pct = 1.0 -
- ((this_frame->intra_skip_pct / 2) +
- ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
+ active_pct =
+ 1.0 -
+ ((this_frame->intra_skip_pct / 2) +
+ ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
}
@@ -265,8 +248,9 @@
const double av_weight = stats->weight / stats->count;
const double av_err = (stats->coded_error * av_weight) / stats->count;
double modified_error =
- av_err * pow(this_frame->coded_error * this_frame->weight /
- DOUBLE_DIVIDE_CHECK(av_err), oxcf->two_pass_vbrbias / 100.0);
+ av_err * pow(this_frame->coded_error * this_frame->weight /
+ DOUBLE_DIVIDE_CHECK(av_err),
+ oxcf->two_pass_vbrbias / 100.0);
// Correction for active area. Frames with a reduced active area
// (eg due to formatting bars) have a higher error per mb for the
@@ -274,10 +258,10 @@
// 0.5N blocks of complexity 2X is a little easier than coding N
// blocks of complexity X.
modified_error *=
- pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
+ pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
- return fclamp(modified_error,
- twopass->modified_error_min, twopass->modified_error_max);
+ return fclamp(modified_error, twopass->modified_error_min,
+ twopass->modified_error_max);
}
// This function returns the maximum target rate per frame.
@@ -284,7 +268,8 @@
static int frame_max_bits(const RATE_CONTROL *rc,
const VP9EncoderConfig *oxcf) {
int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
- (int64_t)oxcf->two_pass_vbrmax_section) / 100;
+ (int64_t)oxcf->two_pass_vbrmax_section) /
+ 100;
if (max_bits < 0)
max_bits = 0;
else if (max_bits > rc->max_frame_bandwidth)
@@ -311,14 +296,10 @@
static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
switch (bsize) {
- case BLOCK_8X8:
- return vpx_mse8x8;
- case BLOCK_16X8:
- return vpx_mse16x8;
- case BLOCK_8X16:
- return vpx_mse8x16;
- default:
- return vpx_mse16x16;
+ case BLOCK_8X8: return vpx_mse8x8;
+ case BLOCK_16X8: return vpx_mse16x8;
+ case BLOCK_8X16: return vpx_mse8x16;
+ default: return vpx_mse16x16;
}
}
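Switch cases whose body is a single statement are compacted so the label and statement share a line (AllowShortCaseLabelsOnASingleLine: true). Sketch mirroring the shape with stand-in names:

#include <stdio.h>

static const char *block_name(int bsize) { /* illustrative stand-in */
  switch (bsize) {
    case 0: return "8x8";
    case 1: return "16x8";
    case 2: return "8x16";
    default: return "16x16"; /* label and statement share a line */
  }
}

int main(void) {
  printf("%s\n", block_name(1));
  return 0;
}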
@@ -337,38 +318,26 @@
switch (bd) {
default:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_8_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_8_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_8_mse8x16;
- default:
- return vpx_highbd_8_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_8_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_8_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_8_mse8x16;
+ default: return vpx_highbd_8_mse16x16;
}
break;
case 10:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_10_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_10_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_10_mse8x16;
- default:
- return vpx_highbd_10_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_10_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_10_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_10_mse8x16;
+ default: return vpx_highbd_10_mse16x16;
}
break;
case 12:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_12_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_12_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_12_mse8x16;
- default:
- return vpx_highbd_12_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_12_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_12_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_12_mse8x16;
+ default: return vpx_highbd_12_mse16x16;
}
break;
}
@@ -391,8 +360,7 @@
int sr = 0;
const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
- while ((dim << sr) < MAX_FULL_PEL_VAL)
- ++sr;
+ while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
return sr;
}
@@ -400,8 +368,8 @@
const MV *ref_mv, MV *best_mv,
int *best_motion_err) {
MACROBLOCKD *const xd = &x->e_mbd;
- MV tmp_mv = {0, 0};
- MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3};
+ MV tmp_mv = { 0, 0 };
+ MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
int num00, tmp_err, n;
const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
@@ -423,12 +391,11 @@
// Center the initial step/diamond search on best mv.
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
- step_param,
- x->sadperbit16, &num00, &v_fn_ptr, ref_mv);
+ step_param, x->sadperbit16, &num00,
+ &v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
- if (tmp_err < INT_MAX - new_mv_mode_penalty)
- tmp_err += new_mv_mode_penalty;
+ if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
if (tmp_err < *best_motion_err) {
*best_motion_err = tmp_err;
@@ -446,8 +413,8 @@
--num00;
} else {
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
- step_param + n, x->sadperbit16,
- &num00, &v_fn_ptr, ref_mv);
+ step_param + n, x->sadperbit16, &num00,
+ &v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty)
@@ -463,11 +430,9 @@
static BLOCK_SIZE get_bsize(const VP9_COMMON *cm, int mb_row, int mb_col) {
if (2 * mb_col + 1 < cm->mi_cols) {
- return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16
- : BLOCK_16X8;
+ return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
} else {
- return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16
- : BLOCK_8X8;
+ return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 : BLOCK_8X8;
}
}
@@ -475,11 +440,9 @@
int i;
for (i = 0; i < QINDEX_RANGE; ++i)
- if (vp9_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q)
- break;
+ if (vp9_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
- if (i == QINDEX_RANGE)
- i--;
+ if (i == QINDEX_RANGE) i--;
return i;
}
@@ -487,8 +450,7 @@
static void set_first_pass_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
if (!cpi->refresh_alt_ref_frame &&
- (cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY))) {
+ (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
cm->frame_type = KEY_FRAME;
} else {
cm->frame_type = INTER_FRAME;
@@ -503,22 +465,17 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- ret_val = thresh;
- break;
- case VPX_BITS_10:
- ret_val = thresh >> 4;
- break;
- case VPX_BITS_12:
- ret_val = thresh >> 8;
- break;
+ case VPX_BITS_8: ret_val = thresh; break;
+ case VPX_BITS_10: ret_val = thresh >> 4; break;
+ case VPX_BITS_12: ret_val = thresh >> 8; break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
}
}
#else
- (void) cm;
+ (void)cm;
#endif // CONFIG_VP9_HIGHBITDEPTH
return ret_val;
}
@@ -534,22 +491,17 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- ret_val = UL_INTRA_THRESH;
- break;
- case VPX_BITS_10:
- ret_val = UL_INTRA_THRESH << 2;
- break;
- case VPX_BITS_12:
- ret_val = UL_INTRA_THRESH << 4;
- break;
+ case VPX_BITS_8: ret_val = UL_INTRA_THRESH; break;
+ case VPX_BITS_10: ret_val = UL_INTRA_THRESH << 2; break;
+ case VPX_BITS_12: ret_val = UL_INTRA_THRESH << 4; break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
}
}
#else
- (void) cm;
+ (void)cm;
#endif // CONFIG_VP9_HIGHBITDEPTH
return ret_val;
}
@@ -560,22 +512,17 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- ret_val = SMOOTH_INTRA_THRESH;
- break;
- case VPX_BITS_10:
- ret_val = SMOOTH_INTRA_THRESH << 4;
- break;
- case VPX_BITS_12:
- ret_val = SMOOTH_INTRA_THRESH << 8;
- break;
+ case VPX_BITS_8: ret_val = SMOOTH_INTRA_THRESH; break;
+ case VPX_BITS_10: ret_val = SMOOTH_INTRA_THRESH << 4; break;
+ case VPX_BITS_12: ret_val = SMOOTH_INTRA_THRESH << 8; break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
}
}
#else
- (void) cm;
+ (void)cm;
#endif // CONFIG_VP9_HIGHBITDEPTH
return ret_val;
}
@@ -585,10 +532,8 @@
#define KERNEL_SIZE 3
// Baseline Kernel weights for first pass noise metric
-static uint8_t fp_dn_kernal_3[KERNEL_SIZE * KERNEL_SIZE] = {
- 1, 2, 1,
- 2, 4, 2,
- 1, 2, 1};
+static uint8_t fp_dn_kernal_3[KERNEL_SIZE * KERNEL_SIZE] = { 1, 2, 1, 2, 4,
+ 2, 1, 2, 1 };
// Estimate noise at a single point based on the impact of a spatial kernel
// on the point value.
@@ -623,7 +568,7 @@
if (max_diff < FP_MAX_DN_THRESH)
// Update the source value with the new filtered value
- dn_val = (sum_val + (sum_weight >> 1)) / sum_weight;
+ dn_val = (sum_val + (sum_weight >> 1)) / sum_weight;
else
dn_val = *src_ptr;
@@ -666,7 +611,7 @@
if (max_diff < FP_MAX_DN_THRESH)
// Update the source value with the new filtered value
- dn_val = (sum_val + (sum_weight >> 1)) / sum_weight;
+ dn_val = (sum_val + (sum_weight >> 1)) / sum_weight;
else
dn_val = *CONVERT_TO_SHORTPTR(src_ptr);
@@ -738,9 +683,9 @@
int image_data_start_row = INVALID_ROW;
int new_mv_count = 0;
int sum_in_vectors = 0;
- MV lastmv = {0, 0};
+ MV lastmv = { 0, 0 };
TWO_PASS *twopass = &cpi->twopass;
- const MV zero_mv = {0, 0};
+ const MV zero_mv = { 0, 0 };
int recon_y_stride, recon_uv_stride, uv_mb_height;
YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
@@ -748,8 +693,9 @@
YV12_BUFFER_CONFIG *const new_yv12 = get_frame_new_buffer(cm);
const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12;
- LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ?
- &cpi->svc.layer_context[cpi->svc.spatial_layer_id] : NULL;
+ LAYER_CONTEXT *const lc =
+ is_two_pass_svc(cpi) ? &cpi->svc.layer_context[cpi->svc.spatial_layer_id]
+ : NULL;
double intra_factor;
double brightness_factor;
BufferPool *const pool = cm->buffer_pool;
@@ -790,8 +736,7 @@
cpi->refresh_golden_frame = 0;
}
- if (lc->current_video_frame_in_layer == 0)
- cpi->ref_frame_flags = 0;
+ if (lc->current_video_frame_in_layer == 0) cpi->ref_frame_flags = 0;
vp9_scale_references(cpi);
@@ -812,7 +757,7 @@
}
set_ref_ptrs(cm, xd,
- (cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME: NONE,
+ (cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME : NONE,
(cpi->ref_frame_flags & VP9_GOLD_FLAG) ? GOLDEN_FRAME : NONE);
cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source,
@@ -852,7 +797,7 @@
uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
- MV best_ref_mv = {0, 0};
+ MV best_ref_mv = { 0, 0 };
// Reset above block coeffs.
recon_yoffset = (mb_row * recon_y_stride * 16);
@@ -861,8 +806,7 @@
// Set up limit values for motion vectors to prevent them extending
// outside the UMV borders.
x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16);
- x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
- + BORDER_MV_PIXELS_B16;
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16;
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int this_error;
@@ -883,8 +827,7 @@
xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
xd->mi[0]->sb_type = bsize;
xd->mi[0]->ref_frame[0] = INTRA_FRAME;
- set_mi_row_col(xd, &tile,
- mb_row << 1, num_8x8_blocks_high_lookup[bsize],
+ set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize],
mb_col << 1, num_8x8_blocks_wide_lookup[bsize],
cm->mi_rows, cm->mi_cols);
// Are edges available for intra prediction?
@@ -892,13 +835,13 @@
// above_mi/left_mi must be overwritten with a nonzero value when edges
// are available. Required by vp9_predict_intra_block().
xd->above_mi = (mb_row != 0) ? &mi_above : NULL;
- xd->left_mi = (mb_col > tile.mi_col_start) ? &mi_left : NULL;
+ xd->left_mi = (mb_col > tile.mi_col_start) ? &mi_left : NULL;
// Do intra 16x16 prediction.
x->skip_encode = 0;
xd->mi[0]->mode = DC_PRED;
- xd->mi[0]->tx_size = use_dc_pred ?
- (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
+ xd->mi[0]->tx_size =
+ use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
vp9_encode_intra_block_plane(x, bsize, 0, 0);
this_error = vpx_get_mb_ss(x->plane[0].src_diff);
this_intra_error = this_error;
@@ -933,17 +876,13 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- break;
- case VPX_BITS_10:
- this_error >>= 4;
- break;
- case VPX_BITS_12:
- this_error >>= 8;
- break;
+ case VPX_BITS_8: break;
+ case VPX_BITS_10: this_error >>= 4; break;
+ case VPX_BITS_12: this_error >>= 8; break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
return;
}
}
@@ -998,7 +937,7 @@
(lc != NULL && lc->current_video_frame_in_layer > 0)) {
int tmp_err, motion_error, raw_motion_error;
// Assume 0,0 motion with no mv overhead.
- MV mv = {0, 0} , tmp_mv = {0, 0};
+ MV mv = { 0, 0 }, tmp_mv = { 0, 0 };
struct buf_2d unscaled_last_source_buf_2d;
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
@@ -1007,12 +946,12 @@
motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
} else {
- motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
}
#else
- motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ motion_error =
+ get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
#endif // CONFIG_VP9_HIGHBITDEPTH
// Compute the motion error of the 0,0 motion using the last source
@@ -1027,12 +966,12 @@
raw_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
} else {
- raw_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+ raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &unscaled_last_source_buf_2d);
}
#else
- raw_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+ raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &unscaled_last_source_buf_2d);
#endif // CONFIG_VP9_HIGHBITDEPTH
// TODO(pengchong): Replace the hard-coded threshold
@@ -1055,8 +994,8 @@
// Search in an older reference frame.
if (((lc == NULL && cm->current_video_frame > 1) ||
- (lc != NULL && lc->current_video_frame_in_layer > 1))
- && gld_yv12 != NULL) {
+ (lc != NULL && lc->current_video_frame_in_layer > 1)) &&
+ gld_yv12 != NULL) {
// Assume 0,0 motion with no mv overhead.
int gf_motion_error;
@@ -1066,12 +1005,12 @@
gf_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
} else {
- gf_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
}
#else
- gf_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
#endif // CONFIG_VP9_HIGHBITDEPTH
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
@@ -1127,12 +1066,12 @@
if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
(this_error < (2 * intrapenalty))) {
neutral_count += 1.0;
- // Also track cases where the intra is not much worse than the inter
- // and use this in limiting the GF/arf group length.
+ // Also track cases where the intra is not much worse than the inter
+ // and use this in limiting the GF/arf group length.
} else if ((this_error > NCOUNT_INTRA_THRESH) &&
(this_error < (NCOUNT_INTRA_FACTOR * motion_error))) {
- neutral_count += (double)motion_error /
- DOUBLE_DIVIDE_CHECK((double)this_error);
+ neutral_count +=
+ (double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error);
}
mv.row *= 8;
@@ -1202,8 +1141,7 @@
#endif
// Non-zero vector, was it different from the last non-zero vector?
- if (!is_equal_mv(&mv, &lastmv))
- ++new_mv_count;
+ if (!is_equal_mv(&mv, &lastmv)) ++new_mv_count;
lastmv = mv;
// Does the row vector point inwards or outwards?
@@ -1232,8 +1170,7 @@
--sum_in_vectors;
}
frame_noise_energy += (int64_t)SECTION_NOISE_DEF;
- } else if (this_intra_error <
- scale_sse_threshold(cm, LOW_I_THRESH)) {
+ } else if (this_intra_error < scale_sse_threshold(cm, LOW_I_THRESH)) {
frame_noise_energy += fp_estimate_block_noise(x, bsize);
} else { // 0,0 mv but high error
frame_noise_energy += (int64_t)SECTION_NOISE_DEF;
@@ -1260,10 +1197,10 @@
// Adjust to the next row of MBs.
x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
- x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride -
- uv_mb_height * cm->mb_cols;
- x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride -
- uv_mb_height * cm->mb_cols;
+ x->plane[1].src.buf +=
+ uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
+ x->plane[2].src.buf +=
+ uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
vpx_clear_system_state();
}
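The buffer adjustments just above are the usual row-advance idiom: the column loop has already stepped the source pointers rightwards one macroblock at a time, so adding rows * stride - mb_cols * width rewinds to column 0 while dropping down one macroblock row (16 luma rows, uv_mb_height chroma rows). A generic sketch with invented names:

#include <stdint.h>
static uint8_t *advance_to_next_mb_row(uint8_t *buf, int stride, int mb_size,
                                       int mb_cols) {
  /* Down mb_size pixel rows, then back to the start of the row. */
  return buf + mb_size * stride - mb_size * mb_cols;
}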
@@ -1288,7 +1225,8 @@
// Initial estimate here uses sqrt(mbs) to define the min_err, where the
// number of mbs is proportional to the image area.
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
const double min_err = 200 * sqrt(num_mbs);
intra_factor = intra_factor / (double)num_mbs;
@@ -1316,10 +1254,10 @@
fps.mvr_abs = (double)sum_mvr_abs / mvcount;
fps.MVc = (double)sum_mvc / mvcount;
fps.mvc_abs = (double)sum_mvc_abs / mvcount;
- fps.MVrv = ((double)sum_mvrs -
- ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
- fps.MVcv = ((double)sum_mvcs -
- ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
+ fps.MVrv =
+ ((double)sum_mvrs - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
+ fps.MVcv =
+ ((double)sum_mvcs - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2);
fps.new_mv_count = new_mv_count;
fps.pcnt_motion = (double)mvcount / num_mbs;
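fps.MVrv and fps.MVcv above apply the one-pass variance identity var(x) = (sum(x^2) - (sum(x))^2 / n) / n to the row and column motion components, so no per-block samples need to be stored. The same computation in isolation (illustrative):

static double running_variance(double sum_sq, double sum, int n) {
  /* E[x^2] - (E[x])^2, computed from running sums only. */
  return (sum_sq - (sum * sum) / n) / n;
}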
@@ -1402,15 +1340,11 @@
}
++cm->current_video_frame;
- if (cpi->use_svc)
- vp9_inc_frame_in_layer(cpi);
+ if (cpi->use_svc) vp9_inc_frame_in_layer(cpi);
}
-static double calc_correction_factor(double err_per_mb,
- double err_divisor,
- double pt_low,
- double pt_high,
- int q,
+static double calc_correction_factor(double err_per_mb, double err_divisor,
+ double pt_low, double pt_high, int q,
vpx_bit_depth_t bit_depth) {
const double error_term = err_per_mb / err_divisor;
@@ -1419,19 +1353,16 @@
VPXMIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
// Calculate correction factor.
- if (power_term < 1.0)
- assert(error_term >= 0.0);
+ if (power_term < 1.0) assert(error_term >= 0.0);
return fclamp(pow(error_term, power_term), 0.05, 5.0);
}
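Collecting the pieces of calc_correction_factor(): the result is clamp((err_per_mb / err_divisor)^power, 0.05, 5.0) with power = min(q_real * 0.01 + pt_low, pt_high). A worked sketch with invented inputs:

#include <math.h>
static double correction_factor_example(void) {
  const double error_term = 2.0; /* invented: err_per_mb / err_divisor */
  const double power_term = 0.7; /* invented: between pt_low and pt_high */
  const double f = pow(error_term, power_term); /* ~1.6245 */
  return f < 0.05 ? 0.05 : (f > 5.0 ? 5.0 : f); /* same clamp as fclamp() */
}

So content twice as hard as the divisor's baseline inflates the projected bits per macroblock by roughly 62% at this power.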
-#define ERR_DIVISOR 115.0
-#define NOISE_FACTOR_MIN 0.9
-#define NOISE_FACTOR_MAX 1.1
-static int get_twopass_worst_quality(VP9_COMP *cpi,
- const double section_err,
- double inactive_zone,
- double section_noise,
+#define ERR_DIVISOR 115.0
+#define NOISE_FACTOR_MIN 0.9
+#define NOISE_FACTOR_MAX 1.1
+static int get_twopass_worst_quality(VP9_COMP *cpi, const double section_err,
+ double inactive_zone, double section_noise,
int section_target_bandwidth) {
const RATE_CONTROL *const rc = &cpi->rc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
@@ -1448,13 +1379,14 @@
return rc->worst_quality; // Highest value allowed
} else {
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
const double av_err_per_mb = section_err / active_mbs;
const double speed_term = 1.0 + 0.04 * oxcf->speed;
double last_group_rate_err;
- const int target_norm_bits_per_mb = ((uint64_t)target_rate <<
- BPER_MB_NORMBITS) / active_mbs;
+ const int target_norm_bits_per_mb =
+ ((uint64_t)target_rate << BPER_MB_NORMBITS) / active_mbs;
int q;
int is_svc_upper_layer = 0;
@@ -1462,35 +1394,29 @@
is_svc_upper_layer = 1;
// based on recent history adjust expectations of bits per macroblock.
- last_group_rate_err = (double)twopass->rolling_arf_group_actual_bits /
- DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits);
last_group_rate_err =
- VPXMAX(0.25, VPXMIN(4.0, last_group_rate_err));
+ (double)twopass->rolling_arf_group_actual_bits /
+ DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits);
+ last_group_rate_err = VPXMAX(0.25, VPXMIN(4.0, last_group_rate_err));
twopass->bpm_factor *= (3.0 + last_group_rate_err) / 4.0;
- twopass->bpm_factor =
- VPXMAX(0.25, VPXMIN(4.0, twopass->bpm_factor));
+ twopass->bpm_factor = VPXMAX(0.25, VPXMIN(4.0, twopass->bpm_factor));
// Try and pick a max Q that will be high enough to encode the
// content at the given rate.
for (q = rc->best_quality; q < rc->worst_quality; ++q) {
- const double factor =
- calc_correction_factor(av_err_per_mb,
- ERR_DIVISOR,
- is_svc_upper_layer ? SVC_FACTOR_PT_LOW :
- FACTOR_PT_LOW, FACTOR_PT_HIGH, q,
- cpi->common.bit_depth);
- const int bits_per_mb =
- vp9_rc_bits_per_mb(INTER_FRAME, q,
- factor * speed_term * cpi->twopass.bpm_factor *
- noise_factor,
- cpi->common.bit_depth);
- if (bits_per_mb <= target_norm_bits_per_mb)
- break;
+ const double factor = calc_correction_factor(
+ av_err_per_mb, ERR_DIVISOR,
+ is_svc_upper_layer ? SVC_FACTOR_PT_LOW : FACTOR_PT_LOW,
+ FACTOR_PT_HIGH, q, cpi->common.bit_depth);
+ const int bits_per_mb = vp9_rc_bits_per_mb(
+ INTER_FRAME, q,
+ factor * speed_term * cpi->twopass.bpm_factor * noise_factor,
+ cpi->common.bit_depth);
+ if (bits_per_mb <= target_norm_bits_per_mb) break;
}
// Restriction on active max q for constrained quality mode.
- if (cpi->oxcf.rc_mode == VPX_CQ)
- q = VPXMAX(q, oxcf->cq_level);
+ if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
return q;
}
}
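The q loop in this function has a simple shape worth naming: starting from the best allowed quality, walk toward worse quality and stop at the first q whose projected bits per macroblock fits the per-MB target, falling through to rc->worst_quality if nothing fits. A stripped-down sketch, with the rate model abstracted behind an invented function pointer:

static int first_q_that_fits(int best_q, int worst_q, int target_bits_per_mb,
                             int (*projected_bits_per_mb)(int q)) {
  int q;
  for (q = best_q; q < worst_q; ++q) {
    /* Worse quality projects fewer bits; stop once under budget. */
    if (projected_bits_per_mb(q) <= target_bits_per_mb) break;
  }
  return q; /* worst_q if no candidate met the target */
}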
@@ -1520,8 +1446,7 @@
setup_rf_level_maxq(cpi);
}
-void calculate_coded_size(VP9_COMP *cpi,
- int *scaled_frame_width,
+void calculate_coded_size(VP9_COMP *cpi, int *scaled_frame_width,
int *scaled_frame_height) {
RATE_CONTROL *const rc = &cpi->rc;
*scaled_frame_width = rc->frame_width[rc->frame_size_selector];
@@ -1531,11 +1456,12 @@
void vp9_init_second_pass(VP9_COMP *cpi) {
SVC *const svc = &cpi->svc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
- const int is_two_pass_svc = (svc->number_spatial_layers > 1) ||
- (svc->number_temporal_layers > 1);
+ const int is_two_pass_svc =
+ (svc->number_spatial_layers > 1) || (svc->number_temporal_layers > 1);
RATE_CONTROL *const rc = &cpi->rc;
- TWO_PASS *const twopass = is_two_pass_svc ?
- &svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass;
+ TWO_PASS *const twopass =
+ is_two_pass_svc ? &svc->layer_context[svc->spatial_layer_id].twopass
+ : &cpi->twopass;
double frame_rate;
FIRSTPASS_STATS *stats;
@@ -1542,8 +1468,7 @@
zero_stats(&twopass->total_stats);
zero_stats(&twopass->total_left_stats);
- if (!twopass->stats_in_end)
- return;
+ if (!twopass->stats_in_end) return;
stats = &twopass->total_stats;
@@ -1559,13 +1484,14 @@
if (is_two_pass_svc) {
vp9_update_spatial_layer_framerate(cpi, frame_rate);
- twopass->bits_left = (int64_t)(stats->duration *
- svc->layer_context[svc->spatial_layer_id].target_bandwidth /
- 10000000.0);
+ twopass->bits_left =
+ (int64_t)(stats->duration *
+ svc->layer_context[svc->spatial_layer_id].target_bandwidth /
+ 10000000.0);
} else {
vp9_new_framerate(cpi, frame_rate);
- twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth /
- 10000000.0);
+ twopass->bits_left =
+ (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
}
// This variable monitors how far behind the second ref update is lagging.
@@ -1574,14 +1500,14 @@
// Scan the first pass file and calculate a modified total error based upon
// the bias/power function used to allocate bits.
{
- const double avg_error = stats->coded_error /
- DOUBLE_DIVIDE_CHECK(stats->count);
+ const double avg_error =
+ stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
const FIRSTPASS_STATS *s = twopass->stats_in;
double modified_error_total = 0.0;
- twopass->modified_error_min = (avg_error *
- oxcf->two_pass_vbrmin_section) / 100;
- twopass->modified_error_max = (avg_error *
- oxcf->two_pass_vbrmax_section) / 100;
+ twopass->modified_error_min =
+ (avg_error * oxcf->two_pass_vbrmin_section) / 100;
+ twopass->modified_error_max =
+ (avg_error * oxcf->two_pass_vbrmax_section) / 100;
while (s < twopass->stats_in_end) {
modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s);
++s;
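two_pass_vbrmin_section and two_pass_vbrmax_section are user-supplied percentages, so the two assignments above simply build a window around the average coded error; going by the min/max names, each frame's modified error is presumably held inside that window later on. As an invented illustration, avg_error = 1000 with settings of 40 and 400 gives modified_error_min = 1000 * 40 / 100 = 400 and modified_error_max = 1000 * 400 / 100 = 4000.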
@@ -1623,17 +1549,15 @@
static double get_sr_decay_rate(const VP9_COMP *cpi,
const FIRSTPASS_STATS *frame) {
- const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
- double sr_diff =
- (frame->sr_coded_error - frame->coded_error) / num_mbs;
+ const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+ : cpi->common.MBs;
+ double sr_diff = (frame->sr_coded_error - frame->coded_error) / num_mbs;
double sr_decay = 1.0;
double modified_pct_inter;
double modified_pcnt_intra;
const double motion_amplitude_part =
- frame->pcnt_motion *
- ((frame->mvc_abs + frame->mvr_abs) /
- (cpi->initial_height + cpi->initial_width));
+ frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) /
+ (cpi->initial_height + cpi->initial_width));
modified_pct_inter = frame->pcnt_inter;
if (((frame->coded_error / num_mbs) > LOW_CODED_ERR_PER_MB) &&
@@ -1643,11 +1567,9 @@
}
modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
-
if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
- sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
- motion_amplitude_part -
+ sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - motion_amplitude_part -
(INTRA_PART * modified_pcnt_intra);
}
return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
@@ -1657,8 +1579,7 @@
// quality is decaying from frame to frame.
static double get_zero_motion_factor(const VP9_COMP *cpi,
const FIRSTPASS_STATS *frame) {
- const double zero_motion_pct = frame->pcnt_inter -
- frame->pcnt_motion;
+ const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
double sr_decay = get_sr_decay_rate(cpi, frame);
return VPXMIN(sr_decay, zero_motion_pct);
}
@@ -1669,8 +1590,8 @@
const FIRSTPASS_STATS *next_frame) {
const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
const double zero_motion_factor =
- (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
- ZM_POWER_FACTOR));
+ (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
+ ZM_POWER_FACTOR));
return VPXMAX(zero_motion_factor,
(sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
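The return statement above linearly interpolates between the second-reference decay and a perfect score: with z the zero-motion factor, sr + (1 - sr) * z pulls the decay toward 1.0 in proportion to how static the content is, and the outer VPXMAX keeps the result from falling below z itself. A small numeric sketch with invented values:

static double blended_decay(double sr_decay, double zero_motion) {
  /* e.g. sr_decay = 0.80, zero_motion = 0.60 -> 0.80 + 0.20 * 0.60 = 0.92 */
  const double blend = sr_decay + (1.0 - sr_decay) * zero_motion;
  return blend > zero_motion ? blend : zero_motion;
}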
@@ -1679,8 +1600,8 @@
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP9_COMP *cpi,
- int frame_interval, int still_interval,
+static int detect_transition_to_still(VP9_COMP *cpi, int frame_interval,
+ int still_interval,
double loop_decay_rate,
double last_decay_rate) {
TWO_PASS *const twopass = &cpi->twopass;
@@ -1689,8 +1610,7 @@
// Break clause to detect very still sections after motion
// For example, a static image after a fade or other transition
// instead of a clean scene cut.
- if (frame_interval > rc->min_gf_interval &&
- loop_decay_rate >= 0.999 &&
+ if (frame_interval > rc->min_gf_interval && loop_decay_rate >= 0.999 &&
last_decay_rate < 0.9) {
int j;
@@ -1697,11 +1617,9 @@
// Look ahead a few frames to see if static condition persists...
for (j = 0; j < still_interval; ++j) {
const FIRSTPASS_STATS *stats = &twopass->stats_in[j];
- if (stats >= twopass->stats_in_end)
- break;
+ if (stats >= twopass->stats_in_end) break;
- if (stats->pcnt_inter - stats->pcnt_motion < 0.999)
- break;
+ if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
}
// Only if it does do we signal a transition to still.
@@ -1743,30 +1661,27 @@
// Accumulate a measure of how uniform (or conversely how random) the motion
// field is (a ratio of abs(mv) / mv).
if (pct > 0.05) {
- const double mvr_ratio = fabs(stats->mvr_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
- const double mvc_ratio = fabs(stats->mvc_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
+ const double mvr_ratio =
+ fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
+ const double mvc_ratio =
+ fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
- *mv_ratio_accumulator += pct * (mvr_ratio < stats->mvr_abs ?
- mvr_ratio : stats->mvr_abs);
- *mv_ratio_accumulator += pct * (mvc_ratio < stats->mvc_abs ?
- mvc_ratio : stats->mvc_abs);
+ *mv_ratio_accumulator +=
+ pct * (mvr_ratio < stats->mvr_abs ? mvr_ratio : stats->mvr_abs);
+ *mv_ratio_accumulator +=
+ pct * (mvc_ratio < stats->mvc_abs ? mvc_ratio : stats->mvc_abs);
}
}
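A note on the ratio accumulated above: in a coherent motion field the mean absolute component is close to the magnitude of the signed mean, so mvr_ratio and mvc_ratio sit near 1.0, while vectors in a random field cancel in the signed mean and drive the ratio up. Illustrative form, with a small epsilon standing in for DOUBLE_DIVIDE_CHECK:

#include <math.h>
static double motion_coherence_ratio(double mv_abs_mean, double mv_mean) {
  /* ~1.0 for uniform motion; grows as signed components cancel out. */
  return fabs(mv_abs_mean) / (fabs(mv_mean) + 1e-9);
}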
#define BASELINE_ERR_PER_MB 1000.0
-static double calc_frame_boost(VP9_COMP *cpi,
- const FIRSTPASS_STATS *this_frame,
- double this_frame_mv_in_out,
- double max_boost) {
+static double calc_frame_boost(VP9_COMP *cpi, const FIRSTPASS_STATS *this_frame,
+ double this_frame_mv_in_out, double max_boost) {
double frame_boost;
- const double lq =
- vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
- cpi->common.bit_depth);
+ const double lq = vp9_convert_qindex_to_q(
+ cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
- int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+ : cpi->common.MBs;
// Correct for any inactive region in the image
num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
@@ -1788,8 +1703,7 @@
return VPXMIN(frame_boost, max_boost * boost_q_correction);
}
-static int calc_arf_boost(VP9_COMP *cpi, int offset,
- int f_frames, int b_frames,
+static int calc_arf_boost(VP9_COMP *cpi, int offset, int f_frames, int b_frames,
int *f_boost, int *b_boost) {
TWO_PASS *const twopass = &cpi->twopass;
int i;
@@ -1805,14 +1719,12 @@
// Search forward from the proposed arf/next gf position.
for (i = 0; i < f_frames; ++i) {
const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
- if (this_frame == NULL)
- break;
+ if (this_frame == NULL) break;
// Update the motion-related elements of the boost calculation.
- accumulate_frame_motion_stats(this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// We want to discount the flash frame itself and the recovery
// frame that follows as both will have poor scores.
@@ -1823,12 +1735,13 @@
if (!flash_detected) {
decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
- ? MIN_DECAY_FACTOR : decay_accumulator;
+ ? MIN_DECAY_FACTOR
+ : decay_accumulator;
}
- boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
}
*f_boost = (int)boost_score;
@@ -1844,14 +1757,12 @@
// Search backward towards last gf position.
for (i = -1; i >= -b_frames; --i) {
const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
- if (this_frame == NULL)
- break;
+ if (this_frame == NULL) break;
// Update the motion-related elements of the boost calculation.
- accumulate_frame_motion_stats(this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// We want to discount the flash frame itself and the recovery

// frame that follows as both will have poor scores.
@@ -1862,12 +1773,13 @@
if (!flash_detected) {
decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
- ? MIN_DECAY_FACTOR : decay_accumulator;
+ ? MIN_DECAY_FACTOR
+ : decay_accumulator;
}
- boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
}
*b_boost = (int)boost_score;
@@ -1915,9 +1827,10 @@
}
// Clamp odd edge cases.
- total_group_bits = (total_group_bits < 0) ?
- 0 : (total_group_bits > twopass->kf_group_bits) ?
- twopass->kf_group_bits : total_group_bits;
+ total_group_bits =
+ (total_group_bits < 0) ? 0 : (total_group_bits > twopass->kf_group_bits)
+ ? twopass->kf_group_bits
+ : total_group_bits;
// Clip based on user supplied data rate variability limit.
if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval)
@@ -1927,13 +1840,12 @@
}
// Calculate the number of extra bits to assign to boosted frames in a group.
-static int calculate_boost_bits(int frame_count,
- int boost, int64_t total_group_bits) {
+static int calculate_boost_bits(int frame_count, int boost,
+ int64_t total_group_bits) {
int allocation_chunks;
// return 0 for invalid inputs (could arise e.g. through rounding errors)
- if (!boost || (total_group_bits <= 0) || (frame_count <= 0) )
- return 0;
+ if (!boost || (total_group_bits <= 0) || (frame_count <= 0)) return 0;
allocation_chunks = (frame_count * 100) + boost;
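In this chunk accounting every frame contributes 100 chunks and the boosted frame adds its boost value on top, so the boosted frame's extra share of total_group_bits works out near boost / (frame_count * 100 + boost); the division itself falls outside this hunk. As an invented example, 16 frames with boost = 400 give 2000 chunks, earmarking about 400 / 2000 = 20% of the group budget as boost bits.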
@@ -1980,15 +1892,14 @@
int mid_frame_idx;
unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS];
int alt_frame_index = frame_index;
- int has_temporal_layers = is_two_pass_svc(cpi) &&
- cpi->svc.number_temporal_layers > 1;
+ int has_temporal_layers =
+ is_two_pass_svc(cpi) && cpi->svc.number_temporal_layers > 1;
// Only encode alt reference frame in temporal base layer.
- if (has_temporal_layers)
- alt_frame_index = cpi->svc.number_temporal_layers;
+ if (has_temporal_layers) alt_frame_index = cpi->svc.number_temporal_layers;
- key_frame = cpi->common.frame_type == KEY_FRAME ||
- vp9_is_upper_layer_key_frame(cpi);
+ key_frame =
+ cpi->common.frame_type == KEY_FRAME || vp9_is_upper_layer_key_frame(cpi);
get_arf_buffer_indices(arf_buffer_indices);
@@ -2008,14 +1919,12 @@
gf_group->arf_ref_idx[0] = arf_buffer_indices[0];
// Step over the golden frame / overlay frame
- if (EOF == input_stats(twopass, &frame_stats))
- return;
+ if (EOF == input_stats(twopass, &frame_stats)) return;
}
// Deduct the boost bits for arf (or gf if it is not a key frame)
// from the group total.
- if (rc->source_alt_ref_pending || !key_frame)
- total_group_bits -= gf_arf_bits;
+ if (rc->source_alt_ref_pending || !key_frame) total_group_bits -= gf_arf_bits;
// Store the bits to spend on the ARF if there is one.
if (rc->source_alt_ref_pending) {
@@ -2033,10 +1942,9 @@
gf_group->arf_update_idx[alt_frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[alt_frame_index] =
- arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
- rc->source_alt_ref_active];
- if (!has_temporal_layers)
- ++frame_index;
+ arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
+ rc->source_alt_ref_active];
+ if (!has_temporal_layers) ++frame_index;
if (cpi->multi_arf_enabled) {
// Set aside a slot for a level 1 arf.
@@ -2043,7 +1951,7 @@
gf_group->update_type[frame_index] = ARF_UPDATE;
gf_group->rf_level[frame_index] = GF_ARF_LOW;
gf_group->arf_src_offset[frame_index] =
- (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
+ (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
++frame_index;
@@ -2056,8 +1964,7 @@
// Allocate bits to the other frames in the group.
for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) {
int arf_idx = 0;
- if (EOF == input_stats(twopass, &frame_stats))
- break;
+ if (EOF == input_stats(twopass, &frame_stats)) break;
if (has_temporal_layers && frame_index == alt_frame_index) {
++frame_index;
@@ -2076,14 +1983,13 @@
mid_boost_bits += (target_frame_size >> 4);
target_frame_size -= (target_frame_size >> 4);
- if (frame_index <= mid_frame_idx)
- arf_idx = 1;
+ if (frame_index <= mid_frame_idx) arf_idx = 1;
}
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
- target_frame_size = clamp(target_frame_size, 0,
- VPXMIN(max_bits, (int)total_group_bits));
+ target_frame_size =
+ clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
gf_group->update_type[frame_index] = LF_UPDATE;
gf_group->rf_level[frame_index] = INTER_NORMAL;
@@ -2120,20 +2026,17 @@
}
// Adjusts the ARNR filter for a GF group.
-static void adjust_group_arnr_filter(VP9_COMP *cpi,
- double section_noise,
+static void adjust_group_arnr_filter(VP9_COMP *cpi, double section_noise,
double section_inter,
double section_motion) {
TWO_PASS *const twopass = &cpi->twopass;
- double section_zeromv = section_inter - section_motion;;
+ double section_zeromv = section_inter - section_motion;
twopass->arnr_strength_adjustment = 0;
- if ((section_zeromv < 0.10) ||
- (section_noise <= (SECTION_NOISE_DEF * 0.75)))
+ if ((section_zeromv < 0.10) || (section_noise <= (SECTION_NOISE_DEF * 0.75)))
twopass->arnr_strength_adjustment -= 1;
- if (section_zeromv > 0.50)
- twopass->arnr_strength_adjustment += 1;
+ if (section_zeromv > 0.50) twopass->arnr_strength_adjustment += 1;
}
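Stated once in prose: the ARNR temporal filter pays off on static, noisy content, so the strength is nudged down when the zero-motion share of the section is small (under 10%) or the measured noise is already below three quarters of SECTION_NOISE_DEF, and nudged up when more than half of the section is zero motion. The rule restated as a standalone sketch:

static int arnr_strength_delta(double zeromv_pct, double noise,
                               double noise_baseline) {
  int delta = 0;
  if (zeromv_pct < 0.10 || noise <= 0.75 * noise_baseline) delta -= 1;
  if (zeromv_pct > 0.50) delta += 1;
  return delta;
}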
// Analyse and define a gf/arf group.
@@ -2217,14 +2120,12 @@
// Set a maximum and minimum interval for the GF group.
// If the image appears almost completely static we can extend beyond this.
{
- int int_max_q =
- (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality,
- cpi->common.bit_depth));
- int int_lbq =
- (int)(vp9_convert_qindex_to_q(rc->last_boosted_qindex,
- cpi->common.bit_depth));
+ int int_max_q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality,
+ cpi->common.bit_depth));
+ int int_lbq = (int)(vp9_convert_qindex_to_q(rc->last_boosted_qindex,
+ cpi->common.bit_depth));
active_min_gf_interval =
- rc->min_gf_interval + arf_active_or_kf + VPXMIN(2, int_max_q / 200);
+ rc->min_gf_interval + arf_active_or_kf + VPXMIN(2, int_max_q / 200);
if (active_min_gf_interval > rc->max_gf_interval)
active_min_gf_interval = rc->max_gf_interval;
@@ -2235,8 +2136,7 @@
// bits to spare and are better with a smaller interval and smaller boost.
// At high Q when there are few bits to spare we are better with a longer
// interval to spread the cost of the GF.
- active_max_gf_interval =
- 12 + arf_active_or_kf + VPXMIN(4, (int_lbq / 6));
+ active_max_gf_interval = 12 + arf_active_or_kf + VPXMIN(4, (int_lbq / 6));
// We have: active_min_gf_interval <= rc->max_gf_interval
if (active_max_gf_interval < active_min_gf_interval)
@@ -2246,8 +2146,7 @@
// Would the active max drop us out just before the next kf?
if ((active_max_gf_interval <= rc->frames_to_key) &&
- (active_max_gf_interval >=
- (rc->frames_to_key - rc->min_gf_interval)))
+ (active_max_gf_interval >= (rc->frames_to_key - rc->min_gf_interval)))
active_max_gf_interval = rc->frames_to_key / 2;
}
}
@@ -2266,8 +2165,7 @@
gf_group_inter += this_frame->pcnt_inter;
gf_group_motion += this_frame->pcnt_motion;
- if (EOF == input_stats(twopass, &next_frame))
- break;
+ if (EOF == input_stats(twopass, &next_frame)) break;
// Test for the case where there is a brief flash but prediction
// quality relative to an earlier frame is then restored.
@@ -2274,10 +2172,9 @@
flash_detected = detect_flash(twopass, 0);
// Update the motion-related elements of the boost calculation.
- accumulate_frame_motion_stats(&next_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// Accumulate the effect of prediction quality decay.
if (!flash_detected) {
@@ -2300,25 +2197,24 @@
}
// Calculate a boost number for this frame.
- boost_score += decay_accumulator * calc_frame_boost(cpi, &next_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out, GF_MAX_BOOST);
// Break out conditions.
if (
- // Break at active_max_gf_interval unless almost totally static.
- ((i >= active_max_gf_interval) &&
- (zero_motion_accumulator < 0.995)) ||
- (
- // Don't break out with a very short interval.
- (i >= active_min_gf_interval) &&
-      // If possible don't break very close to a kf
- ((rc->frames_to_key - i) >= rc->min_gf_interval) &&
- (!flash_detected) &&
- ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
- (abs_mv_in_out_accumulator > 3.0) ||
- (mv_in_out_accumulator < -2.0) ||
- ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
+ // Break at active_max_gf_interval unless almost totally static.
+ ((i >= active_max_gf_interval) && (zero_motion_accumulator < 0.995)) ||
+ (
+ // Don't break out with a very short interval.
+ (i >= active_min_gf_interval) &&
+      // If possible don't break very close to a kf
+ ((rc->frames_to_key - i) >= rc->min_gf_interval) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
boost_score = old_boost_score;
break;
}
@@ -2331,18 +2227,19 @@
rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
// Should we use the alternate reference frame.
- if (allow_alt_ref &&
- (i < cpi->oxcf.lag_in_frames) &&
- (i >= rc->min_gf_interval)) {
+ if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) &&
+ (i >= rc->min_gf_interval)) {
// Calculate the boost for alt ref.
- rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost,
- &b_boost);
+ rc->gfu_boost =
+ calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
rc->source_alt_ref_pending = 1;
// Test to see if multi arf is appropriate.
cpi->multi_arf_enabled =
- (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
- (zero_motion_accumulator < 0.995)) ? 1 : 0;
+ (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
+ (zero_motion_accumulator < 0.995))
+ ? 1
+ : 0;
} else {
rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
rc->source_alt_ref_pending = 0;
@@ -2359,8 +2256,7 @@
int new_gf_interval = (rc->baseline_gf_interval + count) & (~count);
int j;
for (j = 0; j < new_gf_interval - rc->baseline_gf_interval; ++j) {
- if (EOF == input_stats(twopass, this_frame))
- break;
+ if (EOF == input_stats(twopass, this_frame)) break;
gf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame);
gf_group_raw_error += this_frame->coded_error;
gf_group_noise += this_frame->frame_noise_energy;
@@ -2387,19 +2283,17 @@
// of the allocated bit budget.
if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
const int vbr_group_bits_per_frame =
- (int)(gf_group_bits / rc->baseline_gf_interval);
- const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
- const double group_av_noise = gf_group_noise / rc->baseline_gf_interval;
+ (int)(gf_group_bits / rc->baseline_gf_interval);
+ const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
+ const double group_av_noise = gf_group_noise / rc->baseline_gf_interval;
const double group_av_skip_pct =
- gf_group_skip_pct / rc->baseline_gf_interval;
+ gf_group_skip_pct / rc->baseline_gf_interval;
const double group_av_inactive_zone =
- ((gf_group_inactive_zone_rows * 2) /
- (rc->baseline_gf_interval * (double)cm->mb_rows));
- int tmp_q =
- get_twopass_worst_quality(cpi, group_av_err,
- (group_av_skip_pct + group_av_inactive_zone),
- group_av_noise,
- vbr_group_bits_per_frame);
+ ((gf_group_inactive_zone_rows * 2) /
+ (rc->baseline_gf_interval * (double)cm->mb_rows));
+ int tmp_q = get_twopass_worst_quality(
+ cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
+ group_av_noise, vbr_group_bits_per_frame);
twopass->active_worst_quality =
(tmp_q + (twopass->active_worst_quality * 3)) >> 2;
}
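The update on the last two lines is a fixed-point exponential moving average: the fresh per-group estimate tmp_q gets weight 1/4 and the running value 3/4, smoothing active_worst_quality across GF groups. Equivalent helper (illustrative):

static int smooth_worst_q(int prev_q, int new_q) {
  /* 0.75 * prev + 0.25 * new, in integer arithmetic. */
  return (new_q + 3 * prev_q) >> 2;
}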
@@ -2406,17 +2300,16 @@
// Context Adjustment of ARNR filter strength
if (rc->baseline_gf_interval > 1) {
- adjust_group_arnr_filter(cpi,
- (gf_group_noise / rc->baseline_gf_interval),
- (gf_group_inter / rc->baseline_gf_interval),
- (gf_group_motion / rc->baseline_gf_interval));
+ adjust_group_arnr_filter(cpi, (gf_group_noise / rc->baseline_gf_interval),
+ (gf_group_inter / rc->baseline_gf_interval),
+ (gf_group_motion / rc->baseline_gf_interval));
} else {
twopass->arnr_strength_adjustment = 0;
}
// Calculate the extra bits to be used for boosted frame(s)
- gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval,
- rc->gfu_boost, gf_group_bits);
+ gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, rc->gfu_boost,
+ gf_group_bits);
// Adjust KF group bits and error remaining.
twopass->kf_group_error_left -= (int64_t)gf_group_err;
@@ -2443,9 +2336,8 @@
// Calculate a section intra ratio used in setting max loop filter.
if (cpi->common.frame_type != KEY_FRAME) {
- twopass->section_intra_rating =
- calculate_section_intra_ratio(start_pos, twopass->stats_in_end,
- rc->baseline_gf_interval);
+ twopass->section_intra_rating = calculate_section_intra_ratio(
+ start_pos, twopass->stats_in_end, rc->baseline_gf_interval);
}
if (oxcf->resize_mode == RESIZE_DYNAMIC) {
@@ -2491,7 +2383,7 @@
int is_viable_kf = 0;
double pcnt_intra = 1.0 - this_frame->pcnt_inter;
double modified_pcnt_inter =
- this_frame->pcnt_inter - this_frame->pcnt_neutral;
+ this_frame->pcnt_inter - this_frame->pcnt_neutral;
// Does the frame satisfy the primary criteria of a key frame?
// See above for an explanation of the test criteria.
@@ -2503,15 +2395,15 @@
(pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
((this_frame->intra_error /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) <
- KF_II_ERR_THRESHOLD) &&
+ KF_II_ERR_THRESHOLD) &&
((fabs(last_frame->coded_error - this_frame->coded_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
ERR_CHANGE_THRESHOLD) ||
(fabs(last_frame->intra_error - this_frame->intra_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
+ DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
ERR_CHANGE_THRESHOLD) ||
((next_frame->intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
+ DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
II_IMPROVEMENT_THRESHOLD))))) {
int i;
const FIRSTPASS_STATS *start_pos = twopass->stats_in;
@@ -2525,8 +2417,7 @@
double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
- if (next_iiratio > KF_II_MAX)
- next_iiratio = KF_II_MAX;
+ if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;
// Cumulative effect of decay in prediction quality.
if (local_next_frame.pcnt_inter > 0.85)
@@ -2538,10 +2429,9 @@
boost_score += (decay_accumulator * next_iiratio);
// Test various breakout clauses.
- if ((local_next_frame.pcnt_inter < 0.05) ||
- (next_iiratio < 1.5) ||
- (((local_next_frame.pcnt_inter -
- local_next_frame.pcnt_neutral) < 0.20) &&
+ if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) <
+ 0.20) &&
(next_iiratio < 3.0)) ||
((boost_score - old_boost_score) < 3.0) ||
(local_next_frame.intra_error < 200)) {
@@ -2551,8 +2441,7 @@
old_boost_score = boost_score;
// Get the next frame details
- if (EOF == input_stats(twopass, &local_next_frame))
- break;
+ if (EOF == input_stats(twopass, &local_next_frame)) break;
}
// If there is tolerable prediction for at least the next 3 frames then
@@ -2618,8 +2507,7 @@
kf_mod_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
// Initialize the decay rates for the recent frames to check
- for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
- recent_loop_decay[j] = 1.0;
+ for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;
// Find the next keyframe.
i = 0;
@@ -2663,8 +2551,7 @@
// If we don't have a real key frame within the next two
// key_freq intervals then break out of the loop.
- if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq)
- break;
+ if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) break;
} else {
++rc->frames_to_key;
}
@@ -2675,8 +2562,7 @@
// We already break out of the loop above at 2x max.
// This code centers the extra kf if the actual natural interval
// is between 1x and 2x.
- if (cpi->oxcf.auto_key &&
- rc->frames_to_key > cpi->oxcf.key_freq) {
+ if (cpi->oxcf.auto_key && rc->frames_to_key > cpi->oxcf.key_freq) {
FIRSTPASS_STATS tmp_frame = first_frame;
rc->frames_to_key /= 2;
@@ -2704,8 +2590,7 @@
int new_frame_to_key = (rc->frames_to_key + count) & (~count);
int j;
for (j = 0; j < new_frame_to_key - rc->frames_to_key; ++j) {
- if (EOF == input_stats(twopass, this_frame))
- break;
+ if (EOF == input_stats(twopass, this_frame)) break;
kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame);
}
rc->frames_to_key = new_frame_to_key;
@@ -2727,8 +2612,8 @@
// Default allocation based on bits left and relative
// complexity of the section.
- twopass->kf_group_bits = (int64_t)(twopass->bits_left *
- (kf_group_err / twopass->modified_error_left));
+ twopass->kf_group_bits = (int64_t)(
+ twopass->bits_left * (kf_group_err / twopass->modified_error_left));
// Clip based on maximum per frame rate defined by the user.
max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
@@ -2747,23 +2632,22 @@
decay_accumulator = 1.0;
boost_score = 0.0;
for (i = 0; i < (rc->frames_to_key - 1); ++i) {
- if (EOF == input_stats(twopass, &next_frame))
- break;
+ if (EOF == input_stats(twopass, &next_frame)) break;
// Monitor for static sections.
- zero_motion_accumulator = VPXMIN(
- zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
+ zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+ get_zero_motion_factor(cpi, &next_frame));
// Not all frames in the group are necessarily used in calculating boost.
if ((i <= rc->max_gf_interval) ||
((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) {
const double frame_boost =
- calc_frame_boost(cpi, &next_frame, 0, KF_MAX_BOOST);
+ calc_frame_boost(cpi, &next_frame, 0, KF_MAX_BOOST);
// How fast is prediction quality decaying.
if (!detect_flash(twopass, 0)) {
const double loop_decay_rate =
- get_prediction_decay_rate(cpi, &next_frame);
+ get_prediction_decay_rate(cpi, &next_frame);
decay_accumulator *= loop_decay_rate;
decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
av_decay_accumulator += decay_accumulator;
@@ -2780,9 +2664,8 @@
twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
// Calculate a section intra ratio used in setting max loop filter.
- twopass->section_intra_rating =
- calculate_section_intra_ratio(start_position, twopass->stats_in_end,
- rc->frames_to_key);
+ twopass->section_intra_rating = calculate_section_intra_ratio(
+ start_position, twopass->stats_in_end, rc->frames_to_key);
// Apply various clamps for min and max boost
rc->kf_boost = (int)(av_decay_accumulator * boost_score);
@@ -2790,8 +2673,8 @@
rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
// Work out how many bits to allocate for the key frame itself.
- kf_bits = calculate_boost_bits((rc->frames_to_key - 1),
- rc->kf_boost, twopass->kf_group_bits);
+ kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
+ twopass->kf_group_bits);
twopass->kf_group_bits -= kf_bits;
@@ -2846,9 +2729,7 @@
cpi->refresh_golden_frame = 0;
cpi->refresh_alt_ref_frame = 1;
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
if (is_two_pass_svc(cpi)) {
if (cpi->svc.temporal_layer_id > 0) {
@@ -2857,8 +2738,7 @@
}
if (cpi->svc.layer_context[cpi->svc.spatial_layer_id].gold_ref_idx < 0)
cpi->refresh_golden_frame = 0;
- if (cpi->alt_ref_source == NULL)
- cpi->refresh_alt_ref_frame = 0;
+ if (cpi->alt_ref_source == NULL) cpi->refresh_alt_ref_frame = 0;
}
}
@@ -2868,17 +2748,20 @@
// can be skipped for partition check, and the partition size is assigned
// according to the variance
const SVC *const svc = &cpi->svc;
- const TWO_PASS *const twopass = is_two_pass_svc(cpi) ?
- &svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass;
+ const TWO_PASS *const twopass =
+ is_two_pass_svc(cpi) ? &svc->layer_context[svc->spatial_layer_id].twopass
+ : &cpi->twopass;
return (!frame_is_intra_only(&cpi->common) &&
- twopass->stats_in - 2 > twopass->stats_in_start &&
- twopass->stats_in < twopass->stats_in_end &&
- (twopass->stats_in - 1)->pcnt_inter - (twopass->stats_in - 1)->pcnt_motion
- == 1 &&
- (twopass->stats_in - 2)->pcnt_inter - (twopass->stats_in - 2)->pcnt_motion
- == 1 &&
- twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
+ twopass->stats_in - 2 > twopass->stats_in_start &&
+ twopass->stats_in < twopass->stats_in_end &&
+ (twopass->stats_in - 1)->pcnt_inter -
+ (twopass->stats_in - 1)->pcnt_motion ==
+ 1 &&
+ (twopass->stats_in - 2)->pcnt_inter -
+ (twopass->stats_in - 2)->pcnt_motion ==
+ 1 &&
+ twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
}
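The predicate above reads more easily once you note that pcnt_inter - pcnt_motion == 1 means every block in a frame was inter coded with a zero motion vector; three such frames in a row mark a fully static stretch where the partition search can be skipped. Single-frame form (illustrative):

static int frame_is_fully_static(const FIRSTPASS_STATS *s) {
  /* All blocks zero-mv inter: an exact static continuation. */
  return s->pcnt_inter - s->pcnt_motion == 1;
}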
void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
@@ -2889,11 +2772,11 @@
FIRSTPASS_STATS this_frame;
int target_rate;
- LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ?
- &cpi->svc.layer_context[cpi->svc.spatial_layer_id] : 0;
+ LAYER_CONTEXT *const lc =
+ is_two_pass_svc(cpi) ? &cpi->svc.layer_context[cpi->svc.spatial_layer_id]
+ : 0;
- if (!twopass->stats_in)
- return;
+ if (!twopass->stats_in) return;
// If this is an arf frame then we don't want to read the stats file or
// advance the input pointer as we already have what we need.
@@ -2912,15 +2795,14 @@
} else {
lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame;
- if (lc->is_key_frame)
- cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
+ if (lc->is_key_frame) cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
}
}
// Do the firstpass stats indicate that this frame is skippable for the
// partition search?
- if (cpi->sf.allow_partition_search_skip &&
- cpi->oxcf.pass == 2 && (!cpi->use_svc || is_two_pass_svc(cpi))) {
+ if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2 &&
+ (!cpi->use_svc || is_two_pass_svc(cpi))) {
cpi->partition_search_skippable_frame = is_skippable_frame(cpi);
}
@@ -2933,12 +2815,13 @@
twopass->active_worst_quality = cpi->oxcf.cq_level;
} else if (cm->current_video_frame == 0 ||
(lc != NULL && lc->current_video_frame_in_layer == 0)) {
- const int frames_left = (int)(twopass->total_stats.count -
- ((lc != NULL) ? lc->current_video_frame_in_layer
- : cm->current_video_frame));
+ const int frames_left =
+ (int)(twopass->total_stats.count -
+ ((lc != NULL) ? lc->current_video_frame_in_layer
+ : cm->current_video_frame));
// Special case code for first frame.
- const int section_target_bandwidth = (int)(twopass->bits_left /
- frames_left);
+ const int section_target_bandwidth =
+ (int)(twopass->bits_left / frames_left);
const double section_length = twopass->total_left_stats.count;
const double section_error =
twopass->total_left_stats.coded_error / section_length;
@@ -2951,8 +2834,8 @@
twopass->total_left_stats.frame_noise_energy / section_length;
int tmp_q;
- tmp_q = get_twopass_worst_quality(cpi, section_error,
- section_intra_skip + section_inactive_zone,
+ tmp_q = get_twopass_worst_quality(
+ cpi, section_error, section_intra_skip + section_inactive_zone,
section_noise, section_target_bandwidth);
twopass->active_worst_quality = tmp_q;
@@ -2965,8 +2848,7 @@
rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
}
vp9_zero(this_frame);
- if (EOF == input_stats(twopass, &this_frame))
- return;
+ if (EOF == input_stats(twopass, &this_frame)) return;
// Set the frame content type flag.
if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH)
@@ -3011,8 +2893,7 @@
define_gf_group(cpi, &this_frame);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
- if (lc != NULL)
- cpi->refresh_golden_frame = 1;
+ if (lc != NULL) cpi->refresh_golden_frame = 1;
#if ARF_STATS_OUTPUT
{
@@ -3019,9 +2900,9 @@
FILE *fpfile;
fpfile = fopen("arf.stt", "a");
++arf_count;
- fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n",
- cm->current_video_frame, rc->frames_till_gf_update_due,
- rc->kf_boost, arf_count, rc->gfu_boost);
+ fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", cm->current_video_frame,
+ rc->frames_till_gf_update_due, rc->kf_boost, arf_count,
+ rc->gfu_boost);
fclose(fpfile);
}
@@ -3042,11 +2923,12 @@
{
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
// The multiplication by 256 reverses a scaling factor of (>> 8)
// applied when combining MB error values for the frame.
twopass->mb_av_energy =
- log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
+ log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
twopass->mb_smooth_pct = this_frame.intra_smooth_pct;
}
@@ -3078,7 +2960,7 @@
// Calculate the pct rc error.
if (rc->total_actual_bits) {
rc->rate_error_estimate =
- (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
+ (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100);
} else {
rc->rate_error_estimate = 0;
@@ -3095,10 +2977,9 @@
++twopass->gf_group.index;
// If the rate control is drifting consider adjustment to min or maxq.
- if ((cpi->oxcf.rc_mode != VPX_Q) &&
- !cpi->rc.is_src_frame_alt_ref) {
+ if ((cpi->oxcf.rc_mode != VPX_Q) && !cpi->rc.is_src_frame_alt_ref) {
const int maxq_adj_limit =
- rc->worst_quality - twopass->active_worst_quality;
+ rc->worst_quality - twopass->active_worst_quality;
const int minq_adj_limit =
(cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
int aq_extend_min = 0;
@@ -3123,7 +3004,7 @@
--twopass->extend_maxq;
if (rc->rolling_target_bits >= rc->rolling_actual_bits)
++twopass->extend_minq;
- // Overshoot.
+ // Overshoot.
} else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) {
--twopass->extend_minq;
if (rc->rolling_target_bits < rc->rolling_actual_bits)
@@ -3154,14 +3035,14 @@
int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
if (rc->projected_frame_size < fast_extra_thresh) {
rc->vbr_bits_off_target_fast +=
- fast_extra_thresh - rc->projected_frame_size;
+ fast_extra_thresh - rc->projected_frame_size;
rc->vbr_bits_off_target_fast =
- VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+ VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
// Fast adaptation of minQ if necessary to use up the extra bits.
if (rc->avg_frame_bandwidth) {
twopass->extend_minq_fast =
- (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
+ (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
}
twopass->extend_minq_fast = VPXMIN(
twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
--- a/vp9/encoder/vp9_firstpass.h
+++ b/vp9/encoder/vp9_firstpass.h
@@ -154,8 +154,7 @@
// Post encode update of the rate control parameters for 2-pass
void vp9_twopass_postencode_update(struct VP9_COMP *cpi);
-void calculate_coded_size(struct VP9_COMP *cpi,
- int *scaled_frame_width,
+void calculate_coded_size(struct VP9_COMP *cpi, int *scaled_frame_width,
int *scaled_frame_height);
#ifdef __cplusplus
--- a/vp9/encoder/vp9_lookahead.c
+++ b/vp9/encoder/vp9_lookahead.c
@@ -19,26 +19,22 @@
#include "vp9/encoder/vp9_lookahead.h"
/* Return the buffer at the given absolute index and increment the index */
-static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
- int *idx) {
+static struct lookahead_entry *pop(struct lookahead_ctx *ctx, int *idx) {
int index = *idx;
struct lookahead_entry *buf = ctx->buf + index;
assert(index < ctx->max_sz);
- if (++index >= ctx->max_sz)
- index -= ctx->max_sz;
+ if (++index >= ctx->max_sz) index -= ctx->max_sz;
*idx = index;
return buf;
}
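pop() is the ring-buffer step: the index advances by one and wraps to zero on reaching max_sz, using a conditional subtract rather than a division. With a modulo it would read (illustrative):

static int next_ring_index(int idx, int max_sz) {
  return (idx + 1) % max_sz; /* same wrap as the subtract above */
}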
-
void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx) {
if (ctx->buf) {
int i;
- for (i = 0; i < ctx->max_sz; i++)
- vpx_free_frame_buffer(&ctx->buf[i].img);
+ for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
free(ctx->buf);
}
free(ctx);
@@ -45,7 +41,6 @@
}
}
-
struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
unsigned int height,
unsigned int subsampling_x,
@@ -69,20 +64,18 @@
unsigned int i;
ctx->max_sz = depth;
ctx->buf = calloc(depth, sizeof(*ctx->buf));
- if (!ctx->buf)
- goto bail;
+ if (!ctx->buf) goto bail;
for (i = 0; i < depth; i++)
- if (vpx_alloc_frame_buffer(&ctx->buf[i].img,
- width, height, subsampling_x, subsampling_y,
+ if (vpx_alloc_frame_buffer(
+ &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
- use_highbitdepth,
+ use_highbitdepth,
#endif
- VP9_ENC_BORDER_IN_PIXELS,
- legacy_byte_alignment))
+ VP9_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
goto bail;
}
return ctx;
- bail:
+bail:
vp9_lookahead_destroy(ctx);
return NULL;
}
@@ -109,8 +102,7 @@
int subsampling_y = src->subsampling_y;
int larger_dimensions, new_dimensions;
- if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz)
- return 1;
+ if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) return 1;
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
@@ -118,8 +110,7 @@
height != buf->img.y_crop_height ||
uv_width != buf->img.uv_crop_width ||
uv_height != buf->img.uv_crop_height;
- larger_dimensions = width > buf->img.y_width ||
- height > buf->img.y_height ||
+ larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
uv_width > buf->img.uv_width ||
uv_height > buf->img.uv_height;
assert(!larger_dimensions || new_dimensions);
@@ -139,27 +130,22 @@
while (1) {
// Find the first active macroblock in this row.
for (; col < mb_cols; ++col) {
- if (active_map[col])
- break;
+ if (active_map[col]) break;
}
// No more active macroblock in this row.
- if (col == mb_cols)
- break;
+ if (col == mb_cols) break;
// Find the end of active region in this row.
active_end = col;
for (; active_end < mb_cols; ++active_end) {
- if (!active_map[active_end])
- break;
+ if (!active_map[active_end]) break;
}
// Only copy this active region.
- vp9_copy_and_extend_frame_with_rect(src, &buf->img,
- row << 4,
- col << 4, 16,
- (active_end - col) << 4);
+ vp9_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+ 16, (active_end - col) << 4);
// Start again from the end of this active region.
col = active_end;
@@ -172,14 +158,13 @@
if (larger_dimensions) {
YV12_BUFFER_CONFIG new_img;
memset(&new_img, 0, sizeof(new_img));
- if (vpx_alloc_frame_buffer(&new_img,
- width, height, subsampling_x, subsampling_y,
+ if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+ subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
use_highbitdepth,
#endif
- VP9_ENC_BORDER_IN_PIXELS,
- 0))
- return 1;
+ VP9_ENC_BORDER_IN_PIXELS, 0))
+ return 1;
vpx_free_frame_buffer(&buf->img);
buf->img = new_img;
} else if (new_dimensions) {
@@ -202,7 +187,6 @@
return 0;
}
-
struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
int drain) {
struct lookahead_entry *buf = NULL;
@@ -214,7 +198,6 @@
return buf;
}
-
struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
int index) {
struct lookahead_entry *buf = NULL;
@@ -223,8 +206,7 @@
// Forward peek
if (index < ctx->sz) {
index += ctx->read_idx;
- if (index >= ctx->max_sz)
- index -= ctx->max_sz;
+ if (index >= ctx->max_sz) index -= ctx->max_sz;
buf = ctx->buf + index;
}
} else if (index < 0) {
@@ -231,8 +213,7 @@
// Backward peek
if (-index <= MAX_PRE_FRAMES) {
index += ctx->read_idx;
- if (index < 0)
- index += ctx->max_sz;
+ if (index < 0) index += ctx->max_sz;
buf = ctx->buf + index;
}
}
@@ -240,6 +221,4 @@
return buf;
}
-unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
- return ctx->sz;
-}
+unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
--- a/vp9/encoder/vp9_lookahead.h
+++ b/vp9/encoder/vp9_lookahead.h
@@ -26,10 +26,10 @@
#define MAX_LAG_BUFFERS 25
struct lookahead_entry {
- YV12_BUFFER_CONFIG img;
- int64_t ts_start;
- int64_t ts_end;
- unsigned int flags;
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
};
// The max of past frames we want to keep in the queue.
@@ -57,12 +57,10 @@
#endif
unsigned int depth);
-
/**\brief Destroys the lookahead stage
*/
void vp9_lookahead_destroy(struct lookahead_ctx *ctx);
-
/**\brief Enqueue a source buffer
*
* This function will copy the source image into a new framebuffer with
@@ -85,7 +83,6 @@
#endif
unsigned int flags);
-
/**\brief Get the next source buffer to encode
*
*
@@ -96,10 +93,8 @@
* \retval NULL, if drain set and queue is empty
* \retval NULL, if drain not set and queue not of the configured depth
*/
-struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
- int drain);
+struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, int drain);
-
/**\brief Get a future source buffer to encode
*
* \param[in] ctx Pointer to the lookahead context
@@ -109,7 +104,6 @@
*/
struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
int index);
-
/**\brief Get the number of frames currently in the lookahead queue
*
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -22,11 +22,8 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-
-static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
- const MV *ref_mv,
- MV *dst_mv,
- int mb_row,
+static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,
+ MV *dst_mv, int mb_row,
int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -64,9 +61,8 @@
cpi->find_fractional_mv_step(
x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- NULL, NULL,
- &distortion, &sse, NULL, 0, 0);
+ cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
+ 0);
}
xd->mi[0]->mode = NEWMV;
@@ -109,10 +105,10 @@
// based search as well.
if (ref_mv->row != 0 || ref_mv->col != 0) {
unsigned int tmp_err;
- MV zero_ref_mv = {0, 0}, tmp_mv;
+ MV zero_ref_mv = { 0, 0 }, tmp_mv;
- tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
- mb_row, mb_col);
+ tmp_err =
+ do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, mb_row, mb_col);
if (tmp_err < err) {
dst_mv->as_mv = tmp_mv;
err = tmp_err;
@@ -137,7 +133,7 @@
return err;
}
static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) {
- MACROBLOCK *const x = &cpi->td.mb;
+ MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
PREDICTION_MODE best_mode = -1, mode;
unsigned int best_err = INT_MAX;
@@ -148,38 +144,30 @@
unsigned int err;
xd->mi[0]->mode = mode;
- vp9_predict_intra_block(xd, 2, TX_16X16, mode,
- x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].dst.buf, xd->plane[0].dst.stride,
- 0, 0, 0);
+ vp9_predict_intra_block(xd, 2, TX_16X16, mode, x->plane[0].src.buf,
+ x->plane[0].src.stride, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, 0, 0, 0);
err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
// find best
if (err < best_err) {
- best_err = err;
+ best_err = err;
best_mode = mode;
}
}
- if (pbest_mode)
- *pbest_mode = best_mode;
+ if (pbest_mode) *pbest_mode = best_mode;
return best_err;
}
-static void update_mbgraph_mb_stats
-(
- VP9_COMP *cpi,
- MBGRAPH_MB_STATS *stats,
- YV12_BUFFER_CONFIG *buf,
- int mb_y_offset,
- YV12_BUFFER_CONFIG *golden_ref,
- const MV *prev_golden_ref_mv,
- YV12_BUFFER_CONFIG *alt_ref,
- int mb_row,
- int mb_col
-) {
+static void update_mbgraph_mb_stats(VP9_COMP *cpi, MBGRAPH_MB_STATS *stats,
+ YV12_BUFFER_CONFIG *buf, int mb_y_offset,
+ YV12_BUFFER_CONFIG *golden_ref,
+ const MV *prev_golden_ref_mv,
+ YV12_BUFFER_CONFIG *alt_ref, int mb_row,
+ int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error;
@@ -193,10 +181,8 @@
xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;
// do intra 16x16 prediction
- intra_error = find_best_16x16_intra(cpi,
- &stats->ref[INTRA_FRAME].m.mode);
- if (intra_error <= 0)
- intra_error = 1;
+ intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
+ if (intra_error <= 0) intra_error = 1;
stats->ref[INTRA_FRAME].err = intra_error;
// Golden frame MV search, if it exists and is different from the last frame
@@ -204,10 +190,9 @@
int g_motion_error;
xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = golden_ref->y_stride;
- g_motion_error = do_16x16_motion_search(cpi,
- prev_golden_ref_mv,
- &stats->ref[GOLDEN_FRAME].m.mv,
- mb_row, mb_col);
+ g_motion_error =
+ do_16x16_motion_search(cpi, prev_golden_ref_mv,
+ &stats->ref[GOLDEN_FRAME].m.mv, mb_row, mb_col);
stats->ref[GOLDEN_FRAME].err = g_motion_error;
} else {
stats->ref[GOLDEN_FRAME].err = INT_MAX;
@@ -220,8 +205,8 @@
int a_motion_error;
xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = alt_ref->y_stride;
- a_motion_error = do_16x16_zerozero_search(cpi,
- &stats->ref[ALTREF_FRAME].m.mv);
+ a_motion_error =
+ do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv);
stats->ref[ALTREF_FRAME].err = a_motion_error;
} else {
@@ -241,7 +226,7 @@
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
- MV gld_top_mv = {0, 0};
+ MV gld_top_mv = { 0, 0 };
MODE_INFO mi_local;
MODE_INFO mi_above, mi_left;
@@ -248,13 +233,13 @@
vp9_zero(mi_local);
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
- x->mv_row_min = -BORDER_MV_PIXELS_B16;
- x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
+ x->mv_row_min = -BORDER_MV_PIXELS_B16;
+ x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
// Signal to vp9_predict_intra_block() that above is not available
xd->above_mi = NULL;
- xd->plane[0].dst.stride = buf->y_stride;
- xd->plane[0].pre[0].stride = buf->y_stride;
+ xd->plane[0].dst.stride = buf->y_stride;
+ xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
xd->mi[0] = &mi_local;
mi_local.sb_type = BLOCK_16X16;
@@ -263,14 +248,14 @@
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
MV gld_left_mv = gld_top_mv;
- int mb_y_in_offset = mb_y_offset;
+ int mb_y_in_offset = mb_y_offset;
int arf_y_in_offset = arf_y_offset;
int gld_y_in_offset = gld_y_offset;
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
- x->mv_col_min = -BORDER_MV_PIXELS_B16;
- x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
+ x->mv_col_min = -BORDER_MV_PIXELS_B16;
+ x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
// Signal to vp9_predict_intra_block() that left is not available
xd->left_mi = NULL;
@@ -277,9 +262,8 @@
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
- update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
- golden_ref, &gld_left_mv, alt_ref,
- mb_row, mb_col);
+ update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref,
+ &gld_left_mv, alt_ref, mb_row, mb_col);
gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
if (mb_col == 0) {
gld_top_mv = gld_left_mv;
@@ -287,23 +271,22 @@
// Signal to vp9_predict_intra_block() that left is available
xd->left_mi = &mi_left;
- mb_y_in_offset += 16;
- gld_y_in_offset += 16;
- arf_y_in_offset += 16;
- x->mv_col_min -= 16;
- x->mv_col_max -= 16;
+ mb_y_in_offset += 16;
+ gld_y_in_offset += 16;
+ arf_y_in_offset += 16;
+ x->mv_col_min -= 16;
+ x->mv_col_max -= 16;
}
// Signal to vp9_predict_intra_block() that above is available
xd->above_mi = &mi_above;
- mb_y_offset += buf->y_stride * 16;
- gld_y_offset += golden_ref->y_stride * 16;
- if (alt_ref)
- arf_y_offset += alt_ref->y_stride * 16;
- x->mv_row_min -= 16;
- x->mv_row_max -= 16;
- offset += cm->mb_cols;
+ mb_y_offset += buf->y_stride * 16;
+ gld_y_offset += golden_ref->y_stride * 16;
+ if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
+ x->mv_row_min -= 16;
+ x->mv_row_max -= 16;
+ offset += cm->mb_cols;
}
}
@@ -317,9 +300,9 @@
int *arf_not_zz;
- CHECK_MEM_ERROR(cm, arf_not_zz,
- vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
- 1));
+ CHECK_MEM_ERROR(
+ cm, arf_not_zz,
+ vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
// We are not interested in results beyond the alt ref itself.
if (n_frames > cpi->rc.frames_till_gf_update_due)
@@ -335,12 +318,11 @@
MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
int altref_err = mb_stats->ref[ALTREF_FRAME].err;
- int intra_err = mb_stats->ref[INTRA_FRAME ].err;
+ int intra_err = mb_stats->ref[INTRA_FRAME].err;
int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
// Test for altref vs intra and gf and that its mv was 0,0.
- if (altref_err > 1000 ||
- altref_err > intra_err ||
+ if (altref_err > 1000 || altref_err > intra_err ||
altref_err > golden_err) {
arf_not_zz[offset + mb_col]++;
}
@@ -395,11 +377,9 @@
// we need to look ahead beyond where the ARF transitions into
// being a GF - so exit if we don't look ahead beyond that
- if (n_frames <= cpi->rc.frames_till_gf_update_due)
- return;
+ if (n_frames <= cpi->rc.frames_till_gf_update_due) return;
- if (n_frames > MAX_LAG_BUFFERS)
- n_frames = MAX_LAG_BUFFERS;
+ if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS;
cpi->mbgraph_n_frames = n_frames;
for (i = 0; i < n_frames; i++) {
@@ -418,8 +398,8 @@
assert(q_cur != NULL);
- update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
- golden_ref, cpi->Source);
+ update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref,
+ cpi->Source);
}
vpx_clear_system_state();
--- a/vp9/encoder/vp9_mbgraph.h
+++ b/vp9/encoder/vp9_mbgraph.h
@@ -25,9 +25,7 @@
} ref[MAX_REF_FRAMES];
} MBGRAPH_MB_STATS;
-typedef struct {
- MBGRAPH_MB_STATS *mb_stats;
-} MBGRAPH_FRAME_STATS;
+typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
struct VP9_COMP;
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -46,14 +46,10 @@
// Get intersection of UMV window and valid MV window to reduce # of checks
// in diamond search.
- if (x->mv_col_min < col_min)
- x->mv_col_min = col_min;
- if (x->mv_col_max > col_max)
- x->mv_col_max = col_max;
- if (x->mv_row_min < row_min)
- x->mv_row_min = row_min;
- if (x->mv_row_max > row_max)
- x->mv_row_max = row_max;
+ if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max) x->mv_row_max = row_max;
}
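
For reference, the clamping above is a plain interval intersection: each UMV limit is tightened to the overlap of the UMV window and the valid MV window. A standalone sketch of the same idea follows; the struct and field names are stand-ins for the MACROBLOCK limits, not the library's types.

    #include <stdio.h>

    /* Stand-in for the mv_{row,col}_{min,max} fields of MACROBLOCK;
     * the clamps mirror the hunk above. */
    struct mv_window { int col_min, col_max, row_min, row_max; };

    static void intersect_window(struct mv_window *w, int col_min,
                                 int col_max, int row_min, int row_max) {
      if (w->col_min < col_min) w->col_min = col_min; /* raise lower bound */
      if (w->col_max > col_max) w->col_max = col_max; /* lower upper bound */
      if (w->row_min < row_min) w->row_min = row_min;
      if (w->row_max > row_max) w->row_max = row_max;
    }

    int main(void) {
      struct mv_window w = { -1023, 1023, -1023, 1023 }; /* UMV window */
      intersect_window(&w, -64, 64, -32, 32);            /* valid MV window */
      printf("cols [%d,%d] rows [%d,%d]\n", w.col_min, w.col_max,
             w.row_min, w.row_max); /* cols [-64,64] rows [-32,32] */
      return 0;
    }
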
int vp9_init_search_range(int size) {
@@ -61,25 +57,23 @@
// Minimum search size no matter what the passed-in value is.
size = VPXMAX(16, size);
- while ((size << sr) < MAX_FULL_PEL_VAL)
- sr++;
+ while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
return sr;
}
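
vp9_init_search_range() picks the smallest step count sr with (size << sr) >= MAX_FULL_PEL_VAL, then clamps it. A self-contained mirror for sanity-checking; the constant values are assumed to match the definitions in vp9_mcomp.h.

    #include <stdio.h>

    /* Assumed constants (taken to match vp9_mcomp.h in this tree). */
    #define MAX_MVSEARCH_STEPS 11
    #define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1) /* 1023 */
    #define VPXMAX(a, b) ((a) > (b) ? (a) : (b))
    #define VPXMIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirror of vp9_init_search_range(): smallest sr such that
     * (size << sr) reaches MAX_FULL_PEL_VAL, clamped to the step budget. */
    static int init_search_range(int size) {
      int sr = 0;
      size = VPXMAX(16, size);
      while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
      return VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
    }

    int main(void) {
      printf("%d %d %d\n", init_search_range(8), /* size raised to 16, sr 6 */
             init_search_range(64),              /* sr 4 */
             init_search_range(2048));           /* already large, sr 0 */
      return 0;
    }
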
-static INLINE int mv_cost(const MV *mv,
- const int *joint_cost, int *const comp_cost[2]) {
+static INLINE int mv_cost(const MV *mv, const int *joint_cost,
+ int *const comp_cost[2]) {
assert(mv->row >= -MV_MAX && mv->row < MV_MAX);
assert(mv->col >= -MV_MAX && mv->col < MV_MAX);
- return joint_cost[vp9_get_mv_joint(mv)] +
- comp_cost[0][mv->row] + comp_cost[1][mv->col];
+ return joint_cost[vp9_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+ comp_cost[1][mv->col];
}
-int vp9_mv_bit_cost(const MV *mv, const MV *ref,
- const int *mvjcost, int *mvcost[2], int weight) {
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
+int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight) {
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
}
@@ -87,7 +81,7 @@
static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
int *mvcost[2], int error_per_bit) {
if (mvcost) {
- const MV diff = {mv->row - ref->row, mv->col - ref->col};
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
// This product sits at a 32-bit ceiling right now and any additional
// accuracy in either bit cost or error cost will cause it to overflow.
return ROUND_POWER_OF_TWO(
@@ -100,11 +94,9 @@
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
int sad_per_bit) {
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(
- (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) *
- sad_per_bit,
+ (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit,
VP9_PROB_COST_SHIFT);
}
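
The cost helpers above share one shape: a table-driven bit cost for the MV difference, scaled by a rate multiplier and rounded through a power-of-two shift. A minimal numeric sketch; ROUND_POWER_OF_TWO is written out as in vpx_dsp, and VP9_PROB_COST_SHIFT = 9 is an assumption taken from vp9_cost.h.

    #include <stdio.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    #define VP9_PROB_COST_SHIFT 9 /* assumed value, see vp9_cost.h */

    int main(void) {
      /* Say the joint/component tables price this MV diff at 700 units
       * of fixed-point bit cost. */
      const unsigned bit_cost = 700;
      const int weight = 128;     /* vp9_mv_bit_cost() style weight */
      const int sad_per_bit = 12; /* mvsad_err_cost() style multiplier */

      /* vp9_mv_bit_cost: scale by weight, round, shift right by 7. */
      printf("bit cost = %u\n", ROUND_POWER_OF_TWO(bit_cost * weight, 7));
      /* mvsad_err_cost: scale by sad_per_bit, shift by the prob cost shift. */
      printf("sad cost = %u\n",
             ROUND_POWER_OF_TWO(bit_cost * sad_per_bit, VP9_PROB_COST_SHIFT));
      return 0;
    }
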
@@ -114,7 +106,7 @@
for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
// Generate offsets for 4 search sites per step.
- const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}};
+ const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };
int i;
for (i = 0; i < 4; ++i, ++ss_count) {
cfg->ss_mv[ss_count] = ss_mvs[i];
@@ -132,10 +124,9 @@
for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
// Generate offsets for 8 search sites per step.
- const MV ss_mvs[8] = {
- {-len, 0 }, {len, 0 }, { 0, -len}, {0, len},
- {-len, -len}, {-len, len}, {len, -len}, {len, len}
- };
+ const MV ss_mvs[8] = { { -len, 0 }, { len, 0 }, { 0, -len },
+ { 0, len }, { -len, -len }, { -len, len },
+ { len, -len }, { len, len } };
int i;
for (i = 0; i < 8; ++i, ++ss_count) {
cfg->ss_mv[ss_count] = ss_mvs[i];
@@ -149,17 +140,17 @@
/* Estimated (square) error cost of a motion vector (r,c). The 14 scale comes
* from the same math as in mv_err_cost(). */
-#define MVC(r, c) \
- (mvcost ? \
- ((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
- mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
- error_per_bit + 8192) >> 14 : 0)
+#define MVC(r, c) \
+ (mvcost \
+ ? ((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * \
+ error_per_bit + \
+ 8192) >> \
+ 14 \
+ : 0)
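
Unrolled into a function, the reformatted MVC macro reads as below. The joint index packs "row differs" and "col differs" into two bits, and the +8192 is 2^13, half of the final >> 14, so the scale-down rounds to nearest. The table contents in main() are made up for illustration; the real mvcost tables are offset so negative diffs index correctly, and this sketch only uses non-negative diffs.

    #include <stdio.h>

    /* Function form of the MVC macro: estimated (square) error cost of
     * moving to (r, c) when the reference is (rr, rc). */
    static unsigned mv_cost_estimate(int r, int c, int rr, int rc,
                                     const int *mvjcost,
                                     int *const mvcost[2],
                                     int error_per_bit) {
      if (!mvcost) return 0;
      /* Joint index: bit 1 = row differs, bit 0 = col differs. */
      const int joint = ((r != rr) * 2) + (c != rc);
      const unsigned bits =
          (unsigned)(mvjcost[joint] + mvcost[0][r - rr] + mvcost[1][c - rc]);
      /* Scale bits to error units; 8192 = 2^13 rounds the final >> 14. */
      return (bits * error_per_bit + 8192) >> 14;
    }

    int main(void) {
      static const int jcost[4] = { 10, 300, 300, 500 };
      static int rowcost[9] = { 0, 128, 128, 256, 256, 384, 384, 512, 512 };
      static int colcost[9] = { 0, 128, 128, 256, 256, 384, 384, 512, 512 };
      int *cost[2] = { rowcost, colcost };
      /* (r,c)=(2,3) against (0,0): joint=3, bits=500+128+256=884. */
      printf("%u\n", mv_cost_estimate(2, 3, 0, 0, jcost, cost, 128));
      return 0;
    }
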
-
// convert motion vector component to offset for sv[a]f calc
-static INLINE int sp(int x) {
- return x & 7;
-}
+static INLINE int sp(int x) { return x & 7; }
static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
return &buf[(r >> 3) * stride + (c >> 3)];
@@ -167,182 +158,157 @@
#if CONFIG_VP9_HIGHBITDEPTH
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- int64_t tmpmse; \
- if (second_pred == NULL) { \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), \
- sp(r), z, src_stride, &sse); \
- } else { \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), \
- sp(r), z, src_stride, &sse, second_pred); \
- } \
- tmpmse = thismse; \
- tmpmse += MVC(r, c); \
- if (tmpmse >= INT_MAX) { \
- v = INT_MAX; \
- } else if ((v = (uint32_t)tmpmse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
+#define CHECK_BETTER(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ int64_t tmpmse; \
+ if (second_pred == NULL) { \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse); \
+ } else { \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse, second_pred); \
+ } \
+ tmpmse = thismse; \
+ tmpmse += MVC(r, c); \
+ if (tmpmse >= INT_MAX) { \
+ v = INT_MAX; \
+ } else if ((v = (uint32_t)tmpmse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
}
#else
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- if (second_pred == NULL) \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse); \
- else \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- z, src_stride, &sse, second_pred); \
- if ((v = MVC(r, c) + thismse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
+#define CHECK_BETTER(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ if (second_pred == NULL) \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse); \
+ else \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse, second_pred); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
}
#endif
-#define FIRST_LEVEL_CHECKS \
- { \
- unsigned int left, right, up, down, diag; \
- CHECK_BETTER(left, tr, tc - hstep); \
- CHECK_BETTER(right, tr, tc + hstep); \
- CHECK_BETTER(up, tr - hstep, tc); \
- CHECK_BETTER(down, tr + hstep, tc); \
- whichdir = (left < right ? 0 : 1) + \
- (up < down ? 0 : 2); \
- switch (whichdir) { \
- case 0: \
- CHECK_BETTER(diag, tr - hstep, tc - hstep); \
- break; \
- case 1: \
- CHECK_BETTER(diag, tr - hstep, tc + hstep); \
- break; \
- case 2: \
- CHECK_BETTER(diag, tr + hstep, tc - hstep); \
- break; \
- case 3: \
- CHECK_BETTER(diag, tr + hstep, tc + hstep); \
- break; \
- } \
+#define FIRST_LEVEL_CHECKS \
+ { \
+ unsigned int left, right, up, down, diag; \
+ CHECK_BETTER(left, tr, tc - hstep); \
+ CHECK_BETTER(right, tr, tc + hstep); \
+ CHECK_BETTER(up, tr - hstep, tc); \
+ CHECK_BETTER(down, tr + hstep, tc); \
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); \
+ switch (whichdir) { \
+ case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \
+ case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \
+ case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
+ case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
+ } \
}
-#define SECOND_LEVEL_CHECKS \
- { \
- int kr, kc; \
- unsigned int second; \
- if (tr != br && tc != bc) { \
- kr = br - tr; \
- kc = bc - tc; \
- CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
- CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
- } else if (tr == br && tc != bc) { \
- kc = bc - tc; \
- CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
- CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
- switch (whichdir) { \
- case 0: \
- case 1: \
- CHECK_BETTER(second, tr + hstep, tc + kc); \
- break; \
- case 2: \
- case 3: \
- CHECK_BETTER(second, tr - hstep, tc + kc); \
- break; \
- } \
- } else if (tr != br && tc == bc) { \
- kr = br - tr; \
- CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
- CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
- switch (whichdir) { \
- case 0: \
- case 2: \
- CHECK_BETTER(second, tr + kr, tc + hstep); \
- break; \
- case 1: \
- case 3: \
- CHECK_BETTER(second, tr + kr, tc - hstep); \
- break; \
- } \
- } \
+#define SECOND_LEVEL_CHECKS \
+ { \
+ int kr, kc; \
+ unsigned int second; \
+ if (tr != br && tc != bc) { \
+ kr = br - tr; \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
+ CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
+ } else if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
+ CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
+ switch (whichdir) { \
+ case 0: \
+ case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \
+ case 2: \
+ case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \
+ } \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
+ CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
+ switch (whichdir) { \
+ case 0: \
+ case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \
+ case 1: \
+ case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
+ } \
+ } \
}
// TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST was a rewrite of
// SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten
// later in the same way.
-#define SECOND_LEVEL_CHECKS_BEST \
- { \
- unsigned int second; \
- int br0 = br; \
- int bc0 = bc; \
- assert(tr == br || tc == bc); \
- if (tr == br && tc != bc) { \
- kc = bc - tc; \
- } else if (tr != br && tc == bc) { \
- kr = br - tr; \
- } \
- CHECK_BETTER(second, br0 + kr, bc0); \
- CHECK_BETTER(second, br0, bc0 + kc); \
- if (br0 != br || bc0 != bc) { \
- CHECK_BETTER(second, br0 + kr, bc0 + kc); \
- } \
+#define SECOND_LEVEL_CHECKS_BEST \
+ { \
+ unsigned int second; \
+ int br0 = br; \
+ int bc0 = bc; \
+ assert(tr == br || tc == bc); \
+ if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ } \
+ CHECK_BETTER(second, br0 + kr, bc0); \
+ CHECK_BETTER(second, br0, bc0 + kc); \
+ if (br0 != br || bc0 != bc) { \
+ CHECK_BETTER(second, br0 + kr, bc0 + kc); \
+ } \
}
-#define SETUP_SUBPEL_SEARCH \
- const uint8_t *const z = x->plane[0].src.buf; \
- const int src_stride = x->plane[0].src.stride; \
- const MACROBLOCKD *xd = &x->e_mbd; \
- unsigned int besterr = INT_MAX; \
- unsigned int sse; \
- unsigned int whichdir; \
- int thismse; \
- const unsigned int halfiters = iters_per_step; \
- const unsigned int quarteriters = iters_per_step; \
- const unsigned int eighthiters = iters_per_step; \
- const int y_stride = xd->plane[0].pre[0].stride; \
- const int offset = bestmv->row * y_stride + bestmv->col; \
- const uint8_t *const y = xd->plane[0].pre[0].buf; \
- \
- int rr = ref_mv->row; \
- int rc = ref_mv->col; \
- int br = bestmv->row * 8; \
- int bc = bestmv->col * 8; \
- int hstep = 4; \
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
- int tr = br; \
- int tc = bc; \
- \
- bestmv->row *= 8; \
+#define SETUP_SUBPEL_SEARCH \
+ const uint8_t *const z = x->plane[0].src.buf; \
+ const int src_stride = x->plane[0].src.stride; \
+ const MACROBLOCKD *xd = &x->e_mbd; \
+ unsigned int besterr = INT_MAX; \
+ unsigned int sse; \
+ unsigned int whichdir; \
+ int thismse; \
+ const unsigned int halfiters = iters_per_step; \
+ const unsigned int quarteriters = iters_per_step; \
+ const unsigned int eighthiters = iters_per_step; \
+ const int y_stride = xd->plane[0].pre[0].stride; \
+ const int offset = bestmv->row * y_stride + bestmv->col; \
+ const uint8_t *const y = xd->plane[0].pre[0].buf; \
+ \
+ int rr = ref_mv->row; \
+ int rc = ref_mv->col; \
+ int br = bestmv->row * 8; \
+ int bc = bestmv->col * 8; \
+ int hstep = 4; \
+ const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+ const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+ const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+ const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+ int tr = br; \
+ int tc = bc; \
+ \
+ bestmv->row *= 8; \
bestmv->col *= 8;
-static unsigned int setup_center_error(const MACROBLOCKD *xd,
- const MV *bestmv,
- const MV *ref_mv,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- const uint8_t *const src,
- const int src_stride,
- const uint8_t *const y,
- int y_stride,
- const uint8_t *second_pred,
- int w, int h, int offset,
- int *mvjcost, int *mvcost[2],
- uint32_t *sse1,
- uint32_t *distortion) {
+static unsigned int setup_center_error(
+ const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
+ const uint8_t *const src, const int src_stride, const uint8_t *const y,
+ int y_stride, const uint8_t *second_pred, int w, int h, int offset,
+ int *mvjcost, int *mvcost[2], uint32_t *sse1, uint32_t *distortion) {
#if CONFIG_VP9_HIGHBITDEPTH
uint64_t besterr;
if (second_pred != NULL) {
@@ -350,8 +316,8 @@
DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride);
- besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride,
- sse1);
+ besterr =
+ vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
} else {
DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -362,12 +328,11 @@
}
*distortion = (uint32_t)besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
- if (besterr >= UINT_MAX)
- return UINT_MAX;
+ if (besterr >= UINT_MAX) return UINT_MAX;
return (uint32_t)besterr;
#else
uint32_t besterr;
- (void) xd;
+ (void)xd;
if (second_pred != NULL) {
DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -386,10 +351,8 @@
}
static INLINE int is_cost_list_wellbehaved(int *cost_list) {
- return cost_list[0] < cost_list[1] &&
- cost_list[0] < cost_list[2] &&
- cost_list[0] < cost_list[3] &&
- cost_list[0] < cost_list[4];
+ return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] &&
+ cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4];
}
// Returns surface minima estimate at given precision in 1/2^n bits.
@@ -400,8 +363,7 @@
// x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0),
// y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0).
// The code below is an integerized version of that.
-static void get_cost_surf_min(int *cost_list, int *ir, int *ic,
- int bits) {
+static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
*ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)),
(cost_list[1] - 2 * cost_list[0] + cost_list[3]));
*ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)),
@@ -408,43 +370,36 @@
(cost_list[4] - 2 * cost_list[0] + cost_list[2]));
}
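
The integerized fit is easy to sanity-check in one dimension. In the sketch below the rounding helper is an assumed stand-in for divide_and_round(), which is defined outside this hunk; S1 is the cost one step left of the best integer pel, S3 one step right, matching the cost_list order.

    #include <stdio.h>

    /* Rounded signed integer division (assumed stand-in for
     * divide_and_round()). */
    static int div_round(int n, int d) {
      return ((n < 0) != (d < 0)) ? (n - d / 2) / d : (n + d / 2) / d;
    }

    /* One-dimensional surface minimum in 1/2^bits pel units:
     * x0 = 1/2 (S1 - S3) / (S1 + S3 - 2*S0), as in the comment above. */
    static int surf_min_1d(int s_left, int s_center, int s_right, int bits) {
      return div_round((s_left - s_right) * (1 << (bits - 1)),
                       s_left - 2 * s_center + s_right);
    }

    int main(void) {
      /* Costs 130 / 100 / 110 around the best integer position: the fitted
       * minimum sits 0.25 pel to the right, i.e. 1 quarter-pel at bits=2. */
      printf("ic = %d\n", surf_min_1d(130, 100, 110, 2));
      return 0;
    }
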
-uint32_t vp9_skip_sub_pixel_tree(
- const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion,
- uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+uint32_t vp9_skip_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
+ const MV *ref_mv, int allow_hp,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop, int iters_per_step,
+ int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1,
+ const uint8_t *second_pred, int w, int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- (void) halfiters;
- (void) quarteriters;
- (void) eighthiters;
- (void) whichdir;
- (void) allow_hp;
- (void) forced_stop;
- (void) hstep;
- (void) rr;
- (void) rc;
- (void) minr;
- (void) minc;
- (void) maxr;
- (void) maxc;
- (void) tr;
- (void) tc;
- (void) sse;
- (void) thismse;
- (void) cost_list;
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ (void)halfiters;
+ (void)quarteriters;
+ (void)eighthiters;
+ (void)whichdir;
+ (void)allow_hp;
+ (void)forced_stop;
+ (void)hstep;
+ (void)rr;
+ (void)rc;
+ (void)minr;
+ (void)minc;
+ (void)maxr;
+ (void)maxc;
+ (void)tr;
+ (void)tc;
+ (void)sse;
+ (void)thismse;
+ (void)cost_list;
if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
(abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
@@ -454,37 +409,26 @@
}
uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
- const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion,
- uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- (void) halfiters;
- (void) quarteriters;
- (void) eighthiters;
- (void) whichdir;
- (void) allow_hp;
- (void) forced_stop;
- (void) hstep;
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ (void)halfiters;
+ (void)quarteriters;
+ (void)eighthiters;
+ (void)whichdir;
+ (void)allow_hp;
+ (void)forced_stop;
+ (void)hstep;
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
- cost_list[4] != INT_MAX &&
- is_cost_list_wellbehaved(cost_list)) {
+ cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
int ir, ic;
unsigned int minpt;
get_cost_surf_min(cost_list, &ir, &ic, 2);
@@ -533,29 +477,19 @@
return besterr;
}
-uint32_t vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion,
- uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+uint32_t vp9_find_best_sub_pixel_tree_pruned_more(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
- cost_list[4] != INT_MAX &&
- is_cost_list_wellbehaved(cost_list)) {
+ cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
unsigned int minpt;
int ir, ic;
get_cost_surf_min(cost_list, &ir, &ic, 1);
@@ -594,8 +528,8 @@
}
// These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -607,26 +541,17 @@
return besterr;
}
-uint32_t vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion,
- uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+uint32_t vp9_find_best_sub_pixel_tree_pruned(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX) {
unsigned int left, right, up, down, diag;
@@ -689,8 +614,8 @@
}
// These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -702,26 +627,21 @@
return besterr;
}
+/* clang-format off */
static const MV search_step_table[12] = {
- // left, right, up, down
- {0, -4}, {0, 4}, {-4, 0}, {4, 0},
- {0, -2}, {0, 2}, {-2, 0}, {2, 0},
- {0, -1}, {0, 1}, {-1, 0}, {1, 0}
+ // left, right, up, down
+ { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 },
+ { 0, -2 }, { 0, 2 }, { -2, 0 }, { 2, 0 },
+ { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
};
+/* clang-format on */
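
The table walks a cross pattern at three precisions: offsets of +/-4, +/-2, +/-1 in 1/8-pel units, i.e. half-, quarter-, then eighth-pel rounds. A tiny sketch of that grouping, assuming (as the layout suggests) that each refinement round consumes four consecutive entries.

    #include <stdio.h>

    typedef struct { int row, col; } MV;

    /* Same layout as search_step_table above: four cross-pattern offsets
     * per round, halving from half-pel (4/8) to eighth-pel (1/8). */
    static const MV steps[12] = {
      { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 },
      { 0, -2 }, { 0, 2 }, { -2, 0 }, { 2, 0 },
      { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
    };

    int main(void) {
      int round, idx;
      for (round = 0; round < 3; ++round) {
        printf("round %d:", round);
        for (idx = 0; idx < 4; ++idx) {
          const MV m = steps[4 * round + idx];
          printf(" (%d,%d)", m.row, m.col); /* offsets in 1/8-pel units */
        }
        printf("\n");
      }
      return 0;
    }
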
-uint32_t vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion,
- uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+uint32_t vp9_find_best_sub_pixel_tree(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h) {
const uint8_t *const z = x->plane[0].src.buf;
const uint8_t *const src_address = z;
const int src_stride = x->plane[0].src.stride;
@@ -751,18 +671,16 @@
int kr, kc;
if (!(allow_hp && use_mv_hp(ref_mv)))
- if (round == 3)
- round = 2;
+ if (round == 3) round = 2;
bestmv->row *= 8;
bestmv->col *= 8;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
- (void) cost_list; // to silence compiler warning
+ (void)cost_list; // to silence compiler warning
for (iter = 0; iter < round; ++iter) {
// Check vertical and horizontal sub-pixel positions.
@@ -775,13 +693,13 @@
this_mv.row = tr;
this_mv.col = tc;
if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse);
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse);
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, &sse, second_pred);
- cost_array[idx] = thismse +
- mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+ cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+ mvcost, error_per_bit);
if (cost_array[idx] < besterr) {
best_idx = idx;
@@ -802,15 +720,15 @@
tr = br + kr;
if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
- MV this_mv = {tr, tc};
+ MV this_mv = { tr, tc };
if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse);
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse);
else
- thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse, second_pred);
- cost_array[4] = thismse +
- mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+ thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse, second_pred);
+ cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit);
if (cost_array[4] < besterr) {
best_idx = 4;
@@ -830,8 +748,7 @@
bc = tc;
}
- if (iters_per_step > 1 && best_idx != -1)
- SECOND_LEVEL_CHECKS_BEST;
+ if (iters_per_step > 1 && best_idx != -1) SECOND_LEVEL_CHECKS_BEST;
tr = br;
tc = bc;
@@ -846,8 +763,8 @@
// These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -864,10 +781,8 @@
static INLINE int check_bounds(const MACROBLOCK *x, int row, int col,
int range) {
- return ((row - range) >= x->mv_row_min) &
- ((row + range) <= x->mv_row_max) &
- ((col - range) >= x->mv_col_min) &
- ((col + range) <= x->mv_col_max);
+ return ((row - range) >= x->mv_row_min) & ((row + range) <= x->mv_row_max) &
+ ((col - range) >= x->mv_col_min) & ((col + range) <= x->mv_col_max);
}
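
check_bounds() deliberately chains the comparisons with bitwise & rather than &&: each comparison yields 0 or 1, so all four evaluate branch-free, and the result is 1 only when the whole +/-range neighborhood fits in the window. A standalone sketch with stand-in field names:

    #include <assert.h>

    /* Branchless window test in the style of check_bounds(); the struct
     * is a stand-in for the MACROBLOCK MV limits. */
    struct limits { int row_min, row_max, col_min, col_max; };

    static int in_bounds(const struct limits *l, int row, int col,
                         int range) {
      return ((row - range) >= l->row_min) & ((row + range) <= l->row_max) &
             ((col - range) >= l->col_min) & ((col + range) <= l->col_max);
    }

    int main(void) {
      const struct limits l = { -16, 16, -16, 16 };
      assert(in_bounds(&l, 0, 0, 8));   /* whole 17x17 neighborhood fits */
      assert(!in_bounds(&l, 10, 0, 8)); /* spills past row_max */
      return 0;
    }
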
static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) {
@@ -875,33 +790,31 @@
(mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max);
}
-#define CHECK_BETTER \
- {\
- if (thissad < bestsad) {\
- if (use_mvcost) \
- thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);\
- if (thissad < bestsad) {\
- bestsad = thissad;\
- best_site = i;\
- }\
- }\
+#define CHECK_BETTER \
+ { \
+ if (thissad < bestsad) { \
+ if (use_mvcost) \
+ thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
+ if (thissad < bestsad) { \
+ bestsad = thissad; \
+ best_site = i; \
+ } \
+ } \
}
-#define MAX_PATTERN_SCALES 11
-#define MAX_PATTERN_CANDIDATES 8 // max number of canddiates per scale
-#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
+#define MAX_PATTERN_SCALES 11
+#define MAX_PATTERN_CANDIDATES 8  // max number of candidates per scale
+#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
// Calculate and return a sad+mvcost list around an integer best pel.
-static INLINE void calc_int_cost_list(const MACROBLOCK *x,
- const MV *ref_mv,
+static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv,
int sadpb,
const vp9_variance_fn_ptr_t *fn_ptr,
- const MV *best_mv,
- int *cost_list) {
- static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+ const MV *best_mv, int *cost_list) {
+ static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
- const MV fcenter_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+ const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
int br = best_mv->row;
int bc = best_mv->col;
MV this_mv;
@@ -910,25 +823,22 @@
this_mv.row = br;
this_mv.col = bc;
- cost_list[0] = fn_ptr->vf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride, &sse) +
+ cost_list[0] =
+ fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv),
+ in_what->stride, &sse) +
mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
if (check_bounds(x, br, bc, 1)) {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv),
in_what->stride, &sse) +
- mv_err_cost(&this_mv, &fcenter_mv,
- x->nmvjointcost, x->mvcost,
- x->errorperbit);
+ mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+ x->mvcost, x->errorperbit);
}
} else {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (!is_mv_in(x, &this_mv))
cost_list[i + 1] = INT_MAX;
else
@@ -935,9 +845,8 @@
cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv),
in_what->stride, &sse) +
- mv_err_cost(&this_mv, &fcenter_mv,
- x->nmvjointcost, x->mvcost,
- x->errorperbit);
+ mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+ x->mvcost, x->errorperbit);
}
}
}
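
For reference, the cost_list layout built above (and consumed by the pruned sub-pixel searches earlier in this file) is center first, then the four 1-away neighbors in left/below/right/above order:

    #include <stdio.h>

    typedef struct { int row, col; } MV;

    int main(void) {
      /* Neighbor order used by calc_int_cost_list():
       * cost_list[0] = best integer pel, then the deltas below. */
      static const MV neighbors[4] = { { 0, -1 }, { 1, 0 },
                                       { 0, 1 },  { -1, 0 } };
      static const char *names[4] = { "left", "below", "right", "above" };
      int i;
      for (i = 0; i < 4; ++i)
        printf("cost_list[%d]: delta {%d,%d} (%s)\n", i + 1,
               neighbors[i].row, neighbors[i].col, names[i]);
      return 0;
    }
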
@@ -947,19 +856,12 @@
// candidates as indicated in the num_candidates and candidates arrays
// passed into this function
//
-static int vp9_pattern_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv,
- const int num_candidates[MAX_PATTERN_SCALES],
- const MV candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES]) {
+static int vp9_pattern_search(
+ const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+ int do_init_search, int *cost_list, const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv,
+ const int num_candidates[MAX_PATTERN_SCALES],
+ const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
const MACROBLOCKD *const xd = &x->e_mbd;
static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -971,7 +873,7 @@
int bestsad = INT_MAX;
int thissad;
int k = -1;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
int best_init_s = search_param_to_steps[search_param];
// adjust ref_mv to make sure it is within MV range
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -979,9 +881,9 @@
bc = ref_mv->col;
// Work out the start point for the search
- bestsad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
- mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+ bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
+ mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
// Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
@@ -993,22 +895,21 @@
int best_site = -1;
if (check_bounds(x, br, bc, 1 << t)) {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1036,22 +937,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1074,22 +974,25 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1122,19 +1025,12 @@
// are 4 1-away neighbors, and cost_list is non-null
// TODO(debargha): Merge this function with the one above. Also remove
// use_mvcost option since it is always 1, to save unnecessary branches.
-static int vp9_pattern_search_sad(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv,
- const int num_candidates[MAX_PATTERN_SCALES],
- const MV candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES]) {
+static int vp9_pattern_search_sad(
+ const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+ int do_init_search, int *cost_list, const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv,
+ const int num_candidates[MAX_PATTERN_SCALES],
+ const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
const MACROBLOCKD *const xd = &x->e_mbd;
static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -1146,7 +1042,7 @@
int bestsad = INT_MAX;
int thissad;
int k = -1;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
int best_init_s = search_param_to_steps[search_param];
// adjust ref_mv to make sure it is within MV range
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -1158,9 +1054,9 @@
}
// Work out the start point for the search
- bestsad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
- mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+ bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
+ mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
// Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
@@ -1172,22 +1068,21 @@
int best_site = -1;
if (check_bounds(x, br, bc, 1 << t)) {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1215,22 +1110,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1253,22 +1147,25 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1287,24 +1184,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- cost_list[i + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ cost_list[i + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- cost_list[i + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ cost_list[i + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1327,26 +1221,28 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- cost_list[next_chkpts_indices[i] + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ cost_list[next_chkpts_indices[i] + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
if (!is_mv_in(x, &this_mv)) {
cost_list[next_chkpts_indices[i] + 1] = INT_MAX;
continue;
}
- cost_list[next_chkpts_indices[i] + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ cost_list[next_chkpts_indices[i] + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1367,34 +1263,31 @@
// cost_list[3]: sad at delta { 0, 1} (right) from the best integer pel
// cost_list[4]: sad at delta {-1, 0} (top) from the best integer pel
if (cost_list) {
- static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+ static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
if (cost_list[0] == INT_MAX) {
cost_list[0] = bestsad;
if (check_bounds(x, br, bc, 1)) {
for (i = 0; i < 4; i++) {
- const MV this_mv = { br + neighbors[i].row,
- bc + neighbors[i].col };
- cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+ cost_list[i + 1] =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
}
} else {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (!is_mv_in(x, &this_mv))
cost_list[i + 1] = INT_MAX;
else
- cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ cost_list[i + 1] =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
}
}
} else {
if (use_mvcost) {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (cost_list[i + 1] != INT_MAX) {
cost_list[i + 1] +=
mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1408,94 +1301,88 @@
return bestsad;
}
-int vp9_get_mvpred_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const vp9_variance_fn_ptr_t *vfp,
+int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const vp9_variance_fn_ptr_t *vfp,
int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV mv = {best_mv->row * 8, best_mv->col * 8};
+ const MV mv = { best_mv->row * 8, best_mv->col * 8 };
uint32_t unused;
#if CONFIG_VP9_HIGHBITDEPTH
- uint64_t err= vfp->vf(what->buf, what->stride,
- get_buf_from_mv(in_what, best_mv),
- in_what->stride, &unused);
- err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
- x->mvcost, x->errorperbit) : 0);
- if (err >= INT_MAX)
- return INT_MAX;
+ uint64_t err =
+ vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+ in_what->stride, &unused);
+ err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+ x->errorperbit)
+ : 0);
+ if (err >= INT_MAX) return INT_MAX;
return (int)err;
#else
- return vfp->vf(what->buf, what->stride,
- get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
- x->mvcost, x->errorperbit) : 0);
+ return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+ in_what->stride, &unused) +
+ (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+ x->errorperbit)
+ : 0);
#endif
}
-int vp9_get_mvpred_av_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const uint8_t *second_pred,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost) {
+int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV mv = {best_mv->row * 8, best_mv->col * 8};
+ const MV mv = { best_mv->row * 8, best_mv->col * 8 };
unsigned int unused;
return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
what->buf, what->stride, &unused, second_pred) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
- x->mvcost, x->errorperbit) : 0);
+ (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+ x->errorperbit)
+ : 0);
}
-static int hex_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
+static int hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
const MV *center_mv, MV *best_mv) {
// First scale has 8-closest points, the rest have 6 points in hex shape
// at increasing scales
- static const int hex_num_candidates[MAX_PATTERN_SCALES] = {
- 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6
- };
+ static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6 };
// Note that the largest candidate step at each scale is 2^scale
+ /* clang-format off */
static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
- {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}},
- {{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}},
- {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}},
- {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}},
- {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}},
- {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}},
- {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}},
- {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}},
- {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}},
- {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}},
- {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024},
- { -1024, 0}},
+ { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 },
+ { -1, 0 } },
+ { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } },
+ { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } },
+ { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } },
+ { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } },
+ { { -16, -32 }, { 16, -32 }, { 32, 0 }, { 16, 32 }, { -16, 32 },
+ { -32, 0 } },
+ { { -32, -64 }, { 32, -64 }, { 64, 0 }, { 32, 64 }, { -32, 64 },
+ { -64, 0 } },
+ { { -64, -128 }, { 64, -128 }, { 128, 0 }, { 64, 128 }, { -64, 128 },
+ { -128, 0 } },
+ { { -128, -256 }, { 128, -256 }, { 256, 0 }, { 128, 256 }, { -128, 256 },
+ { -256, 0 } },
+ { { -256, -512 }, { 256, -512 }, { 512, 0 }, { 256, 512 }, { -256, 512 },
+ { -512, 0 } },
+ { { -512, -1024 }, { 512, -1024 }, { 1024, 0 }, { 512, 1024 },
+ { -512, 1024 }, { -1024, 0 } }
};
- return vp9_pattern_search(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- hex_num_candidates, hex_candidates);
+ /* clang-format on */
+ return vp9_pattern_search(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
}
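
A quick spot-check of the "largest candidate step at each scale is 2^scale" note, using the scale-3 ring from hex_candidates above:

    #include <assert.h>
    #include <stdlib.h>

    typedef struct { int row, col; } MV;

    int main(void) {
      /* Scale-3 hex ring copied from hex_candidates above. */
      static const MV ring3[6] = { { -4, -8 }, { 4, -8 }, { 8, 0 },
                                   { 4, 8 },   { -4, 8 }, { -8, 0 } };
      int i, max_step = 0;
      for (i = 0; i < 6; ++i) {
        const int r = abs(ring3[i].row), c = abs(ring3[i].col);
        if (r > max_step) max_step = r;
        if (c > max_step) max_step = c;
      }
      assert(max_step == (1 << 3)); /* largest step at scale 3 is 8 */
      return 0;
    }
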
-static int bigdia_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+static int bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
// First scale has 4-closest points, the rest have 8 points in diamond
// shape at increasing scales
static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
@@ -1502,102 +1389,95 @@
4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
// Note that the largest candidate step at each scale is 2^scale
- static const MV bigdia_candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES] = {
- {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}},
- {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}},
- {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}},
- {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}},
- {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}},
- {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32},
- {-16, 16}, {-32, 0}},
- {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64},
- {-32, 32}, {-64, 0}},
- {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128},
- {-64, 64}, {-128, 0}},
- {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256},
- {-128, 128}, {-256, 0}},
- {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512},
- {-256, 256}, {-512, 0}},
- {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024},
- {-512, 512}, {-1024, 0}},
- };
- return vp9_pattern_search_sad(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- bigdia_num_candidates, bigdia_candidates);
+ /* clang-format off */
+ static const MV
+ bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+ { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } },
+ { { -1, -1 }, { 0, -2 }, { 1, -1 }, { 2, 0 }, { 1, 1 }, { 0, 2 },
+ { -1, 1 }, { -2, 0 } },
+ { { -2, -2 }, { 0, -4 }, { 2, -2 }, { 4, 0 }, { 2, 2 }, { 0, 4 },
+ { -2, 2 }, { -4, 0 } },
+ { { -4, -4 }, { 0, -8 }, { 4, -4 }, { 8, 0 }, { 4, 4 }, { 0, 8 },
+ { -4, 4 }, { -8, 0 } },
+ { { -8, -8 }, { 0, -16 }, { 8, -8 }, { 16, 0 }, { 8, 8 }, { 0, 16 },
+ { -8, 8 }, { -16, 0 } },
+ { { -16, -16 }, { 0, -32 }, { 16, -16 }, { 32, 0 }, { 16, 16 },
+ { 0, 32 }, { -16, 16 }, { -32, 0 } },
+ { { -32, -32 }, { 0, -64 }, { 32, -32 }, { 64, 0 }, { 32, 32 },
+ { 0, 64 }, { -32, 32 }, { -64, 0 } },
+ { { -64, -64 }, { 0, -128 }, { 64, -64 }, { 128, 0 }, { 64, 64 },
+ { 0, 128 }, { -64, 64 }, { -128, 0 } },
+ { { -128, -128 }, { 0, -256 }, { 128, -128 }, { 256, 0 }, { 128, 128 },
+ { 0, 256 }, { -128, 128 }, { -256, 0 } },
+ { { -256, -256 }, { 0, -512 }, { 256, -256 }, { 512, 0 }, { 256, 256 },
+ { 0, 512 }, { -256, 256 }, { -512, 0 } },
+ { { -512, -512 }, { 0, -1024 }, { 512, -512 }, { 1024, 0 },
+ { 512, 512 }, { 0, 1024 }, { -512, 512 }, { -1024, 0 } }
+ };
+ /* clang-format on */
+ return vp9_pattern_search_sad(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
}
-static int square_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+static int square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
// All scales have 8 closest points in square shape
static const int square_num_candidates[MAX_PATTERN_SCALES] = {
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
// Note that the largest candidate step at each scale is 2^scale
- static const MV square_candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES] = {
- {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}},
- {{-2, -2}, {0, -2}, {2, -2}, {2, 0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}},
- {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}},
- {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}},
- {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16},
- {-16, 16}, {-16, 0}},
- {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32},
- {-32, 32}, {-32, 0}},
- {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64},
- {-64, 64}, {-64, 0}},
- {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128},
- {-128, 128}, {-128, 0}},
- {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256},
- {-256, 256}, {-256, 0}},
- {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512},
- {-512, 512}, {-512, 0}},
- {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024},
- {0, 1024}, {-1024, 1024}, {-1024, 0}},
- };
- return vp9_pattern_search(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- square_num_candidates, square_candidates);
+ /* clang-format off */
+ static const MV
+ square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+ { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
+ { -1, 1 }, { -1, 0 } },
+ { { -2, -2 }, { 0, -2 }, { 2, -2 }, { 2, 0 }, { 2, 2 }, { 0, 2 },
+ { -2, 2 }, { -2, 0 } },
+ { { -4, -4 }, { 0, -4 }, { 4, -4 }, { 4, 0 }, { 4, 4 }, { 0, 4 },
+ { -4, 4 }, { -4, 0 } },
+ { { -8, -8 }, { 0, -8 }, { 8, -8 }, { 8, 0 }, { 8, 8 }, { 0, 8 },
+ { -8, 8 }, { -8, 0 } },
+ { { -16, -16 }, { 0, -16 }, { 16, -16 }, { 16, 0 }, { 16, 16 },
+ { 0, 16 }, { -16, 16 }, { -16, 0 } },
+ { { -32, -32 }, { 0, -32 }, { 32, -32 }, { 32, 0 }, { 32, 32 },
+ { 0, 32 }, { -32, 32 }, { -32, 0 } },
+ { { -64, -64 }, { 0, -64 }, { 64, -64 }, { 64, 0 }, { 64, 64 },
+ { 0, 64 }, { -64, 64 }, { -64, 0 } },
+ { { -128, -128 }, { 0, -128 }, { 128, -128 }, { 128, 0 }, { 128, 128 },
+ { 0, 128 }, { -128, 128 }, { -128, 0 } },
+ { { -256, -256 }, { 0, -256 }, { 256, -256 }, { 256, 0 }, { 256, 256 },
+ { 0, 256 }, { -256, 256 }, { -256, 0 } },
+ { { -512, -512 }, { 0, -512 }, { 512, -512 }, { 512, 0 }, { 512, 512 },
+ { 0, 512 }, { -512, 512 }, { -512, 0 } },
+ { { -1024, -1024 }, { 0, -1024 }, { 1024, -1024 }, { 1024, 0 },
+ { 1024, 1024 }, { 0, 1024 }, { -1024, 1024 }, { -1024, 0 } }
+ };
+ /* clang-format on */
+ return vp9_pattern_search(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
}
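Each row of the square table above is just the scale-0 ring multiplied by 2^scale (the big-diamond table is the same idea, with a 4-point diamond at scale 0 and the 8-point shape scaled by 2^(scale-1) afterwards). A minimal generator sketch, not how libvpx builds the tables (they are static) and with a hypothetical function name; MV, MAX_PATTERN_SCALES and MAX_PATTERN_CANDIDATES are assumed from this file's headers:

static void build_square_candidates(
    MV out[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
  /* Scale-0 square ring; every later scale doubles these offsets. */
  static const MV base[MAX_PATTERN_CANDIDATES] = {
    { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 },
    { 1, 1 },   { 0, 1 },  { -1, 1 }, { -1, 0 }
  };
  int s, i;
  for (s = 0; s < MAX_PATTERN_SCALES; ++s) {
    for (i = 0; i < MAX_PATTERN_CANDIDATES; ++i) {
      out[s][i].row = (int16_t)(base[i].row * (1 << s)); /* times 2^s */
      out[s][i].col = (int16_t)(base[i].col * (1 << s));
    }
  }
}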
-static int fast_hex_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
+static int fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
int sad_per_bit,
int do_init_search, // must be zero for fast_hex
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+ int *cost_list, const vp9_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv) {
return hex_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
center_mv, best_mv);
}
-static int fast_dia_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
- return bigdia_search(
- x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
+static int fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
+ return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+ sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
+ center_mv, best_mv);
}
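Both fast variants shrink the search the same way; a worked note (assuming MAX_MVSEARCH_STEPS == 11, its value in this tree): VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param) clamps search_param to at least 9, so the initial step is at most MAX_FIRST_STEP >> 9 == 2 full-pel and only the finest pattern scales are ever visited, trading reach for speed.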
#undef CHECK_BETTER
@@ -1604,8 +1484,7 @@
// Exhaustive motion search around a given centre position with a given
// step size.
-static int exhuastive_mesh_search(const MACROBLOCK *x,
- MV *ref_mv, MV *best_mv,
+static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
int range, int step, int sad_per_bit,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
@@ -1612,7 +1491,7 @@
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- MV fcenter_mv = {center_mv->row, center_mv->col};
+ MV fcenter_mv = { center_mv->row, center_mv->col };
unsigned int best_sad = INT_MAX;
int r, c, i;
int start_col, end_col, start_row, end_row;
@@ -1620,12 +1499,13 @@
assert(step >= 1);
- clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max,
- x->mv_row_min, x->mv_row_max);
+ clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
*best_mv = fcenter_mv;
- best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
- mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
+ best_sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
+ mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
@@ -1635,9 +1515,10 @@
for (c = start_col; c <= end_col; c += col_step) {
// Step > 1 means we are not checking every location in this pass.
if (step > 1) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c};
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c };
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
@@ -1651,17 +1532,16 @@
unsigned int sads[4];
const uint8_t *addrs[4];
for (i = 0; i < 4; ++i) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
addrs[i] = get_buf_from_mv(in_what, &mv);
}
- fn_ptr->sdx4df(what->buf, what->stride, addrs,
- in_what->stride, sads);
+ fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads);
for (i = 0; i < 4; ++i) {
if (sads[i] < best_sad) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
- const unsigned int sad = sads[i] +
- mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+ const unsigned int sad =
+ sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
*best_mv = mv;
@@ -1670,9 +1550,10 @@
}
} else {
for (i = 0; i < end_col - c; ++i) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &mv), in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
@@ -1689,8 +1570,7 @@
return best_sad;
}
-int vp9_diamond_search_sad_c(const MACROBLOCK *x,
- const search_site_config *cfg,
+int vp9_diamond_search_sad_c(const MACROBLOCK *x, const search_site_config *cfg,
MV *ref_mv, MV *best_mv, int search_param,
int sad_per_bit, int *num00,
const vp9_variance_fn_ptr_t *fn_ptr,
@@ -1716,12 +1596,12 @@
// 0 = initial step (MAX_FIRST_STEP) pel
// 1 = (MAX_FIRST_STEP/2) pel,
// 2 = (MAX_FIRST_STEP/4) pel...
-// const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
+ // const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
const MV *ss_mv = &cfg->ss_mv[search_param * cfg->searches_per_step];
const intptr_t *ss_os = &cfg->ss_os[search_param * cfg->searches_per_step];
const int tot_steps = cfg->total_steps - search_param;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
ref_row = ref_mv->row;
ref_col = ref_mv->col;
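The search_param-to-step mapping described in the comment above halves the radius per level; a one-line sketch (arithmetic assuming MAX_MVSEARCH_STEPS == 11, so MAX_FIRST_STEP == 1024):

  /* 1024, 512, 256, ... full-pel as search_param goes 0, 1, 2, ... */
  const int first_step = MAX_FIRST_STEP >> search_param;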
@@ -1734,8 +1614,8 @@
best_address = in_what;
// Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride)
- + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+ mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
i = 0;
@@ -1758,8 +1638,7 @@
for (j = 0; j < cfg->searches_per_step; j += 4) {
unsigned char const *block_offset[4];
- for (t = 0; t < 4; t++)
- block_offset[t] = ss_os[i + t] + best_address;
+ for (t = 0; t < 4; t++) block_offset[t] = ss_os[i + t] + best_address;
fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
sad_array);
@@ -1766,10 +1645,10 @@
for (t = 0; t < 4; t++, i++) {
if (sad_array[t] < bestsad) {
- const MV this_mv = {best_mv->row + ss_mv[i].row,
- best_mv->col + ss_mv[i].col};
- sad_array[t] += mvsad_err_cost(x, &this_mv, &fcenter_mv,
- sad_per_bit);
+ const MV this_mv = { best_mv->row + ss_mv[i].row,
+ best_mv->col + ss_mv[i].col };
+ sad_array[t] +=
+ mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
if (sad_array[t] < bestsad) {
bestsad = sad_array[t];
best_site = i;
@@ -1780,13 +1659,13 @@
} else {
for (j = 0; j < cfg->searches_per_step; j++) {
// Trap illegal vectors
- const MV this_mv = {best_mv->row + ss_mv[i].row,
- best_mv->col + ss_mv[i].col};
+ const MV this_mv = { best_mv->row + ss_mv[i].row,
+ best_mv->col + ss_mv[i].col };
if (is_mv_in(x, &this_mv)) {
const uint8_t *const check_here = ss_os[i] + best_address;
- unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
- in_what_stride);
+ unsigned int thissad =
+ fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
if (thissad < bestsad) {
thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1806,12 +1685,12 @@
last_site = best_site;
#if defined(NEW_DIAMOND_SEARCH)
while (1) {
- const MV this_mv = {best_mv->row + ss_mv[best_site].row,
- best_mv->col + ss_mv[best_site].col};
+ const MV this_mv = { best_mv->row + ss_mv[best_site].row,
+ best_mv->col + ss_mv[best_site].col };
if (is_mv_in(x, &this_mv)) {
const uint8_t *const check_here = ss_os[best_site] + best_address;
- unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
- in_what_stride);
+ unsigned int thissad =
+ fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
if (thissad < bestsad) {
thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
if (thissad < bestsad) {
@@ -1851,8 +1730,7 @@
for (d = -8; d <= 8; d += 16) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1864,8 +1742,7 @@
for (d = -4; d <= 4; d += 8) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1877,8 +1754,7 @@
for (d = -2; d <= 2; d += 4) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1890,8 +1766,7 @@
for (d = -1; d <= 1; d += 2) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1903,15 +1778,15 @@
}
static const MV search_pos[4] = {
- {-1, 0}, {0, -1}, {0, 1}, {1, 0},
+ { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
};
unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col) {
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col) {
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
DECLARE_ALIGNED(16, int16_t, hbuf[128]);
DECLARE_ALIGNED(16, int16_t, vbuf[128]);
DECLARE_ALIGNED(16, int16_t, src_hbuf[64]);
@@ -1936,8 +1811,7 @@
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
+ for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
@@ -1951,8 +1825,7 @@
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
return this_sad;
}
@@ -1993,11 +1866,8 @@
best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
{
- const uint8_t * const pos[4] = {
- ref_buf - ref_stride,
- ref_buf - 1,
- ref_buf + 1,
- ref_buf + ref_stride,
+ const uint8_t *const pos[4] = {
+ ref_buf - ref_stride, ref_buf - 1, ref_buf + 1, ref_buf + ref_stride,
};
cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad);
@@ -2023,8 +1893,7 @@
ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
- tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride,
- ref_buf, ref_stride);
+ tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
if (best_sad > tmp_sad) {
*tmp_mv = this_mv;
best_sad = tmp_sad;
@@ -2035,8 +1904,7 @@
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
return best_sad;
@@ -2046,17 +1914,15 @@
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
-static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x,
- MV *mvp_full, int step_param,
- int sadpb, int further_steps, int do_refine,
- int *cost_list,
+static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+ int step_param, int sadpb, int further_steps,
+ int do_refine, int *cost_list,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv) {
MV temp_mv;
int thissme, n, num00 = 0;
int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
- step_param, sadpb, &n,
- fn_ptr, ref_mv);
+ step_param, sadpb, &n, fn_ptr, ref_mv);
if (bestsme < INT_MAX)
bestsme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
*dst_mv = temp_mv;
@@ -2063,8 +1929,7 @@
  // If there won't be any more n-step searches, check to see if a refining
  // search is needed.
- if (n > further_steps)
- do_refine = 0;
+ if (n > further_steps) do_refine = 0;
while (n < further_steps) {
++n;
@@ -2073,14 +1938,13 @@
num00--;
} else {
thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
- step_param + n, sadpb, &num00,
- fn_ptr, ref_mv);
+ step_param + n, sadpb, &num00, fn_ptr,
+ ref_mv);
if (thissme < INT_MAX)
thissme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
// check to see if refining search is needed.
- if (num00 > further_steps - n)
- do_refine = 0;
+ if (num00 > further_steps - n) do_refine = 0;
if (thissme < bestsme) {
bestsme = thissme;
@@ -2093,8 +1957,8 @@
if (do_refine) {
const int search_range = 8;
MV best_mv = *dst_mv;
- thissme = vp9_refining_search_sad(x, &best_mv, sadpb, search_range,
- fn_ptr, ref_mv);
+ thissme = vp9_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+ ref_mv);
if (thissme < INT_MAX)
thissme = vp9_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
if (thissme < bestsme) {
@@ -2116,12 +1980,12 @@
// Runs a limited-range exhaustive mesh search using a pattern set
// according to the encode speed profile.
static int full_pixel_exhaustive(VP9_COMP *cpi, MACROBLOCK *x,
- MV *centre_mv_full, int sadpb, int *cost_list,
+ MV *centre_mv_full, int sadpb, int *cost_list,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv) {
const SPEED_FEATURES *const sf = &cpi->sf;
- MV temp_mv = {centre_mv_full->row, centre_mv_full->col};
- MV f_ref_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+ MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
+ MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
int bestsme;
int i;
int interval = sf->mesh_patterns[0].interval;
@@ -2132,8 +1996,8 @@
++(*x->ex_search_count_ptr);
// Trap illegal values for interval and range for this function.
- if ((range < MIN_RANGE) || (range > MAX_RANGE) ||
- (interval < MIN_INTERVAL) || (interval > range))
+ if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
+ (interval > range))
return INT_MAX;
baseline_interval_divisor = range / interval;
@@ -2145,8 +2009,8 @@
interval = VPXMAX(interval, range / baseline_interval_divisor);
// initial search
- bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range,
- interval, sadpb, fn_ptr, &temp_mv);
+ bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
+ sadpb, fn_ptr, &temp_mv);
if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
// Progressive searches with range and step size decreasing each time
@@ -2153,13 +2017,11 @@
// till we reach a step size of 1. Then break out.
for (i = 1; i < MAX_MESH_STEP; ++i) {
// First pass with coarser step and longer range
- bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv,
- sf->mesh_patterns[i].range,
- sf->mesh_patterns[i].interval,
- sadpb, fn_ptr, &temp_mv);
+ bestsme = exhuastive_mesh_search(
+ x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range,
+ sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv);
- if (sf->mesh_patterns[i].interval == 1)
- break;
+ if (sf->mesh_patterns[i].interval == 1) break;
}
}
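A worked pass through the loop above, with hypothetical mesh_patterns values { { 64, 16 }, { 32, 8 }, { 16, 4 }, { 8, 1 } } and MAX_MESH_STEP == 4: the mesh is swept at interval 16 over a +/-64 range, re-centred on the best match, swept at interval 8 over +/-32, then 4 over +/-16, and finally exhaustively (interval 1) over +/-8, at which point the interval == 1 break fires.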
@@ -2186,18 +2048,20 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
for (r = row_min; r < row_max; ++r) {
for (c = col_min; c < col_max; ++c) {
- const MV mv = {r, c};
- const int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride) +
- mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+ const MV mv = { r, c };
+ const int sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride) +
+ mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
*best_mv = mv;
@@ -2219,9 +2083,10 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
@@ -2240,7 +2105,7 @@
for (i = 0; i < 3; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2254,10 +2119,10 @@
}
while (c < col_max) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- check_here, in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2284,9 +2149,10 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
@@ -2305,7 +2171,7 @@
for (i = 0; i < 8; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2329,7 +2195,7 @@
for (i = 0; i < 3; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2343,10 +2209,10 @@
}
while (c < col_max) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- check_here, in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2361,19 +2227,18 @@
return best_sad;
}
-int vp9_refining_search_sad(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
+int vp9_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
int search_range,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
const MACROBLOCKD *const xd = &x->e_mbd;
- const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv);
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, best_address,
- in_what->stride) +
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
int i, j;
@@ -2386,19 +2251,16 @@
if (all_in) {
unsigned int sads[4];
- const uint8_t *const positions[4] = {
- best_address - in_what->stride,
- best_address - 1,
- best_address + 1,
- best_address + in_what->stride
- };
+ const uint8_t *const positions[4] = { best_address - in_what->stride,
+ best_address - 1, best_address + 1,
+ best_address + in_what->stride };
fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
for (j = 0; j < 4; ++j) {
if (sads[j] < best_sad) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sads[j] < best_sad) {
best_sad = sads[j];
@@ -2408,13 +2270,13 @@
}
} else {
for (j = 0; j < 4; ++j) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
if (is_mv_in(x, &mv)) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv),
- in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &mv), in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sad < best_sad) {
@@ -2440,20 +2302,19 @@
// This function is called when we do joint motion search in comp_inter_inter
// mode.
-int vp9_refining_search_8p_c(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
+int vp9_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
int search_range,
const vp9_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv,
- const uint8_t *second_pred) {
- const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
- {-1, -1}, {1, -1}, {-1, 1}, {1, 1}};
+ const MV *center_mv, const uint8_t *second_pred) {
+ const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
+ { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride, second_pred) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride, second_pred) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
int i, j;
@@ -2461,12 +2322,13 @@
int best_site = -1;
for (j = 0; j < 8; ++j) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
if (is_mv_in(x, &mv)) {
- unsigned int sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride, second_pred);
+ unsigned int sad =
+ fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride, second_pred);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sad < best_sad) {
@@ -2490,20 +2352,18 @@
#define MIN_EX_SEARCH_LIMIT 128
static int is_exhaustive_allowed(VP9_COMP *cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
- const int max_ex = VPXMAX(MIN_EX_SEARCH_LIMIT,
- (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
+ const int max_ex =
+ VPXMAX(MIN_EX_SEARCH_LIMIT,
+ (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
return sf->allow_exhaustive_searches &&
- (sf->exhaustive_searches_thresh < INT_MAX) &&
- (*x->ex_search_count_ptr <= max_ex) &&
- !cpi->rc.is_src_frame_alt_ref;
+ (sf->exhaustive_searches_thresh < INT_MAX) &&
+ (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
-int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, MV *mvp_full,
- int step_param, int error_per_bit,
- int *cost_list,
- const MV *ref_mv, MV *tmp_mv,
+int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ MV *mvp_full, int step_param, int error_per_bit,
+ int *cost_list, const MV *ref_mv, MV *tmp_mv,
int var_max, int rd) {
const SPEED_FEATURES *const sf = &cpi->sf;
const SEARCH_METHODS method = sf->mv.search_method;
@@ -2530,35 +2390,34 @@
cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case HEX:
- var = hex_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ var = hex_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+ fn_ptr, 1, ref_mv, tmp_mv);
break;
case SQUARE:
- var = square_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+ fn_ptr, 1, ref_mv, tmp_mv);
break;
case BIGDIA:
- var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+ fn_ptr, 1, ref_mv, tmp_mv);
break;
case NSTEP:
var = full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
- MAX_MVSEARCH_STEPS - 1 - step_param,
- 1, cost_list, fn_ptr, ref_mv, tmp_mv);
+ MAX_MVSEARCH_STEPS - 1 - step_param, 1,
+ cost_list, fn_ptr, ref_mv, tmp_mv);
// Should we allow a follow-on exhaustive search?
if (is_exhaustive_allowed(cpi, x)) {
int64_t exhuastive_thr = sf->exhaustive_searches_thresh;
- exhuastive_thr >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
+ exhuastive_thr >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
// Threshold variance for an exhaustive full search.
if (var > exhuastive_thr) {
- int var_ex;
+ int var_ex;
MV tmp_mv_ex;
- var_ex = full_pixel_exhaustive(cpi, x, tmp_mv,
- error_per_bit, cost_list, fn_ptr,
- ref_mv, &tmp_mv_ex);
+ var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, error_per_bit,
+ cost_list, fn_ptr, ref_mv, &tmp_mv_ex);
if (var_ex < var) {
var = var_ex;
@@ -2567,8 +2426,7 @@
}
}
break;
- default:
- assert(0 && "Invalid search method.");
+ default: assert(0 && "Invalid search method.");
}
if (method != NSTEP && rd && var < var_max)
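The right-shift applied to exhuastive_thr above scales the trigger level with block area; worked arithmetic (a sketch, with b_width_log2_lookup/b_height_log2_lookup giving log2 of the dimension in 4-pel units, as in libvpx):

  /* BLOCK_64X64: 8 - (4 + 4) == 0  -> threshold unchanged
   * BLOCK_32X32: 8 - (3 + 3) == 2  -> threshold / 4
   * BLOCK_16X16: 8 - (2 + 2) == 4  -> threshold / 16
   * Smaller blocks accumulate proportionally smaller error sums, so
   * they need a proportionally smaller threshold to qualify. */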
--- a/vp9/encoder/vp9_mcomp.h
+++ b/vp9/encoder/vp9_mcomp.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_MCOMP_H_
#define VP9_ENCODER_VP9_MCOMP_H_
@@ -26,7 +25,7 @@
// Enable the use of motion vector in range [-1023, 1023].
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1)
// Maximum size of the first step in full pel units
-#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
// Allowed motion vector pixel distance outside image border
// for Block_16x16
#define BORDER_MV_PIXELS_B16 (16 + VP9_INTERP_EXTEND)
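Worked check of the two constants (assuming MAX_MVSEARCH_STEPS == 11, its value in this codebase): MAX_FULL_PEL_VAL == (1 << 10) - 1 == 1023, matching the [-1023, 1023] range in the comment, and MAX_FIRST_STEP == 1 << 10 == 1024.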
@@ -33,29 +32,26 @@
typedef struct search_site_config {
// motion search sites
- MV ss_mv[8 * MAX_MVSEARCH_STEPS]; // Motion vector
- intptr_t ss_os[8 * MAX_MVSEARCH_STEPS]; // Offset
+ MV ss_mv[8 * MAX_MVSEARCH_STEPS]; // Motion vector
+ intptr_t ss_os[8 * MAX_MVSEARCH_STEPS]; // Offset
int searches_per_step;
int total_steps;
} search_site_config;
void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp9_init3smotion_compensation(search_site_config *cfg, int stride);
+void vp9_init3smotion_compensation(search_site_config *cfg, int stride);
void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp9_mv_bit_cost(const MV *mv, const MV *ref,
- const int *mvjcost, int *mvcost[2], int weight);
+int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight);
// Utility to compute variance + MV rate cost for a given MV
-int vp9_get_mvpred_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const vp9_variance_fn_ptr_t *vfp,
+int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const vp9_variance_fn_ptr_t *vfp,
int use_mvcost);
-int vp9_get_mvpred_av_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const uint8_t *second_pred,
- const vp9_variance_fn_ptr_t *vfp,
- int use_mvcost);
+int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const vp9_variance_fn_ptr_t *vfp, int use_mvcost);
struct VP9_COMP;
struct SPEED_FEATURES;
@@ -62,8 +58,7 @@
int vp9_init_search_range(int size);
-int vp9_refining_search_sad(const struct macroblock *x,
- struct mv *ref_mv,
+int vp9_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
int sad_per_bit, int distance,
const struct vp9_variance_vtable *fn_ptr,
const struct mv *center_mv);
@@ -70,23 +65,16 @@
// Perform integral projection based motion estimation.
unsigned int vp9_int_pro_motion_estimation(const struct VP9_COMP *cpi,
- MACROBLOCK *x,
- BLOCK_SIZE bsize,
+ MACROBLOCK *x, BLOCK_SIZE bsize,
int mi_row, int mi_col);
-typedef uint32_t (fractional_mv_step_fp) (
- const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
+typedef uint32_t(fractional_mv_step_fp)(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- uint32_t *distortion, uint32_t *sse1,
- const uint8_t *second_pred,
- int w, int h);
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h);
extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree;
extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned;
@@ -94,28 +82,22 @@
extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned_evenmore;
extern fractional_mv_step_fp vp9_skip_sub_pixel_tree;
-typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x,
- const MV *ref_mv, int sad_per_bit,
- int distance,
+typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv);
-typedef int (*vp9_refining_search_fn_t)(const MACROBLOCK *x,
- MV *ref_mv, int sad_per_bit,
- int distance,
+typedef int (*vp9_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
+ int sad_per_bit, int distance,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv);
-typedef int (*vp9_diamond_search_fn_t)(const MACROBLOCK *x,
- const search_site_config *cfg,
- MV *ref_mv, MV *best_mv,
- int search_param, int sad_per_bit,
- int *num00,
- const vp9_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv);
+typedef int (*vp9_diamond_search_fn_t)(
+ const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
+ int search_param, int sad_per_bit, int *num00,
+ const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
-int vp9_refining_search_8p_c(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
+int vp9_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
int search_range,
const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, const uint8_t *second_pred);
@@ -122,11 +104,9 @@
struct VP9_COMP;
-int vp9_full_pixel_search(struct VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, MV *mvp_full,
- int step_param, int error_per_bit,
- int *cost_list,
- const MV *ref_mv, MV *tmp_mv,
+int vp9_full_pixel_search(struct VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ MV *mvp_full, int step_param, int error_per_bit,
+ int *cost_list, const MV *ref_mv, MV *tmp_mv,
int var_max, int rd);
#ifdef __cplusplus
--- a/vp9/encoder/vp9_noise_estimate.c
+++ b/vp9/encoder/vp9_noise_estimate.c
@@ -21,9 +21,7 @@
#include "vp9/encoder/vp9_noise_estimate.h"
#include "vp9/encoder/vp9_encoder.h"
-void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne,
- int width,
- int height) {
+void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, int width, int height) {
ne->enabled = 0;
ne->level = kLowLow;
ne->value = 0;
@@ -40,24 +38,18 @@
}
static int enable_noise_estimation(VP9_COMP *const cpi) {
- // Enable noise estimation if denoising is on.
+// Enable noise estimation if denoising is on.
#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0)
- return 1;
+ if (cpi->oxcf.noise_sensitivity > 0) return 1;
#endif
  // Only allow noise estimation under certain encoding modes.
  // Enabled for 1-pass CBR, speed >= 5, and if the resolution is the same as
  // the original. Not enabled for SVC mode and screen_content_mode.
  // Not enabled for low resolutions.
- if (cpi->oxcf.pass == 0 &&
- cpi->oxcf.rc_mode == VPX_CBR &&
- cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cpi->oxcf.speed >= 5 &&
- cpi->resize_state == ORIG &&
- cpi->resize_pending == 0 &&
- !cpi->use_svc &&
- cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
- cpi->common.width >= 640 &&
+ if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
+ cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->oxcf.speed >= 5 &&
+ cpi->resize_state == ORIG && cpi->resize_pending == 0 && !cpi->use_svc &&
+ cpi->oxcf.content != VP9E_CONTENT_SCREEN && cpi->common.width >= 640 &&
cpi->common.height >= 480)
return 1;
else
@@ -65,8 +57,8 @@
}
#if CONFIG_VP9_TEMPORAL_DENOISING
-static void copy_frame(YV12_BUFFER_CONFIG * const dest,
- const YV12_BUFFER_CONFIG * const src) {
+static void copy_frame(YV12_BUFFER_CONFIG *const dest,
+ const YV12_BUFFER_CONFIG *const src) {
int r;
const uint8_t *srcbuf = src->y_buffer;
uint8_t *destbuf = dest->y_buffer;
@@ -110,18 +102,15 @@
// Estimate is between current source and last source.
YV12_BUFFER_CONFIG *last_source = cpi->Last_Source;
#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0)
- last_source = &cpi->denoiser.last_source;
+ if (cpi->oxcf.noise_sensitivity > 0) last_source = &cpi->denoiser.last_source;
#endif
ne->enabled = enable_noise_estimation(cpi);
- if (!ne->enabled ||
- cm->current_video_frame % frame_period != 0 ||
- last_source == NULL ||
- ne->last_w != cm->width ||
+ if (!ne->enabled || cm->current_video_frame % frame_period != 0 ||
+ last_source == NULL || ne->last_w != cm->width ||
ne->last_h != cm->height) {
#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0)
- copy_frame(&cpi->denoiser.last_source, cpi->Source);
+ if (cpi->oxcf.noise_sensitivity > 0)
+ copy_frame(&cpi->denoiser.last_source, cpi->Source);
#endif
if (last_source != NULL) {
ne->last_w = cm->width;
@@ -140,8 +129,8 @@
int num_samples = 0;
uint64_t avg_est = 0;
int bsize = BLOCK_16X16;
- static const unsigned char const_source[16] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ static const unsigned char const_source[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
  // Loop over a sub-sample of the 16x16 blocks of the frame; for blocks that
  // have been encoded as zero/small mv for at least x consecutive frames,
  // compute the variance to update the estimate of noise in the source.
@@ -167,8 +156,7 @@
for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
// 16x16 blocks, 1/4 sample of frame.
- if (mi_row % 4 == 0 && mi_col % 4 == 0 &&
- mi_row < cm->mi_rows - 1 &&
+ if (mi_row % 4 == 0 && mi_col % 4 == 0 && mi_row < cm->mi_rows - 1 &&
mi_col < cm->mi_cols - 1) {
int bl_index = mi_row * cm->mi_cols + mi_col;
int bl_index1 = bl_index + 1;
@@ -178,20 +166,16 @@
          // been encoded as zero/low motion for x (= thresh_consec_zeromv)
          // frames in a row. consec_zero_mv[] is defined for 8x8 blocks, so
          // consider all 4 sub-blocks of the 16x16 block. Also, avoid skin
          // blocks.
- int consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index],
- VPXMIN(cpi->consec_zero_mv[bl_index1],
- VPXMIN(cpi->consec_zero_mv[bl_index2],
- cpi->consec_zero_mv[bl_index3])));
+ int consec_zeromv =
+ VPXMIN(cpi->consec_zero_mv[bl_index],
+ VPXMIN(cpi->consec_zero_mv[bl_index1],
+ VPXMIN(cpi->consec_zero_mv[bl_index2],
+ cpi->consec_zero_mv[bl_index3])));
int is_skin = 0;
if (cpi->use_skin_detection) {
- is_skin = vp9_compute_skin_block(src_y,
- src_u,
- src_v,
- src_ystride,
- src_uvstride,
- bsize,
- consec_zeromv,
- 0);
+ is_skin =
+ vp9_compute_skin_block(src_y, src_u, src_v, src_ystride,
+ src_uvstride, bsize, consec_zeromv, 0);
}
if (frame_low_motion &&
cpi->consec_zero_mv[bl_index] > thresh_consec_zeromv &&
@@ -201,19 +185,15 @@
!is_skin) {
// Compute variance.
unsigned int sse;
- unsigned int variance = cpi->fn_ptr[bsize].vf(src_y,
- src_ystride,
- last_src_y,
- last_src_ystride,
- &sse);
+ unsigned int variance = cpi->fn_ptr[bsize].vf(
+ src_y, src_ystride, last_src_y, last_src_ystride, &sse);
// Only consider this block as valid for noise measurement if the
// average term (sse - variance = N * avg^{2}, N = 16X16) of the
// temporal residual is small (avoid effects from lighting change).
if ((sse - variance) < thresh_sum_diff) {
unsigned int sse2;
- const unsigned int spatial_variance =
- cpi->fn_ptr[bsize].vf(src_y, src_ystride, const_source,
- 0, &sse2);
+ const unsigned int spatial_variance = cpi->fn_ptr[bsize].vf(
+ src_y, src_ystride, const_source, 0, &sse2);
// Avoid blocks with high brightness and high spatial variance.
if ((sse2 - spatial_variance) < thresh_sum_spatial &&
spatial_variance < thresh_spatial_var) {
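The average-term test above follows from the usual variance identity; a short reader's sketch (N == 256 pixels for a 16x16 block):

  /* variance == sse - (sum * sum) / N
   * => sse - variance == (sum * sum) / N == N * avg * avg
   * so a small (sse - variance) means the mean temporal residual is
   * small, screening out uniform lighting changes. */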
--- a/vp9/encoder/vp9_noise_estimate.h
+++ b/vp9/encoder/vp9_noise_estimate.h
@@ -23,12 +23,7 @@
extern "C" {
#endif
-typedef enum noise_level {
- kLowLow,
- kLow,
- kMedium,
- kHigh
-} NOISE_LEVEL;
+typedef enum noise_level { kLowLow, kLow, kMedium, kHigh } NOISE_LEVEL;
typedef struct noise_estimate {
int enabled;
@@ -43,9 +38,7 @@
struct VP9_COMP;
-void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne,
- int width,
- int height);
+void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, int width, int height);
NOISE_LEVEL vp9_noise_estimate_extract_level(NOISE_ESTIMATE *const ne);
--- a/vp9/encoder/vp9_picklpf.c
+++ b/vp9/encoder/vp9_picklpf.c
@@ -33,10 +33,9 @@
}
}
-
static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
- VP9_COMP *const cpi,
- int filt_level, int partial_frame) {
+ VP9_COMP *const cpi, int filt_level,
+ int partial_frame) {
VP9_COMMON *const cm = &cpi->common;
int64_t filt_err;
@@ -44,8 +43,8 @@
if (cpi->num_workers > 1)
vp9_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
- filt_level, 1, partial_frame,
- cpi->workers, cpi->num_workers, &cpi->lf_row_sync);
+ filt_level, 1, partial_frame, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
vp9_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
1, partial_frame);
@@ -78,8 +77,7 @@
// Start the search at the previous frame filter level unless it is now out of
// range.
- int filt_mid =
- clamp(lf->last_filt_level, min_filter_level, max_filter_level);
+ int filt_mid = clamp(lf->last_filt_level, min_filter_level, max_filter_level);
int filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
// Sum squared error at each filter level
int64_t ss_err[MAX_LOOP_FILTER + 1];
@@ -105,8 +103,7 @@
bias = (bias * cpi->twopass.section_intra_rating) / 20;
// yx, bias less for large block size
- if (cm->tx_mode != ONLY_4X4)
- bias >>= 1;
+ if (cm->tx_mode != ONLY_4X4) bias >>= 1;
if (filt_direction <= 0 && filt_low != filt_mid) {
// Get Low filter error score
@@ -117,8 +114,7 @@
// filter value.
if ((ss_err[filt_low] - bias) < best_err) {
// Was it actually better than the previous best?
- if (ss_err[filt_low] < best_err)
- best_err = ss_err[filt_low];
+ if (ss_err[filt_low] < best_err) best_err = ss_err[filt_low];
filt_best = filt_low;
}
@@ -154,17 +150,16 @@
VP9_COMMON *const cm = &cpi->common;
struct loopfilter *const lf = &cm->lf;
- lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0
- : cpi->oxcf.sharpness;
+ lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) {
- lf->filter_level = 0;
+ lf->filter_level = 0;
} else if (method >= LPF_PICK_FROM_Q) {
const int min_filter_level = 0;
const int max_filter_level = get_max_filter_level(cpi);
const int q = vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth);
- // These values were determined by linear fitting the result of the
- // searched level, filt_guess = q * 0.316206 + 3.87252
+// These values were determined by linear fitting the result of the
+// searched level, filt_guess = q * 0.316206 + 3.87252
#if CONFIG_VP9_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
@@ -178,18 +173,18 @@
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
break;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
- "or VPX_BITS_12");
+ assert(0 &&
+ "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
+ "or VPX_BITS_12");
return;
}
#else
int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
#endif // CONFIG_VP9_HIGHBITDEPTH
- if (cm->frame_type == KEY_FRAME)
- filt_guess -= 4;
+ if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
} else {
- lf->filter_level = search_filter_level(sd, cpi,
- method == LPF_PICK_FROM_SUBIMAGE);
+ lf->filter_level =
+ search_filter_level(sd, cpi, method == LPF_PICK_FROM_SUBIMAGE);
}
}
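A quick numeric check of the 8-bit fixed-point path above (reader's arithmetic, not from the source): ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18) is (q * 20723 + 1015158 + (1 << 17)) >> 18; the intercept 1015158 / 2^18 ~= 3.87252 matches the fitted line, and the slope 20723 / 2^18 ~= 0.0790515 == 0.316206 / 4, which suggests the fit was taken against a quantizer at one quarter of this scale.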
--- a/vp9/encoder/vp9_picklpf.h
+++ b/vp9/encoder/vp9_picklpf.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_PICKLPF_H_
#define VP9_ENCODER_VP9_PICKLPF_H_
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -41,21 +41,15 @@
int in_use;
} PRED_BUFFER;
-
static const int pos_shift_16x16[4][4] = {
- {9, 10, 13, 14},
- {11, 12, 15, 16},
- {17, 18, 21, 22},
- {19, 20, 23, 24}
+ { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
};
-static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm,
- const MACROBLOCK *x,
- const MACROBLOCKD *xd,
- const TileInfo *const tile,
+static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x,
+ const MACROBLOCKD *xd, const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
- int_mv *mv_ref_list, int_mv *base_mv,
- int mi_row, int mi_col, int use_base_mv) {
+ int_mv *mv_ref_list, int_mv *base_mv, int mi_row,
+ int mi_col, int use_base_mv) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
@@ -74,8 +68,8 @@
for (i = 0; i < 2; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
- const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride];
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate_mi->mode];
different_ref_found = 1;
@@ -94,8 +88,8 @@
for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
- const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride];
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
different_ref_found = 1;
if (candidate_mi->ref_frame[0] == ref_frame)
@@ -110,8 +104,8 @@
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
- const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row
- * xd->mi_stride];
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
// If the candidate is INTRA we don't want to consider its mv.
IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
@@ -124,10 +118,11 @@
ref_frame == LAST_FRAME) {
// Get base layer mv.
MV_REF *candidate =
- &cm->prev_frame->mvs[(mi_col>>1) + (mi_row>>1) * (cm->mi_cols>>1)];
+ &cm->prev_frame
+ ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)];
if (candidate->mv[0].as_int != INVALID_MV) {
- base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
- base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
+ base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
+ base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
clamp_mv_ref(&base_mv->as_mv, xd);
} else {
base_mv->as_int = INVALID_MV;
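The halved mi coordinates and doubled vector above encode a 2:1 spatial-scalability mapping; an illustrative comment-only sketch:

  /* Base layer at half resolution: enhancement block (mi_row, mi_col)
   * co-locates with base block (mi_row >> 1, mi_col >> 1), and a
   * base-layer motion vector v corresponds to 2 * v here. */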
@@ -134,7 +129,7 @@
}
}
- Done:
+Done:
x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];
@@ -151,7 +146,7 @@
int64_t best_rd_sofar, int use_base_mv) {
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
const int step_param = cpi->sf.mv.fullpel_search_step_param;
const int sadpb = x->sadperbit16;
MV mvp_full;
@@ -166,15 +161,14 @@
const int tmp_row_max = x->mv_row_max;
int rv = 0;
int cost_list[5];
- const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
- ref);
+ const YV12_BUFFER_CONFIG *scaled_ref_frame =
+ vp9_get_scaled_ref_frame(cpi, ref);
if (scaled_ref_frame) {
int i;
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
+ for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
vp9_set_mv_search_range(x, &ref_mv);
@@ -194,8 +188,8 @@
center_mv = tmp_mv->as_mv;
vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
- cond_cost_list(cpi, cost_list),
-                          &center_mv, &tmp_mv->as_mv, INT_MAX, 0);
+                          cond_cost_list(cpi, cost_list), &center_mv,
+ &tmp_mv->as_mv, INT_MAX, 0);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -206,40 +200,34 @@
mvp_full.row = tmp_mv->as_mv.row * 8;
mvp_full.col = tmp_mv->as_mv.col * 8;
- *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost,
+ MV_COST_WEIGHT);
- rate_mode = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]]
- [INTER_OFFSET(NEWMV)];
- rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
- best_rd_sofar);
+ rate_mode =
+ cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)];
+ rv =
+ !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar);
if (rv) {
- cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &dis, &x->pred_sse[ref], NULL, 0, 0);
- *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ cpi->find_fractional_mv_step(
+ x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
+ *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
}
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
return rv;
}
static void block_variance(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
- int w, int h, unsigned int *sse, int *sum,
- int block_size,
+ const uint8_t *ref, int ref_stride, int w, int h,
+ unsigned int *sse, int *sum, int block_size,
#if CONFIG_VP9_HIGHBITDEPTH
int use_highbitdepth, vpx_bit_depth_t bd,
#endif
@@ -258,7 +246,7 @@
vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride,
ref + ref_stride * i + j, ref_stride,
&sse8x8[k], &sum8x8[k]);
- break;
+ break;
case VPX_BITS_10:
vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride,
ref + ref_stride * i + j, ref_stride,
@@ -271,14 +259,14 @@
break;
}
} else {
- vpx_get8x8var(src + src_stride * i + j, src_stride,
- ref + ref_stride * i + j, ref_stride,
- &sse8x8[k], &sum8x8[k]);
+ vpx_get8x8var(src + src_stride * i + j, src_stride,
+ ref + ref_stride * i + j, ref_stride, &sse8x8[k],
+ &sum8x8[k]);
}
#else
vpx_get8x8var(src + src_stride * i + j, src_stride,
- ref + ref_stride * i + j, ref_stride,
- &sse8x8[k], &sum8x8[k]);
+ ref + ref_stride * i + j, ref_stride, &sse8x8[k],
+ &sum8x8[k]);
#endif
*sse += sse8x8[k];
*sum += sum8x8[k];
@@ -300,12 +288,12 @@
for (i = 0; i < nh; i += 2) {
for (j = 0; j < nw; j += 2) {
sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
- sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
+ sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
- sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
+ sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
- (b_width_log2_lookup[unit_size] +
- b_height_log2_lookup[unit_size] + 6));
+ (b_width_log2_lookup[unit_size] +
+ b_height_log2_lookup[unit_size] + 6));
k++;
}
}
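The shift in var_o above is the merged block's pixel count written in log2: a unit of (4 << bwl) x (4 << bhl) pixels holds 2^(bwl + bhl + 4) samples, and each output block merges four units, giving 2^(bwl + bhl + 6); the expression is then the standard identity

  /* var_o == sse_o - (sum_o * sum_o) / num_pixels, with
   * num_pixels == 1 << (b_width_log2_lookup[unit_size] +
   *                     b_height_log2_lookup[unit_size] + 6). */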
@@ -335,9 +323,9 @@
const int bw = b_width_log2_lookup[bsize];
const int bh = b_height_log2_lookup[bsize];
const int num8x8 = 1 << (bw + bh - 2);
- unsigned int sse8x8[64] = {0};
- int sum8x8[64] = {0};
- unsigned int var8x8[64] = {0};
+ unsigned int sse8x8[64] = { 0 };
+ int sum8x8[64] = { 0 };
+ unsigned int var8x8[64] = { 0 };
TX_SIZE tx_size;
int i, k;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -378,24 +366,27 @@
// Evaluate if the partition block is a skippable block in Y plane.
{
- unsigned int sse16x16[16] = {0};
- int sum16x16[16] = {0};
- unsigned int var16x16[16] = {0};
+ unsigned int sse16x16[16] = { 0 };
+ int sum16x16[16] = { 0 };
+ unsigned int var16x16[16] = { 0 };
const int num16x16 = num8x8 >> 2;
- unsigned int sse32x32[4] = {0};
- int sum32x32[4] = {0};
- unsigned int var32x32[4] = {0};
+ unsigned int sse32x32[4] = { 0 };
+ int sum32x32[4] = { 0 };
+ unsigned int var32x32[4] = { 0 };
const int num32x32 = num8x8 >> 4;
int ac_test = 1;
int dc_test = 1;
- const int num = (tx_size == TX_8X8) ? num8x8 :
- ((tx_size == TX_16X16) ? num16x16 : num32x32);
- const unsigned int *sse_tx = (tx_size == TX_8X8) ? sse8x8 :
- ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
- const unsigned int *var_tx = (tx_size == TX_8X8) ? var8x8 :
- ((tx_size == TX_16X16) ? var16x16 : var32x32);
+ const int num = (tx_size == TX_8X8)
+ ? num8x8
+ : ((tx_size == TX_16X16) ? num16x16 : num32x32);
+ const unsigned int *sse_tx =
+ (tx_size == TX_8X8) ? sse8x8
+ : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
+ const unsigned int *var_tx =
+ (tx_size == TX_8X8) ? var8x8
+ : ((tx_size == TX_16X16) ? var16x16 : var32x32);
// Calculate variance if tx_size > TX_8X8
if (tx_size >= TX_16X16)
@@ -424,8 +415,7 @@
if (ac_test) {
x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
- if (dc_test)
- x->skip_txfm[0] = SKIP_TXFM_AC_DC;
+ if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC;
} else if (dc_test) {
skip_dc = 1;
}
@@ -432,7 +422,7 @@
}
if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
- int skip_uv[2] = {0};
+ int skip_uv[2] = { 0 };
unsigned int var_uv[2];
unsigned int sse_uv[2];
@@ -449,14 +439,14 @@
const int uv_bw = b_width_log2_lookup[uv_bsize];
const int uv_bh = b_height_log2_lookup[uv_bsize];
const int sf = (uv_bw - b_width_log2_lookup[unit_size]) +
- (uv_bh - b_height_log2_lookup[unit_size]);
+ (uv_bh - b_height_log2_lookup[unit_size]);
const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
int j = i - 1;
vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
- var_uv[j] = cpi->fn_ptr[uv_bsize].vf(p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride, &sse_uv[j]);
+ var_uv[j] = cpi->fn_ptr[uv_bsize].vf(
+ p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]);
if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
(sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
@@ -496,8 +486,8 @@
vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
ac_quant >> (xd->bd - 5), &rate, &dist);
#else
- vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
- ac_quant >> 3, &rate, &dist);
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
+ &rate, &dist);
#endif // CONFIG_VP9_HIGHBITDEPTH
*out_rate_sum += rate;
@@ -504,10 +494,10 @@
*out_dist_sum += dist << 4;
}
-static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
- MACROBLOCK *x, MACROBLOCKD *xd,
- int *out_rate_sum, int64_t *out_dist_sum,
- unsigned int *var_y, unsigned int *sse_y) {
+static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+ MACROBLOCKD *xd, int *out_rate_sum,
+ int64_t *out_dist_sum, unsigned int *var_y,
+ unsigned int *sse_y) {
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence quantizer step is also 8 times. To get effective quantizer
// we need to divide by 8 before sending to modeling function.
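Concretely (a sketch of the scaling the note above describes, with names as used elsewhere in this file): the forward transform has a gain of 8 over an orthonormal transform, so the dequant step is 8x the true quantizer and the 8-bit path recovers the effective value with a shift:

  /* effective quantizer handed to the modeling function */
  const int effective_ac_quant = pd->dequant[1] >> 3; /* divide by 8 */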
@@ -548,8 +538,7 @@
// Evaluate if the partition block is a skippable block in Y plane.
{
- const BLOCK_SIZE unit_size =
- txsize_to_bsize[xd->mi[0]->tx_size];
+ const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size];
const unsigned int num_blk_log2 =
(b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
(b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
@@ -564,8 +553,7 @@
if (sse_tx - var_tx < dc_thr || sse == var)
x->skip_txfm[0] = SKIP_TXFM_AC_DC;
} else {
- if (sse_tx - var_tx < dc_thr || sse == var)
- skip_dc = 1;
+ if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1;
}
}
@@ -597,8 +585,8 @@
vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
ac_quant >> (xd->bd - 5), &rate, &dist);
#else
- vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
- ac_quant >> 3, &rate, &dist);
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
+ &rate, &dist);
#endif // CONFIG_VP9_HIGHBITDEPTH
*out_rate_sum += rate;
@@ -631,10 +619,10 @@
const int step = 1 << (tx_size << 1);
const int block_step = (1 << tx_size);
int block = 0, r, c;
- const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 :
- xd->mb_to_right_edge >> 5);
- const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 :
- xd->mb_to_bottom_edge >> 5);
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
int eob_cost = 0;
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
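`mb_to_right_edge` and `mb_to_bottom_edge` are signed distances to the frame edge in 1/8-pel units (an assumption consistent with the `>> 5` here: >> 3 to pixels, then >> 2 to 4x4 blocks), so negative values trim the transform-block walk at the frame boundary. A sketch of the clamp:

```c
/* Number of 4x4 block columns (or rows) that remain inside the frame.
 * mb_to_edge_q3 is the signed distance to the frame edge in 1/8 pel. */
static int max_4x4_blocks(int num_4x4, int mb_to_edge_q3) {
  return num_4x4 + (mb_to_edge_q3 >= 0 ? 0 : mb_to_edge_q3 >> 5);
}
```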
@@ -666,26 +654,24 @@
vpx_hadamard_16x16(src_diff, diff_stride, (int16_t *)coeff);
vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_8X8:
vpx_hadamard_8x8(src_diff, diff_stride, (int16_t *)coeff);
vp9_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
*skippable &= (*eob == 0);
eob_cost += 1;
@@ -718,8 +704,7 @@
else if (*eob > 1)
this_rdc->rate += vpx_satd((const int16_t *)qcoeff, step << 4);
- this_rdc->dist +=
- vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
+ this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
}
block += step;
}
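These hunks approximate rate by the SATD of the quantized coefficients and distortion by the coefficient-domain squared error scaled down by 4 (`>> 2`). A plain-C toy version of the two measures (the library dispatches to optimized kernels; names here are illustrative):

```c
#include <stdint.h>
#include <stdlib.h>

/* Cheap rate proxy: sum of absolute quantized coefficients. */
static int satd_proxy(const int16_t *qcoeff, int n) {
  int i, sum = 0;
  for (i = 0; i < n; ++i) sum += abs(qcoeff[i]);
  return sum;
}

/* Distortion proxy: squared error between original and dequantized
 * coefficients, before the caller's >> 2 scaling. */
static int64_t block_error(const int16_t *coeff, const int16_t *dqcoeff,
                           int n) {
  int64_t err = 0;
  int i;
  for (i = 0; i < n; ++i) {
    const int d = coeff[i] - dqcoeff[i];
    err += (int64_t)d * d;
  }
  return err;
}
```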
@@ -733,9 +718,9 @@
static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize,
MACROBLOCK *x, MACROBLOCKD *xd,
- RD_COST *this_rdc,
- unsigned int *var_y, unsigned int *sse_y,
- int start_plane, int stop_plane) {
+ RD_COST *this_rdc, unsigned int *var_y,
+ unsigned int *sse_y, int start_plane,
+ int stop_plane) {
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence the quantizer step is also 8 times. To get the effective quantizer
// we need to divide by 8 before sending to the modeling function.
@@ -761,33 +746,32 @@
const uint32_t ac_quant = pd->dequant[1];
const BLOCK_SIZE bs = plane_bsize;
unsigned int var;
- if (!x->color_sensitivity[i - 1])
- continue;
+ if (!x->color_sensitivity[i - 1]) continue;
- var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride, &sse);
+ var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
+ pd->dst.stride, &sse);
assert(sse >= var);
tot_var += var;
tot_sse += sse;
- #if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
dc_quant >> (xd->bd - 5), &rate, &dist);
- #else
+#else
vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
dc_quant >> 3, &rate, &dist);
- #endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_VP9_HIGHBITDEPTH
this_rdc->rate += rate >> 1;
this_rdc->dist += dist << 3;
- #if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
ac_quant >> (xd->bd - 5), &rate, &dist);
- #else
- vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
- ac_quant >> 3, &rate, &dist);
- #endif // CONFIG_VP9_HIGHBITDEPTH
+#else
+ vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
+ &rate, &dist);
+#endif // CONFIG_VP9_HIGHBITDEPTH
this_rdc->rate += rate;
this_rdc->dist += dist << 4;
@@ -794,11 +778,11 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
- *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
- *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
+ *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
+ *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
#else
- *var_y = tot_var;
- *sse_y = tot_sse;
+ *var_y = tot_var;
+ *sse_y = tot_sse;
#endif
}
@@ -815,15 +799,14 @@
}
static void free_pred_buffer(PRED_BUFFER *p) {
- if (p != NULL)
- p->in_use = 0;
+ if (p != NULL) p->in_use = 0;
}
-static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, int mi_row, int mi_col,
+static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
MV_REFERENCE_FRAME ref_frame,
- PREDICTION_MODE this_mode,
- unsigned int var_y, unsigned int sse_y,
+ PREDICTION_MODE this_mode, unsigned int var_y,
+ unsigned int sse_y,
struct buf_2d yv12_mb[][MAX_MB_PLANE],
int *rate, int64_t *dist) {
MACROBLOCKD *xd = &x->e_mbd;
@@ -835,10 +818,8 @@
// Skipping threshold for dc.
unsigned int thresh_dc;
int motion_low = 1;
- if (mi->mv[0].as_mv.row > 64 ||
- mi->mv[0].as_mv.row < -64 ||
- mi->mv[0].as_mv.col > 64 ||
- mi->mv[0].as_mv.col < -64)
+ if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 ||
+ mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64)
motion_low = 0;
if (x->encode_breakout > 0 && motion_low == 1) {
// Set a maximum for threshold to avoid big PSNR loss in low bit rate
@@ -895,17 +876,15 @@
vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
}
- var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
- x->plane[1].src.stride,
+ var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride,
xd->plane[1].dst.buf,
xd->plane[1].dst.stride, &sse_u);
// U skipping condition checking
if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) {
- var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
- x->plane[2].src.stride,
- xd->plane[2].dst.buf,
- xd->plane[2].dst.stride, &sse_v);
+ var_v = cpi->fn_ptr[uv_size].vf(
+ x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf,
+ xd->plane[2].dst.stride, &sse_v);
// V skipping condition checking
if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) {
@@ -939,9 +918,9 @@
};
static void estimate_block_intra(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct estimate_block_intra_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
+ struct estimate_block_intra_args *const args = arg;
VP9_COMP *const cpi = args->cpi;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -959,12 +938,10 @@
p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
// Use source buffer as an approximation for the fully reconstructed buffer.
- vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize],
- tx_size, args->mode,
- x->skip_encode ? p->src.buf : pd->dst.buf,
- x->skip_encode ? src_stride : dst_stride,
- pd->dst.buf, dst_stride,
- col, row, plane);
+ vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size,
+ args->mode, x->skip_encode ? p->src.buf : pd->dst.buf,
+ x->skip_encode ? src_stride : dst_stride, pd->dst.buf,
+ dst_stride, col, row, plane);
if (plane == 0) {
int64_t this_sse = INT64_MAX;
@@ -985,14 +962,13 @@
}
static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][4] = {
- {THR_DC, THR_V_PRED, THR_H_PRED, THR_TM},
- {THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV},
- {THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG},
+ { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM },
+ { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV },
+ { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG },
};
-static const PREDICTION_MODE intra_mode_list[] = {
- DC_PRED, V_PRED, H_PRED, TM_PRED
-};
+static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
+ TM_PRED };
static int mode_offset(const PREDICTION_MODE mode) {
if (mode >= NEARESTMV) {
@@ -999,16 +975,11 @@
return INTER_OFFSET(mode);
} else {
switch (mode) {
- case DC_PRED:
- return 0;
- case V_PRED:
- return 1;
- case H_PRED:
- return 2;
- case TM_PRED:
- return 3;
- default:
- return -1;
+ case DC_PRED: return 0;
+ case V_PRED: return 1;
+ case H_PRED: return 2;
+ case TM_PRED: return 3;
+ default: return -1;
}
}
}
@@ -1046,7 +1017,7 @@
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
bmode_costs = cpi->y_mode_costs[A][L];
- (void) ctx;
+ (void)ctx;
vp9_rd_cost_reset(&best_rdc);
vp9_rd_cost_reset(&this_rdc);
@@ -1067,8 +1038,8 @@
args.skippable = 1;
args.rdc = &this_rdc;
mi->tx_size = intra_tx_size;
- vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
- estimate_block_intra, &args);
+ vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
+ &args);
if (args.skippable) {
x->skip_txfm[0] = SKIP_TXFM_AC_DC;
this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
@@ -1077,8 +1048,7 @@
this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
}
this_rdc.rate += bmode_costs[this_mode];
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
if (this_rdc.rdcost < best_rdc.rdcost) {
best_rdc = this_rdc;
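RDCOST folds rate and distortion into one Lagrangian cost, conceptually J = lambda * R + D, with `rdmult` playing lambda and `rddiv` scaling distortion. A hedged sketch; the fixed-point rounding below is illustrative, the canonical macro lives in vp9_rd.h:

```c
#include <stdint.h>

/* Illustrative RD cost combination (not the canonical macro). */
static int64_t rd_cost(int rdmult, int rddiv, int rate, int64_t dist) {
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}
```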
@@ -1089,8 +1059,7 @@
*rd_cost = best_rdc;
}
-static void init_ref_frame_cost(VP9_COMMON *const cm,
- MACROBLOCKD *const xd,
+static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd,
int ref_frame_cost[MAX_REF_FRAMES]) {
vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
@@ -1098,7 +1067,7 @@
ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] =
- ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);
+ ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);
ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
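Each cost here is the price of one branch in the reference-frame prediction tree: roughly -log2(p) for a bit coded with probability p/256. The library reads these from a precomputed table; the floating-point stand-in below assumes a 256-units-per-bit scale for illustration:

```c
#include <math.h>
#include <stdio.h>

/* Cost of coding `bit` when a zero has probability prob/256. */
static int cost_bit(int prob, int bit) {
  const double p = (bit ? 256 - prob : prob) / 256.0;
  return (int)(-log2(p) * 256.0 + 0.5);
}

int main(void) {
  const int intra_inter_p = 200; /* hypothetical probability */
  printf("intra: %d\n", cost_bit(intra_inter_p, 0)); /* likely branch, cheap */
  printf("inter: %d\n", cost_bit(intra_inter_p, 1)); /* rarer branch, dearer */
  return 0;
}
```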
@@ -1114,24 +1083,16 @@
#define RT_INTER_MODES 8
static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
- {LAST_FRAME, ZEROMV},
- {LAST_FRAME, NEARESTMV},
- {GOLDEN_FRAME, ZEROMV},
- {LAST_FRAME, NEARMV},
- {LAST_FRAME, NEWMV},
- {GOLDEN_FRAME, NEARESTMV},
- {GOLDEN_FRAME, NEARMV},
- {GOLDEN_FRAME, NEWMV}
+ { LAST_FRAME, ZEROMV }, { LAST_FRAME, NEARESTMV },
+ { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV },
+ { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
+ { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV }
};
static const REF_MODE ref_mode_set_svc[RT_INTER_MODES] = {
- {LAST_FRAME, ZEROMV},
- {GOLDEN_FRAME, ZEROMV},
- {LAST_FRAME, NEARESTMV},
- {LAST_FRAME, NEARMV},
- {GOLDEN_FRAME, NEARESTMV},
- {GOLDEN_FRAME, NEARMV},
- {LAST_FRAME, NEWMV},
- {GOLDEN_FRAME, NEWMV}
+ { LAST_FRAME, ZEROMV }, { GOLDEN_FRAME, ZEROMV },
+ { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV },
+ { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV },
+ { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEWMV }
};
static int set_intra_cost_penalty(const VP9_COMP *const cpi, BLOCK_SIZE bsize) {
@@ -1140,23 +1101,20 @@
int reduction_fac =
(bsize <= BLOCK_16X16) ? ((bsize <= BLOCK_8X8) ? 4 : 2) : 0;
if (cpi->noise_estimate.enabled && cpi->noise_estimate.level == kHigh)
- // Don't reduce intra cost penalty if estimated noise level is high.
- reduction_fac = 0;
- return vp9_get_intra_cost_penalty(
- cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth) >> reduction_fac;
+ // Don't reduce intra cost penalty if estimated noise level is high.
+ reduction_fac = 0;
+ return vp9_get_intra_cost_penalty(cm->base_qindex, cm->y_dc_delta_q,
+ cm->bit_depth) >>
+ reduction_fac;
}
-static INLINE void find_predictors(VP9_COMP *cpi, MACROBLOCK *x,
- MV_REFERENCE_FRAME ref_frame,
- int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
- int const_motion[MAX_REF_FRAMES],
- int *ref_frame_skip_mask,
- const int flag_list[4],
- TileDataEnc *tile_data,
- int mi_row, int mi_col,
- struct buf_2d yv12_mb[4][MAX_MB_PLANE],
- BLOCK_SIZE bsize,
- int force_skip_low_temp_var) {
+static INLINE void find_predictors(
+ VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+ int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask,
+ const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col,
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize,
+ int force_skip_low_temp_var) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
@@ -1169,11 +1127,9 @@
if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
- vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
- sf, sf);
+ vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
if (cm->use_prev_frame_mvs) {
- vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame,
- candidates, mi_row, mi_col,
+ vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
x->mbmi_ext->mode_context);
} else {
const_motion[ref_frame] =
@@ -1187,8 +1143,8 @@
// Early exit for golden frame if force_skip_low_temp_var is set.
if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 &&
!(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) {
- vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
- ref_frame, bsize);
+ vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+ bsize);
}
} else {
*ref_frame_skip_mask |= (1 << ref_frame);
@@ -1222,16 +1178,16 @@
left_col = xd->left_mi->mv[0].as_mv.col;
}
if (above_mv_valid && left_mv_valid) {
- al_mv_average_row = (above_row + left_row + 1) >> 1;
- al_mv_average_col = (above_col + left_col + 1) >> 1;
+ al_mv_average_row = (above_row + left_row + 1) >> 1;
+ al_mv_average_col = (above_col + left_col + 1) >> 1;
} else if (above_mv_valid) {
- al_mv_average_row = above_row;
- al_mv_average_col = above_col;
+ al_mv_average_row = above_row;
+ al_mv_average_col = above_col;
} else if (left_mv_valid) {
- al_mv_average_row = left_row;
- al_mv_average_col = left_col;
+ al_mv_average_row = left_row;
+ al_mv_average_col = left_col;
} else {
- al_mv_average_row = al_mv_average_col = 0;
+ al_mv_average_row = al_mv_average_col = 0;
}
row_diff = (al_mv_average_row - mv_row);
col_diff = (al_mv_average_col - mv_col);
@@ -1244,9 +1200,8 @@
}
// If noise estimation is enabled, and estimated level is above threshold,
// add a bias to LAST reference with small motion, for large blocks.
- if (ne->enabled && ne->level >= kMedium &&
- bsize >= BLOCK_32X32 && is_last_frame &&
- mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8) {
+ if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 &&
+ is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8) {
this_rdc->rdcost = 7 * this_rdc->rdcost >> 3;
}
}
@@ -1253,15 +1208,11 @@
#if CONFIG_VP9_TEMPORAL_DENOISING
static void vp9_pickmode_ctx_den_update(
- VP9_PICKMODE_CTX_DEN *ctx_den,
- int64_t zero_last_cost_orig,
+ VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
int ref_frame_cost[MAX_REF_FRAMES],
- int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
- int reuse_inter_pred,
- TX_SIZE best_tx_size,
- PREDICTION_MODE best_mode,
- MV_REFERENCE_FRAME best_ref_frame,
- INTERP_FILTER best_pred_filter,
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred,
+ TX_SIZE best_tx_size, PREDICTION_MODE best_mode,
+ MV_REFERENCE_FRAME best_ref_frame, INTERP_FILTER best_pred_filter,
uint8_t best_mode_skip_txfm) {
ctx_den->zero_last_cost_orig = zero_last_cost_orig;
ctx_den->ref_frame_cost = ref_frame_cost;
@@ -1283,8 +1234,7 @@
// denoised result. Only do this under noise conditions, and if rdcost of
// ZEROMV on original source is not significantly higher than rdcost of best
// mode.
- if (cpi->noise_estimate.enabled &&
- cpi->noise_estimate.level > kLow &&
+ if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
(ctx_den->best_ref_frame == GOLDEN_FRAME &&
@@ -1304,8 +1254,8 @@
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] +
- cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
- [INTER_OFFSET(ZEROMV)];
+ cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
+ [INTER_OFFSET(ZEROMV)];
this_rdc.dist = dist;
this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist);
// Switch to ZEROMV if the rdcost for ZEROMV on denoised source
@@ -1318,8 +1268,9 @@
if (ctx_den->best_ref_frame == INTRA_FRAME)
mi->mv[0].as_int = INVALID_MV;
else if (ctx_den->best_ref_frame == GOLDEN_FRAME) {
- mi->mv[0].as_int = ctx_den->frame_mv[ctx_den->best_mode]
- [ctx_den->best_ref_frame].as_int;
+ mi->mv[0].as_int =
+ ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame]
+ .as_int;
if (ctx_den->reuse_inter_pred) {
xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
@@ -1335,9 +1286,8 @@
}
#endif // CONFIG_VP9_TEMPORAL_DENOISING
-static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
+ int mi_col, BLOCK_SIZE bsize) {
const int i = (mi_row & 0x7) >> 1;
const int j = (mi_col & 0x7) >> 1;
int force_skip_low_temp_var = 0;
@@ -1383,8 +1333,7 @@
return force_skip_low_temp_var;
}
-void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
- TileDataEnc *tile_data,
+void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
int mi_row, int mi_col, RD_COST *rd_cost,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
@@ -1408,15 +1357,18 @@
unsigned int var_y = UINT_MAX;
unsigned int sse_y = UINT_MAX;
const int intra_cost_penalty = set_intra_cost_penalty(cpi, bsize);
- int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv,
- intra_cost_penalty, 0);
+ int64_t inter_mode_thresh =
+ RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0);
const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize];
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
INTERP_FILTER filter_ref;
const int bsl = mi_width_log2_lookup[bsize];
- const int pred_filter_search = cm->interp_filter == SWITCHABLE ?
- (((mi_row + mi_col) >> bsl) +
- get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
+ const int pred_filter_search =
+ cm->interp_filter == SWITCHABLE
+ ? (((mi_row + mi_col) >> bsl) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1
+ : 0;
int const_motion[MAX_REF_FRAMES] = { 0 };
const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
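The pred_filter_search gate above is a checkerboard pattern over the block grid whose parity flips from frame to frame, so the costly interpolation-filter search runs on alternating blocks. A sketch under the assumption that get_chessboard_index reduces to the frame count's parity:

```c
/* Checkerboard gate: 1 on half the blocks, complemented every frame. */
static int do_filter_search(int mi_row, int mi_col, int bsl,
                            unsigned int frame) {
  return (((mi_row + mi_col) >> bsl) + (frame & 1)) & 1;
}
```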
@@ -1438,7 +1390,7 @@
int best_pred_sad = INT_MAX;
int best_early_term = 0;
int ref_frame_cost[MAX_REF_FRAMES];
- int svc_force_zero_mode[3] = {0};
+ int svc_force_zero_mode[3] = { 0 };
int perform_intra_pred = 1;
int use_golden_nonzeromv = 1;
int force_skip_low_temp_var = 0;
@@ -1489,8 +1441,8 @@
mi->ref_frame[0] = NONE;
mi->ref_frame[1] = NONE;
- mi->tx_size = VPXMIN(max_txsize_lookup[bsize],
- tx_mode_to_biggest_tx_size[cm->tx_mode]);
+ mi->tx_size =
+ VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);
if (sf->short_circuit_flat_blocks) {
#if CONFIG_VP9_HIGHBITDEPTH
@@ -1518,18 +1470,15 @@
// For svc mode, on spatial_layer_id > 0: if the reference has a different
// scale, constrain the inter mode to only test zero motion.
- if (cpi->use_svc &&
- svc ->force_zero_mode_spatial_ref &&
+ if (cpi->use_svc && svc->force_zero_mode_spatial_ref &&
cpi->svc.spatial_layer_id > 0) {
if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) {
struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
- if (vp9_is_scaled(sf))
- svc_force_zero_mode[LAST_FRAME - 1] = 1;
+ if (vp9_is_scaled(sf)) svc_force_zero_mode[LAST_FRAME - 1] = 1;
}
if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) {
struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf;
- if (vp9_is_scaled(sf))
- svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
+ if (vp9_is_scaled(sf)) svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
}
}
@@ -1539,7 +1488,7 @@
}
if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) &&
- !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
+ !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
use_golden_nonzeromv = 0;
for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
@@ -1558,8 +1507,7 @@
int this_early_term = 0;
PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
- if (cpi->use_svc)
- this_mode = ref_mode_set_svc[idx].pred_mode;
+ if (cpi->use_svc) this_mode = ref_mode_set_svc[idx].pred_mode;
if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
this_mode != NEARESTMV) {
@@ -1566,8 +1514,7 @@
continue;
}
- if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
- continue;
+ if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue;
ref_frame = ref_mode_set[idx].ref_frame;
if (cpi->use_svc) {
@@ -1574,11 +1521,9 @@
ref_frame = ref_mode_set_svc[idx].ref_frame;
}
- if (!(cpi->ref_frame_flags & flag_list[ref_frame]))
- continue;
+ if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue;
- if (const_motion[ref_frame] && this_mode == NEARMV)
- continue;
+ if (const_motion[ref_frame] && this_mode == NEARMV) continue;
// Skip non-zeromv mode search for golden frame if force_skip_low_temp_var
// is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
@@ -1588,9 +1533,8 @@
continue;
}
- if (cpi->sf.short_circuit_low_temp_var == 2 &&
- force_skip_low_temp_var && ref_frame == LAST_FRAME &&
- this_mode == NEWMV) {
+ if (cpi->sf.short_circuit_low_temp_var == 2 && force_skip_low_temp_var &&
+ ref_frame == LAST_FRAME && this_mode == NEWMV) {
continue;
}
@@ -1608,8 +1552,7 @@
if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
ref_frame_skip_mask |= (1 << ref_frame);
}
- if (ref_frame_skip_mask & (1 << ref_frame))
- continue;
+ if (ref_frame_skip_mask & (1 << ref_frame)) continue;
// Select prediction reference frames.
for (i = 0; i < MAX_MB_PLANE; i++)
@@ -1619,58 +1562,53 @@
set_ref_ptrs(cm, xd, ref_frame, NONE);
mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
- mode_rd_thresh = best_mode_skip_txfm ?
- rd_threshes[mode_index] << 1 : rd_threshes[mode_index];
+ mode_rd_thresh = best_mode_skip_txfm ? rd_threshes[mode_index] << 1
+ : rd_threshes[mode_index];
if (rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
rd_thresh_freq_fact[mode_index]))
continue;
if (this_mode == NEWMV) {
- if (ref_frame > LAST_FRAME &&
- !cpi->use_svc &&
+ if (ref_frame > LAST_FRAME && !cpi->use_svc &&
cpi->oxcf.rc_mode == VPX_CBR) {
int tmp_sad;
uint32_t dis;
int cost_list[5];
- if (bsize < BLOCK_16X16)
- continue;
+ if (bsize < BLOCK_16X16) continue;
tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
- if (tmp_sad > x->pred_mv_sad[LAST_FRAME])
- continue;
+ if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) continue;
if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad)
continue;
frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
- &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
- cpi->find_fractional_mv_step(x, &frame_mv[NEWMV][ref_frame].as_mv,
- &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis,
- &x->pred_sse[ref_frame], NULL, 0, 0);
+ cpi->find_fractional_mv_step(
+ x, &frame_mv[NEWMV][ref_frame].as_mv,
+ &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
+ cpi->common.allow_high_precision_mv, x->errorperbit,
+ &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0,
+ 0);
} else if (svc->use_base_mv && svc->spatial_layer_id) {
if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV &&
frame_mv[NEWMV][ref_frame].as_int != 0) {
const int pre_stride = xd->plane[0].pre[0].stride;
int base_mv_sad = INT_MAX;
- const uint8_t * const pre_buf = xd->plane[0].pre[0].buf +
+ const uint8_t *const pre_buf =
+ xd->plane[0].pre[0].buf +
(frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride +
(frame_mv[NEWMV][ref_frame].as_mv.col >> 3);
- base_mv_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- pre_buf, pre_stride);
+ base_mv_sad = cpi->fn_ptr[bsize].sdf(
+ x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
// TODO(wonkap): make the decision to use base layer mv on RD;
// not just SAD.
@@ -1677,19 +1615,23 @@
if (base_mv_sad < x->pred_mv_sad[ref_frame]) {
// Base layer mv is good.
if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
- &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 1)) {
- continue;
+ &frame_mv[NEWMV][ref_frame], &rate_mv,
+ best_rdc.rdcost, 1)) {
+ continue;
}
} else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
- &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) {
+ &frame_mv[NEWMV][ref_frame],
+ &rate_mv, best_rdc.rdcost, 0)) {
continue;
}
} else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
- &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) {
+ &frame_mv[NEWMV][ref_frame],
+ &rate_mv, best_rdc.rdcost, 0)) {
continue;
}
} else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
- &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) {
+ &frame_mv[NEWMV][ref_frame], &rate_mv,
+ best_rdc.rdcost, 0)) {
continue;
}
}
@@ -1696,16 +1638,15 @@
// If use_golden_nonzeromv is false, NEWMV mode is skipped for golden, so
// there is no need to compute best_pred_sad, which is only used to skip
// golden NEWMV.
- if (use_golden_nonzeromv && this_mode == NEWMV &&
- ref_frame == LAST_FRAME &&
+ if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME &&
frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
const int pre_stride = xd->plane[0].pre[0].stride;
- const uint8_t * const pre_buf = xd->plane[0].pre[0].buf +
+ const uint8_t *const pre_buf =
+ xd->plane[0].pre[0].buf +
(frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
(frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
- best_pred_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- pre_buf, pre_stride);
+ best_pred_sad = cpi->fn_ptr[bsize].sdf(
+ x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
}
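Motion vectors are stored in 1/8-pel units, so the `>> 3` above produces the whole-pixel offset into the reference plane that the SAD call then scans. A plain-C sketch of both steps (the fn_ptr `sdf` hook dispatches to optimized kernels; names here are illustrative):

```c
#include <stdint.h>
#include <stdlib.h>

/* Whole-pel address of the reference block for an eighth-pel MV. */
static const uint8_t *ref_at_mv(const uint8_t *base, int stride,
                                int mv_row_q3, int mv_col_q3) {
  return base + (mv_row_q3 >> 3) * stride + (mv_col_q3 >> 3);
}

/* Reference sum-of-absolute-differences over a w x h block. */
static int sad(const uint8_t *src, int src_stride, const uint8_t *ref,
               int ref_stride, int w, int h) {
  int r, c, sum = 0;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c)
      sum += abs(src[r * src_stride + c] - ref[r * ref_stride + c]);
  return sum;
}
```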
@@ -1730,11 +1671,12 @@
}
}
- if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && pred_filter_search
- && (ref_frame == LAST_FRAME ||
- (ref_frame == GOLDEN_FRAME &&
- (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
- (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
+ if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
+ pred_filter_search &&
+ (ref_frame == LAST_FRAME ||
+ (ref_frame == GOLDEN_FRAME &&
+ (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
+ (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
int pf_rate[3];
int64_t pf_dist[3];
unsigned int pf_var[3];
@@ -1785,9 +1727,9 @@
pd->dst.stride = this_mode_pred->stride;
}
} else {
- // TODO(jackychen): the low-bitdepth condition causes a segfault in
- // high-bitdepth builds.
- // https://bugs.chromium.org/p/webm/issues/detail?id=1250
+// TODO(jackychen): the low-bitdepth condition causes a segfault in
+// high-bitdepth builds.
+// https://bugs.chromium.org/p/webm/issues/detail?id=1250
#if CONFIG_VP9_HIGHBITDEPTH
const int large_block = bsize > BLOCK_32X32;
#else
@@ -1833,8 +1775,9 @@
this_rdc.rate += vp9_get_switchable_rate(cpi, xd);
}
} else {
- this_rdc.rate += cm->interp_filter == SWITCHABLE ?
- vp9_get_switchable_rate(cpi, xd) : 0;
+ this_rdc.rate += cm->interp_filter == SWITCHABLE
+ ? vp9_get_switchable_rate(cpi, xd)
+ : 0;
this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
}
@@ -1859,8 +1802,7 @@
// Bias against NEWMV that is very different from its neighbors, and bias
// to small motion-lastref for noisy input.
- if (cpi->oxcf.rc_mode == VPX_CBR &&
- cpi->oxcf.speed >= 5 &&
+ if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 &&
cpi->oxcf.content != VP9E_CONTENT_SCREEN) {
vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize,
frame_mv[this_mode][ref_frame].as_mv.row,
@@ -1876,8 +1818,8 @@
&this_rdc.dist);
if (x->skip) {
this_rdc.rate += rate_mv;
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate,
- this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
}
}
@@ -1907,12 +1849,10 @@
best_pred = this_mode_pred;
}
} else {
- if (reuse_inter_pred)
- free_pred_buffer(this_mode_pred);
+ if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
}
- if (x->skip)
- break;
+ if (x->skip) break;
// If early termination flag is 1 and at least 2 modes are checked,
// the mode search is terminated.
@@ -1922,11 +1862,11 @@
}
}
- mi->mode = best_mode;
+ mi->mode = best_mode;
mi->interp_filter = best_pred_filter;
- mi->tx_size = best_tx_size;
- mi->ref_frame[0] = best_ref_frame;
- mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
+ mi->tx_size = best_tx_size;
+ mi->ref_frame[0] = best_ref_frame;
+ mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int;
x->skip_txfm[0] = best_mode_skip_txfm;
@@ -1936,15 +1876,14 @@
if (cpi->svc.spatial_layer_id) {
perform_intra_pred =
cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame ||
- !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
- (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame
- && svc_force_zero_mode[best_ref_frame - 1]);
+ !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
+ (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
+ svc_force_zero_mode[best_ref_frame - 1]);
inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh;
}
// Perform intra prediction search, if the best SAD is above a certain
// threshold.
- if ((!force_skip_low_temp_var || bsize < BLOCK_32X32) &&
- perform_intra_pred &&
+ if ((!force_skip_low_temp_var || bsize < BLOCK_32X32) && perform_intra_pred &&
(best_rdc.rdcost == INT64_MAX ||
(!x->skip && best_rdc.rdcost > inter_mode_thresh &&
bsize <= cpi->sf.max_intra_bsize))) {
@@ -1967,12 +1906,12 @@
NULL, 0, NULL, 0, bw, bh, xd->bd);
else
vpx_convolve_copy(best_pred->data, best_pred->stride,
- this_mode_pred->data, this_mode_pred->stride,
- NULL, 0, NULL, 0, bw, bh);
+ this_mode_pred->data, this_mode_pred->stride, NULL,
+ 0, NULL, 0, bw, bh);
#else
vpx_convolve_copy(best_pred->data, best_pred->stride,
- this_mode_pred->data, this_mode_pred->stride,
- NULL, 0, NULL, 0, bw, bh);
+ this_mode_pred->data, this_mode_pred->stride, NULL, 0,
+ NULL, 0, bw, bh);
#endif // CONFIG_VP9_HIGHBITDEPTH
best_pred = this_mode_pred;
}
@@ -2002,8 +1941,8 @@
args.skippable = 1;
args.rdc = &this_rdc;
mi->tx_size = intra_tx_size;
- vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
- estimate_block_intra, &args);
+ vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
+ &args);
// Check skip cost here since skippable is not set for uv; this
// mirrors the behavior used by inter
if (args.skippable) {
@@ -2025,8 +1964,8 @@
this_rdc.rate += cpi->mbmode_cost[this_mode];
this_rdc.rate += ref_frame_cost[INTRA_FRAME];
this_rdc.rate += intra_cost_penalty;
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
if (this_rdc.rdcost < best_rdc.rdcost) {
best_rdc = this_rdc;
@@ -2061,25 +2000,21 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth)
vpx_highbd_convolve_copy(best_pred->data, best_pred->stride,
- pd->dst.buf, pd->dst.stride, NULL, 0,
- NULL, 0, bw, bh, xd->bd);
+ pd->dst.buf, pd->dst.stride, NULL, 0, NULL, 0,
+ bw, bh, xd->bd);
else
- vpx_convolve_copy(best_pred->data, best_pred->stride,
- pd->dst.buf, pd->dst.stride, NULL, 0,
- NULL, 0, bw, bh);
+ vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
+ pd->dst.stride, NULL, 0, NULL, 0, bw, bh);
#else
- vpx_convolve_copy(best_pred->data, best_pred->stride,
- pd->dst.buf, pd->dst.stride, NULL, 0,
- NULL, 0, bw, bh);
+ vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
+ pd->dst.stride, NULL, 0, NULL, 0, bw, bh);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0 &&
- cpi->resize_pending == 0 &&
- cpi->denoiser.denoising_level > kDenLowLow &&
- cpi->denoiser.reset == 0) {
+ if (cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 &&
+ cpi->denoiser.denoising_level > kDenLowLow && cpi->denoiser.reset == 0) {
VP9_DENOISER_DECISION decision = COPY_BLOCK;
vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost,
frame_mv, reuse_inter_pred, best_tx_size,
@@ -2097,7 +2032,7 @@
if (best_ref_frame == INTRA_FRAME) {
// Only consider the modes that are included in the intra_mode_list.
- int intra_modes = sizeof(intra_mode_list)/sizeof(PREDICTION_MODE);
+ int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
int i;
// TODO(yunqingwang): Check intra mode mask and only update freq_fact
@@ -2121,9 +2056,9 @@
*rd_cost = best_rdc;
}
-void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
- int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
+void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
+ int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2153,12 +2088,11 @@
if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
- const struct scale_factors *const sf =
- &cm->frame_refs[ref_frame - 1].sf;
- vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
- sf, sf);
- vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame,
- candidates, mi_row, mi_col, mbmi_ext->mode_context);
+ const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
+ vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf,
+ sf);
+ vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
+ mbmi_ext->mode_context);
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
&dummy_mv[0], &dummy_mv[1]);
@@ -2172,19 +2106,17 @@
mi->uv_mode = DC_PRED;
mi->ref_frame[0] = LAST_FRAME;
mi->ref_frame[1] = NONE;
- mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
- : cm->interp_filter;
+ mi->interp_filter =
+ cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
int64_t this_rd = 0;
int plane;
- if (ref_frame_skip_mask & (1 << ref_frame))
- continue;
+ if (ref_frame_skip_mask & (1 << ref_frame)) continue;
#if CONFIG_BETTER_HW_COMPATIBILITY
- if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) &&
- ref_frame > INTRA_FRAME &&
+ if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME &&
vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
continue;
#endif
@@ -2232,14 +2164,13 @@
pd->dst.buf =
&pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
pd->pre[0].buf =
- &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8,
- i, pd->pre[0].stride)];
+ &pd->pre[0]
+ .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
b_mv[ZEROMV].as_int = 0;
b_mv[NEWMV].as_int = INVALID_MV;
vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
- &b_mv[NEARESTMV],
- &b_mv[NEARMV],
+ &b_mv[NEARESTMV], &b_mv[NEARMV],
mbmi_ext->mode_context);
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
@@ -2267,11 +2198,10 @@
vp9_set_mv_search_range(x, &mbmi_ext->ref_mvs[0]->as_mv);
- vp9_full_pixel_search(
- cpi, x, bsize, &mvp_full, step_param, x->sadperbit4,
- cond_cost_list(cpi, cost_list),
- &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
- INT_MAX, 0);
+ vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param,
+ x->sadperbit4, cond_cost_list(cpi, cost_list),
+ &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
+ &tmp_mv, INT_MAX, 0);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -2282,27 +2212,21 @@
mvp_full.row = tmp_mv.row * 8;
mvp_full.col = tmp_mv.col * 8;
- b_rate += vp9_mv_bit_cost(&mvp_full,
- &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
- x->nmvjointcost, x->mvcost,
- MV_COST_WEIGHT);
+ b_rate += vp9_mv_bit_cost(
+ &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
[INTER_OFFSET(NEWMV)];
- if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd)
- continue;
+ if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue;
- cpi->find_fractional_mv_step(x, &tmp_mv,
- &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &dummy_dist,
- &x->pred_sse[ref_frame], NULL, 0, 0);
+ cpi->find_fractional_mv_step(
+ x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
+ cpi->common.allow_high_precision_mv, x->errorperbit,
+ &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step,
+ cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
+ &dummy_dist, &x->pred_sse[ref_frame], NULL, 0, 0);
xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
} else {
@@ -2312,28 +2236,22 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp9_highbd_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
- pd->dst.buf, pd->dst.stride,
- &xd->mi[0]->bmi[i].as_mv[0].as_mv,
- &xd->block_refs[0]->sf,
- 4 * num_4x4_blocks_wide,
- 4 * num_4x4_blocks_high, 0,
- vp9_filter_kernels[mi->interp_filter],
- MV_PRECISION_Q3,
- mi_col * MI_SIZE + 4 * (i & 0x01),
- mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
+ vp9_highbd_build_inter_predictor(
+ pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
+ &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
+ 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
+ vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i & 0x01),
+ mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
} else {
#endif
- vp9_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
- pd->dst.buf, pd->dst.stride,
- &xd->mi[0]->bmi[i].as_mv[0].as_mv,
- &xd->block_refs[0]->sf,
- 4 * num_4x4_blocks_wide,
- 4 * num_4x4_blocks_high, 0,
- vp9_filter_kernels[mi->interp_filter],
- MV_PRECISION_Q3,
- mi_col * MI_SIZE + 4 * (i & 0x01),
- mi_row * MI_SIZE + 4 * (i >> 1));
+ vp9_build_inter_predictor(
+ pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
+ &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
+ 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
+ vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i & 0x01),
+ mi_row * MI_SIZE + 4 * (i >> 1));
#if CONFIG_VP9_HIGHBITDEPTH
}
@@ -2343,8 +2261,8 @@
&var_y, &sse_y);
this_rdc.rate += b_rate;
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
if (this_rdc.rdcost < b_best_rd) {
b_best_rd = this_rdc.rdcost;
bsi[ref_frame][i].as_mode = this_mode;
@@ -2359,10 +2277,8 @@
this_rd += b_best_rd;
xd->mi[0]->bmi[i] = bsi[ref_frame][i];
- if (num_4x4_blocks_wide > 1)
- xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
- if (num_4x4_blocks_high > 1)
- xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
+ if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
+ if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
}
} // loop through sub8x8 blocks
--- a/vp9/encoder/vp9_pickmode.h
+++ b/vp9/encoder/vp9_pickmode.h
@@ -20,15 +20,12 @@
void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
-void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
- TileDataEnc *tile_data,
+void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx);
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
-void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
- int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize,
+void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
+ int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx);
#ifdef __cplusplus
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -21,13 +21,12 @@
#include "vp9/encoder/vp9_rd.h"
void vp9_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
int i, eob = -1;
// TODO(jingning) Decide the need of these arguments after the
// quantization process is completed.
@@ -53,8 +52,7 @@
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
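Per coefficient, the fast-path quantizer adds the rounding offset to |coeff|, multiplies by `quant` (the Q16 reciprocal of the step, per the `(1 << 16) / quant` setup later in this file), keeps the high 16 bits, restores the sign, and reconstructs via `dequant`. A single-coefficient sketch with constants derived from those formulas:

```c
#include <stdint.h>
#include <stdio.h>

/* One-coefficient sketch of the fast quantizer: quant is the Q16
 * reciprocal of the step, dequant the step itself. (The library also
 * clamps abs_coeff + round into int16 range; omitted here.) */
static int16_t quantize_one(int16_t coeff, int16_t round, int16_t quant,
                            int16_t dequant, int16_t *dqcoeff) {
  const int sign = coeff >> 15;                /* 0 or -1 */
  const int abs_coeff = (coeff ^ sign) - sign; /* |coeff| */
  const int tmp = ((abs_coeff + round) * quant) >> 16;
  const int16_t qc = (int16_t)((tmp ^ sign) - sign);
  *dqcoeff = (int16_t)(qc * dequant);
  return qc;
}

int main(void) {
  int16_t dq;
  /* step 64: quant ~ (1 << 16) / 64 = 1024, round ~ (48 * 64) >> 7 = 24 */
  printf("q=%d dq=%d\n", quantize_one(-200, 24, 1024, 64, &dq), dq);
  return 0; /* prints q=-3 dq=-192 */
}
```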
@@ -61,19 +59,14 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
- intptr_t count,
- int skip_block,
- const int16_t *zbin_ptr,
+void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+ int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan,
- const int16_t *iscan) {
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int i;
int eob = -1;
// TODO(jingning) Decide the need of these arguments after the
@@ -97,8 +90,7 @@
const int abs_qcoeff = (int)((tmp * quant_ptr[rc != 0]) >> 16);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (abs_qcoeff)
- eob = i;
+ if (abs_qcoeff) eob = i;
}
}
*eob_ptr = eob + 1;
@@ -108,13 +100,11 @@
// TODO(jingning) Refactor this file and combine functions with similar
// operations.
void vp9_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
@@ -140,8 +130,7 @@
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
}
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
@@ -148,17 +137,12 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
- intptr_t n_coeffs, int skip_block,
- const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+void vp9_highbd_quantize_fp_32x32_c(
+ const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+ const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
@@ -176,15 +160,14 @@
const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) {
- const int64_t tmp = abs_coeff
- + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
- abs_qcoeff = (uint32_t) ((tmp * quant_ptr[rc != 0]) >> 15);
+ const int64_t tmp =
+ abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+ abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 15);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
}
- if (abs_qcoeff)
- eob = i;
+ if (abs_qcoeff) eob = i;
}
}
*eob_ptr = eob + 1;
@@ -199,22 +182,19 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block),
- 16, x->skip_block,
+ vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block,
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
- BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, &p->eobs[block],
- scan, iscan);
+ BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant,
+ &p->eobs[block], scan, iscan);
return;
}
#endif
- vpx_quantize_b(BLOCK_OFFSET(p->coeff, block),
- 16, x->skip_block,
- p->zbin, p->round, p->quant, p->quant_shift,
+ vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
- BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, &p->eobs[block], scan, iscan);
+ BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant, &p->eobs[block],
+ scan, iscan);
}
static void invert_quant(int16_t *quant, int16_t *shift, int d) {
@@ -221,8 +201,7 @@
unsigned t;
int l, m;
t = d;
- for (l = 0; t > 1; l++)
- t >>= 1;
+ for (l = 0; t > 1; l++) t >>= 1;
m = 1 + (1 << (16 + l)) / d;
*quant = (int16_t)(m - (1 << 16));
*shift = 1 << (16 - l);
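invert_quant sets up division-by-multiplication: l = floor(log2(d)) and m = 1 + 2^(16+l)/d (stored as `quant` = m - 2^16 plus a shift entry), so that x / d can be computed as (x * m) >> (16 + l). A worked check of that identity for a typical step size:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int d = 84; /* a typical dc quant step */
  unsigned t = d;
  int l, m, x;
  for (l = 0; t > 1; l++) t >>= 1; /* l = floor(log2(84)) = 6 */
  m = 1 + (1 << (16 + l)) / d;     /* reciprocal in Q(16+l) */
  for (x = 0; x < (1 << 15); ++x)
    assert((int)(((int64_t)x * m) >> (16 + l)) == x / d);
  printf("ok: l=%d m=%d\n", l, m);
  return 0;
}
```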
@@ -232,18 +211,15 @@
const int quant = vp9_dc_quant(q, 0, bit_depth);
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return q == 0 ? 64 : (quant < 148 ? 84 : 80);
- case VPX_BITS_10:
- return q == 0 ? 64 : (quant < 592 ? 84 : 80);
- case VPX_BITS_12:
- return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+ case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+ case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+ case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
}
#else
- (void) bit_depth;
+ (void)bit_depth;
return q == 0 ? 64 : (quant < 148 ? 84 : 80);
#endif
}
@@ -259,8 +235,7 @@
for (i = 0; i < 2; ++i) {
int qrounding_factor_fp = i == 0 ? 48 : 42;
- if (q == 0)
- qrounding_factor_fp = 64;
+ if (q == 0) qrounding_factor_fp = 64;
// y
quant = i == 0 ? vp9_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
@@ -275,8 +250,8 @@
// uv
quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
: vp9_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
- invert_quant(&quants->uv_quant[q][i],
- &quants->uv_quant_shift[q][i], quant);
+ invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
+ quant);
quants->uv_quant_fp[q][i] = (1 << 16) / quant;
quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
@@ -363,14 +338,11 @@
// Table that converts 0-63 Q-range values passed in from outside to the
// Qindex range used internally.
static const int quantizer_to_qindex[] = {
- 0, 4, 8, 12, 16, 20, 24, 28,
- 32, 36, 40, 44, 48, 52, 56, 60,
- 64, 68, 72, 76, 80, 84, 88, 92,
- 96, 100, 104, 108, 112, 116, 120, 124,
- 128, 132, 136, 140, 144, 148, 152, 156,
- 160, 164, 168, 172, 176, 180, 184, 188,
- 192, 196, 200, 204, 208, 212, 216, 220,
- 224, 228, 232, 236, 240, 244, 249, 255,
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48,
+ 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100,
+ 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
+ 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
+ 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
};
int vp9_quantizer_to_qindex(int quantizer) {
@@ -381,8 +353,7 @@
int quantizer;
for (quantizer = 0; quantizer < 64; ++quantizer)
- if (quantizer_to_qindex[quantizer] >= qindex)
- return quantizer;
+ if (quantizer_to_qindex[quantizer] >= qindex) return quantizer;
return 63;
}
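A round-trip sketch of this external 0-63 quantizer scale: the forward mapping is the table lookup, and the inverse (mirroring the loop above) returns the smallest external value whose qindex reaches the given one:

```c
#include <stdio.h>

static const int quantizer_to_qindex[] = {
  0,   4,   8,   12,  16,  20,  24,  28,  32,  36,  40,  44,  48,
  52,  56,  60,  64,  68,  72,  76,  80,  84,  88,  92,  96,  100,
  104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
  156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
  208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
};

static int qindex_to_quantizer(int qindex) {
  int q;
  for (q = 0; q < 64; ++q)
    if (quantizer_to_qindex[q] >= qindex) return q;
  return 63;
}

int main(void) {
  printf("%d\n", quantizer_to_qindex[10]); /* 40 */
  printf("%d\n", qindex_to_quantizer(41)); /* 11: first qindex >= 41 is 44 */
  return 0;
}
```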
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -48,29 +48,24 @@
#define LIMIT_QP_ONEPASS_VBR_LAG 0
#if CONFIG_VP9_HIGHBITDEPTH
-#define ASSIGN_MINQ_TABLE(bit_depth, name) \
- do { \
- switch (bit_depth) { \
- case VPX_BITS_8: \
- name = name##_8; \
- break; \
- case VPX_BITS_10: \
- name = name##_10; \
- break; \
- case VPX_BITS_12: \
- name = name##_12; \
- break; \
- default: \
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
- " or VPX_BITS_12"); \
- name = NULL; \
- } \
+#define ASSIGN_MINQ_TABLE(bit_depth, name) \
+ do { \
+ switch (bit_depth) { \
+ case VPX_BITS_8: name = name##_8; break; \
+ case VPX_BITS_10: name = name##_10; break; \
+ case VPX_BITS_12: name = name##_12; break; \
+ default: \
+ assert(0 && \
+ "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
+ " or VPX_BITS_12"); \
+ name = NULL; \
+ } \
} while (0)
#else
#define ASSIGN_MINQ_TABLE(bit_depth, name) \
- do { \
- (void) bit_depth; \
- name = name##_8; \
+ do { \
+ (void) bit_depth; \
+ name = name##_8; \
} while (0)
#endif
@@ -109,25 +104,22 @@
static int get_minq_index(double maxq, double x3, double x2, double x1,
vpx_bit_depth_t bit_depth) {
int i;
- const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq,
- maxq);
+ const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
// Special case handling to deal with the step from Q 2.0
// down to lossless mode, represented by Q 1.0.
- if (minqtarget <= 2.0)
- return 0;
+ if (minqtarget <= 2.0) return 0;
for (i = 0; i < QINDEX_RANGE; i++) {
- if (minqtarget <= vp9_convert_qindex_to_q(i, bit_depth))
- return i;
+ if (minqtarget <= vp9_convert_qindex_to_q(i, bit_depth)) return i;
}
return QINDEX_RANGE - 1;
}
-static void init_minq_luts(int *kf_low_m, int *kf_high_m,
- int *arfgf_low, int *arfgf_high,
- int *inter, int *rtc, vpx_bit_depth_t bit_depth) {
+static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
+ int *arfgf_high, int *inter, int *rtc,
+ vpx_bit_depth_t bit_depth) {
int i;
for (i = 0; i < QINDEX_RANGE; i++) {
const double maxq = vp9_convert_qindex_to_q(i, bit_depth);
@@ -158,15 +150,12 @@
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream
double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
- // Convert the index to a real Q value (scaled down to match old Q values)
+// Convert the index to a real Q value (scaled down to match old Q values)
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return vp9_ac_quant(qindex, 0, bit_depth) / 4.0;
- case VPX_BITS_10:
- return vp9_ac_quant(qindex, 0, bit_depth) / 16.0;
- case VPX_BITS_12:
- return vp9_ac_quant(qindex, 0, bit_depth) / 64.0;
+ case VPX_BITS_8: return vp9_ac_quant(qindex, 0, bit_depth) / 4.0;
+ case VPX_BITS_10: return vp9_ac_quant(qindex, 0, bit_depth) / 16.0;
+ case VPX_BITS_12: return vp9_ac_quant(qindex, 0, bit_depth) / 64.0;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1.0;
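The divisors 4, 16 and 64 above undo the growth of the quant-step tables with bit depth (a factor of 2^(bd-8)), keeping Q on the legacy 8-bit scale; equivalently the divisor is 4 << (bd - 8). A tiny check:

```c
#include <stdio.h>

static int q_scale_divisor(int bit_depth) { return 4 << (bit_depth - 8); }

int main(void) {
  printf("%d %d %d\n", q_scale_divisor(8), q_scale_divisor(10),
         q_scale_divisor(12)); /* 4 16 64 */
  return 0;
}
```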
@@ -177,8 +166,7 @@
}
int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor,
- vpx_bit_depth_t bit_depth) {
+ double correction_factor, vpx_bit_depth_t bit_depth) {
const double q = vp9_convert_qindex_to_q(qindex, bit_depth);
int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
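The enumerator constants expose the underlying model: bits per macroblock decay roughly as enumerator / Q, scaled by a running correction factor the rate controller adapts from observed frame sizes. A hedged sketch of that relation (any further Q-based adjustment the library applies is elided):

```c
/* Illustrative rate model: bits/MB ~ correction * enumerator / Q. */
static int bits_per_mb(int key_frame, double q, double correction_factor) {
  const int enumerator = key_frame ? 2700000 : 1800000;
  return (int)(correction_factor * enumerator / q);
}
```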
@@ -193,8 +181,8 @@
int vp9_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
double correction_factor,
vpx_bit_depth_t bit_depth) {
- const int bpm = (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor,
- bit_depth));
+ const int bpm =
+ (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
return VPXMAX(FRAME_OVERHEAD_BITS,
(int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
}
@@ -202,10 +190,9 @@
int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
- const int min_frame_target = VPXMAX(rc->min_frame_bandwidth,
- rc->avg_frame_bandwidth >> 5);
- if (target < min_frame_target)
- target = min_frame_target;
+ const int min_frame_target =
+ VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+ if (target < min_frame_target) target = min_frame_target;
if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) {
// If there is an active ARF at this location, use the minimum
// bits on this frame even if it is a constructed arf.
@@ -214,11 +201,10 @@
target = min_frame_target;
}
// Clip the frame target to the maximum allowed value.
- if (target > rc->max_frame_bandwidth)
- target = rc->max_frame_bandwidth;
+ if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
if (oxcf->rc_max_inter_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_inter_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
return target;
@@ -228,12 +214,11 @@
const RATE_CONTROL *rc = &cpi->rc;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
if (oxcf->rc_max_intra_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_intra_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
- if (target > rc->max_frame_bandwidth)
- target = rc->max_frame_bandwidth;
+ if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
return target;
}
@@ -242,14 +227,13 @@
static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) {
int i = 0;
int current_temporal_layer = svc->temporal_layer_id;
- for (i = current_temporal_layer + 1;
- i < svc->number_temporal_layers; ++i) {
- const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, i,
- svc->number_temporal_layers);
+ for (i = current_temporal_layer + 1; i < svc->number_temporal_layers; ++i) {
+ const int layer =
+ LAYER_IDS_TO_IDX(svc->spatial_layer_id, i, svc->number_temporal_layers);
LAYER_CONTEXT *lc = &svc->layer_context[layer];
RATE_CONTROL *lrc = &lc->rc;
- int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
- encoded_frame_size);
+ int bits_off_for_this_layer =
+ (int)(lc->target_bandwidth / lc->framerate - encoded_frame_size);
lrc->bits_off_target += bits_off_for_this_layer;
// Clip buffer level to maximum buffer size for the layer.
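The per-layer update above is a leaky bucket: each frame credits the layer's per-frame budget, target_bandwidth / framerate, and debits the bits actually produced, with the balance clipped to the buffer size afterwards. A minimal sketch of the credit/debit step:

```c
#include <stdint.h>

/* One leaky-bucket step: a positive balance means bits saved up. */
static int64_t update_bits_off_target(int64_t bits_off_target,
                                      double target_bandwidth,
                                      double framerate,
                                      int encoded_frame_size) {
  return bits_off_target +
         (int64_t)(target_bandwidth / framerate - encoded_frame_size);
}
```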
@@ -287,8 +271,8 @@
}
}
-int vp9_rc_get_default_min_gf_interval(
- int width, int height, double framerate) {
+int vp9_rc_get_default_min_gf_interval(int width, int height,
+ double framerate) {
// Assume we do not need any constraint lower than 4K 20 fps
static const double factor_safe = 3840 * 2160 * 20.0;
const double factor = width * height * framerate;
@@ -319,20 +303,20 @@
rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
} else {
- rc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
- rc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
+ rc->avg_frame_qindex[KEY_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+ rc->avg_frame_qindex[INTER_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
}
rc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q;
- rc->buffer_level = rc->starting_buffer_level;
+ rc->buffer_level = rc->starting_buffer_level;
rc->bits_off_target = rc->starting_buffer_level;
- rc->rolling_target_bits = rc->avg_frame_bandwidth;
- rc->rolling_actual_bits = rc->avg_frame_bandwidth;
+ rc->rolling_target_bits = rc->avg_frame_bandwidth;
+ rc->rolling_actual_bits = rc->avg_frame_bandwidth;
rc->long_rolling_target_bits = rc->avg_frame_bandwidth;
rc->long_rolling_actual_bits = rc->avg_frame_bandwidth;
@@ -348,8 +332,7 @@
rc->fac_active_worst_inter = 150;
rc->fac_active_worst_gf = 100;
rc->force_qpmin = 0;
- for (i = 0; i < MAX_LAG_BUFFERS; ++i)
- rc->avg_source_sad[i] = 0;
+ for (i = 0; i < MAX_LAG_BUFFERS; ++i) rc->avg_source_sad[i] = 0;
rc->frames_since_key = 8; // Sensible default for first frame.
rc->this_key_frame_forced = 0;
rc->next_key_frame_forced = 0;
@@ -393,13 +376,11 @@
} else {
// If buffer is below drop_mark, for now just drop every other frame
// (starting with the next frame) until it increases back over drop_mark.
- int drop_mark = (int)(oxcf->drop_frames_water_mark *
- rc->optimal_buffer_level / 100);
- if ((rc->buffer_level > drop_mark) &&
- (rc->decimation_factor > 0)) {
+ int drop_mark =
+ (int)(oxcf->drop_frames_water_mark * rc->optimal_buffer_level / 100);
+ if ((rc->buffer_level > drop_mark) && (rc->decimation_factor > 0)) {
--rc->decimation_factor;
- } else if (rc->buffer_level <= drop_mark &&
- rc->decimation_factor == 0) {
+ } else if (rc->buffer_level <= drop_mark && rc->decimation_factor == 0) {
rc->decimation_factor = 1;
}
if (rc->decimation_factor > 0) {
@@ -426,7 +407,7 @@
rcf = rc->rate_correction_factors[KF_STD];
} else if (cpi->oxcf.pass == 2) {
RATE_FACTOR_LEVEL rf_lvl =
- cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+ cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
rcf = rc->rate_correction_factors[rf_lvl];
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -452,7 +433,7 @@
rc->rate_correction_factors[KF_STD] = factor;
} else if (cpi->oxcf.pass == 2) {
RATE_FACTOR_LEVEL rf_lvl =
- cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+ cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
rc->rate_correction_factors[rf_lvl] = factor;
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -473,8 +454,7 @@
int projected_size_based_on_q = 0;
// Do not update the rate factors for arf overlay frames.
- if (cpi->rc.is_src_frame_alt_ref)
- return;
+ if (cpi->rc.is_src_frame_alt_ref) return;
// Clear down mmx registers to allow floating point in what follows
vpx_clear_system_state();
@@ -486,21 +466,19 @@
projected_size_based_on_q =
vp9_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
} else {
- projected_size_based_on_q = vp9_estimate_bits_at_q(cpi->common.frame_type,
- cm->base_qindex,
- cm->MBs,
- rate_correction_factor,
- cm->bit_depth);
+ projected_size_based_on_q =
+ vp9_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex, cm->MBs,
+ rate_correction_factor, cm->bit_depth);
}
// Work out a size correction factor.
if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) /
- projected_size_based_on_q);
+ projected_size_based_on_q);
// A more heavily damped adjustment is used if we have been oscillating on
// either side of the target.
- adjustment_limit = 0.25 +
- 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
+ adjustment_limit =
+ 0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
cpi->rc.q_2_frame = cpi->rc.q_1_frame;
cpi->rc.q_1_frame = cm->base_qindex;
@@ -520,8 +498,8 @@
if (correction_factor > 102) {
// We are not already at the worst allowable quality
- correction_factor = (int)(100 + ((correction_factor - 100) *
- adjustment_limit));
+ correction_factor =
+ (int)(100 + ((correction_factor - 100) * adjustment_limit));
rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
// Keep rate_correction_factor within limits
if (rate_correction_factor > MAX_BPB_FACTOR)
@@ -528,8 +506,8 @@
rate_correction_factor = MAX_BPB_FACTOR;
} else if (correction_factor < 99) {
// We are not already at the best allowable quality
- correction_factor = (int)(100 - ((100 - correction_factor) *
- adjustment_limit));
+ correction_factor =
+ (int)(100 - ((100 - correction_factor) * adjustment_limit));
rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
// Keep rate_correction_factor within limits
@@ -540,7 +518,6 @@
set_rate_correction_factor(cpi, rate_correction_factor);
}
-
int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame,
int active_best_quality, int active_worst_quality) {
const VP9_COMMON *const cm = &cpi->common;
@@ -557,15 +534,13 @@
i = active_best_quality;
do {
- if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cm->seg.enabled &&
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
cpi->svc.temporal_layer_id == 0) {
bits_per_mb_at_this_q =
(int)vp9_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
} else {
- bits_per_mb_at_this_q = (int)vp9_rc_bits_per_mb(cm->frame_type, i,
- correction_factor,
- cm->bit_depth);
+ bits_per_mb_at_this_q = (int)vp9_rc_bits_per_mb(
+ cm->frame_type, i, correction_factor, cm->bit_depth);
}
if (bits_per_mb_at_this_q <= target_bits_per_mb) {
@@ -632,16 +607,20 @@
int active_worst_quality;
if (cpi->common.frame_type == KEY_FRAME) {
- active_worst_quality = curr_frame == 0 ? rc->worst_quality
- : rc->last_q[KEY_FRAME] << 1;
+ active_worst_quality =
+ curr_frame == 0 ? rc->worst_quality : rc->last_q[KEY_FRAME] << 1;
} else {
if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 >> 2 :
- rc->last_q[INTER_FRAME] * rc->fac_active_worst_gf / 100;
+ active_worst_quality =
+ curr_frame == 1
+ ? rc->last_q[KEY_FRAME] * 5 >> 2
+ : rc->last_q[INTER_FRAME] * rc->fac_active_worst_gf / 100;
} else {
- active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] << 1 :
- rc->avg_frame_qindex[INTER_FRAME] * rc->fac_active_worst_inter / 100;
+ active_worst_quality = curr_frame == 1
+ ? rc->last_q[KEY_FRAME] << 1
+ : rc->avg_frame_qindex[INTER_FRAME] *
+ rc->fac_active_worst_inter / 100;
}
}
return VPXMIN(active_worst_quality, rc->worst_quality);
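
The shifts in the hunk above are plain integer scalings: x * 5 >> 2 is 1.25x and x << 1 is 2x. A hypothetical sanity check:

    #include <assert.h>
    static void check_q_scaling(void) {
      assert((80 * 5 >> 2) == 100); /* 1.25 * 80 */
      assert((80 << 1) == 160);     /* 2.00 * 80 */
    }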
@@ -663,17 +642,16 @@
int active_worst_quality;
int ambient_qp;
unsigned int num_frames_weight_key = 5 * cpi->svc.number_temporal_layers;
- if (cm->frame_type == KEY_FRAME)
- return rc->worst_quality;
+ if (cm->frame_type == KEY_FRAME) return rc->worst_quality;
// For ambient_qp we use the minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME]
// for the first few frames following a key frame. These are both initialized
// to worst_quality and updated with (3/4, 1/4) average in postencode_update.
// So for the first few frames following a key frame, the qp of that key frame
// is weighted into the active_worst_quality setting.
- ambient_qp = (cm->current_video_frame < num_frames_weight_key) ?
- VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
- rc->avg_frame_qindex[KEY_FRAME]) :
- rc->avg_frame_qindex[INTER_FRAME];
+ ambient_qp = (cm->current_video_frame < num_frames_weight_key)
+ ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+ rc->avg_frame_qindex[KEY_FRAME])
+ : rc->avg_frame_qindex[INTER_FRAME];
active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 >> 2);
if (rc->buffer_level > rc->optimal_buffer_level) {
// Adjust down.
@@ -680,11 +658,11 @@
// Maximum limit for down adjustment, ~30%.
int max_adjustment_down = active_worst_quality / 3;
if (max_adjustment_down) {
- buff_lvl_step = ((rc->maximum_buffer_size -
- rc->optimal_buffer_level) / max_adjustment_down);
+ buff_lvl_step = ((rc->maximum_buffer_size - rc->optimal_buffer_level) /
+ max_adjustment_down);
if (buff_lvl_step)
adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) /
- buff_lvl_step);
+ buff_lvl_step);
active_worst_quality -= adjustment;
}
} else if (rc->buffer_level > critical_level) {
@@ -724,9 +702,8 @@
if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
double last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- (last_boosted_q * 0.75),
- cm->bit_depth);
+ int delta_qindex = vp9_compute_qdelta(
+ rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (cm->current_video_frame > 0) {
// not first frame of one pass and kf_boost is set
@@ -733,9 +710,8 @@
double q_adj_factor = 1.0;
double q_val;
- active_best_quality =
- get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
- cm->bit_depth);
+ active_best_quality = get_kf_active_quality(
+ rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -745,12 +721,10 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp9_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
- } else if (!rc->is_src_frame_alt_ref &&
- !cpi->use_svc &&
+ } else if (!rc->is_src_frame_alt_ref && !cpi->use_svc &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
// Use the lower of active_worst_quality and recent
// average Q as basis for GF/ARF best Q limit unless last frame was
@@ -778,10 +752,10 @@
}
// Clip the active best and worst quality values to limits
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
*top_index = active_worst_quality;
*bottom_index = active_best_quality;
@@ -788,14 +762,12 @@
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
// Limit Q range for the adaptive loop.
- if (cm->frame_type == KEY_FRAME &&
- !rc->this_key_frame_forced &&
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
int qdelta = 0;
vpx_clear_system_state();
- qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 2.0,
- cm->bit_depth);
+ qdelta = vp9_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
*top_index = active_worst_quality + qdelta;
*top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
}
@@ -805,8 +777,8 @@
if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -815,8 +787,7 @@
q = *top_index;
}
}
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -823,12 +794,11 @@
return q;
}
-static int get_active_cq_level_one_pass(
- const RATE_CONTROL *rc, const VP9EncoderConfig *const oxcf) {
+static int get_active_cq_level_one_pass(const RATE_CONTROL *rc,
+ const VP9EncoderConfig *const oxcf) {
static const double cq_adjust_threshold = 0.1;
int active_cq_level = oxcf->cq_level;
- if (oxcf->rc_mode == VPX_CQ &&
- rc->total_target_bits > 0) {
+ if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
const double x = (double)rc->total_actual_bits / rc->total_target_bits;
if (x < cq_adjust_threshold) {
active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
@@ -837,17 +807,17 @@
return active_cq_level;
}
-#define SMOOTH_PCT_MIN 0.1
-#define SMOOTH_PCT_DIV 0.05
-static int get_active_cq_level_two_pass(
- const TWO_PASS *twopass, const RATE_CONTROL *rc,
- const VP9EncoderConfig *const oxcf) {
+#define SMOOTH_PCT_MIN 0.1
+#define SMOOTH_PCT_DIV 0.05
+static int get_active_cq_level_two_pass(const TWO_PASS *twopass,
+ const RATE_CONTROL *rc,
+ const VP9EncoderConfig *const oxcf) {
static const double cq_adjust_threshold = 0.1;
int active_cq_level = oxcf->cq_level;
if (oxcf->rc_mode == VPX_CQ) {
if (twopass->mb_smooth_pct > SMOOTH_PCT_MIN) {
- active_cq_level -= (int)((twopass->mb_smooth_pct - SMOOTH_PCT_MIN) /
- SMOOTH_PCT_DIV);
+ active_cq_level -=
+ (int)((twopass->mb_smooth_pct - SMOOTH_PCT_MIN) / SMOOTH_PCT_DIV);
active_cq_level = VPXMAX(active_cq_level, 0);
}
if (rc->total_target_bits > 0) {
@@ -877,8 +847,7 @@
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
double q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp9_compute_qdelta(rc, q, q * 0.25,
- cm->bit_depth);
+ int delta_qindex = vp9_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (rc->this_key_frame_forced) {
// Handle the special case for key frames forced when we have reached
@@ -886,9 +855,8 @@
// based on the ambient Q to reduce the risk of popping.
int qindex = rc->last_boosted_qindex;
double last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 0.75,
- cm->bit_depth);
+ int delta_qindex = vp9_compute_qdelta(
+ rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
// not first frame of one pass and kf_boost is set
@@ -895,9 +863,8 @@
double q_adj_factor = 1.0;
double q_val;
- active_best_quality =
- get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
- cm->bit_depth);
+ active_best_quality = get_kf_active_quality(
+ rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -907,9 +874,8 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp9_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -927,8 +893,7 @@
}
// For constrained quality, don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
- if (q < cq_level)
- q = cq_level;
+ if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -951,12 +916,11 @@
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
double q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- double delta_rate[FIXED_GF_INTERVAL] =
- {0.50, 1.0, 0.85, 1.0, 0.70, 1.0, 0.85, 1.0};
- int delta_qindex =
- vp9_compute_qdelta(rc, q,
- q * delta_rate[cm->current_video_frame %
- FIXED_GF_INTERVAL], cm->bit_depth);
+ double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
+ 0.70, 1.0, 0.85, 1.0 };
+ int delta_qindex = vp9_compute_qdelta(
+ rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
+ cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
// Use the min of the average Q and active_worst_quality as basis for
@@ -969,8 +933,7 @@
}
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) &&
- (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
@@ -977,16 +940,16 @@
}
// Clip the active best and worst quality values to limits
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
#if LIMIT_QP_ONEPASS_VBR_LAG
if (oxcf->lag_in_frames > 0 && oxcf->rc_mode == VPX_VBR) {
if (rc->force_qpmin > 0 && active_best_quality < rc->force_qpmin)
- active_best_quality = clamp(active_best_quality,
- rc->force_qpmin, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->force_qpmin, rc->worst_quality);
}
#endif
@@ -999,17 +962,14 @@
vpx_clear_system_state();
// Limit Q range for the adaptive loop.
- if (cm->frame_type == KEY_FRAME &&
- !rc->this_key_frame_forced &&
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
- qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 2.0,
- cm->bit_depth);
+ qdelta = vp9_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 1.75,
- cm->bit_depth);
+ qdelta = vp9_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
}
*top_index = active_worst_quality + qdelta;
*top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
@@ -1018,12 +978,12 @@
if (oxcf->rc_mode == VPX_Q) {
q = active_best_quality;
- // Special case code to try and match quality with forced key frames
+ // Special case code to try and match quality with forced key frames
} else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1033,8 +993,7 @@
}
}
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -1049,18 +1008,18 @@
1.75, // GF_ARF_STD
2.00, // KF_STD
};
- static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] =
- {INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME};
+ static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = {
+ INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME
+ };
const VP9_COMMON *const cm = &cpi->common;
- int qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level],
- q, rate_factor_deltas[rf_level],
- cm->bit_depth);
+ int qdelta =
+ vp9_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+ rate_factor_deltas[rf_level], cm->bit_depth);
return qdelta;
}
#define STATIC_MOTION_THRESH 95
-static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi,
- int *bottom_index,
+static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
int *top_index) {
const VP9_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
@@ -1087,8 +1046,7 @@
active_best_quality = qindex;
last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 1.25,
- cm->bit_depth);
+ last_boosted_q * 1.25, cm->bit_depth);
active_worst_quality =
VPXMIN(qindex + delta_qindex, active_worst_quality);
} else {
@@ -1095,8 +1053,7 @@
qindex = rc->last_boosted_qindex;
last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 0.75,
- cm->bit_depth);
+ last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
}
} else {
@@ -1104,8 +1061,8 @@
double q_adj_factor = 1.0;
double q_val;
// Baseline value derived from cpi->active_worst_quality and kf boost.
- active_best_quality = get_kf_active_quality(rc, active_worst_quality,
- cm->bit_depth);
+ active_best_quality =
+ get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -1118,9 +1075,8 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp9_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1135,8 +1091,7 @@
}
// For constrained quality, don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
- if (q < cq_level)
- q = cq_level;
+ if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -1165,8 +1120,7 @@
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) &&
- (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
@@ -1179,11 +1133,11 @@
(!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
active_worst_quality += (cpi->twopass.extend_maxq / 2);
} else {
active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
active_worst_quality += cpi->twopass.extend_maxq;
}
}
@@ -1196,28 +1150,27 @@
(cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
int qdelta = vp9_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index],
active_worst_quality);
- active_worst_quality = VPXMAX(active_worst_quality + qdelta,
- active_best_quality);
+ active_worst_quality =
+ VPXMAX(active_worst_quality + qdelta, active_best_quality);
}
#endif
// Modify active_best_quality for downscaled normal frames.
if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
- int qdelta = vp9_compute_qdelta_by_rate(rc, cm->frame_type,
- active_best_quality, 2.0,
- cm->bit_depth);
+ int qdelta = vp9_compute_qdelta_by_rate(
+ rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
active_best_quality =
VPXMAX(active_best_quality + qdelta, rc->best_quality);
}
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
if (oxcf->rc_mode == VPX_Q) {
q = active_best_quality;
- // Special case code to try and match quality with forced key frames.
+ // Special case code to try and match quality with forced key frames.
} else if ((frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi)) &&
rc->this_key_frame_forced) {
// If static since last kf use better of last boosted and last kf q.
@@ -1227,8 +1180,8 @@
q = rc->last_boosted_qindex;
}
} else {
- q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > active_worst_quality) {
// Special case when we are targeting the max allowed rate.
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1242,8 +1195,7 @@
*top_index = active_worst_quality;
*bottom_index = active_best_quality;
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -1250,8 +1202,8 @@
return q;
}
-int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi,
- int *bottom_index, int *top_index) {
+int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, int *bottom_index,
+ int *top_index) {
int q;
if (cpi->oxcf.pass == 0) {
if (cpi->oxcf.rc_mode == VPX_CBR)
@@ -1262,8 +1214,7 @@
q = rc_pick_q_and_bounds_two_pass(cpi, bottom_index, top_index);
}
if (cpi->sf.use_nonrd_pick_mode) {
- if (cpi->sf.force_frame_boost == 1)
- q -= cpi->sf.max_delta_qindex;
+ if (cpi->sf.force_frame_boost == 1) q -= cpi->sf.max_delta_qindex;
if (q < *bottom_index)
*bottom_index = q;
@@ -1273,20 +1224,19 @@
return q;
}
-void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi,
- int frame_target,
+void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi, int frame_target,
int *frame_under_shoot_limit,
int *frame_over_shoot_limit) {
if (cpi->oxcf.rc_mode == VPX_Q) {
*frame_under_shoot_limit = 0;
- *frame_over_shoot_limit = INT_MAX;
+ *frame_over_shoot_limit = INT_MAX;
} else {
// For very small rate targets where the fractional adjustment
// may be tiny, make sure there is at least a minimum range.
const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
*frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
- *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200,
- cpi->rc.max_frame_bandwidth);
+ *frame_over_shoot_limit =
+ VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
}
}
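
A sketch of the bounds computed above; the +/-200-bit pad and percentage tolerance come from the hunk, the example figures are hypothetical:

    static void compute_shoot_limits(int frame_target, int tolerance_pct,
                                     int max_frame_bandwidth, int *under,
                                     int *over) {
      const int tolerance = (tolerance_pct * frame_target) / 100;
      *under = frame_target - tolerance - 200;
      if (*under < 0) *under = 0;
      *over = frame_target + tolerance + 200;
      if (*over > max_frame_bandwidth) *over = max_frame_bandwidth;
    }
    /* frame_target = 10000, tolerance_pct = 25 -> under = 7300, over = 12700. */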
@@ -1299,12 +1249,12 @@
// Modify frame size target when down-scaling.
if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
rc->frame_size_selector != UNSCALED)
- rc->this_frame_target = (int)(rc->this_frame_target
- * rate_thresh_mult[rc->frame_size_selector]);
+ rc->this_frame_target = (int)(rc->this_frame_target *
+ rate_thresh_mult[rc->frame_size_selector]);
// Target rate per SB64 (including partial SB64s).
- rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) /
- (cm->width * cm->height);
+ rc->sb64_target_rate =
+ ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
}
static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
@@ -1338,13 +1288,11 @@
}
// Decrement count down till next gf
- if (rc->frames_till_gf_update_due > 0)
- rc->frames_till_gf_update_due--;
+ if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
} else if (!cpi->refresh_alt_ref_frame) {
// Decrement count down till next gf
- if (rc->frames_till_gf_update_due > 0)
- rc->frames_till_gf_update_due--;
+ if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
rc->frames_since_golden++;
}
@@ -1359,8 +1307,7 @@
int cnt_zeromv = 0;
for (mi_row = 0; mi_row < rows; mi_row++) {
for (mi_col = 0; mi_col < cols; mi_col++) {
- if (abs(mi[0]->mv[0].as_mv.row) < 16 &&
- abs(mi[0]->mv[0].as_mv.col) < 16)
+ if (abs(mi[0]->mv[0].as_mv.row) < 16 && abs(mi[0]->mv[0].as_mv.col) < 16)
cnt_zeromv++;
mi++;
}
@@ -1409,7 +1356,7 @@
!(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
rc->last_q[INTER_FRAME] = qindex;
rc->avg_frame_qindex[INTER_FRAME] =
- ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
+ ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
rc->ni_frames++;
rc->tot_q += vp9_convert_qindex_to_q(qindex, cm->bit_depth);
rc->avg_q = rc->tot_q / rc->ni_frames;
@@ -1425,15 +1372,13 @@
// If all mbs in this group are skipped, only update if the Q value is
// better than that already stored.
// This is used to help set quality in forced key frames to reduce popping.
- if ((qindex < rc->last_boosted_qindex) ||
- (cm->frame_type == KEY_FRAME) ||
+ if ((qindex < rc->last_boosted_qindex) || (cm->frame_type == KEY_FRAME) ||
(!rc->constrained_gf_group &&
(cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
rc->last_boosted_qindex = qindex;
}
- if (cm->frame_type == KEY_FRAME)
- rc->last_kf_qindex = qindex;
+ if (cm->frame_type == KEY_FRAME) rc->last_kf_qindex = qindex;
update_buffer_level(cpi, rc->projected_frame_size);
@@ -1466,8 +1411,7 @@
update_golden_frame_stats(cpi);
}
- if (cm->frame_type == KEY_FRAME)
- rc->frames_since_key = 0;
+ if (cm->frame_type == KEY_FRAME) rc->frames_since_key = 0;
if (cm->show_frame) {
rc->frames_since_key++;
rc->frames_to_key--;
@@ -1481,8 +1425,7 @@
}
if (oxcf->pass == 0) {
- if (cm->frame_type != KEY_FRAME)
- compute_frame_low_motion(cpi);
+ if (cm->frame_type != KEY_FRAME) compute_frame_low_motion(cpi);
}
}
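
The (3/4, 1/4) running averages used throughout the postencode path above come from ROUND_POWER_OF_TWO; a minimal stand-alone version (the macro matches the vpx_dsp definition, the wrapper is illustrative):

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    static int update_avg_qindex(int avg, int qindex) {
      /* avg <- 0.75 * avg + 0.25 * qindex, rounded */
      return ROUND_POWER_OF_TWO(3 * avg + qindex, 2);
    }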
@@ -1496,7 +1439,7 @@
}
// Use this macro to turn on/off use of alt-refs in one-pass mode.
-#define USE_ALTREF_FOR_ONE_PASS 1
+#define USE_ALTREF_FOR_ONE_PASS 1
static int calc_pframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) {
const RATE_CONTROL *const rc = &cpi->rc;
@@ -1503,12 +1446,13 @@
int target;
const int af_ratio = rc->af_ratio_onepass_vbr;
#if USE_ALTREF_FOR_ONE_PASS
- target = (!rc->is_src_frame_alt_ref &&
- (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ?
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
- (rc->baseline_gf_interval + af_ratio - 1) :
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
- (rc->baseline_gf_interval + af_ratio - 1);
+ target =
+ (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))
+ ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
+ (rc->baseline_gf_interval + af_ratio - 1)
+ : (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
+ (rc->baseline_gf_interval + af_ratio - 1);
#else
target = rc->avg_frame_bandwidth;
#endif
@@ -1547,13 +1491,11 @@
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
if (!cpi->refresh_alt_ref_frame &&
- (cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 ||
- (cpi->oxcf.auto_key && 0))) {
+ (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+ rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
cm->frame_type = KEY_FRAME;
- rc->this_key_frame_forced = cm->current_video_frame != 0 &&
- rc->frames_to_key == 0;
+ rc->this_key_frame_forced =
+ cm->current_video_frame != 0 && rc->frames_to_key == 0;
rc->frames_to_key = cpi->oxcf.key_freq;
rc->kf_boost = DEFAULT_KF_BOOST;
rc->source_alt_ref_active = 0;
@@ -1584,8 +1526,9 @@
}
// Adjust boost and af_ratio based on avg_frame_low_motion, which varies
// between 0 and 100 (stationary, 100% zero/small motion).
- rc->gfu_boost = VPXMAX(500, DEFAULT_GF_BOOST *
- (rc->avg_frame_low_motion << 1) / (rc->avg_frame_low_motion + 100));
+ rc->gfu_boost =
+ VPXMAX(500, DEFAULT_GF_BOOST * (rc->avg_frame_low_motion << 1) /
+ (rc->avg_frame_low_motion + 100));
rc->af_ratio_onepass_vbr = VPXMIN(15, VPXMAX(5, 3 * rc->gfu_boost / 400));
}
adjust_gfint_frame_constraint(cpi, rc->frames_to_key);
@@ -1614,11 +1557,12 @@
if (oxcf->gf_cbr_boost_pct) {
const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100;
- target = cpi->refresh_golden_frame ?
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio_pct) /
- (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) :
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
- (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
+ target = cpi->refresh_golden_frame
+ ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval *
+ af_ratio_pct) /
+ (rc->baseline_gf_interval * 100 + af_ratio_pct - 100)
+ : (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
+ (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
} else {
target = rc->avg_frame_bandwidth;
}
@@ -1626,9 +1570,8 @@
// Note that for layers, avg_frame_bandwidth is the cumulative
// per-frame-bandwidth. For the target size of this frame, use the
// layer average frame size (i.e., non-cumulative per-frame-bw).
- int layer =
- LAYER_IDS_TO_IDX(svc->spatial_layer_id,
- svc->temporal_layer_id, svc->number_temporal_layers);
+ int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id,
+ svc->number_temporal_layers);
const LAYER_CONTEXT *lc = &svc->layer_context[layer];
target = lc->avg_frame_size;
min_frame_target = VPXMAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS);
@@ -1644,8 +1587,8 @@
target += (target * pct_high) / 200;
}
if (oxcf->rc_max_inter_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_inter_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
return VPXMAX(min_frame_target, target);
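
The golden-frame split in the hunk above boosts the golden frame while keeping the GF group at the average rate. A sketch with hypothetical numbers:

    static int cbr_frame_target(int avg, int gf_interval, int boost_pct,
                                int is_golden) {
      const int af_ratio_pct = boost_pct + 100;
      const int den = gf_interval * 100 + af_ratio_pct - 100;
      return is_golden ? (avg * gf_interval * af_ratio_pct) / den
                       : (avg * gf_interval * 100) / den;
    }
    /* avg = 10000, gf_interval = 10, boost_pct = 50: the golden frame gets
     * 14285 bits, the other nine get 9523 each, so the group still averages
     * ~10000 bits per frame. */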
@@ -1658,22 +1601,22 @@
int target;
if (cpi->common.current_video_frame == 0) {
target = ((rc->starting_buffer_level / 2) > INT_MAX)
- ? INT_MAX : (int)(rc->starting_buffer_level / 2);
+ ? INT_MAX
+ : (int)(rc->starting_buffer_level / 2);
} else {
int kf_boost = 32;
double framerate = cpi->framerate;
- if (svc->number_temporal_layers > 1 &&
- oxcf->rc_mode == VPX_CBR) {
+ if (svc->number_temporal_layers > 1 && oxcf->rc_mode == VPX_CBR) {
// Use the layer framerate for temporal layers CBR mode.
- const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id,
- svc->temporal_layer_id, svc->number_temporal_layers);
+ const int layer =
+ LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id,
+ svc->number_temporal_layers);
const LAYER_CONTEXT *lc = &svc->layer_context[layer];
framerate = lc->framerate;
}
kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
- if (rc->frames_since_key < framerate / 2) {
- kf_boost = (int)(kf_boost * rc->frames_since_key /
- (framerate / 2));
+ if (rc->frames_since_key < framerate / 2) {
+ kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
}
target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
}
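
The key-frame target above works in 1/16th units of the average frame budget; a minimal sketch (constants from the hunk):

    static int iframe_target(int kf_boost, int avg_frame_bandwidth) {
      return ((16 + kf_boost) * avg_frame_bandwidth) >> 4;
    }
    /* At the floor kf_boost = 32, iframe_target(32, 10000) == 30000: a key
     * frame gets 3x the per-frame budget. For the first framerate/2 frames
     * after a key frame the boost is scaled down, as in the hunk above. */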
@@ -1684,12 +1627,12 @@
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target = rc->avg_frame_bandwidth;
- int layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id,
- cpi->svc.temporal_layer_id, cpi->svc.number_temporal_layers);
+ int layer =
+ LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id, cpi->svc.temporal_layer_id,
+ cpi->svc.number_temporal_layers);
// Periodic key frames are based on the super-frame counter
// (svc.current_superframe); also, only the base spatial layer is a key frame.
- if ((cm->current_video_frame == 0) ||
- (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+ if ((cm->current_video_frame == 0) || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
(cpi->oxcf.auto_key &&
(cpi->svc.current_superframe % cpi->oxcf.key_freq == 0) &&
cpi->svc.spatial_layer_id == 0)) {
@@ -1697,16 +1640,14 @@
rc->source_alt_ref_active = 0;
if (is_two_pass_svc(cpi)) {
cpi->svc.layer_context[layer].is_key_frame = 1;
- cpi->ref_frame_flags &=
- (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
+ cpi->ref_frame_flags &= (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
} else if (is_one_pass_cbr_svc(cpi)) {
- if (cm->current_video_frame > 0)
- vp9_svc_reset_key_frame(cpi);
+ if (cm->current_video_frame > 0) vp9_svc_reset_key_frame(cpi);
layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id,
- cpi->svc.temporal_layer_id, cpi->svc.number_temporal_layers);
+ cpi->svc.temporal_layer_id,
+ cpi->svc.number_temporal_layers);
cpi->svc.layer_context[layer].is_key_frame = 1;
- cpi->ref_frame_flags &=
- (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
+ cpi->ref_frame_flags &= (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG);
// Assumption here is that LAST_FRAME is being updated for a keyframe.
// Thus no change in update flags.
target = calc_iframe_target_size_one_pass_cbr(cpi);
@@ -1720,8 +1661,7 @@
} else {
lc->is_key_frame =
cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame;
- if (lc->is_key_frame)
- cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
+ if (lc->is_key_frame) cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
}
cpi->ref_frame_flags &= (~VP9_ALT_FLAG);
} else if (is_one_pass_cbr_svc(cpi)) {
@@ -1751,13 +1691,11 @@
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
- if ((cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 ||
- (cpi->oxcf.auto_key && 0))) {
+ if ((cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+ rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
cm->frame_type = KEY_FRAME;
- rc->this_key_frame_forced = cm->current_video_frame != 0 &&
- rc->frames_to_key == 0;
+ rc->this_key_frame_forced =
+ cm->current_video_frame != 0 && rc->frames_to_key == 0;
rc->frames_to_key = cpi->oxcf.key_freq;
rc->kf_boost = DEFAULT_KF_BOOST;
rc->source_alt_ref_active = 0;
@@ -1804,15 +1742,13 @@
// Convert the average q value to an index.
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
start_index = i;
- if (vp9_convert_qindex_to_q(i, bit_depth) >= qstart)
- break;
+ if (vp9_convert_qindex_to_q(i, bit_depth) >= qstart) break;
}
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
target_index = i;
- if (vp9_convert_qindex_to_q(i, bit_depth) >= qtarget)
- break;
+ if (vp9_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
}
return target_index - start_index;
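
Both loops above scan the allowed qindex range for the first index whose real Q reaches a given value; the returned delta is the index distance. A toy version, with a hypothetical stand-in for vp9_convert_qindex_to_q:

    static double toy_qindex_to_q(int i) { return 0.25 * i; } /* hypothetical */
    static int toy_compute_qdelta(int best_q, int worst_q, double qstart,
                                  double qtarget) {
      int i, start_index = worst_q, target_index = worst_q;
      for (i = best_q; i < worst_q; ++i) {
        start_index = i;
        if (toy_qindex_to_q(i) >= qstart) break;
      }
      for (i = best_q; i < worst_q; ++i) {
        target_index = i;
        if (toy_qindex_to_q(i) >= qtarget) break;
      }
      return target_index - start_index;
    }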
@@ -1825,8 +1761,8 @@
int i;
// Look up the current projected bits per block for the base index
- const int base_bits_per_mb = vp9_rc_bits_per_mb(frame_type, qindex, 1.0,
- bit_depth);
+ const int base_bits_per_mb =
+ vp9_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
// Find the target bits per mb based on the base value and given ratio.
const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
@@ -1885,8 +1821,8 @@
int vbr_max_bits;
rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate);
- rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmin_section / 100);
+ rc->min_frame_bandwidth =
+ (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
rc->min_frame_bandwidth =
VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
@@ -1898,8 +1834,9 @@
// a very high rate is given on the command line or the rate cannot
// be achieved because of a user-specified max q (e.g. when the user
// specifies lossless encode).
- vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmax_section) / 100);
+ vbr_max_bits =
+ (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
+ 100);
rc->max_frame_bandwidth =
VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
@@ -1912,27 +1849,27 @@
RATE_CONTROL *const rc = &cpi->rc;
int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
int max_delta;
- int frame_window = VPXMIN(16,
- ((int)cpi->twopass.total_stats.count - cpi->common.current_video_frame));
+ int frame_window = VPXMIN(16, ((int)cpi->twopass.total_stats.count -
+ cpi->common.current_video_frame));
// Calculate the adjustment to rate for this frame.
if (frame_window > 0) {
max_delta = (vbr_bits_off_target > 0)
- ? (int)(vbr_bits_off_target / frame_window)
- : (int)(-vbr_bits_off_target / frame_window);
+ ? (int)(vbr_bits_off_target / frame_window)
+ : (int)(-vbr_bits_off_target / frame_window);
max_delta = VPXMIN(max_delta,
- ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100));
+ ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100));
// vbr_bits_off_target > 0 means we have extra bits to spend
if (vbr_bits_off_target > 0) {
- *this_frame_target +=
- (vbr_bits_off_target > max_delta) ? max_delta
- : (int)vbr_bits_off_target;
+ *this_frame_target += (vbr_bits_off_target > max_delta)
+ ? max_delta
+ : (int)vbr_bits_off_target;
} else {
- *this_frame_target -=
- (vbr_bits_off_target < -max_delta) ? max_delta
- : (int)-vbr_bits_off_target;
+ *this_frame_target -= (vbr_bits_off_target < -max_delta)
+ ? max_delta
+ : (int)-vbr_bits_off_target;
}
}
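
Hypothetical figures for the clamp above: with vbr_bits_off_target = 80000 surplus bits and frame_window = 16, the raw per-frame delta is 5000; VBR_PCT_ADJUSTMENT_LIMIT then caps the delta at that percentage of the frame target, so a 10000-bit target under a 50% limit grows to at most 15000 bits.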
@@ -1992,8 +1929,7 @@
down_size_on = 0;
} else {
if (cpi->resize_state == ORIG &&
- (cm->width * 3 / 4 < min_width ||
- cm->height * 3 / 4 < min_height))
+ (cm->width * 3 / 4 < min_width || cm->height * 3 / 4 < min_height))
return 0;
else if (cpi->resize_state == THREE_QUARTER &&
((cpi->oxcf.width >> 1) < min_width ||
@@ -2069,7 +2005,7 @@
cpi->resize_scale_den = 1;
}
tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) /
- (cpi->resize_scale_num * cpi->resize_scale_num);
+ (cpi->resize_scale_num * cpi->resize_scale_num);
// Reset buffer level to optimal, update target size.
rc->buffer_level = rc->optimal_buffer_level;
rc->bits_off_target = rc->optimal_buffer_level;
@@ -2076,26 +2012,22 @@
rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
// Get the projected qindex, based on the scaled target frame size (scaled
// so target_bits_per_mb in vp9_rc_regulate_q will be correct target).
- target_bits_per_frame = (resize_action >= 0) ?
- rc->this_frame_target * tot_scale_change :
- rc->this_frame_target / tot_scale_change;
+ target_bits_per_frame = (resize_action >= 0)
+ ? rc->this_frame_target * tot_scale_change
+ : rc->this_frame_target / tot_scale_change;
active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
- qindex = vp9_rc_regulate_q(cpi,
- target_bits_per_frame,
- rc->best_quality,
+ qindex = vp9_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
active_worst_quality);
// If resize is down, check if projected q index is close to worst_quality,
// and if so, reduce the rate correction factor (since likely can afford
// lower q for resized frame).
- if (resize_action > 0 &&
- qindex > 90 * cpi->rc.worst_quality / 100) {
+ if (resize_action > 0 && qindex > 90 * cpi->rc.worst_quality / 100) {
rc->rate_correction_factors[INTER_NORMAL] *= 0.85;
}
// If resize is back up, check if projected q index is too much above the
// current base_qindex, and if so, reduce the rate correction factor
// (since prefer to keep q for resized frame at least close to previous q).
- if (resize_action < 0 &&
- qindex > 130 * cm->base_qindex / 100) {
+ if (resize_action < 0 && qindex > 130 * cm->base_qindex / 100) {
rc->rate_correction_factors[INTER_NORMAL] *= 0.9;
}
}
@@ -2103,7 +2035,7 @@
}
void adjust_gf_boost_lag_one_pass_vbr(VP9_COMP *cpi, uint64_t avg_sad_current) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
int found = 0;
@@ -2130,10 +2062,11 @@
}
// Detect up-coming scene change.
if (!found &&
- (rc->avg_source_sad[lagframe_idx] > VPXMAX(sad_thresh1,
- (unsigned int)(reference_sad << 1)) ||
- rc->avg_source_sad[lagframe_idx] > VPXMAX(3 * sad_thresh1 >> 2,
- (unsigned int)(reference_sad << 2)))) {
+ (rc->avg_source_sad[lagframe_idx] >
+ VPXMAX(sad_thresh1, (unsigned int)(reference_sad << 1)) ||
+ rc->avg_source_sad[lagframe_idx] >
+ VPXMAX(3 * sad_thresh1 >> 2,
+ (unsigned int)(reference_sad << 2)))) {
high_source_sad_lagindex = lagframe_idx;
found = 1;
}
@@ -2142,20 +2075,19 @@
rc->avg_source_sad[lagframe_idx - 1] > (sad_thresh1 >> 2)) {
found2 = 1;
for (i = lagframe_idx; i < tot_frames; ++i) {
- if (!(rc->avg_source_sad[i] > 0 &&
- rc->avg_source_sad[i] < (sad_thresh1 >> 2) &&
- rc->avg_source_sad[i] < (rc->avg_source_sad[lagframe_idx - 1] >> 1))) {
- found2 = 0;
- i = tot_frames;
- }
+ if (!(rc->avg_source_sad[i] > 0 &&
+ rc->avg_source_sad[i] < (sad_thresh1 >> 2) &&
+ rc->avg_source_sad[i] <
+ (rc->avg_source_sad[lagframe_idx - 1] >> 1))) {
+ found2 = 0;
+ i = tot_frames;
+ }
}
- if (found2)
- steady_sad_lagindex = lagframe_idx;
+ if (found2) steady_sad_lagindex = lagframe_idx;
}
avg_source_sad_lag += rc->avg_source_sad[lagframe_idx];
}
- if (tot_frames > 0)
- avg_source_sad_lag = avg_source_sad_lag / tot_frames;
+ if (tot_frames > 0) avg_source_sad_lag = avg_source_sad_lag / tot_frames;
// Constrain distance between detected scene cuts.
if (high_source_sad_lagindex != -1 &&
high_source_sad_lagindex != rc->high_source_sad_lagindex - 1 &&
@@ -2165,20 +2097,18 @@
rc->high_source_sad_lagindex = high_source_sad_lagindex;
// Adjust some factors for the next GF group, ignore initial key frame,
// and only for lag_in_frames not too small.
- if (cpi->refresh_golden_frame == 1 &&
- cm->frame_type != KEY_FRAME &&
- cm->current_video_frame > 30 &&
- cpi->oxcf.lag_in_frames > 8) {
+ if (cpi->refresh_golden_frame == 1 && cm->frame_type != KEY_FRAME &&
+ cm->current_video_frame > 30 && cpi->oxcf.lag_in_frames > 8) {
int frame_constraint;
if (rc->rolling_target_bits > 0)
rate_err =
- (double)rc->rolling_actual_bits / (double)rc->rolling_target_bits;
+ (double)rc->rolling_actual_bits / (double)rc->rolling_target_bits;
high_content = high_source_sad_lagindex != -1 ||
- avg_source_sad_lag > (rc->prev_avg_source_sad_lag << 1) ||
- avg_source_sad_lag > sad_thresh2;
+ avg_source_sad_lag > (rc->prev_avg_source_sad_lag << 1) ||
+ avg_source_sad_lag > sad_thresh2;
low_content = high_source_sad_lagindex == -1 &&
- ((avg_source_sad_lag < (rc->prev_avg_source_sad_lag >> 1)) ||
- (avg_source_sad_lag < sad_thresh1));
+ ((avg_source_sad_lag < (rc->prev_avg_source_sad_lag >> 1)) ||
+ (avg_source_sad_lag < sad_thresh1));
if (low_content) {
rc->gfu_boost = DEFAULT_GF_BOOST;
rc->baseline_gf_interval =
@@ -2210,8 +2140,7 @@
}
if (low_content && rc->avg_frame_low_motion > 80) {
rc->af_ratio_onepass_vbr = 15;
- }
- else if (high_content || rc->avg_frame_low_motion < 30) {
+ } else if (high_content || rc->avg_frame_low_motion < 30) {
rc->af_ratio_onepass_vbr = 5;
rc->gfu_boost = DEFAULT_GF_BOOST >> 2;
}
@@ -2234,13 +2163,13 @@
// This function also handles special case of lag_in_frames, to measure content
// level in #future frames set by the lag_in_frames.
void vp9_avg_source_sad(VP9_COMP *cpi) {
- VP9_COMMON * const cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
rc->high_source_sad = 0;
if (cpi->Last_Source != NULL &&
cpi->Last_Source->y_width == cpi->Source->y_width &&
cpi->Last_Source->y_height == cpi->Source->y_height) {
- YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
uint8_t *src_y = cpi->Source->y_buffer;
int src_ystride = cpi->Source->y_stride;
uint8_t *last_src_y = cpi->Last_Source->y_buffer;
@@ -2256,14 +2185,15 @@
thresh = 2.1f;
}
if (cpi->oxcf.lag_in_frames > 0) {
- frames_to_buffer = (cm->current_video_frame == 1) ?
- (int)vp9_lookahead_depth(cpi->lookahead) - 1: 2;
+ frames_to_buffer = (cm->current_video_frame == 1)
+ ? (int)vp9_lookahead_depth(cpi->lookahead) - 1
+ : 2;
start_frame = (int)vp9_lookahead_depth(cpi->lookahead) - 1;
for (frame = 0; frame < frames_to_buffer; ++frame) {
const int lagframe_idx = start_frame - frame;
if (lagframe_idx >= 0) {
- struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
- lagframe_idx);
+ struct lookahead_entry *buf =
+ vp9_lookahead_peek(cpi->lookahead, lagframe_idx);
frames[frame] = &buf->img;
}
}
@@ -2270,8 +2200,9 @@
// The avg_sad for the current frame is the value of frame#1
// (first future frame) from the previous frame.
avg_sad_current = rc->avg_source_sad[1];
- if (avg_sad_current > VPXMAX(min_thresh,
- (unsigned int)(rc->avg_source_sad[0] * thresh)) &&
+ if (avg_sad_current >
+ VPXMAX(min_thresh,
+ (unsigned int)(rc->avg_source_sad[0] * thresh)) &&
cm->current_video_frame > (unsigned int)cpi->oxcf.lag_in_frames)
rc->high_source_sad = 1;
else
@@ -2278,8 +2209,8 @@
rc->high_source_sad = 0;
// Update recursive average for current frame.
if (avg_sad_current > 0)
- rc->avg_source_sad[0] = (3 * rc->avg_source_sad[0] +
- avg_sad_current) >> 2;
+ rc->avg_source_sad[0] =
+ (3 * rc->avg_source_sad[0] + avg_sad_current) >> 2;
// Shift back data, starting at frame#1.
for (frame = 1; frame < cpi->oxcf.lag_in_frames - 1; ++frame)
rc->avg_source_sad[frame] = rc->avg_source_sad[frame + 1];
@@ -2286,13 +2217,12 @@
}
for (frame = 0; frame < frames_to_buffer; ++frame) {
if (cpi->oxcf.lag_in_frames == 0 ||
- (frames[frame] != NULL &&
- frames[frame + 1] != NULL &&
+ (frames[frame] != NULL && frames[frame + 1] != NULL &&
frames[frame]->y_width == frames[frame + 1]->y_width &&
frames[frame]->y_height == frames[frame + 1]->y_height)) {
int sbi_row, sbi_col;
- const int lagframe_idx = (cpi->oxcf.lag_in_frames == 0) ? 0 :
- start_frame - frame + 1;
+ const int lagframe_idx =
+ (cpi->oxcf.lag_in_frames == 0) ? 0 : start_frame - frame + 1;
const BLOCK_SIZE bsize = BLOCK_64X64;
// Loop over sub-sample of frame, compute average sad over 64x64 blocks.
uint64_t avg_sad = 0;
@@ -2311,11 +2241,9 @@
if ((sbi_row > 0 && sbi_col > 0) &&
(sbi_row < sb_rows - 1 && sbi_col < sb_cols - 1) &&
((sbi_row % 2 == 0 && sbi_col % 2 == 0) ||
- (sbi_row % 2 != 0 && sbi_col % 2 != 0))) {
+ (sbi_row % 2 != 0 && sbi_col % 2 != 0))) {
num_samples++;
- avg_sad += cpi->fn_ptr[bsize].sdf(src_y,
- src_ystride,
- last_src_y,
+ avg_sad += cpi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y,
last_src_ystride);
}
src_y += 64;
@@ -2324,16 +2252,16 @@
src_y += (src_ystride << 6) - (sb_cols << 6);
last_src_y += (last_src_ystride << 6) - (sb_cols << 6);
}
- if (num_samples > 0)
- avg_sad = avg_sad / num_samples;
+ if (num_samples > 0) avg_sad = avg_sad / num_samples;
// Set high_source_sad flag if we detect very high increase in avg_sad
// between current and previous frame value(s). Use minimum threshold
// for cases where there is small change from content that is completely
// static.
if (lagframe_idx == 0) {
- if (avg_sad > VPXMAX(min_thresh,
- (unsigned int)(rc->avg_source_sad[0] * thresh)) &&
- rc->frames_since_key > 1)
+ if (avg_sad >
+ VPXMAX(min_thresh,
+ (unsigned int)(rc->avg_source_sad[0] * thresh)) &&
+ rc->frames_since_key > 1)
rc->high_source_sad = 1;
else
rc->high_source_sad = 0;
@@ -2345,17 +2273,15 @@
}
}
// For VBR, under scene change/high content change, force golden refresh.
- if (cpi->oxcf.rc_mode == VPX_VBR &&
- cm->frame_type != KEY_FRAME &&
- rc->high_source_sad &&
- rc->frames_to_key > 3 &&
+ if (cpi->oxcf.rc_mode == VPX_VBR && cm->frame_type != KEY_FRAME &&
+ rc->high_source_sad && rc->frames_to_key > 3 &&
rc->count_last_scene_change > 4 &&
cpi->ext_refresh_frame_flags_pending == 0) {
int target;
cpi->refresh_golden_frame = 1;
rc->gfu_boost = DEFAULT_GF_BOOST >> 1;
- rc->baseline_gf_interval = VPXMIN(20,
- VPXMAX(10, rc->baseline_gf_interval));
+ rc->baseline_gf_interval =
+ VPXMIN(20, VPXMAX(10, rc->baseline_gf_interval));
adjust_gfint_frame_constraint(cpi, rc->frames_to_key);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
target = calc_pframe_target_size_one_pass_vbr(cpi);
@@ -2372,15 +2298,12 @@
// Test if encoded frame will significantly overshoot the target bitrate, and
// if so, set the QP, reset/adjust some rate control parameters, and return 1.
-int vp9_encodedframe_overshoot(VP9_COMP *cpi,
- int frame_size,
- int *q) {
- VP9_COMMON * const cm = &cpi->common;
+int vp9_encodedframe_overshoot(VP9_COMP *cpi, int frame_size, int *q) {
+ VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int thresh_qp = 3 * (rc->worst_quality >> 2);
int thresh_rate = rc->avg_frame_bandwidth * 10;
- if (cm->base_qindex < thresh_qp &&
- frame_size > thresh_rate) {
+ if (cm->base_qindex < thresh_qp && frame_size > thresh_rate) {
double rate_correction_factor =
cpi->rc.rate_correction_factors[INTER_NORMAL];
const int target_size = cpi->rc.avg_frame_bandwidth;
@@ -2430,8 +2353,7 @@
lrc->bits_off_target = rc->optimal_buffer_level;
lrc->rc_1_frame = 0;
lrc->rc_2_frame = 0;
- lrc->rate_correction_factors[INTER_NORMAL] =
- rate_correction_factor;
+ lrc->rate_correction_factors[INTER_NORMAL] = rate_correction_factor;
}
}
return 1;
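
The overshoot trigger above fires only when the frame was coded at a comparatively low Q yet still far exceeded its budget; both thresholds are simple integer expressions. A sketch:

    static int overshoot_detected(int base_qindex, int frame_size,
                                  int worst_quality, int avg_frame_bandwidth) {
      const int thresh_qp = 3 * (worst_quality >> 2);   /* ~75% of worst Q */
      const int thresh_rate = avg_frame_bandwidth * 10; /* 10x the budget */
      return base_qindex < thresh_qp && frame_size > thresh_rate;
    }
    /* e.g. worst_quality = 63 -> thresh_qp = 45. */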
--- a/vp9/encoder/vp9_ratectrl.h
+++ b/vp9/encoder/vp9_ratectrl.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_RATECTRL_H_
#define VP9_ENCODER_VP9_RATECTRL_H_
@@ -23,12 +22,12 @@
#endif
// Bits Per MB at different Q (Multiplied by 512)
-#define BPER_MB_NORMBITS 9
+#define BPER_MB_NORMBITS 9
-#define MIN_GF_INTERVAL 4
-#define MAX_GF_INTERVAL 16
-#define FIXED_GF_INTERVAL 8 // Used in some testing modes only
-#define ONEHALFONLY_RESIZE 0
+#define MIN_GF_INTERVAL 4
+#define MAX_GF_INTERVAL 16
+#define FIXED_GF_INTERVAL 8 // Used in some testing modes only
+#define ONEHALFONLY_RESIZE 0
typedef enum {
INTER_NORMAL = 0,
@@ -54,11 +53,7 @@
UP_ORIG = -2, // From 1/2 or 3/4 to orig.
} RESIZE_ACTION;
-typedef enum {
- ORIG = 0,
- THREE_QUARTER = 1,
- ONE_HALF = 2
-} RESIZE_STATE;
+typedef enum { ORIG = 0, THREE_QUARTER = 1, ONE_HALF = 2 } RESIZE_STATE;
// Frame dimensions multiplier wrt the native frame size, in 1/16ths,
// specified for the scale-up case.
@@ -65,25 +60,25 @@
// e.g. 24 => 16/24 = 2/3 of native size. The restriction to 1/16th is
// intended to match the capabilities of the normative scaling filters,
// giving precedence to the up-scaling accuracy.
-static const int frame_scale_factor[FRAME_SCALE_STEPS] = {16, 24};
+static const int frame_scale_factor[FRAME_SCALE_STEPS] = { 16, 24 };
// Multiplier of the target rate to be used as threshold for triggering scaling.
-static const double rate_thresh_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rate_thresh_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
// Scale dependent Rate Correction Factor multipliers. Compensates for the
// greater number of bits per pixel generated in down-scaled frames.
-static const double rcf_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rcf_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
typedef struct {
// Rate targetting variables
- int base_frame_target; // A baseline frame target before adjustment
- // for previous under or over shoot.
- int this_frame_target; // Actual frame target after rc adjustment.
+ int base_frame_target; // A baseline frame target before adjustment
+ // for previous under or over shoot.
+ int this_frame_target; // Actual frame target after rc adjustment.
int projected_frame_size;
int sb64_target_rate;
- int last_q[FRAME_TYPES]; // Separate values for Intra/Inter
- int last_boosted_qindex; // Last boosted GF/KF/ARF q
- int last_kf_qindex; // Q index of the last key frame coded.
+ int last_q[FRAME_TYPES]; // Separate values for Intra/Inter
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
+ int last_kf_qindex; // Q index of the last key frame coded.
int gfu_boost;
int last_boost;
@@ -179,8 +174,7 @@
RATE_CONTROL *rc);
int vp9_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
- double correction_factor,
- vpx_bit_depth_t bit_depth);
+ double correction_factor, vpx_bit_depth_t bit_depth);
double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
@@ -241,8 +235,7 @@
int *frame_over_shoot_limit);
// Picks q and q bounds given the target for bits
-int vp9_rc_pick_q_and_bounds(const struct VP9_COMP *cpi,
- int *bottom_index,
+int vp9_rc_pick_q_and_bounds(const struct VP9_COMP *cpi, int *bottom_index,
int *top_index);
// Estimates q to achieve a target bits per frame
--- a/vp9/encoder/vp9_rd.c
+++ b/vp9/encoder/vp9_rd.c
@@ -40,7 +40,7 @@
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_tokenize.h"
-#define RD_THRESH_POW 1.25
+#define RD_THRESH_POW 1.25
// Factor to weigh the rate for switchable interp filters.
#define SWITCHABLE_INTERP_RATE_FACTOR 1
@@ -98,8 +98,7 @@
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
vpx_prob probs[ENTROPY_NODES];
vp9_model_to_full_probs(p[t][i][j][k][l], probs);
- vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs,
- vp9_coef_tree);
+ vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp9_coef_tree);
vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
vp9_coef_tree);
assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
@@ -142,13 +141,10 @@
#endif
}
-static const int rd_boost_factor[16] = {
- 64, 32, 32, 32, 24, 16, 12, 12,
- 8, 8, 4, 4, 2, 2, 1, 0
-};
-static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = {
- 128, 144, 128, 128, 144
-};
+static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
+ 8, 8, 4, 4, 2, 2, 1, 0 };
+static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
+ 128, 144 };
int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) {
const int64_t q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth);
@@ -155,15 +151,9 @@
#if CONFIG_VP9_HIGHBITDEPTH
int64_t rdmult = 0;
switch (cpi->common.bit_depth) {
- case VPX_BITS_8:
- rdmult = 88 * q * q / 24;
- break;
- case VPX_BITS_10:
- rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4);
- break;
- case VPX_BITS_12:
- rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8);
- break;
+ case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
+ case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+ case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
@@ -179,8 +169,7 @@
rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
}
- if (rdmult < 1)
- rdmult = 1;
+ if (rdmult < 1) rdmult = 1;
return (int)rdmult;
}
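As a worked check of the arithmetic above (8-bit path, illustrative values): q = 32 gives a base of 88 * 32 * 32 / 24 = 3754; a frame-type factor of 144 and a boost factor of 64 then adjust it in 1/128 steps.

#include <assert.h>

static long long rd_mult_example(void) {
  long long rdmult = 88LL * 32 * 32 / 24;  // 3754 for q == 32 at 8 bits
  rdmult = (rdmult * 144) >> 7;            // frame-type factor: 4223
  rdmult += (rdmult * 64) >> 7;            // boost factor adds 2111: 6334
  if (rdmult < 1) rdmult = 1;
  assert(rdmult == 6334);
  return rdmult;
}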
@@ -188,21 +177,15 @@
double q;
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
- break;
- case VPX_BITS_10:
- q = vp9_dc_quant(qindex, 0, VPX_BITS_10) / 16.0;
- break;
- case VPX_BITS_12:
- q = vp9_dc_quant(qindex, 0, VPX_BITS_12) / 64.0;
- break;
+ case VPX_BITS_8: q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+ case VPX_BITS_10: q = vp9_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+ case VPX_BITS_12: q = vp9_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
}
#else
- (void) bit_depth;
+ (void)bit_depth;
q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
#endif // CONFIG_VP9_HIGHBITDEPTH
// TODO(debargha): Adjust the function below.
@@ -240,7 +223,8 @@
for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
const int qindex =
clamp(vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
- cm->y_dc_delta_q, 0, MAXQ);
+ cm->y_dc_delta_q,
+ 0, MAXQ);
const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
@@ -251,10 +235,9 @@
if (bsize >= BLOCK_8X8) {
for (i = 0; i < MAX_MODES; ++i)
- rd->threshes[segment_id][bsize][i] =
- rd->thresh_mult[i] < thresh_max
- ? rd->thresh_mult[i] * t / 4
- : INT_MAX;
+ rd->threshes[segment_id][bsize][i] = rd->thresh_mult[i] < thresh_max
+ ? rd->thresh_mult[i] * t / 4
+ : INT_MAX;
} else {
for (i = 0; i < MAX_REFS; ++i)
rd->threshes[segment_id][bsize][i] =
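The threshold fill above reduces to a small guard (sketch, names as in the patch); a multiplier pegged at thresh_max maps to INT_MAX, i.e. the mode is never pruned.

#include <limits.h>

static int mode_threshold(int thresh_mult_i, int thresh_max, int t) {
  return thresh_mult_i < thresh_max ? thresh_mult_i * t / 4 : INT_MAX;
}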
@@ -281,7 +264,9 @@
set_error_per_bit(x, rd->RDMULT);
x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
- cm->frame_type != KEY_FRAME) ? 0 : 1;
+ cm->frame_type != KEY_FRAME)
+ ? 0
+ : 1;
set_block_thresholds(cm, rd);
set_partition_probs(cm, xd);
@@ -335,19 +320,15 @@
// where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
// and H(x) is the binary entropy function.
static const int rate_tab_q10[] = {
- 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651,
- 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811,
- 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
- 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638,
- 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130,
- 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
- 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199,
- 1159, 1086, 1021, 963, 911, 864, 821, 781,
- 745, 680, 623, 574, 530, 490, 455, 424,
- 395, 345, 304, 269, 239, 213, 190, 171,
- 154, 126, 104, 87, 73, 61, 52, 44,
- 38, 28, 21, 16, 12, 10, 8, 6,
- 5, 3, 2, 1, 1, 1, 0, 0,
+ 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, 4553, 4389, 4255, 4142,
+ 4044, 3958, 3881, 3811, 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
+ 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, 2589, 2501, 2423, 2353,
+ 2290, 2232, 2179, 2130, 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
+ 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, 1159, 1086, 1021, 963,
+ 911, 864, 821, 781, 745, 680, 623, 574, 530, 490, 455, 424,
+ 395, 345, 304, 269, 239, 213, 190, 171, 154, 126, 104, 87,
+ 73, 61, 52, 44, 38, 28, 21, 16, 12, 10, 8, 6,
+ 5, 3, 2, 1, 1, 1, 0, 0,
};
// Normalized distortion:
@@ -358,34 +339,29 @@
// where x = qpstep / sqrt(variance).
// Note the actual distortion is Dn * variance.
static const int dist_tab_q10[] = {
- 0, 0, 1, 1, 1, 2, 2, 2,
- 3, 3, 4, 5, 5, 6, 7, 7,
- 8, 9, 11, 12, 13, 15, 16, 17,
- 18, 21, 24, 26, 29, 31, 34, 36,
- 39, 44, 49, 54, 59, 64, 69, 73,
- 78, 88, 97, 106, 115, 124, 133, 142,
- 151, 167, 184, 200, 215, 231, 245, 260,
- 274, 301, 327, 351, 375, 397, 418, 439,
- 458, 495, 528, 559, 587, 613, 637, 659,
- 680, 717, 749, 777, 801, 823, 842, 859,
- 874, 899, 919, 936, 949, 960, 969, 977,
- 983, 994, 1001, 1006, 1010, 1013, 1015, 1017,
- 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5,
+ 5, 6, 7, 7, 8, 9, 11, 12, 13, 15, 16, 17,
+ 18, 21, 24, 26, 29, 31, 34, 36, 39, 44, 49, 54,
+ 59, 64, 69, 73, 78, 88, 97, 106, 115, 124, 133, 142,
+ 151, 167, 184, 200, 215, 231, 245, 260, 274, 301, 327, 351,
+ 375, 397, 418, 439, 458, 495, 528, 559, 587, 613, 637, 659,
+ 680, 717, 749, 777, 801, 823, 842, 859, 874, 899, 919, 936,
+ 949, 960, 969, 977, 983, 994, 1001, 1006, 1010, 1013, 1015, 1017,
+ 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
};
static const int xsq_iq_q10[] = {
- 0, 4, 8, 12, 16, 20, 24, 28,
- 32, 40, 48, 56, 64, 72, 80, 88,
- 96, 112, 128, 144, 160, 176, 192, 208,
- 224, 256, 288, 320, 352, 384, 416, 448,
- 480, 544, 608, 672, 736, 800, 864, 928,
- 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888,
- 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808,
- 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648,
- 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328,
- 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688,
- 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408,
- 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848,
- 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728,
+ 0, 4, 8, 12, 16, 20, 24, 28, 32,
+ 40, 48, 56, 64, 72, 80, 88, 96, 112,
+ 128, 144, 160, 176, 192, 208, 224, 256, 288,
+ 320, 352, 384, 416, 448, 480, 544, 608, 672,
+ 736, 800, 864, 928, 992, 1120, 1248, 1376, 1504,
+ 1632, 1760, 1888, 2016, 2272, 2528, 2784, 3040, 3296,
+ 3552, 3808, 4064, 4576, 5088, 5600, 6112, 6624, 7136,
+ 7648, 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328,
+ 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688, 32736,
+ 36832, 40928, 45024, 49120, 53216, 57312, 61408, 65504, 73696,
+ 81888, 90080, 98272, 106464, 114656, 122848, 131040, 147424, 163808,
+ 180192, 196576, 212960, 229344, 245728,
};
const int tmp = (xsq_q10 >> 2) + 8;
const int k = get_msb(tmp) - 3;
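The two lines above select the segment of the piecewise-linear model described by the three Q10 tables, whose spacing doubles every eight entries. A hedged sketch of the interpolation that consumes them (get_msb is reimplemented locally so the snippet stands alone):

static int msb_index(unsigned int v) {
  int n = 0;
  while (v >>= 1) ++n;  // position of the highest set bit
  return n;
}

// Interpolate rate/dist in Q10 between the two bracketing table entries.
static void model_rd_lookup_sketch(int xsq_q10, int *r_q10, int *d_q10) {
  const int tmp = (xsq_q10 >> 2) + 8;
  const int k = msb_index(tmp) - 3;              // which doubling segment
  const int xq = (k << 3) + ((tmp >> k) & 0x7);  // entry within the tables
  const int a_q10 = ((xsq_q10 - xsq_iq_q10[xq]) << 10) >> (2 + k);
  const int b_q10 = (1 << 10) - a_q10;
  *r_q10 = (rate_tab_q10[xq] * b_q10 + rate_tab_q10[xq + 1] * a_q10) >> 10;
  *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
}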
@@ -455,15 +431,12 @@
for (i = 0; i < num_4x4_h; i += 8)
t_left[i] = !!*(const uint64_t *)&left[i];
break;
- default:
- assert(0 && "Invalid transform size.");
- break;
+ default: assert(0 && "Invalid transform size."); break;
}
}
-void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
- uint8_t *ref_y_buffer, int ref_y_stride,
- int ref_frame, BLOCK_SIZE block_size) {
+void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
int best_index = 0;
@@ -473,9 +446,9 @@
int near_same_nearest;
uint8_t *src_y_ptr = x->plane[0].src.buf;
uint8_t *ref_y_ptr;
- const int num_mv_refs = MAX_MV_REF_CANDIDATES +
- (cpi->sf.adaptive_motion_search &&
- block_size < x->max_partition_size);
+ const int num_mv_refs =
+ MAX_MV_REF_CANDIDATES +
+ (cpi->sf.adaptive_motion_search && block_size < x->max_partition_size);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv;
@@ -483,25 +456,22 @@
pred_mv[2] = x->pred_mv[ref_frame];
assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0])));
- near_same_nearest =
- x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
- x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
+ near_same_nearest = x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
+ x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
// Get the sad for each candidate reference mv.
for (i = 0; i < num_mv_refs; ++i) {
const MV *this_mv = &pred_mv[i];
int fp_row, fp_col;
- if (i == 1 && near_same_nearest)
- continue;
+ if (i == 1 && near_same_nearest) continue;
fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
- if (fp_row ==0 && fp_col == 0 && zero_seen)
- continue;
- zero_seen |= (fp_row ==0 && fp_col == 0);
+ if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
+ zero_seen |= (fp_row == 0 && fp_col == 0);
- ref_y_ptr =&ref_y_buffer[ref_y_stride * fp_row + fp_col];
+ ref_y_ptr = &ref_y_buffer[ref_y_stride * fp_row + fp_col];
// Find sad for current vector.
this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
ref_y_ptr, ref_y_stride);
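The fp_row/fp_col expressions above round a 1/8-pel motion-vector component to the nearest full pel, ties away from zero. A quick check:

static int round_q3_to_fullpel(int v_q3) {
  // Equals (v + 4) >> 3 for v >= 0 and (v + 3) >> 3 for v < 0. Assumes
  // arithmetic right shift for negatives, as the encoder does throughout.
  return (v_q3 + 3 + (v_q3 >= 0)) >> 3;  // 4 -> 1, 3 -> 0, -4 -> -1
}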
@@ -520,8 +490,7 @@
void vp9_setup_pred_block(const MACROBLOCKD *xd,
struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col,
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
const struct scale_factors *scale,
const struct scale_factors *scale_uv) {
int i;
@@ -534,13 +503,13 @@
for (i = 0; i < MAX_MB_PLANE; ++i) {
setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
- i ? scale_uv : scale,
- xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+ i ? scale_uv : scale, xd->plane[i].subsampling_x,
+ xd->plane[i].subsampling_y);
}
}
-int vp9_raster_block_offset(BLOCK_SIZE plane_bsize,
- int raster_block, int stride) {
+int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride) {
const int bw = b_width_log2_lookup[plane_bsize];
const int y = 4 * (raster_block >> bw);
const int x = 4 * (raster_block & ((1 << bw) - 1));
@@ -547,8 +516,8 @@
return y * stride + x;
}
-int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base) {
+int16_t *vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+ int16_t *base) {
const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
return base + vp9_raster_block_offset(plane_bsize, raster_block, stride);
}
@@ -558,9 +527,9 @@
const VP9_COMMON *const cm = &cpi->common;
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
- return
- (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) ?
- &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
+ return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
+ ? &cm->buffer_pool->frame_bufs[scaled_idx].buf
+ : NULL;
}
int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) {
@@ -567,7 +536,7 @@
const MODE_INFO *const mi = xd->mi[0];
const int ctx = get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
- cpi->switchable_interp_costs[ctx][mi->interp_filter];
+ cpi->switchable_interp_costs[ctx][mi->interp_filter];
}
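For reference, the switchable-filter rate above is the context-conditioned cost of the chosen filter under a weighting factor that is currently 1 (kept as a tuning hook). Sketch:

static int switchable_rate_sketch(int filter_cost_in_ctx) {
  // With SWITCHABLE_INTERP_RATE_FACTOR at 1 the weighting is a no-op.
  return 1 * filter_cost_in_ctx;
}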
void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) {
@@ -616,7 +585,7 @@
rd->thresh_mult[THR_H_PRED] += 2000;
rd->thresh_mult[THR_V_PRED] += 2000;
- rd->thresh_mult[THR_D45_PRED ] += 2500;
+ rd->thresh_mult[THR_D45_PRED] += 2500;
rd->thresh_mult[THR_D135_PRED] += 2500;
rd->thresh_mult[THR_D117_PRED] += 2500;
rd->thresh_mult[THR_D153_PRED] += 2500;
@@ -625,9 +594,10 @@
}
void vp9_set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) {
- static const int thresh_mult[2][MAX_REFS] =
- {{2500, 2500, 2500, 4500, 4500, 2500},
- {2000, 2000, 2000, 4000, 4000, 2000}};
+ static const int thresh_mult[2][MAX_REFS] = {
+ { 2500, 2500, 2500, 4500, 4500, 2500 },
+ { 2000, 2000, 2000, 4000, 4000, 2000 }
+ };
RD_OPT *const rd = &cpi->rd;
const int idx = cpi->oxcf.mode == BEST;
memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
@@ -659,12 +629,9 @@
const int q = vp9_dc_quant(qindex, qdelta, bit_depth);
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return 20 * q;
- case VPX_BITS_10:
- return 5 * q;
- case VPX_BITS_12:
- return ROUND_POWER_OF_TWO(5 * q, 2);
+ case VPX_BITS_8: return 20 * q;
+ case VPX_BITS_10: return 5 * q;
+ case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
@@ -673,4 +640,3 @@
return 20 * q;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
-
--- a/vp9/encoder/vp9_rd.h
+++ b/vp9/encoder/vp9_rd.h
@@ -23,23 +23,23 @@
extern "C" {
#endif
-#define RDDIV_BITS 7
-#define RD_EPB_SHIFT 6
+#define RDDIV_BITS 7
+#define RD_EPB_SHIFT 6
#define RDCOST(RM, DM, R, D) \
(ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
-#define QIDX_SKIP_THRESH 115
+#define QIDX_SKIP_THRESH 115
-#define MV_COST_WEIGHT 108
-#define MV_COST_WEIGHT_SUB 120
+#define MV_COST_WEIGHT 108
+#define MV_COST_WEIGHT_SUB 120
#define INVALID_MV 0x80008000
#define MAX_MODES 30
-#define MAX_REFS 6
+#define MAX_REFS 6
#define RD_THRESH_MAX_FACT 64
-#define RD_THRESH_INC 1
+#define RD_THRESH_INC 1
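A hedged reading of the realigned RDCOST macro, assuming VP9_PROB_COST_SHIFT is 9 (it is defined elsewhere in the tree): rate is weighted by the Lagrange multiplier RM with rounding, and distortion enters shifted up by DM = RDDIV_BITS.

static long long rdcost_sketch(long long rm, int dm, int rate,
                               long long dist) {
  const int prob_cost_shift = 9;  // assumed value of VP9_PROB_COST_SHIFT
  const long long rounded_rate =
      (rate * rm + (1LL << (prob_cost_shift - 1))) >> prob_cost_shift;
  return rounded_rate + (dist << dm);  // dm is RDDIV_BITS (7)
}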
// This enumerator type needs to be kept aligned with the mode order in
// const MODE_DEFINITION vp9_mode_order[MAX_MODES] used in the rd code.
@@ -135,17 +135,16 @@
void vp9_initialize_me_consts(struct VP9_COMP *cpi, MACROBLOCK *x, int qindex);
void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
- unsigned int qstep, int *rate,
- int64_t *dist);
+ unsigned int qstep, int *rate, int64_t *dist);
int vp9_get_switchable_rate(const struct VP9_COMP *cpi,
const MACROBLOCKD *const xd);
-int vp9_raster_block_offset(BLOCK_SIZE plane_bsize,
- int raster_block, int stride);
+int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride);
-int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base);
+int16_t *vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block,
+ int16_t *base);
YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const struct VP9_COMP *cpi,
int ref_frame);
@@ -161,12 +160,12 @@
void vp9_set_rd_speed_thresholds_sub8x8(struct VP9_COMP *cpi);
-void vp9_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
- int bsize, int best_mode_index);
+void vp9_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh, int bsize,
+ int best_mode_index);
static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
int thresh_fact) {
- return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
+ return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
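The realigned comparison scales thresh by thresh_fact/32 (thresh_fact runs up to RD_THRESH_MAX_FACT, 64), with INT_MAX marking a disabled threshold. A small numeric check:

#include <limits.h>

static int rd_prune_example(void) {
  const long long best_rd = 9000;
  const int thresh = 1000, thresh_fact = 64;  // 64/32 doubles the threshold
  // ((long long)1000 * 64) >> 5 == 2000; 9000 < 2000 is false and thresh is
  // finite, so the check returns 0 here.
  return best_rd < ((long long)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}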
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
@@ -174,14 +173,12 @@
x->errorperbit += (x->errorperbit == 0);
}
-void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x,
- uint8_t *ref_y_buffer, int ref_y_stride,
- int ref_frame, BLOCK_SIZE block_size);
+void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
void vp9_setup_pred_block(const MACROBLOCKD *xd,
struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col,
+ const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
const struct scale_factors *scale,
const struct scale_factors *scale_uv);
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -42,17 +42,17 @@
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_aq_variance.h"
-#define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
- (1 << INTRA_FRAME))
-#define GOLDEN_FRAME_MODE_MASK ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
- (1 << INTRA_FRAME))
-#define ALT_REF_MODE_MASK ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
- (1 << INTRA_FRAME))
+#define LAST_FRAME_MODE_MASK \
+ ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define GOLDEN_FRAME_MODE_MASK \
+ ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define ALT_REF_MODE_MASK \
+ ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
-#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
+#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
-#define MIN_EARLY_TERM_INDEX 3
-#define NEW_MV_DISCOUNT_FACTOR 8
+#define MIN_EARLY_TERM_INDEX 3
+#define NEW_MV_DISCOUNT_FACTOR 8
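The reflowed masks are per-reference bit sets; a hedged usage sketch of how such a mask filters the mode loop:

static int ref_disabled(unsigned int mode_mask, int ref_frame) {
  // e.g. LAST_FRAME_MODE_MASK sets the GOLDEN/ALTREF/INTRA bits, so when
  // only LAST is being searched, modes on those references are skipped.
  return (mode_mask & (1u << ref_frame)) != 0;
}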
typedef struct {
PREDICTION_MODE mode;
@@ -59,9 +59,7 @@
MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;
-typedef struct {
- MV_REFERENCE_FRAME ref_frame[2];
-} REF_DEFINITION;
+typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
struct rdcost_block_args {
const VP9_COMP *cpi;
@@ -81,58 +79,55 @@
#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
- {NEARESTMV, {LAST_FRAME, NONE}},
- {NEARESTMV, {ALTREF_FRAME, NONE}},
- {NEARESTMV, {GOLDEN_FRAME, NONE}},
+ { NEARESTMV, { LAST_FRAME, NONE } },
+ { NEARESTMV, { ALTREF_FRAME, NONE } },
+ { NEARESTMV, { GOLDEN_FRAME, NONE } },
- {DC_PRED, {INTRA_FRAME, NONE}},
+ { DC_PRED, { INTRA_FRAME, NONE } },
- {NEWMV, {LAST_FRAME, NONE}},
- {NEWMV, {ALTREF_FRAME, NONE}},
- {NEWMV, {GOLDEN_FRAME, NONE}},
+ { NEWMV, { LAST_FRAME, NONE } },
+ { NEWMV, { ALTREF_FRAME, NONE } },
+ { NEWMV, { GOLDEN_FRAME, NONE } },
- {NEARMV, {LAST_FRAME, NONE}},
- {NEARMV, {ALTREF_FRAME, NONE}},
- {NEARMV, {GOLDEN_FRAME, NONE}},
+ { NEARMV, { LAST_FRAME, NONE } },
+ { NEARMV, { ALTREF_FRAME, NONE } },
+ { NEARMV, { GOLDEN_FRAME, NONE } },
- {ZEROMV, {LAST_FRAME, NONE}},
- {ZEROMV, {GOLDEN_FRAME, NONE}},
- {ZEROMV, {ALTREF_FRAME, NONE}},
+ { ZEROMV, { LAST_FRAME, NONE } },
+ { ZEROMV, { GOLDEN_FRAME, NONE } },
+ { ZEROMV, { ALTREF_FRAME, NONE } },
- {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {TM_PRED, {INTRA_FRAME, NONE}},
+ { TM_PRED, { INTRA_FRAME, NONE } },
- {NEARMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEWMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}},
- {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+ { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {ZEROMV, {LAST_FRAME, ALTREF_FRAME}},
- {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
+ { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {H_PRED, {INTRA_FRAME, NONE}},
- {V_PRED, {INTRA_FRAME, NONE}},
- {D135_PRED, {INTRA_FRAME, NONE}},
- {D207_PRED, {INTRA_FRAME, NONE}},
- {D153_PRED, {INTRA_FRAME, NONE}},
- {D63_PRED, {INTRA_FRAME, NONE}},
- {D117_PRED, {INTRA_FRAME, NONE}},
- {D45_PRED, {INTRA_FRAME, NONE}},
+ { H_PRED, { INTRA_FRAME, NONE } },
+ { V_PRED, { INTRA_FRAME, NONE } },
+ { D135_PRED, { INTRA_FRAME, NONE } },
+ { D207_PRED, { INTRA_FRAME, NONE } },
+ { D153_PRED, { INTRA_FRAME, NONE } },
+ { D63_PRED, { INTRA_FRAME, NONE } },
+ { D117_PRED, { INTRA_FRAME, NONE } },
+ { D45_PRED, { INTRA_FRAME, NONE } },
};
static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
- {{LAST_FRAME, NONE}},
- {{GOLDEN_FRAME, NONE}},
- {{ALTREF_FRAME, NONE}},
- {{LAST_FRAME, ALTREF_FRAME}},
- {{GOLDEN_FRAME, ALTREF_FRAME}},
- {{INTRA_FRAME, NONE}},
+ { { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
+ { { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
+ { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
};
-static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
- int m, int n, int min_plane, int max_plane) {
+static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
+ int min_plane, int max_plane) {
int i;
for (i = min_plane; i < max_plane; ++i) {
@@ -139,27 +134,27 @@
struct macroblock_plane *const p = &x->plane[i];
struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
- p->coeff = ctx->coeff_pbuf[i][m];
- p->qcoeff = ctx->qcoeff_pbuf[i][m];
+ p->coeff = ctx->coeff_pbuf[i][m];
+ p->qcoeff = ctx->qcoeff_pbuf[i][m];
pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
- p->eobs = ctx->eobs_pbuf[i][m];
+ p->eobs = ctx->eobs_pbuf[i][m];
- ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
- ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
+ ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
+ ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
- ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
+ ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
- ctx->coeff_pbuf[i][n] = p->coeff;
- ctx->qcoeff_pbuf[i][n] = p->qcoeff;
+ ctx->coeff_pbuf[i][n] = p->coeff;
+ ctx->qcoeff_pbuf[i][n] = p->qcoeff;
ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
- ctx->eobs_pbuf[i][n] = p->eobs;
+ ctx->eobs_pbuf[i][n] = p->eobs;
}
}
-static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize,
- MACROBLOCK *x, MACROBLOCKD *xd,
- int *out_rate_sum, int64_t *out_dist_sum,
- int *skip_txfm_sb, int64_t *skip_sse_sb) {
+static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+ MACROBLOCKD *xd, int *out_rate_sum,
+ int64_t *out_dist_sum, int *skip_txfm_sb,
+ int64_t *skip_sse_sb) {
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence quantizer step is also 8 times. To get effective quantizer
// we need to divide by 8 before sending to modeling function.
@@ -177,10 +172,9 @@
int64_t dist;
const int dequant_shift =
#if CONFIG_VP9_HIGHBITDEPTH
- (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
- xd->bd - 5 :
+ (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
#endif // CONFIG_VP9_HIGHBITDEPTH
- 3;
+ 3;
x->pred_sse[ref] = 0;
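The dequant_shift above implements the divide-by-8 mentioned in the note at the top of the function, with a deeper shift at high bit depth where dequant values are pre-scaled: 3 at 8 bits, bd - 5 otherwise (5 at 10-bit, 7 at 12-bit). Illustrative helper:

static int effective_qstep(int dequant, int bit_depth, int is_hbd) {
  const int shift = is_hbd ? bit_depth - 5 : 3;  // plain /8 at 8 bits
  return dequant >> shift;
}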
@@ -211,8 +205,8 @@
int block_idx = (idy << 1) + idx;
int low_err_skip = 0;
- var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
- dst, pd->dst.stride, &sse);
+ var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
+ &sse);
x->bsse[(i << 2) + block_idx] = sse;
sum_sse += sse;
@@ -232,11 +226,9 @@
}
}
- if (skip_flag && !low_err_skip)
- skip_flag = 0;
+ if (skip_flag && !low_err_skip) skip_flag = 0;
- if (i == 0)
- x->pred_sse[ref] += sse;
+ if (i == 0) x->pred_sse[ref] += sse;
}
}
@@ -257,8 +249,8 @@
dist_sum += dist;
} else {
vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
- pd->dequant[1] >> dequant_shift,
- &rate, &dist);
+ pd->dequant[1] >> dequant_shift, &rate,
+ &dist);
rate_sum += rate;
dist_sum += dist;
}
@@ -272,8 +264,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
- const tran_low_t *dqcoeff,
- intptr_t block_size,
+ const tran_low_t *dqcoeff, intptr_t block_size,
int64_t *ssz, int bd) {
int i;
int64_t error = 0, sqcoeff = 0;
@@ -282,7 +273,7 @@
for (i = 0; i < block_size; i++) {
const int64_t diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
}
assert(error >= 0 && sqcoeff >= 0);
@@ -295,8 +286,7 @@
int64_t vp9_highbd_block_error_8bit_c(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
- intptr_t block_size,
- int64_t *ssz) {
+ intptr_t block_size, int64_t *ssz) {
// Note that the C versions of these 2 functions (vp9_block_error and
// vp9_highbd_block_error_8bit) are the same, but the optimized assembly
// routines are not compatible in the non high bitdepth configuration, so
@@ -323,7 +313,7 @@
for (i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += coeff[i] * coeff[i];
}
@@ -338,7 +328,7 @@
for (i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
}
return error;
@@ -350,14 +340,13 @@
* 16th coefficient in a 4x4 block or the 64th coefficient in a 8x8 block,
* were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
- { 1, 2, 3, 4, 3, 16 - 13, 0 },
- { 1, 2, 3, 4, 11, 64 - 21, 0 },
- { 1, 2, 3, 4, 11, 256 - 21, 0 },
+ { 1, 2, 3, 4, 3, 16 - 13, 0 },
+ { 1, 2, 3, 4, 11, 64 - 21, 0 },
+ { 1, 2, 3, 4, 11, 256 - 21, 0 },
{ 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
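band_counts records, per transform size, how many scan positions fall in each coefficient band (the trailing subtractions account for positions already covered by earlier bands). A generic sketch of band advancement during token costing:

#include <stdint.h>

static int advance_band(const int16_t *counts, int *band, int *band_left) {
  // Charge tokens against the current band's cost table until its budget
  // of scan positions is spent, then step to the next band.
  if (--(*band_left) == 0) {
    ++(*band);
    *band_left = counts[*band];
  }
  return *band;
}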
-static int cost_coeffs(MACROBLOCK *x, int plane, int block,
- TX_SIZE tx_size, int pt,
- const int16_t *scan, const int16_t *nb,
+static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
+ int pt, const int16_t *scan, const int16_t *nb,
int use_fast_coef_costing) {
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
@@ -366,8 +355,8 @@
const int16_t *band_count = &band_counts[tx_size][1];
const int eob = p->eobs[block];
const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
- x->token_costs[tx_size][type][is_inter_block(mi)];
+ unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+ x->token_costs[tx_size][type][is_inter_block(mi)];
uint8_t token_cache[32 * 32];
int c, cost;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -377,8 +366,9 @@
#endif
// Check for consistency of tx_size with mode info
- assert(type == PLANE_TYPE_Y ? mi->tx_size == tx_size :
- get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);
+ assert(type == PLANE_TYPE_Y
+ ? mi->tx_size == tx_size
+ : get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);
if (eob == 0) {
// single eob token
@@ -413,8 +403,7 @@
}
// eob token
- if (band_left)
- cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
+ if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
} else { // !use_fast_coef_costing
int band_left = *band_count++;
@@ -422,7 +411,7 @@
// dc token
int v = qcoeff[0];
int16_t tok;
- unsigned int (*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
+ unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
cost = vp9_get_token_cost(v, &tok, cat6_high_cost);
cost += (*token_costs)[0][pt][tok];
@@ -468,9 +457,8 @@
static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd,
const struct macroblockd_plane *const pd,
const uint8_t *src, const int src_stride,
- const uint8_t *dst, const int dst_stride,
- int blk_row, int blk_col,
- const BLOCK_SIZE plane_bsize,
+ const uint8_t *dst, const int dst_stride, int blk_row,
+ int blk_col, const BLOCK_SIZE plane_bsize,
const BLOCK_SIZE tx_bsize) {
unsigned int sse = 0;
const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
@@ -496,8 +484,7 @@
// Skip visiting the sub blocks that are wholly within the UMV.
for (c = 0; c < max_c; ++c) {
vf_4x4(src + r * src_stride * 4 + c * 4, src_stride,
- dst + r * dst_stride * 4 + c * 4, dst_stride,
- &this_sse);
+ dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse);
sse += this_sse;
}
}
@@ -541,10 +528,10 @@
}
static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
- BLOCK_SIZE plane_bsize, int block,
- int blk_row, int blk_col, TX_SIZE tx_size,
- int64_t *out_dist, int64_t *out_sse) {
- MACROBLOCKD* const xd = &x->e_mbd;
+ BLOCK_SIZE plane_bsize, int block, int blk_row,
+ int blk_col, TX_SIZE tx_size, int64_t *out_dist,
+ int64_t *out_sse) {
+ MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -571,9 +558,9 @@
const int64_t p =
(pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >>
#if CONFIG_VP9_HIGHBITDEPTH
- (shift + 2 + (bd - 8) * 2);
+ (shift + 2 + (bd - 8) * 2);
#else
- (shift + 2);
+ (shift + 2);
#endif // CONFIG_VP9_HIGHBITDEPTH
*out_dist += (p >> 4);
*out_sse += p;
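A hedged reading of the rescaled expression above: with the transform skipped, SSE is modeled as the squared AC dequant step times the coefficient count, shifted back out of the fixed-point transform domain (plus an extra 2 * (bd - 8) shift at high bit depth to undo the larger dequant scale); distortion is then taken as 1/16 of that SSE.

static long long skip_sse_model(long long ac_dequant, int ss_txfrm_size,
                                int shift) {
  // dequant^2 per coefficient, (1 << ss_txfrm_size) coefficients.
  return (ac_dequant * ac_dequant * (1LL << ss_txfrm_size)) >> (shift + 2);
}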
@@ -591,8 +578,8 @@
const uint16_t *eob = &p->eobs[block];
unsigned int tmp;
- tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride,
- blk_row, blk_col, plane_bsize, tx_bsize);
+ tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row,
+ blk_col, plane_bsize, tx_bsize);
*out_sse = (int64_t)tmp * 16;
if (*eob) {
@@ -624,23 +611,16 @@
case TX_32X32:
vp9_highbd_idct32x32_add(dqcoeff, recon, 32, *eob, xd->bd);
break;
- default:
- assert(0 && "Invalid transform size");
+ default: assert(0 && "Invalid transform size");
}
}
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_VP9_HIGHBITDEPTH
vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, NULL, 0, bs, bs);
switch (tx_size) {
- case TX_32X32:
- vp9_idct32x32_add(dqcoeff, recon, 32, *eob);
- break;
- case TX_16X16:
- vp9_idct16x16_add(dqcoeff, recon, 32, *eob);
- break;
- case TX_8X8:
- vp9_idct8x8_add(dqcoeff, recon, 32, *eob);
- break;
+ case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, *eob); break;
+ case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, *eob); break;
+ case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, *eob); break;
case TX_4X4:
// this is like vp9_short_idct4x4 but has a special case around
// eob<=1, which is significant (not just an optimization) for
@@ -647,16 +627,14 @@
// the lossless case.
x->itxm_add(dqcoeff, recon, 32, *eob);
break;
- default:
- assert(0 && "Invalid transform size");
- break;
+ default: assert(0 && "Invalid transform size"); break;
}
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
- tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32,
- blk_row, blk_col, plane_bsize, tx_bsize);
+ tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row, blk_col,
+ plane_bsize, tx_bsize);
}
*out_dist = (int64_t)tmp * 16;
@@ -664,10 +642,9 @@
}
static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx,
- struct rdcost_block_args* args) {
- return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx,
- args->so->scan, args->so->neighbors,
- args->use_fast_coef_costing);
+ struct rdcost_block_args *args) {
+ return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan,
+ args->so->neighbors, args->use_fast_coef_costing);
}
static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
@@ -680,15 +657,14 @@
int rate;
int64_t dist;
int64_t sse;
- const int coeff_ctx = combine_entropy_contexts(args->t_left[blk_row],
- args->t_above[blk_col]);
+ const int coeff_ctx =
+ combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]);
- if (args->exit_early)
- return;
+ if (args->exit_early) return;
if (!is_inter_block(mi)) {
- struct encode_b_args intra_arg = {x, args->cpi->sf.quant_coeff_opt,
- args->t_above, args->t_left, &mi->skip};
+ struct encode_b_args intra_arg = { x, args->cpi->sf.quant_coeff_opt,
+ args->t_above, args->t_left, &mi->skip };
vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
&intra_arg);
if (args->cpi->sf.txfm_domain_distortion) {
@@ -705,8 +681,8 @@
const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
unsigned int tmp;
- sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row,
- blk_col, plane_bsize, tx_bsize);
+ sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col,
+ plane_bsize, tx_bsize);
#if CONFIG_VP9_HIGHBITDEPTH
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8))
sse = ROUND_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
@@ -728,11 +704,11 @@
} else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
SKIP_TXFM_AC_ONLY) {
// compute DC coefficient
- tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
+ tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
tx_size);
- sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+ sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
dist = sse;
if (x->plane[plane].eobs[block]) {
const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
@@ -741,8 +717,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
dc_correct >>= ((xd->bd - 8) * 2);
#endif
- if (tx_size != TX_32X32)
- dc_correct >>= 2;
+ if (tx_size != TX_32X32) dc_correct >>= 2;
dist = VPXMAX(0, sse - dc_correct);
}
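A sketch of the AC-skip branch's intent (dc_correct is derived from the DC coefficient's own error just above the visible context): the stored block SSE stands in for distortion, minus the part the coded DC coefficient recovers, floored at zero.

static long long ac_skip_dist(long long sse, long long dc_correct) {
  const long long dist = sse - dc_correct;  // credit the coded DC term
  return dist < 0 ? 0 : dist;               // VPXMAX(0, ...) in the patch
}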
@@ -750,7 +725,7 @@
// SKIP_TXFM_AC_DC
// skip forward transform
x->plane[plane].eobs[block] = 0;
- sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+ sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
dist = sse;
}
} else {
@@ -777,8 +752,8 @@
// TODO(jingning): temporarily enabled only for luma component
rd = VPXMIN(rd1, rd2);
if (plane == 0)
- x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
- (rd1 > rd2 && !xd->lossless);
+ x->zcoeff_blk[tx_size][block] =
+ !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless);
args->this_rate += rate;
args->this_dist += dist;
@@ -807,33 +782,30 @@
args.use_fast_coef_costing = use_fast_coef_casting;
args.skippable = 1;
- if (plane == 0)
- xd->mi[0]->tx_size = tx_size;
+ if (plane == 0) xd->mi[0]->tx_size = tx_size;
vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);
- vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
- block_rd_txfm, &args);
+ vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+ &args);
if (args.exit_early) {
- *rate = INT_MAX;
+ *rate = INT_MAX;
*distortion = INT64_MAX;
- *sse = INT64_MAX;
- *skippable = 0;
+ *sse = INT64_MAX;
+ *skippable = 0;
} else {
*distortion = args.this_dist;
- *rate = args.this_rate;
- *sse = args.this_sse;
- *skippable = args.skippable;
+ *rate = args.this_rate;
+ *sse = args.this_sse;
+ *skippable = args.skippable;
}
}
-static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion,
- int *skip, int64_t *sse,
- int64_t ref_best_rd,
- BLOCK_SIZE bs) {
+static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip, int64_t *sse,
+ int64_t ref_best_rd, BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
@@ -846,12 +818,9 @@
mi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
- int *rate,
- int64_t *distortion,
- int *skip,
- int64_t *psse,
- int64_t ref_best_rd,
+static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip,
+ int64_t *psse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
@@ -860,10 +829,10 @@
vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
int r[TX_SIZES][2], s[TX_SIZES];
int64_t d[TX_SIZES], sse[TX_SIZES];
- int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX},
- {INT64_MAX, INT64_MAX},
- {INT64_MAX, INT64_MAX},
- {INT64_MAX, INT64_MAX}};
+ int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX },
+ { INT64_MAX, INT64_MAX },
+ { INT64_MAX, INT64_MAX },
+ { INT64_MAX, INT64_MAX } };
int n, m;
int s0, s1;
int64_t best_rd = INT64_MAX;
@@ -879,8 +848,8 @@
start_tx = max_tx_size;
end_tx = 0;
} else {
- TX_SIZE chosen_tx_size = VPXMIN(max_tx_size,
- tx_mode_to_biggest_tx_size[cm->tx_mode]);
+ TX_SIZE chosen_tx_size =
+ VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
start_tx = chosen_tx_size;
end_tx = chosen_tx_size;
}
@@ -887,7 +856,7 @@
for (n = start_tx; n >= end_tx; n--) {
int r_tx_size = 0;
- for (m = 0; m <= n - (n == (int) max_tx_size); m++) {
+ for (m = 0; m <= n - (n == (int)max_tx_size); m++) {
if (m == n)
r_tx_size += vp9_cost_zero(tx_probs[m]);
else
@@ -922,8 +891,7 @@
// Early termination in transform size search.
if (cpi->sf.tx_size_search_breakout &&
(rd[n][1] == INT64_MAX ||
- (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) ||
- s[n] == 1))
+ (n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1))
break;
if (rd[n][1] < best_rd) {
@@ -934,15 +902,14 @@
mi->tx_size = best_tx;
*distortion = d[mi->tx_size];
- *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
- *skip = s[mi->tx_size];
- *psse = sse[mi->tx_size];
+ *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
+ *skip = s[mi->tx_size];
+ *psse = sse[mi->tx_size];
}
static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
- int64_t *distortion, int *skip,
- int64_t *psse, BLOCK_SIZE bs,
- int64_t ref_best_rd) {
+ int64_t *distortion, int *skip, int64_t *psse,
+ BLOCK_SIZE bs, int64_t ref_best_rd) {
MACROBLOCKD *xd = &x->e_mbd;
int64_t sse;
int64_t *ret_sse = psse ? psse : &sse;
@@ -953,39 +920,33 @@
choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
bs);
} else {
- choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
- ref_best_rd, bs);
+ choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
+ bs);
}
}
static int conditional_skipintra(PREDICTION_MODE mode,
PREDICTION_MODE best_intra_mode) {
- if (mode == D117_PRED &&
- best_intra_mode != V_PRED &&
+ if (mode == D117_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D135_PRED)
return 1;
- if (mode == D63_PRED &&
- best_intra_mode != V_PRED &&
+ if (mode == D63_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D45_PRED)
return 1;
- if (mode == D207_PRED &&
- best_intra_mode != H_PRED &&
+ if (mode == D207_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D45_PRED)
return 1;
- if (mode == D153_PRED &&
- best_intra_mode != H_PRED &&
+ if (mode == D153_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D135_PRED)
return 1;
return 0;
}
-static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x,
- int row, int col,
- PREDICTION_MODE *best_mode,
- const int *bmode_costs,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- int *bestrate, int *bestratey,
- int64_t *bestdistortion,
+static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
+ int col, PREDICTION_MODE *best_mode,
+ const int *bmode_costs, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, int *bestrate,
+ int *bestratey, int64_t *bestdistortion,
BLOCK_SIZE bsize, int64_t rd_thresh) {
PREDICTION_MODE mode;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1018,14 +979,12 @@
int64_t distortion = 0;
int rate = bmode_costs[mode];
- if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
// Only do the oblique modes if the best so far is
// one of the neighboring directional modes
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(mode, *best_mode))
- continue;
+ if (conditional_skipintra(mode, *best_mode)) continue;
}
memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
@@ -1036,50 +995,46 @@
const int block = (row + idy) * 2 + (col + idx);
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
- int16_t *const src_diff = vp9_raster_block_offset_int16(BLOCK_8X8,
- block,
- p->src_diff);
+ int16_t *const src_diff =
+ vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
vp9_predict_intra_block(xd, 1, TX_4X4, mode,
x->skip_encode ? src : dst,
- x->skip_encode ? src_stride : dst_stride,
- dst, dst_stride,
- col + idx, row + idy, 0);
- vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
- dst, dst_stride, xd->bd);
+ x->skip_encode ? src_stride : dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
+ vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+ dst_stride, xd->bd);
if (xd->lossless) {
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
- const int coeff_ctx = combine_entropy_contexts(tempa[idx],
- templ[idy]);
+ const int coeff_ctx =
+ combine_entropy_contexts(tempa[idx], templ[idy]);
vp9_highbd_fwht4x4(src_diff, coeff, 8);
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride,
- p->eobs[block], xd->bd);
+ vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], xd->bd);
} else {
int64_t unused;
const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
- const int coeff_ctx = combine_entropy_contexts(tempa[idx],
- templ[idy]);
+ const int coeff_ctx =
+ combine_entropy_contexts(tempa[idx], templ[idy]);
if (tx_type == DCT_DCT)
vpx_highbd_fdct4x4(src_diff, coeff, 8);
else
vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
distortion += vp9_highbd_block_error_dispatch(
- coeff, BLOCK_OFFSET(pd->dqcoeff, block),
- 16, &unused, xd->bd) >> 2;
+ coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
+ &unused, xd->bd) >>
+ 2;
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
@@ -1106,16 +1061,13 @@
num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
}
- next_highbd:
- {}
+ next_highbd : {}
}
- if (best_rd >= rd_thresh || x->skip_encode)
- return best_rd;
+ if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
- best_dst16 + idy * 8,
- num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+ best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
return best_rd;
@@ -1128,14 +1080,12 @@
int64_t distortion = 0;
int rate = bmode_costs[mode];
- if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
// Only do the oblique modes if the best so far is
// one of the neighboring directional modes
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(mode, *best_mode))
- continue;
+ if (conditional_skipintra(mode, *best_mode)) continue;
}
memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
@@ -1150,21 +1100,19 @@
vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
- vp9_predict_intra_block(xd, 1, TX_4X4, mode,
- x->skip_encode ? src : dst,
- x->skip_encode ? src_stride : dst_stride,
- dst, dst_stride, col + idx, row + idy, 0);
+ vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst,
+ x->skip_encode ? src_stride : dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
if (xd->lossless) {
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
- const int coeff_ctx = combine_entropy_contexts(tempa[idx],
- templ[idy]);
+ const int coeff_ctx =
+ combine_entropy_contexts(tempa[idx], templ[idy]);
vp9_fwht4x4(src_diff, coeff, 8);
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
@@ -1174,25 +1122,27 @@
int64_t unused;
const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
- const int coeff_ctx = combine_entropy_contexts(tempa[idx],
- templ[idy]);
+ const int coeff_ctx =
+ combine_entropy_contexts(tempa[idx], templ[idy]);
vp9_fht4x4(src_diff, coeff, 8, tx_type);
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
#if CONFIG_VP9_HIGHBITDEPTH
- distortion += vp9_highbd_block_error_8bit(
- coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16, &unused) >> 2;
+ distortion +=
+ vp9_highbd_block_error_8bit(
+ coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16, &unused) >>
+ 2;
#else
distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
- 16, &unused) >> 2;
+ 16, &unused) >>
+ 2;
#endif
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride, p->eobs[block]);
+ vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block]);
}
}
}
@@ -1212,12 +1162,10 @@
memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
num_4x4_blocks_wide * 4);
}
- next:
- {}
+ next : {}
}
- if (best_rd >= rd_thresh || x->skip_encode)
- return best_rd;
+ if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
@@ -1256,17 +1204,15 @@
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);
- bmode_costs = cpi->y_mode_costs[A][L];
+ bmode_costs = cpi->y_mode_costs[A][L];
}
- this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
- bmode_costs,
- xd->plane[0].above_context + idx,
- xd->plane[0].left_context + idy,
- &r, &ry, &d, bsize, best_rd - total_rd);
+ this_rd = rd_pick_intra4x4block(
+ cpi, mb, idy, idx, &best_mode, bmode_costs,
+ xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
+ &ry, &d, bsize, best_rd - total_rd);
- if (this_rd >= best_rd - total_rd)
- return INT64_MAX;
+ if (this_rd >= best_rd - total_rd) return INT64_MAX;
total_rd += this_rd;
cost += r;
@@ -1279,8 +1225,7 @@
for (j = 1; j < num_4x4_blocks_wide; ++j)
mic->bmi[i + j].as_mode = best_mode;
- if (total_rd >= best_rd)
- return INT64_MAX;
+ if (total_rd >= best_rd) return INT64_MAX;
}
}
@@ -1293,10 +1238,9 @@
}
// This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize,
+static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize,
int64_t best_rd) {
PREDICTION_MODE mode;
PREDICTION_MODE mode_selected = DC_PRED;
@@ -1318,31 +1262,28 @@
if (cpi->sf.use_nonrd_pick_mode) {
// These speed features are turned on in hybrid non-RD and RD mode
// for key frame coding in the context of real-time setting.
- if (conditional_skipintra(mode, mode_selected))
- continue;
- if (*skippable)
- break;
+ if (conditional_skipintra(mode, mode_selected)) continue;
+ if (*skippable) break;
}
mic->mode = mode;
- super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
- &s, NULL, bsize, best_rd);
+ super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+ bsize, best_rd);
- if (this_rate_tokenonly == INT_MAX)
- continue;
+ if (this_rate_tokenonly == INT_MAX) continue;
this_rate = this_rate_tokenonly + bmode_costs[mode];
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
- mode_selected = mode;
- best_rd = this_rd;
- best_tx = mic->tx_size;
- *rate = this_rate;
+ mode_selected = mode;
+ best_rd = this_rd;
+ best_tx = mic->tx_size;
+ *rate = this_rate;
*rate_tokenonly = this_rate_tokenonly;
- *distortion = this_distortion;
- *skippable = s;
+ *distortion = this_distortion;
+ *skippable = s;
}
}
@@ -1354,10 +1295,9 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion, int *skippable,
- int64_t *sse, BLOCK_SIZE bsize,
- int64_t ref_best_rd) {
+static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skippable, int64_t *sse,
+ BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mi = xd->mi[0];
const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
@@ -1366,8 +1306,7 @@
int64_t pndist = 0, pnsse = 0;
int is_cost_valid = 1;
- if (ref_best_rd < 0)
- is_cost_valid = 0;
+ if (ref_best_rd < 0) is_cost_valid = 0;
if (is_inter_block(mi) && is_cost_valid) {
int plane;
@@ -1405,10 +1344,10 @@
}
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
- PICK_MODE_CONTEXT *ctx,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
+ PICK_MODE_CONTEXT *ctx, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize,
+ TX_SIZE max_tx_size) {
MACROBLOCKD *xd = &x->e_mbd;
PREDICTION_MODE mode;
PREDICTION_MODE mode_selected = DC_PRED;
@@ -1418,8 +1357,7 @@
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
- if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
(xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode])
@@ -1428,23 +1366,22 @@
xd->mi[0]->uv_mode = mode;
- if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
- &this_distortion, &s, &this_sse, bsize, best_rd))
+ if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+ &this_sse, bsize, best_rd))
continue;
- this_rate = this_rate_tokenonly +
- cpi->intra_uv_mode_cost[cpi->common.frame_type]
- [xd->mi[0]->mode][mode];
+ this_rate =
+ this_rate_tokenonly +
+ cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode];
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
- mode_selected = mode;
- best_rd = this_rd;
- *rate = this_rate;
+ mode_selected = mode;
+ best_rd = this_rd;
+ *rate = this_rate;
*rate_tokenonly = this_rate_tokenonly;
- *distortion = this_distortion;
- *skippable = s;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
+ *distortion = this_distortion;
+ *skippable = s;
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
}
}
@@ -1452,40 +1389,38 @@
return best_rd;
}
-static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize) {
+static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize) {
const VP9_COMMON *cm = &cpi->common;
int64_t unused;
x->e_mbd.mi[0]->uv_mode = DC_PRED;
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
- super_block_uvrd(cpi, x, rate_tokenonly, distortion,
- skippable, &unused, bsize, INT64_MAX);
- *rate = *rate_tokenonly +
- cpi->intra_uv_mode_cost[cm->frame_type]
- [x->e_mbd.mi[0]->mode][DC_PRED];
+ super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
+ bsize, INT64_MAX);
+ *rate =
+ *rate_tokenonly +
+ cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED];
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
- PICK_MODE_CONTEXT *ctx,
- BLOCK_SIZE bsize, TX_SIZE max_tx_size,
- int *rate_uv, int *rate_uv_tokenonly,
- int64_t *dist_uv, int *skip_uv,
- PREDICTION_MODE *mode_uv) {
+ PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+ TX_SIZE max_tx_size, int *rate_uv,
+ int *rate_uv_tokenonly, int64_t *dist_uv,
+ int *skip_uv, PREDICTION_MODE *mode_uv) {
// Use an estimated rd for uv_intra based on DC_PRED if the
// appropriate speed flag is set.
if (cpi->sf.use_uv_intra_rd_estimate) {
- rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
- skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
- // Else do a proper rd search for each possible transform size that may
- // be considered in the main rd loop.
+ rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+ // Else do a proper rd search for each possible transform size that may
+ // be considered in the main rd loop.
} else {
- rd_pick_intra_sbuv_mode(cpi, x, ctx,
- rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
- bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
+ skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
+ max_tx_size);
}
*mode_uv = x->e_mbd.mi[0]->uv_mode;
}
@@ -1497,8 +1432,7 @@
}
static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
- int i,
- PREDICTION_MODE mode, int_mv this_mv[2],
+ int i, PREDICTION_MODE mode, int_mv this_mv[2],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
int_mv seg_mvs[MAX_REF_FRAMES],
int_mv *best_ref_mv[2], const int *mvjcost,
@@ -1530,16 +1464,13 @@
break;
case ZEROMV:
this_mv[0].as_int = 0;
- if (is_compound)
- this_mv[1].as_int = 0;
+ if (is_compound) this_mv[1].as_int = 0;
break;
- default:
- break;
+ default: break;
}
mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
- if (is_compound)
- mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
+ if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
mi->bmi[i].as_mode = mode;
@@ -1548,17 +1479,13 @@
memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i]));
return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) +
- thismvcost;
+ thismvcost;
}
-static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
- MACROBLOCK *x,
- int64_t best_yrd,
- int i,
- int *labelyrate,
+static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x,
+ int64_t best_yrd, int i, int *labelyrate,
int64_t *distortion, int64_t *sse,
- ENTROPY_CONTEXT *ta,
- ENTROPY_CONTEXT *tl,
+ ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
int mi_row, int mi_col) {
int k;
MACROBLOCKD *xd = &x->e_mbd;
@@ -1572,8 +1499,8 @@
const uint8_t *const src =
&p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
- uint8_t *const dst = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i,
- pd->dst.stride)];
+ uint8_t *const dst =
+ &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
int64_t thisdistortion = 0, thissse = 0;
int thisrate = 0, ref;
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
@@ -1594,35 +1521,26 @@
y_stride = xd->block_refs[ref]->buf->y_stride;
pre = xd->block_refs[ref]->buf->y_buffer;
- pre += scaled_buffer_offset(x_start + w, y_start + h,
- y_stride, sf);
+ pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf);
}
#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp9_highbd_build_inter_predictor(pre, y_stride,
- dst, pd->dst.stride,
- &mi->bmi[i].as_mv[ref].as_mv,
- &xd->block_refs[ref]->sf, width, height,
- ref, kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE + 4 * (i % 2),
- mi_row * MI_SIZE + 4 * (i / 2), xd->bd);
- } else {
- vp9_build_inter_predictor(pre, y_stride,
- dst, pd->dst.stride,
- &mi->bmi[i].as_mv[ref].as_mv,
- &xd->block_refs[ref]->sf, width, height, ref,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE + 4 * (i % 2),
- mi_row * MI_SIZE + 4 * (i / 2));
- }
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ vp9_highbd_build_inter_predictor(
+ pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
+ &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2),
+ xd->bd);
+ } else {
+ vp9_build_inter_predictor(
+ pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
+ &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
+ }
#else
- vp9_build_inter_predictor(pre, y_stride,
- dst, pd->dst.stride,
- &mi->bmi[i].as_mv[ref].as_mv,
- &xd->block_refs[ref]->sf, width, height, ref,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE + 4 * (i % 2),
- mi_row * MI_SIZE + 4 * (i / 2));
+ vp9_build_inter_predictor(
+ pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
+ &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
#endif // CONFIG_VP9_HIGHBITDEPTH
}
@@ -1632,9 +1550,9 @@
height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
} else {
- vpx_subtract_block(
- height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
- 8, src, p->src.stride, dst, pd->dst.stride);
+ vpx_subtract_block(height, width,
+ vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride);
}
#else
vpx_subtract_block(height, width,
@@ -1649,7 +1567,7 @@
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
#endif
int64_t ssz, rd, rd1, rd2;
- tran_low_t* coeff;
+ tran_low_t *coeff;
int coeff_ctx;
k += (idy * 2 + idx);
coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]);
@@ -1661,19 +1579,17 @@
thisdistortion += vp9_highbd_block_error_dispatch(
coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
#else
- thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
- 16, &ssz);
+ thisdistortion +=
+ vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
#endif // CONFIG_VP9_HIGHBITDEPTH
thissse += ssz;
- thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 1 : 0;
rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
rd = VPXMIN(rd1, rd2);
- if (rd >= best_yrd)
- return INT64_MAX;
+ if (rd >= best_yrd) return INT64_MAX;
}
}
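The early-out above compares two rate-distortion prices: rd1 codes the residual (rate plus distortion), rd2 skips it (zero rate, full SSE), and the segment is abandoned once even the cheaper of the two exceeds best_yrd. A minimal sketch of the cost combination, assuming the classic RDCOST form from vp9/encoder/vp9_rd.h (rate scaled by rdmult with a rounded >> 8, distortion scaled up by the rddiv shift); the exact rounding constants are an assumption here:

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror RDCOST(RM, DM, R, D) in vp9/encoder/vp9_rd.h. */
static int64_t rd_cost(int rdmult, int rddiv, int rate, int64_t dist) {
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}

int main(void) {
  const int rdmult = 300, rddiv = 7; /* illustrative lambda */
  const int64_t rd1 = rd_cost(rdmult, rddiv, 100, 50); /* code the residual */
  const int64_t rd2 = rd_cost(rdmult, rddiv, 0, 400);  /* skip: pure SSE */
  printf("rd1=%lld rd2=%lld -> keep %s\n", (long long)rd1, (long long)rd2,
         rd1 < rd2 ? "coded residual" : "skip");
  return 0;
}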
@@ -1711,10 +1627,8 @@
} BEST_SEG_INFO;
static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
- return (mv->row >> 3) < x->mv_row_min ||
- (mv->row >> 3) > x->mv_row_max ||
- (mv->col >> 3) < x->mv_col_min ||
- (mv->col >> 3) > x->mv_col_max;
+ return (mv->row >> 3) < x->mv_row_min || (mv->row >> 3) > x->mv_row_max ||
+ (mv->col >> 3) < x->mv_col_min || (mv->col >> 3) > x->mv_col_max;
}
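mv_check_bounds compares in full-pel units: VP9 stores motion vectors in eighth-pel (Q3) precision, so each component is shifted right by 3 before the test against the search-range limits. A self-contained version of the same predicate:

#include <stdio.h>

typedef struct { int row, col; } MV; /* eighth-pel units, as in VP9 */

static int mv_out_of_bounds(const MV *mv, int row_min, int row_max,
                            int col_min, int col_max) {
  /* >> 3 converts from eighth-pel to full-pel before the bounds test. */
  return (mv->row >> 3) < row_min || (mv->row >> 3) > row_max ||
         (mv->col >> 3) < col_min || (mv->col >> 3) > col_max;
}

int main(void) {
  const MV mv = { 72, -17 }; /* 9 pels down, ~2 pels left */
  /* Row 72 >> 3 == 9 exceeds row_max == 8, so this prints 1. */
  printf("out of bounds: %d\n", mv_out_of_bounds(&mv, -8, 8, -8, 8));
  return 0;
}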
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
@@ -1722,14 +1636,15 @@
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
- p->src.buf = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i,
- p->src.stride)];
+ p->src.buf =
+ &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
- pd->pre[0].buf = &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i,
- pd->pre[0].stride)];
+ pd->pre[0].buf =
+ &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
if (has_second_ref(mi))
- pd->pre[1].buf = &pd->pre[1].buf[vp9_raster_block_offset(BLOCK_8X8, i,
- pd->pre[1].stride)];
+ pd->pre[1].buf =
+ &pd->pre[1]
+ .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -1737,8 +1652,7 @@
MODE_INFO *mi = x->e_mbd.mi[0];
x->plane[0].src = orig_src;
x->e_mbd.plane[0].pre[0] = orig_pre[0];
- if (has_second_ref(mi))
- x->e_mbd.plane[0].pre[1] = orig_pre[1];
+ if (has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
}
static INLINE int mv_has_subpel(const MV *mv) {
@@ -1747,10 +1661,11 @@
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive, then clean up or remove
-static int check_best_zero_mv(
- const VP9_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
- int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
- const MV_REFERENCE_FRAME ref_frames[2]) {
+static int check_best_zero_mv(const VP9_COMP *cpi,
+ const uint8_t mode_context[MAX_REF_FRAMES],
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+ int this_mode,
+ const MV_REFERENCE_FRAME ref_frames[2]) {
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
(ref_frames[1] == NONE ||
@@ -1782,10 +1697,8 @@
return 1;
}
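check_best_zero_mv prunes redundant zero-motion candidates: when NEARESTMV, NEARMV and ZEROMV would all predict from (0,0), only the mode that is cheapest to signal is worth evaluating. A toy of that pruning decision, with made-up signalling costs standing in for cost_mv_ref():

#include <stdio.h>

enum { ZEROMV, NEARESTMV, NEARMV, MODE_COUNT };

/* Stand-in for cost_mv_ref(): illustrative signalling cost per mode. */
static const int mode_cost[MODE_COUNT] = { 300, 450, 600 };

/* Returns 1 if this_mode is worth evaluating, 0 if an equally-predicting
 * zero-mv mode is strictly cheaper to signal. */
static int keep_zero_mv_mode(int this_mode) {
  int m;
  for (m = 0; m < MODE_COUNT; ++m)
    if (mode_cost[m] < mode_cost[this_mode]) return 0;
  return 1;
}

int main(void) {
  printf("evaluate ZEROMV: %d\n", keep_zero_mv_mode(ZEROMV)); /* 1 */
  printf("evaluate NEARMV: %d\n", keep_zero_mv_mode(NEARMV)); /* 0 */
  return 0;
}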
-static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int_mv *frame_mv,
- int mi_row, int mi_col,
+static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ int_mv *frame_mv, int mi_row, int mi_col,
int_mv single_newmv[MAX_REF_FRAMES],
int *rate_mv) {
const VP9_COMMON *const cm = &cpi->common;
@@ -1793,8 +1706,8 @@
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
- const int refs[2] = {mi->ref_frame[0],
- mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]};
+ const int refs[2] = { mi->ref_frame[0],
+ mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] };
int_mv ref_mv[2];
int ite, ref;
const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
@@ -1802,13 +1715,13 @@
// Do joint motion search in compound mode to get a more accurate mv.
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
- uint32_t last_besterr[2] = {UINT_MAX, UINT_MAX};
+ uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX };
const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]),
vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1])
};
- // Prediction buffer from second frame.
+// Prediction buffer from second frame.
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
uint8_t *second_pred;
@@ -1833,15 +1746,14 @@
frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
}
- // Since we have scaled the reference frames to match the size of the current
- // frame we must use a unit scaling factor during mode selection.
+// Since we have scaled the reference frames to match the size of the current
+// frame we must use a unit scaling factor during mode selection.
#if CONFIG_VP9_HIGHBITDEPTH
- vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
- cm->width, cm->height,
- cm->use_highbitdepth);
+ vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height, cm->use_highbitdepth);
#else
- vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
- cm->width, cm->height);
+ vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height);
#endif // CONFIG_VP9_HIGHBITDEPTH
// Allow joint search multiple times iteratively for each reference frame
@@ -1865,41 +1777,30 @@
ref_yv12[0] = xd->plane[0].pre[0];
ref_yv12[1] = xd->plane[0].pre[1];
- // Get the prediction block from the 'other' reference frame.
+// Get the prediction block from the 'other' reference frame.
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
- vp9_highbd_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE,
- xd->bd);
+ vp9_highbd_build_inter_predictor(
+ ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
+ &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
} else {
second_pred = (uint8_t *)second_pred_alloc_16;
- vp9_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
+ vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv,
+ &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
mi_col * MI_SIZE, mi_row * MI_SIZE);
}
#else
- vp9_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
+ vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+ pw, ph, 0, kernel, MV_PRECISION_Q3,
mi_col * MI_SIZE, mi_row * MI_SIZE);
#endif // CONFIG_VP9_HIGHBITDEPTH
// Do compound motion search on the current reference frame.
- if (id)
- xd->plane[0].pre[0] = ref_yv12[id];
+ if (id) xd->plane[0].pre[0] = ref_yv12[id];
vp9_set_mv_search_range(x, &ref_mv[id].as_mv);
// Use the mv result from the single mode as mv predictor.
@@ -1909,10 +1810,9 @@
tmp_mv.row >>= 3;
// Small-range full-pixel motion search.
- bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
- search_range,
- &cpi->fn_ptr[bsize],
- &ref_mv[id].as_mv, second_pred);
+ bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
+ &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
+ second_pred);
if (bestsme < UINT_MAX)
bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
@@ -1926,21 +1826,14 @@
uint32_t dis; /* TODO: use dis in distortion calculation later. */
uint32_t sse;
bestsme = cpi->find_fractional_mv_step(
- x, &tmp_mv,
- &ref_mv[id].as_mv,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- 0, cpi->sf.mv.subpel_iters_per_step,
- NULL,
- x->nmvjointcost, x->mvcost,
- &dis, &sse, second_pred,
- pw, ph);
+ x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[bsize], 0,
+ cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
+ &dis, &sse, second_pred, pw, ph);
}
// Restore the pointer to the first (possibly scaled) prediction buffer.
- if (id)
- xd->plane[0].pre[0] = ref_yv12[0];
+ if (id) xd->plane[0].pre[0] = ref_yv12[0];
if (bestsme < last_besterr[id]) {
frame_mv[refs[id]].as_mv = tmp_mv;
@@ -1966,17 +1859,12 @@
}
}
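joint_motion_search is coordinate descent over the two motion vectors: each iteration rebuilds the "other" reference's prediction, refines the current vector against the averaged compound predictor with a small-range search, and keeps the result only if the error improves. A 1-D toy of the same alternation (the quadratic err() stands in for the real SAD/variance, and the real loop likewise caps the iteration count):

#include <limits.h>
#include <stdio.h>

/* Toy compound-prediction error, jointly minimised at mv = (3, -2). */
static int err(int mv0, int mv1) {
  return (mv0 + mv1 - 1) * (mv0 + mv1 - 1) +
         (mv0 - mv1 - 5) * (mv0 - mv1 - 5);
}

/* Small-range search around |start| with the other vector held fixed,
 * in the spirit of the 8-point refining search above. */
static int refine(int id, int other, int start, int range) {
  int best = start, best_err = INT_MAX, d;
  for (d = -range; d <= range; ++d) {
    const int cand = start + d;
    const int e = id == 0 ? err(cand, other) : err(other, cand);
    if (e < best_err) { best_err = e; best = cand; }
  }
  return best;
}

int main(void) {
  int mv[2] = { 0, 0 }, ite;
  for (ite = 0; ite < 4; ++ite) { /* alternate references each pass */
    const int id = ite & 1;
    mv[id] = refine(id, mv[!id], mv[id], 3);
    printf("ite %d: mv=(%d,%d) err=%d\n", ite, mv[0], mv[1],
           err(mv[0], mv[1]));
  }
  return 0;
}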
-static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv,
- int_mv *second_best_ref_mv,
- int64_t best_rd, int *returntotrate,
- int *returnyrate,
- int64_t *returndistortion,
- int *skippable, int64_t *psse,
- int mvthresh,
- int_mv seg_mvs[4][MAX_REF_FRAMES],
- BEST_SEG_INFO *bsi_buf, int filter_idx,
- int mi_row, int mi_col) {
+static int64_t rd_pick_best_sub8x8_mode(
+ VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
+ int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
+ int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
+ int filter_idx, int mi_row, int mi_col) {
int i;
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
MACROBLOCKD *xd = &x->e_mbd;
@@ -2010,8 +1898,7 @@
bsi->mvp.as_int = best_ref_mv->as_int;
bsi->mvthresh = mvthresh;
- for (i = 0; i < 4; i++)
- bsi->modes[i] = ZEROMV;
+ for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
memcpy(t_above, pd->above_context, sizeof(t_above));
memcpy(t_left, pd->left_context, sizeof(t_left));
@@ -2037,10 +1924,9 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
frame_mv[ZEROMV][frame].as_int = 0;
- vp9_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
- &frame_mv[NEARESTMV][frame],
- &frame_mv[NEARMV][frame],
- mbmi_ext->mode_context);
+ vp9_append_sub8x8_mvs_for_idx(
+ cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
+ &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
}
// search for the best motion vector on this segment
@@ -2050,8 +1936,7 @@
mode_idx = INTER_OFFSET(this_mode);
bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
- if (!(inter_mode_mask & (1 << this_mode)))
- continue;
+ if (!(inter_mode_mask & (1 << this_mode))) continue;
if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
this_mode, mi->ref_frame))
@@ -2076,15 +1961,13 @@
/* Is the best so far sufficiently good that we can't justify doing
* a new motion search. */
- if (best_rd < label_mv_thresh)
- break;
+ if (best_rd < label_mv_thresh) break;
if (cpi->oxcf.mode != BEST) {
// use previous block's result as next block's MV predictor.
if (i > 0) {
bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
- if (i == 2)
- bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+ if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
}
}
if (i == 0)
@@ -2097,8 +1980,8 @@
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
- step_param = (vp9_init_search_range(max_mv) +
- cpi->mv_step_param) / 2;
+ step_param =
+ (vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -2120,24 +2003,16 @@
bestsme = vp9_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
sf->mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
- &bsi->ref_mv[0]->as_mv, new_mv,
- INT_MAX, 1);
+ &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
if (bestsme < UINT_MAX) {
uint32_t distortion;
cpi->find_fractional_mv_step(
- x,
- new_mv,
- &bsi->ref_mv[0]->as_mv,
- cm->allow_high_precision_mv,
- x->errorperbit, &cpi->fn_ptr[bsize],
- sf->mv.subpel_force_stop,
- sf->mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &distortion,
- &x->pred_sse[mi->ref_frame[0]],
- NULL, 0, 0);
+ x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
+ sf->mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost, &distortion,
+ &x->pred_sse[mi->ref_frame[0]], NULL, 0, 0);
// save motion search result for use in compound prediction
seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
@@ -2162,9 +2037,8 @@
mi_buf_shift(x, i);
if (sf->comp_inter_joint_search_thresh <= bsize) {
int rate_mv;
- joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
- mi_row, mi_col, seg_mvs[i],
- &rate_mv);
+ joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
+ mi_col, seg_mvs[i], &rate_mv);
seg_mvs[i][mi->ref_frame[0]].as_int =
frame_mv[this_mode][mi->ref_frame[0]].as_int;
seg_mvs[i][mi->ref_frame[1]].as_int =
@@ -2174,10 +2048,9 @@
mi_buf_restore(x, orig_src, orig_pre);
}
- bsi->rdstat[i][mode_idx].brate =
- set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
- frame_mv, seg_mvs[i], bsi->ref_mv,
- x->nmvjointcost, x->mvcost);
+ bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
+ cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
+ bsi->ref_mv, x->nmvjointcost, x->mvcost);
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
bsi->rdstat[i][mode_idx].mvs[ref].as_int =
@@ -2192,8 +2065,7 @@
// Trap vectors that reach beyond the UMV borders
if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
- (has_second_rf &&
- mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
+ (has_second_rf && mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
continue;
if (filter_idx > 0) {
@@ -2204,7 +2076,7 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
have_ref &= mode_mv[this_mode][ref].as_int ==
- ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
if (filter_idx > 1 && !subpelmv && !have_ref) {
@@ -2212,7 +2084,7 @@
have_ref = 1;
for (ref = 0; ref < 1 + has_second_rf; ++ref)
have_ref &= mode_mv[this_mode][ref].as_int ==
- ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
if (!subpelmv && have_ref &&
@@ -2234,18 +2106,14 @@
}
}
- bsi->rdstat[i][mode_idx].brdcost =
- encode_inter_mb_segment(cpi, x,
- bsi->segment_rd - this_segment_rd, i,
- &bsi->rdstat[i][mode_idx].byrate,
- &bsi->rdstat[i][mode_idx].bdist,
- &bsi->rdstat[i][mode_idx].bsse,
- bsi->rdstat[i][mode_idx].ta,
- bsi->rdstat[i][mode_idx].tl,
- mi_row, mi_col);
+ bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
+ cpi, x, bsi->segment_rd - this_segment_rd, i,
+ &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
+ &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
+ bsi->rdstat[i][mode_idx].tl, mi_row, mi_col);
if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
- bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
- bsi->rdstat[i][mode_idx].brate, 0);
+ bsi->rdstat[i][mode_idx].brdcost +=
+ RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
if (num_4x4_blocks_wide > 1)
@@ -2301,11 +2169,9 @@
bsi->sse = block_sse;
// update the coding decisions
- for (k = 0; k < 4; ++k)
- bsi->modes[k] = mi->bmi[k].as_mode;
+ for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
- if (bsi->segment_rd > best_rd)
- return INT64_MAX;
+ if (bsi->segment_rd > best_rd) return INT64_MAX;
/* set it to the best */
for (i = 0; i < 4; i++) {
mode_idx = INTER_OFFSET(bsi->modes[i]);
@@ -2330,16 +2196,15 @@
}
static void estimate_ref_frame_costs(const VP9_COMMON *cm,
- const MACROBLOCKD *xd,
- int segment_id,
+ const MACROBLOCKD *xd, int segment_id,
unsigned int *ref_costs_single,
unsigned int *ref_costs_comp,
vpx_prob *comp_mode_p) {
- int seg_ref_active = segfeature_active(&cm->seg, segment_id,
- SEG_LVL_REF_FRAME);
+ int seg_ref_active =
+ segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
if (seg_ref_active) {
memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
- memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+ memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
*comp_mode_p = 128;
} else {
vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
@@ -2364,13 +2229,13 @@
ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
ref_costs_single[ALTREF_FRAME] = base_cost;
- ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
+ ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
} else {
- ref_costs_single[LAST_FRAME] = 512;
+ ref_costs_single[LAST_FRAME] = 512;
ref_costs_single[GOLDEN_FRAME] = 512;
ref_costs_single[ALTREF_FRAME] = 512;
}
@@ -2381,20 +2246,19 @@
if (cm->reference_mode == REFERENCE_MODE_SELECT)
base_cost += vp9_cost_bit(comp_inter_p, 1);
- ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
} else {
- ref_costs_comp[LAST_FRAME] = 512;
+ ref_costs_comp[LAST_FRAME] = 512;
ref_costs_comp[GOLDEN_FRAME] = 512;
}
}
}
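estimate_ref_frame_costs prices each single reference as a walk down a two-level binary tree: the first bit separates LAST from {GOLDEN, ALTREF}, the second separates GOLDEN from ALTREF, and every bit is billed at its context probability. A sketch of the same accumulation, using -512*log2(p) as a stand-in for the vp9_prob_cost table lookup (the 512-per-bit scale is an assumption, consistent with the 512 fallback costs above):

#include <math.h>
#include <stdio.h>

/* Stand-in for vp9_cost_bit(): cost of coding |bit| when the probability
 * of a zero is p/256, in a scale where a fifty-fifty bit costs 512. */
static int cost_bit(int p, int bit) {
  const double prob = bit ? (256 - p) / 256.0 : p / 256.0;
  return (int)(-512.0 * log2(prob) + 0.5);
}

int main(void) {
  const int ref_single_p1 = 200; /* illustrative P(LAST) context */
  const int ref_single_p2 = 100; /* illustrative P(GOLDEN | not LAST) */
  const int base_cost = 0;
  const int last = base_cost + cost_bit(ref_single_p1, 0);
  const int golden =
      base_cost + cost_bit(ref_single_p1, 1) + cost_bit(ref_single_p2, 0);
  const int altref =
      base_cost + cost_bit(ref_single_p1, 1) + cost_bit(ref_single_p2, 1);
  printf("LAST=%d GOLDEN=%d ALTREF=%d\n", last, golden, altref);
  return 0; /* build with -lm */
}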
-static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
- int mode_index,
- int64_t comp_pred_diff[REFERENCE_MODES],
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
- int skippable) {
+static void store_coding_context(
+ MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
+ int64_t comp_pred_diff[REFERENCE_MODES],
+ int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
MACROBLOCKD *const xd = &x->e_mbd;
// Take a snapshot of the coding context so it can be
@@ -2405,7 +2269,7 @@
ctx->mic = *xd->mi[0];
ctx->mbmi_ext = *x->mbmi_ext;
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
- ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
+ ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
memcpy(ctx->best_filter_diff, best_filter_diff,
@@ -2414,8 +2278,7 @@
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
- BLOCK_SIZE block_size,
- int mi_row, int mi_col,
+ BLOCK_SIZE block_size, int mi_row, int mi_col,
int_mv frame_nearest_mv[MAX_REF_FRAMES],
int_mv frame_near_mv[MAX_REF_FRAMES],
struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
@@ -2446,18 +2309,17 @@
// in full and choose the best as the centre point for subsequent searches.
// The current implementation doesn't support scaling.
if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
- vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
- ref_frame, block_size);
+ vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+ block_size);
}
-static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col,
- int_mv *tmp_mv, int *rate_mv) {
+static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ int mi_row, int mi_col, int_mv *tmp_mv,
+ int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const VP9_COMMON *cm = &cpi->common;
MODE_INFO *mi = xd->mi[0];
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
int bestsme = INT_MAX;
int step_param;
int sadpb = x->sadperbit16;
@@ -2471,8 +2333,8 @@
int tmp_row_max = x->mv_row_max;
int cost_list[5];
- const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
- ref);
+ const YV12_BUFFER_CONFIG *scaled_ref_frame =
+ vp9_get_scaled_ref_frame(cpi, ref);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2484,8 +2346,7 @@
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
+ for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
@@ -2498,8 +2359,9 @@
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
- step_param = (vp9_init_search_range(x->max_mv_context[ref]) +
- cpi->mv_step_param) / 2;
+ step_param =
+ (vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -2516,8 +2378,7 @@
int bhl = b_height_log2_lookup[bsize];
int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
- if (tlevel < 5)
- step_param += 2;
+ if (tlevel < 5) step_param += 2;
// prev_mv_sad is not setup for dynamically scaled frames.
if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
@@ -2545,8 +2406,8 @@
mvp_full.row >>= 3;
bestsme = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
- cond_cost_list(cpi, cost_list),
- &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
+ cond_cost_list(cpi, cost_list), &ref_mv,
+ &tmp_mv->as_mv, INT_MAX, 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -2554,32 +2415,24 @@
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX) {
- uint32_t dis; /* TODO: use dis in distortion calculation later. */
- cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
- cm->allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &dis, &x->pred_sse[ref], NULL, 0, 0);
+ uint32_t dis; /* TODO: use dis in distortion calculation later. */
+ cpi->find_fractional_mv_step(
+ x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+ &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
}
- *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
- if (cpi->sf.adaptive_motion_search)
- x->pred_mv[ref] = tmp_mv->as_mv;
+ if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
}
-
-
static INLINE void restore_dst_buf(MACROBLOCKD *xd,
uint8_t *orig_dst[MAX_MB_PLANE],
int orig_dst_stride[MAX_MB_PLANE]) {
@@ -2597,13 +2450,11 @@
// However, once established, that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
-static int discount_newmv_test(const VP9_COMP *cpi,
- int this_mode,
+static int discount_newmv_test(const VP9_COMP *cpi, int this_mode,
int_mv this_mv,
int_mv (*mode_mv)[MAX_REF_FRAMES],
int ref_frame) {
- return (!cpi->rc.is_src_frame_alt_ref &&
- (this_mode == NEWMV) &&
+ return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
(this_mv.as_int != 0) &&
((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
(mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
@@ -2611,21 +2462,14 @@
(mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
}
-static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int *rate2, int64_t *distortion,
- int *skippable,
- int *rate_y, int *rate_uv,
- int *disable_skip,
- int_mv (*mode_mv)[MAX_REF_FRAMES],
- int mi_row, int mi_col,
- int_mv single_newmv[MAX_REF_FRAMES],
- INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
- int (*single_skippable)[MAX_REF_FRAMES],
- int64_t *psse,
- const int64_t ref_best_rd,
- int64_t *mask_filter,
- int64_t filter_cache[]) {
+static int64_t handle_inter_mode(
+ VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+ int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
+ int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
+ int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
+ INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
+ int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
+ const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
@@ -2635,7 +2479,7 @@
int_mv *frame_mv = mode_mv[this_mode];
int i;
int refs[2] = { mi->ref_frame[0],
- (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
+ (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
int_mv cur_mv[2];
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
@@ -2651,13 +2495,16 @@
int orig_dst_stride[MAX_MB_PLANE];
int rs = 0;
INTERP_FILTER best_filter = SWITCHABLE;
- uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
- int64_t bsse[MAX_MB_PLANE << 2] = {0};
+ uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
+ int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
int bsl = mi_width_log2_lookup[bsize];
- int pred_filter_search = cpi->sf.cb_pred_filter_search ?
- (((mi_row + mi_col) >> bsl) +
- get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
+ int pred_filter_search =
+ cpi->sf.cb_pred_filter_search
+ ? (((mi_row + mi_col) >> bsl) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1
+ : 0;
int skip_txfm_sb = 0;
int64_t skip_sse_sb = INT64_MAX;
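The pred_filter_search expression above implements a chessboard schedule: the parity of (mi_row + mi_col) at the current block-size scale, offset by a per-frame index, decides which blocks take the cb_pred_filter_search shortcut, so the full filter search is amortised across alternating blocks and frames. A toy of the parity pattern, assuming get_chessboard_index() simply alternates with the frame counter:

#include <stdio.h>

/* Assumption: the chessboard index alternates per frame. */
static int get_chessboard_index(int frame) { return frame & 1; }

int main(void) {
  const int bsl = 1; /* e.g. mi_width_log2_lookup[BLOCK_16X16] */
  const int frame = 7;
  int mi_row, mi_col;
  for (mi_row = 0; mi_row < 4; mi_row += 2) {
    for (mi_col = 0; mi_col < 8; mi_col += 2)
      printf("%d ", (((mi_row + mi_col) >> bsl) +
                     get_chessboard_index(frame)) & 0x1);
    printf("\n"); /* prints an alternating 1 0 1 0 / 0 1 0 1 board */
  }
  return 0;
}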
@@ -2678,8 +2525,7 @@
if (xd->left_mi && is_inter_block(xd->left_mi))
lf = xd->left_mi->interp_filter;
- if ((this_mode != NEWMV) || (af == lf))
- best_filter = af;
+ if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
}
if (is_comp_pred) {
@@ -2702,12 +2548,12 @@
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
- joint_motion_search(cpi, x, bsize, frame_mv,
- mi_row, mi_col, single_newmv, &rate_mv);
+ joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
+ single_newmv, &rate_mv);
} else {
- rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
&x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
@@ -2715,13 +2561,11 @@
*rate2 += rate_mv;
} else {
int_mv tmp_mv;
- single_motion_search(cpi, x, bsize, mi_row, mi_col,
- &tmp_mv, &rate_mv);
- if (tmp_mv.as_int == INVALID_MV)
- return INT64_MAX;
+ single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
+ if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
- frame_mv[refs[0]].as_int =
- xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
+ tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
// Estimate the rate implications of a new mv but discount this
@@ -2739,11 +2583,9 @@
for (i = 0; i < is_comp_pred + 1; ++i) {
cur_mv[i] = frame_mv[refs[i]];
// Clip "next_nearest" so that it does not extend too far out of the image
- if (this_mode != NEWMV)
- clamp_mv2(&cur_mv[i].as_mv, xd);
+ if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
- if (mv_check_bounds(x, &cur_mv[i].as_mv))
- return INT64_MAX;
+ if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
mi->mv[i].as_int = cur_mv[i].as_int;
}
@@ -2763,12 +2605,11 @@
//
// Under some circumstances we discount the cost of new mv mode to encourage
// initiation of a motion field.
- if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
- mode_mv, refs[0])) {
- *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
- mbmi_ext->mode_context[refs[0]]),
- cost_mv_ref(cpi, NEARESTMV,
- mbmi_ext->mode_context[refs[0]]));
+ if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
+ refs[0])) {
+ *rate2 +=
+ VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
+ cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
} else {
*rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
}
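When discount_newmv_test fires, the effect is just to bill NEWMV at no more than the cheapest zero-motion alternative, per the VPXMIN above. A worked example with illustrative mode costs:

#include <stdio.h>

#define VPXMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  const int cost_newmv = 900, cost_nearestmv = 400; /* illustrative */
  const int discount = 1; /* result of discount_newmv_test() */
  int rate2 = 0;
  if (discount)
    rate2 += VPXMIN(cost_newmv, cost_nearestmv); /* pay the cheaper bill */
  else
    rate2 += cost_newmv;
  printf("rate2 = %d\n", rate2); /* 400: the NEWMV cost is subsidised */
  return 0;
}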
@@ -2780,13 +2621,11 @@
pred_exists = 0;
// Are all MVs integer pel for Y and UV
intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv);
- if (is_comp_pred)
- intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);
+ if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);
// Search for best switchable filter by checking the variance of
// pred error irrespective of whether the filter will be used
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
if (cm->interp_filter != BILINEAR) {
if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
@@ -2811,8 +2650,7 @@
filter_cache[i] = rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
*mask_filter = VPXMAX(*mask_filter, rd);
} else {
int rate_sum = 0;
@@ -2824,8 +2662,7 @@
continue;
}
- if ((cm->interp_filter == SWITCHABLE &&
- (!i || best_needs_copy)) ||
+ if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
(cm->interp_filter != SWITCHABLE &&
(cm->interp_filter == mi->interp_filter ||
(i == 0 && intpel_mv)))) {
@@ -2837,15 +2674,14 @@
}
}
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &tmp_skip_sb, &tmp_skip_sse);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
+ &tmp_skip_sse);
rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
filter_cache[i] = rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
*mask_filter = VPXMAX(*mask_filter, rd);
if (i == 0 && intpel_mv) {
@@ -2885,8 +2721,8 @@
}
}
// Set the appropriate filter
- mi->interp_filter = cm->interp_filter != SWITCHABLE ?
- cm->interp_filter : best_filter;
+ mi->interp_filter =
+ cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
if (pred_exists) {
@@ -2905,15 +2741,14 @@
// switchable list (ex. bilinear) is indicated at the frame level, or
// skip condition holds.
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
- &skip_txfm_sb, &skip_sse_sb);
+ model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
+ &skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
memcpy(bsse, x->bsse, sizeof(bsse));
}
- if (!is_comp_pred)
- single_filter[this_mode][refs[0]] = mi->interp_filter;
+ if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter;
if (cpi->sf.adaptive_mode_search)
if (is_comp_pred)
@@ -2930,8 +2765,7 @@
}
}
- if (cm->interp_filter == SWITCHABLE)
- *rate2 += rs;
+ if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
memcpy(x->bsse, bsse, sizeof(bsse));
@@ -2943,8 +2777,8 @@
// Y cost and distortion
vp9_subtract_plane(x, bsize, 0);
- super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
- bsize, ref_best_rd);
+ super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
+ ref_best_rd);
if (*rate_y == INT_MAX) {
*rate2 = INT_MAX;
@@ -2981,16 +2815,15 @@
*distortion = skip_sse_sb;
}
- if (!is_comp_pred)
- single_skippable[this_mode][refs[0]] = *skippable;
+ if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return 0; // The rate-distortion cost will be re-calculated by caller.
}
-void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
- RD_COST *rd_cost, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
+void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblockd_plane *const pd = xd->plane;
@@ -3007,9 +2840,8 @@
xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
if (bsize >= BLOCK_8X8) {
- if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
- &dist_y, &y_skip, bsize,
- best_rd) >= best_rd) {
+ if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
+ &y_skip, bsize, best_rd) >= best_rd) {
rd_cost->rate = INT_MAX;
return;
}
@@ -3021,12 +2853,10 @@
return;
}
}
- max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->tx_size, bsize,
- pd[1].subsampling_x,
- pd[1].subsampling_y);
- rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
- &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize),
- max_uv_tx_size);
+ max_uv_tx_size = get_uv_tx_size_impl(
+ xd->mi[0]->tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
+ &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
if (y_skip && uv_skip) {
rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
@@ -3033,8 +2863,8 @@
vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
rd_cost->dist = dist_y + dist_uv;
} else {
- rd_cost->rate = rate_y + rate_uv +
- vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
+ rd_cost->rate =
+ rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
rd_cost->dist = dist_y + dist_uv;
}
@@ -3048,10 +2878,8 @@
#define LOW_VAR_THRESH 16
#define VLOW_ADJ_MAX 25
#define VHIGH_ADJ_MAX 8
-static void rd_variance_adjustment(VP9_COMP *cpi,
- MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int64_t *this_rd,
+static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int64_t *this_rd,
MV_REFERENCE_FRAME ref_frame,
unsigned int source_variance) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3060,30 +2888,28 @@
int64_t var_error = 0;
int64_t var_factor = 0;
- if (*this_rd == INT64_MAX)
- return;
+ if (*this_rd == INT64_MAX) return;
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- recon_variance =
- vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
+ recon_variance = vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
+ bsize, xd->bd);
} else {
recon_variance =
- vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
- recon_variance =
- vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ recon_variance = vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
#endif // CONFIG_VP9_HIGHBITDEPTH
if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
absvar_diff = (source_variance > recon_variance)
- ? (source_variance - recon_variance)
- : (recon_variance - source_variance);
+ ? (source_variance - recon_variance)
+ : (recon_variance - source_variance);
var_error = ((int64_t)200 * source_variance * recon_variance) /
- (((int64_t)source_variance * source_variance) +
- ((int64_t)recon_variance * recon_variance));
+ (((int64_t)source_variance * source_variance) +
+ ((int64_t)recon_variance * recon_variance));
var_error = 100 - var_error;
}
@@ -3093,8 +2919,8 @@
if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
(source_variance > recon_variance)) {
var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
- // A second possible case of interest is where the source variance
- // is very low and we wish to discourage false texture or motion trails.
+ // A second possible case of interest is where the source variance
+ // is very low and we wish to discourage false texture or motion trails.
} else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
(recon_variance > source_variance)) {
var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
@@ -3102,12 +2928,11 @@
*this_rd += (*this_rd * var_factor) / 100;
}
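Numerically, rd_variance_adjustment computes a 0-100 disagreement score between source and reconstruction variance, clamps it, and inflates this_rd by that percentage. A worked run of the same arithmetic for the intra "lost detail" branch (constants as #defined above the function):

#include <stdint.h>
#include <stdio.h>

#define LOW_VAR_THRESH 16
#define VLOW_ADJ_MAX 25
#define VPXMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  const int64_t source_variance = 100, recon_variance = 50;
  int64_t this_rd = 10000, var_error = 0, var_factor = 0;
  const int64_t absvar_diff = source_variance > recon_variance
                                  ? source_variance - recon_variance
                                  : recon_variance - source_variance;
  if (source_variance + recon_variance > LOW_VAR_THRESH) {
    var_error = (200 * source_variance * recon_variance) /
                (source_variance * source_variance +
                 recon_variance * recon_variance);
    var_error = 100 - var_error; /* 0 when the variances agree exactly */
  }
  var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
  this_rd += (this_rd * var_factor) / 100;
  /* Prints var_error=20 var_factor=20 this_rd=12000: a 20% penalty. */
  printf("var_error=%lld var_factor=%lld this_rd=%lld\n",
         (long long)var_error, (long long)var_factor, (long long)this_rd);
  return 0;
}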
-
// Do we have an internal image edge (e.g. formatting bars)?
int vp9_internal_image_edge(VP9_COMP *cpi) {
return (cpi->oxcf.pass == 2) &&
- ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
- (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
+ ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
+ (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
}
// Checks to see if a super block is on a horizontal image edge.
@@ -3167,19 +2992,15 @@
// Checks to see if a super block is at the edge of the active image.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp9_active_edge_sb(VP9_COMP *cpi,
- int mi_row, int mi_col) {
+int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) {
return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
}
-void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- int mi_row, int mi_col,
+void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, int mi_row, int mi_col,
RD_COST *rd_cost, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) {
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
RD_OPT *const rd_opt = &cpi->rd;
@@ -3234,20 +3055,16 @@
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
best_filter_rd[i] = INT64_MAX;
- for (i = 0; i < TX_SIZES; i++)
- rate_uv_intra[i] = INT_MAX;
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- x->pred_sse[i] = INT_MAX;
+ for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
for (i = 0; i < MB_MODE_COUNT; ++i) {
for (k = 0; k < MAX_REF_FRAMES; ++k) {
single_inter_filter[i][k] = SWITCHABLE;
@@ -3340,12 +3157,11 @@
mode_skip_mask[INTRA_FRAME] |=
~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
- for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
- mode_threshold[i] = 0;
+ for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
- midx = sf->schedule_mode_search ? mode_skip_start : 0;
+ midx = sf->schedule_mode_search ? mode_skip_start : 0;
while (midx > 4) {
uint8_t end_pos = 0;
for (i = 5; i < midx; ++i) {
@@ -3380,8 +3196,7 @@
// skip mask to look at a subset of the remaining modes.
if (midx == mode_skip_start && best_mode_index >= 0) {
switch (best_mbmode.ref_frame[0]) {
- case INTRA_FRAME:
- break;
+ case INTRA_FRAME: break;
case LAST_FRAME:
ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3390,13 +3205,9 @@
ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
break;
- case ALTREF_FRAME:
- ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
- break;
+ case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
case NONE:
- case MAX_REF_FRAMES:
- assert(0 && "Invalid Reference frame");
- break;
+ case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
@@ -3404,24 +3215,24 @@
(ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
continue;
- if (mode_skip_mask[ref_frame] & (1 << this_mode))
- continue;
+ if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
// Test best rd so far against threshold for trying this mode.
if (best_mode_skippable && sf->schedule_mode_search)
mode_threshold[mode_index] <<= 1;
- if (best_rd < mode_threshold[mode_index])
- continue;
+ if (best_rd < mode_threshold[mode_index]) continue;
if (sf->motion_field_mode_search) {
- const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
- tile_info->mi_col_end - mi_col);
+ const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
+ tile_info->mi_col_end - mi_col);
const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
tile_info->mi_row_end - mi_row);
const int bsl = mi_width_log2_lookup[bsize];
- int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl)
- + get_chessboard_index(cm->current_video_frame)) & 0x1;
+ int cb_partition_search_ctrl =
+ (((mi_row + mi_col) >> bsl) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1;
MODE_INFO *ref_mi;
int const_motion = 1;
int skip_ref_frame = !cb_partition_search_ctrl;
@@ -3441,10 +3252,8 @@
}
if ((mi_col - 1) >= tile_info->mi_col_start) {
- if (ref_mv.as_int == INVALID_MV)
- ref_mv = xd->mi[-1]->mv[0];
- if (rf == NONE)
- rf = xd->mi[-1]->ref_frame[0];
+ if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
+ if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
for (i = 0; i < mi_height; ++i) {
ref_mi = xd->mi[i * xd->mi_stride - 1];
const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
@@ -3455,27 +3264,22 @@
if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
if (rf > INTRA_FRAME)
- if (ref_frame != rf)
- continue;
+ if (ref_frame != rf) continue;
if (const_motion)
- if (this_mode == NEARMV || this_mode == ZEROMV)
- continue;
+ if (this_mode == NEARMV || this_mode == ZEROMV) continue;
}
comp_pred = second_ref_frame > INTRA_FRAME;
if (comp_pred) {
- if (!cpi->allow_comp_inter_inter)
- continue;
+ if (!cpi->allow_comp_inter_inter) continue;
// Skip compound inter modes if ARF is not available.
- if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
- continue;
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
// Do not allow compound prediction if the segment level reference frame
// feature is in use, as in this case there can only be one reference.
- if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- continue;
+ if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -3504,19 +3308,17 @@
// one of the neighboring directional modes
if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
(this_mode >= D45_PRED && this_mode <= TM_PRED)) {
- if (best_mode_index >= 0 &&
- best_mbmode.ref_frame[0] > INTRA_FRAME)
+ if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
continue;
}
if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(this_mode, best_intra_mode))
- continue;
+ if (conditional_skipintra(this_mode, best_intra_mode)) continue;
}
}
} else {
- const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
- if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
- this_mode, ref_frames))
+ const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
+ if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
+ ref_frames))
continue;
}
@@ -3526,8 +3328,8 @@
mi->ref_frame[1] = second_ref_frame;
// Evaluate all sub-pel filters irrespective of whether we can use
// them for this frame.
- mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
- : cm->interp_filter;
+ mi->interp_filter =
+ cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
mi->mv[0].as_int = mi->mv[1].as_int = 0;
x->skip = 0;
@@ -3536,8 +3338,7 @@
// Select prediction reference frames.
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
- if (comp_pred)
- xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+ if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
if (ref_frame == INTRA_FRAME) {
@@ -3544,17 +3345,16 @@
TX_SIZE uv_tx;
struct macroblockd_plane *const pd = &xd->plane[1];
memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
- super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
- NULL, bsize, best_rd);
- if (rate_y == INT_MAX)
- continue;
+ super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+ best_rd);
+ if (rate_y == INT_MAX) continue;
uv_tx = get_uv_tx_size_impl(mi->tx_size, bsize, pd->subsampling_x,
pd->subsampling_y);
if (rate_uv_intra[uv_tx] == INT_MAX) {
- choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
- &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
- &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
+ choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+ &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+ &skip_uv[uv_tx], &mode_uv[uv_tx]);
}
rate_uv = rate_uv_tokenonly[uv_tx];
@@ -3567,21 +3367,16 @@
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
} else {
- this_rd = handle_inter_mode(cpi, x, bsize,
- &rate2, &distortion2, &skippable,
- &rate_y, &rate_uv,
- &disable_skip, frame_mv,
- mi_row, mi_col,
- single_newmv, single_inter_filter,
- single_skippable, &total_sse, best_rd,
- &mask_filter, filter_cache);
- if (this_rd == INT64_MAX)
- continue;
+ this_rd = handle_inter_mode(
+ cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
+ &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
+ single_inter_filter, single_skippable, &total_sse, best_rd,
+ &mask_filter, filter_cache);
+ if (this_rd == INT64_MAX) continue;
compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
- if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += compmode_cost;
+ if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
}
// Estimate the reference frame signaling cost and add it
@@ -3604,8 +3399,8 @@
// Cost the skip mb case
rate2 += skip_cost1;
} else if (ref_frame != INTRA_FRAME && !xd->lossless) {
- if (RDCOST(x->rdmult, x->rddiv,
- rate_y + rate_uv + skip_cost0, distortion2) <
+ if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
+ distortion2) <
RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
// Add in the cost of the no skip flag.
rate2 += skip_cost0;
@@ -3629,11 +3424,11 @@
// Apply an adjustment to the rd value based on the similarity of the
// source variance and reconstructed variance.
- rd_variance_adjustment(cpi, x, bsize, &this_rd,
- ref_frame, x->source_variance);
+ rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
+ x->source_variance);
if (ref_frame == INTRA_FRAME) {
- // Keep record of best intra rd
+ // Keep record of best intra rd
if (this_rd < best_intra_rd) {
best_intra_rd = this_rd;
best_intra_mode = mi->mode;
@@ -3673,8 +3468,7 @@
best_skip2 = this_skip2;
best_mode_skippable = skippable;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size],
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
@@ -3694,8 +3488,7 @@
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
}
- if (ref_frame > INTRA_FRAME &&
- distortion2 * scale < qstep * qstep) {
+ if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
early_term = 1;
}
}
@@ -3729,8 +3522,9 @@
/* keep record of best filter type */
if (!mode_excluded && cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
+ int64_t ref =
+ filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+ : cm->interp_filter];
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
int64_t adj_rd;
@@ -3751,11 +3545,9 @@
}
}
- if (early_term)
- break;
+ if (early_term) break;
- if (x->skip && !comp_pred)
- break;
+ if (x->skip && !comp_pred) break;
}
// The inter modes' rate costs are not calculated precisely in some cases.
@@ -3763,20 +3555,23 @@
// ZEROMV. Here, checks are added for those cases, and the mode decisions
// are corrected.
if (best_mbmode.mode == NEWMV) {
- const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
- best_mbmode.ref_frame[1]};
+ const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
+ best_mbmode.ref_frame[1] };
int comp_pred_mode = refs[1] > INTRA_FRAME;
if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
- ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
- best_mbmode.mv[1].as_int) || !comp_pred_mode))
+ ((comp_pred_mode &&
+ frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+ !comp_pred_mode))
best_mbmode.mode = NEARESTMV;
else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
- ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
- best_mbmode.mv[1].as_int) || !comp_pred_mode))
+ ((comp_pred_mode &&
+ frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+ !comp_pred_mode))
best_mbmode.mode = NEARMV;
else if (best_mbmode.mv[0].as_int == 0 &&
- ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
+ ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
+ !comp_pred_mode))
best_mbmode.mode = ZEROMV;
}
@@ -3795,8 +3590,7 @@
uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
&rate_uv_tokenonly[uv_tx_size],
- &dist_uv[uv_tx_size],
- &skip_uv[uv_tx_size],
+ &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
uv_tx_size);
}
@@ -3841,8 +3635,7 @@
if (!x->skip && !x->select_tx_size) {
int has_high_freq_coeff = 0;
int plane;
- int max_plane = is_inter_block(xd->mi[0])
- ? MAX_MB_PLANE : 1;
+ int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1;
for (plane = 0; plane < max_plane; ++plane) {
x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
@@ -3862,10 +3655,8 @@
best_filter_diff, best_mode_skippable);
}
-void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- RD_COST *rd_cost,
+void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
@@ -3889,10 +3680,8 @@
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- x->pred_sse[i] = INT_MAX;
- for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
- x->pred_mv_sad[i] = INT_MAX;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
+ for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
rd_cost->rate = INT_MAX;
@@ -3956,18 +3745,13 @@
vp9_zero(best_pred_diff);
vp9_zero(best_filter_diff);
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
- store_coding_context(x, ctx, THR_ZEROMV,
- best_pred_diff, best_filter_diff, 0);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
+ store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
}
-void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- int mi_row, int mi_col,
- RD_COST *rd_cost,
- BLOCK_SIZE bsize,
+void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, int mi_row, int mi_col,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
VP9_COMMON *const cm = &cpi->common;
@@ -3999,7 +3783,7 @@
int skip_uv;
PREDICTION_MODE mode_uv = DC_PRED;
const int intra_cost_penalty = vp9_get_intra_cost_penalty(
- cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int_mv seg_mvs[4][MAX_REF_FRAMES];
b_mode_info best_bmodes[4];
int best_skip2 = 0;
@@ -4007,26 +3791,23 @@
int64_t mask_filter = 0;
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
int internal_active_edge =
- vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
+ vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
memset(x->zcoeff_blk[TX_4X4], 0, 4);
vp9_zero(best_mbmode);
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
for (i = 0; i < 4; i++) {
int j;
- for (j = 0; j < MAX_REF_FRAMES; j++)
- seg_mvs[i][j].as_int = INVALID_MV;
+ for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
}
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
best_filter_rd[i] = INT64_MAX;
rate_uv_intra = INT_MAX;
@@ -4036,8 +3817,7 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
- frame_mv[NEARESTMV], frame_mv[NEARMV],
- yv12_mb);
+ frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
} else {
ref_frame_skip_mask[0] |= (1 << ref_frame);
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -4069,8 +3849,7 @@
int ref_scaled = vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf);
if (second_ref_frame > INTRA_FRAME)
ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf);
- if (ref_scaled)
- continue;
+ if (ref_scaled) continue;
}
#endif
// Look at the reference frame of the best mode so far and set the
@@ -4078,8 +3857,7 @@
if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
if (ref_index == 3) {
switch (best_mbmode.ref_frame[0]) {
- case INTRA_FRAME:
- break;
+ case INTRA_FRAME: break;
case LAST_FRAME:
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -4092,9 +3870,7 @@
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
break;
case NONE:
- case MAX_REF_FRAMES:
- assert(0 && "Invalid Reference frame");
- break;
+ case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
}
@@ -4112,14 +3888,11 @@
comp_pred = second_ref_frame > INTRA_FRAME;
if (comp_pred) {
- if (!cpi->allow_comp_inter_inter)
- continue;
- if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
- continue;
+ if (!cpi->allow_comp_inter_inter) continue;
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
// Do not allow compound prediction if the segment level reference frame
// feature is in use, as in this case there can only be one reference.
- if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- continue;
+ if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -4136,9 +3909,9 @@
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
continue;
- // Disable this drop out case if the ref frame
- // segment level feature is enabled for this segment. This is to
- // prevent the possibility that we end up unable to pick any mode.
+ // Disable this drop out case if the ref frame
+ // segment level feature is enabled for this segment. This is to
+ // prevent the possibility that we end up unable to pick any mode.
} else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
// unless ARNR filtering is enabled in which case we want
@@ -4154,8 +3927,8 @@
mi->ref_frame[1] = second_ref_frame;
// Evaluate all sub-pel filters irrespective of whether we can use
// them for this frame.
- mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
- : cm->interp_filter;
+ mi->interp_filter =
+ cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
x->skip = 0;
set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
@@ -4162,14 +3935,13 @@
// Select prediction reference frames.
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
- if (comp_pred)
- xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+ if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
if (ref_frame == INTRA_FRAME) {
int rate;
- if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
- &distortion_y, best_rd) >= best_rd)
+ if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
+ best_rd) >= best_rd)
continue;
rate2 += rate;
rate2 += intra_cost_penalty;
@@ -4176,11 +3948,8 @@
distortion2 += distortion_y;
if (rate_uv_intra == INT_MAX) {
- choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
- &rate_uv_intra,
- &rate_uv_tokenonly,
- &dist_uv, &skip_uv,
- &mode_uv);
+ choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
+ &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
}
rate2 += rate_uv_intra;
rate_uv = rate_uv_tokenonly;
@@ -4196,8 +3965,8 @@
int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
int tmp_best_skippable = 0;
int switchable_filter_index;
- int_mv *second_ref = comp_pred ?
- &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
+ int_mv *second_ref =
+ comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
b_mode_info tmp_best_bmodes[16];
MODE_INFO tmp_best_mbmode;
BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
@@ -4204,12 +3973,14 @@
int pred_exists = 0;
int uv_skippable;
- YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
+ YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL };
int ref;
for (ref = 0; ref < 2; ++ref) {
- scaled_ref_frame[ref] = mi->ref_frame[ref] > INTRA_FRAME ?
- vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref]) : NULL;
+ scaled_ref_frame[ref] =
+ mi->ref_frame[ref] > INTRA_FRAME
+ ? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref])
+ : NULL;
if (scaled_ref_frame[ref]) {
int i;
@@ -4223,11 +3994,12 @@
}
}
- this_rd_thresh = (ref_frame == LAST_FRAME) ?
- rd_opt->threshes[segment_id][bsize][THR_LAST] :
- rd_opt->threshes[segment_id][bsize][THR_ALTR];
- this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
- rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
+ this_rd_thresh = (ref_frame == LAST_FRAME)
+ ? rd_opt->threshes[segment_id][bsize][THR_LAST]
+ : rd_opt->threshes[segment_id][bsize][THR_ALTR];
+ this_rd_thresh = (ref_frame == GOLDEN_FRAME)
+ ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
+ : this_rd_thresh;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
filter_cache[i] = INT64_MAX;
@@ -4239,8 +4011,9 @@
ctx->pred_interp_filter < SWITCHABLE) {
tmp_best_filter = ctx->pred_interp_filter;
} else if (sf->adaptive_pred_interp_filter == 2) {
- tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
- ctx->pred_interp_filter : 0;
+ tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
+ ? ctx->pred_interp_filter
+ : 0;
} else {
for (switchable_filter_index = 0;
switchable_filter_index < SWITCHABLE_FILTERS;
@@ -4249,24 +4022,19 @@
int64_t rs_rd;
MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
mi->interp_filter = switchable_filter_index;
- tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
- &mbmi_ext->ref_mvs[ref_frame][0],
- second_ref, best_yrd, &rate,
- &rate_y, &distortion,
- &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs,
- bsi, switchable_filter_index,
- mi_row, mi_col);
+ tmp_rd = rd_pick_best_sub8x8_mode(
+ cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+ &rate, &rate_y, &distortion, &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
+ mi_row, mi_col);
- if (tmp_rd == INT64_MAX)
- continue;
+ if (tmp_rd == INT64_MAX) continue;
rs = vp9_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
filter_cache[switchable_filter_index] = tmp_rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- tmp_rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
mask_filter = VPXMAX(mask_filter, tmp_rd);
@@ -4290,8 +4058,7 @@
x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
}
pred_exists = 1;
- if (switchable_filter_index == 0 &&
- sf->use_rd_breakout &&
+ if (switchable_filter_index == 0 && sf->use_rd_breakout &&
best_rd < INT64_MAX) {
if (tmp_best_rdu / 2 > best_rd) {
// skip searching the other filters if the first is
@@ -4306,22 +4073,18 @@
}
}
- if (tmp_best_rdu == INT64_MAX && pred_exists)
- continue;
+ if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
- mi->interp_filter = (cm->interp_filter == SWITCHABLE ?
- tmp_best_filter : cm->interp_filter);
+ mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter
+ : cm->interp_filter);
if (!pred_exists) {
// Handles the special case when a filter that is not in the
// switchable list (bilinear, 6-tap) is indicated at the frame level
- tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
- &x->mbmi_ext->ref_mvs[ref_frame][0],
- second_ref, best_yrd, &rate, &rate_y,
- &distortion, &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs, bsi, 0,
- mi_row, mi_col);
- if (tmp_rd == INT64_MAX)
- continue;
+ tmp_rd = rd_pick_best_sub8x8_mode(
+ cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+ &rate, &rate_y, &distortion, &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
+ if (tmp_rd == INT64_MAX) continue;
} else {
total_sse = tmp_best_sse;
rate = tmp_best_rate;
@@ -4329,8 +4092,7 @@
distortion = tmp_best_distortion;
skippable = tmp_best_skippable;
*mi = tmp_best_mbmode;
- for (i = 0; i < 4; i++)
- xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
+ for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
}
rate2 += rate;
@@ -4352,8 +4114,7 @@
if (tmp_best_rdu > 0) {
// If even the 'Y' rd value of split is higher than best so far
          // then don't bother looking at UV
- vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
- BLOCK_8X8);
+ vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
&uv_sse, BLOCK_8X8, tmp_best_rdu)) {
@@ -4383,8 +4144,7 @@
}
}
- if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += compmode_cost;
+ if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
@@ -4402,8 +4162,8 @@
// Skip is never coded at the segment level for sub8x8 blocks and instead
// always coded in the bitstream at the mode info level.
if (ref_frame != INTRA_FRAME && !xd->lossless) {
- if (RDCOST(x->rdmult, x->rddiv,
- rate_y + rate_uv + skip_cost0, distortion2) <
+ if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
+ distortion2) <
RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
// Add in the cost of the no skip flag.
rate2 += skip_cost0;
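
The comparison above is the per-block skip decision: code the residual (paying rate_y + rate_uv plus the no-skip flag) or signal skip and accept the full prediction error (total_sse). A minimal sketch of that choice, with rdcost() standing in for the RDCOST macro; the macro's exact fixed-point scaling via x->rdmult and x->rddiv lives in vp9/encoder/vp9_rd.h and is not reproduced here:

  /* Sketch only: lambda-weighted rate plus distortion. */
  static int64_t rdcost(int64_t lambda, int64_t rate, int64_t dist) {
    return lambda * rate + dist;
  }

  /* Returns 1 when coding the residual beats signaling skip, mirroring
     the branch above. */
  static int code_residual(int64_t lambda, int64_t rate_yuv, int64_t skip_cost0,
                           int64_t skip_cost1, int64_t dist, int64_t sse) {
    return rdcost(lambda, rate_yuv + skip_cost0, dist) <
           rdcost(lambda, skip_cost1, sse);
  }
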
@@ -4453,17 +4213,15 @@
rd_cost->dist = distortion2;
rd_cost->rdcost = this_rd;
best_rd = this_rd;
- best_yrd = best_rd -
- RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
+ best_yrd =
+ best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
best_mbmode = *mi;
best_skip2 = this_skip2;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
- for (i = 0; i < 4; i++)
- best_bmodes[i] = xd->mi[0]->bmi[i];
+ for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
// TODO(debargha): enhance this test with a better distortion prediction
// based on qp, activity mask and history
@@ -4481,8 +4239,7 @@
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
}
- if (ref_frame > INTRA_FRAME &&
- distortion2 * scale < qstep * qstep) {
+ if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
early_term = 1;
}
}
@@ -4516,8 +4273,9 @@
/* keep record of best filter type */
if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
+ int64_t ref =
+ filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+ : cm->interp_filter];
int64_t adj_rd;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
if (ref == INT64_MAX)
@@ -4536,11 +4294,9 @@
}
}
- if (early_term)
- break;
+ if (early_term) break;
- if (x->skip && !comp_pred)
- break;
+ if (x->skip && !comp_pred) break;
}
if (best_rd >= best_rd_so_far) {
@@ -4554,11 +4310,8 @@
// Do Intra UV best rd mode selection if best mode choice above was intra.
if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
*mi = best_mbmode;
- rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
- &rate_uv_tokenonly,
- &dist_uv,
- &skip_uv,
- BLOCK_8X8, TX_4X4);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
+ &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
}
}
@@ -4573,15 +4326,14 @@
(cm->interp_filter == best_mbmode.interp_filter) ||
!is_inter_block(&best_mbmode));
- vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- sf->adaptive_rd_thresh, bsize, best_ref_index);
+ vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, sf->adaptive_rd_thresh,
+ bsize, best_ref_index);
// macroblock modes
*mi = best_mbmode;
x->skip |= best_skip2;
if (!is_inter_block(&best_mbmode)) {
- for (i = 0; i < 4; i++)
- xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
+ for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
} else {
for (i = 0; i < 4; ++i)
memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
@@ -4610,6 +4362,6 @@
vp9_zero(best_filter_diff);
}
- store_coding_context(x, ctx, best_ref_index,
- best_pred_diff, best_filter_diff, 0);
+ store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
+ 0);
}
--- a/vp9/encoder/vp9_rdopt.h
+++ b/vp9/encoder/vp9_rdopt.h
@@ -31,19 +31,14 @@
void vp9_rd_pick_inter_mode_sb(struct VP9_COMP *cpi,
struct TileDataEnc *tile_data,
- struct macroblock *x,
- int mi_row, int mi_col,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
-void vp9_rd_pick_inter_mode_sb_seg_skip(struct VP9_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+void vp9_rd_pick_inter_mode_sb_seg_skip(
+ struct VP9_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far);
int vp9_internal_image_edge(struct VP9_COMP *cpi);
int vp9_active_h_edge(struct VP9_COMP *cpi, int mi_row, int mi_step);
@@ -52,10 +47,9 @@
void vp9_rd_pick_inter_mode_sub8x8(struct VP9_COMP *cpi,
struct TileDataEnc *tile_data,
- struct macroblock *x,
- int mi_row, int mi_col,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
#ifdef __cplusplus
--- a/vp9/encoder/vp9_resize.c
+++ b/vp9/encoder/vp9_resize.c
@@ -23,198 +23,118 @@
#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_resize.h"
-#define FILTER_BITS 7
+#define FILTER_BITS 7
-#define INTERP_TAPS 8
-#define SUBPEL_BITS 5
-#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
-#define INTERP_PRECISION_BITS 32
+#define INTERP_TAPS 8
+#define SUBPEL_BITS 5
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define INTERP_PRECISION_BITS 32
typedef int16_t interp_kernel[INTERP_TAPS];
// Filters for interpolation (0.5-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = {
- {-3, 0, 35, 64, 35, 0, -3, 0},
- {-3, -1, 34, 64, 36, 1, -3, 0},
- {-3, -1, 32, 64, 38, 1, -3, 0},
- {-2, -2, 31, 63, 39, 2, -3, 0},
- {-2, -2, 29, 63, 41, 2, -3, 0},
- {-2, -2, 28, 63, 42, 3, -4, 0},
- {-2, -3, 27, 63, 43, 4, -4, 0},
- {-2, -3, 25, 62, 45, 5, -4, 0},
- {-2, -3, 24, 62, 46, 5, -4, 0},
- {-2, -3, 23, 61, 47, 6, -4, 0},
- {-2, -3, 21, 60, 49, 7, -4, 0},
- {-1, -4, 20, 60, 50, 8, -4, -1},
- {-1, -4, 19, 59, 51, 9, -4, -1},
- {-1, -4, 17, 58, 52, 10, -4, 0},
- {-1, -4, 16, 57, 53, 12, -4, -1},
- {-1, -4, 15, 56, 54, 13, -4, -1},
- {-1, -4, 14, 55, 55, 14, -4, -1},
- {-1, -4, 13, 54, 56, 15, -4, -1},
- {-1, -4, 12, 53, 57, 16, -4, -1},
- {0, -4, 10, 52, 58, 17, -4, -1},
- {-1, -4, 9, 51, 59, 19, -4, -1},
- {-1, -4, 8, 50, 60, 20, -4, -1},
- {0, -4, 7, 49, 60, 21, -3, -2},
- {0, -4, 6, 47, 61, 23, -3, -2},
- {0, -4, 5, 46, 62, 24, -3, -2},
- {0, -4, 5, 45, 62, 25, -3, -2},
- {0, -4, 4, 43, 63, 27, -3, -2},
- {0, -4, 3, 42, 63, 28, -2, -2},
- {0, -3, 2, 41, 63, 29, -2, -2},
- {0, -3, 2, 39, 63, 31, -2, -2},
- {0, -3, 1, 38, 64, 32, -1, -3},
- {0, -3, 1, 36, 64, 34, -1, -3}
+ { -3, 0, 35, 64, 35, 0, -3, 0 }, { -3, -1, 34, 64, 36, 1, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 }, { -2, -2, 31, 63, 39, 2, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 }, { -2, -2, 28, 63, 42, 3, -4, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 }, { -2, -3, 25, 62, 45, 5, -4, 0 },
+ { -2, -3, 24, 62, 46, 5, -4, 0 }, { -2, -3, 23, 61, 47, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 }, { -1, -4, 20, 60, 50, 8, -4, -1 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 }, { -1, -4, 17, 58, 52, 10, -4, 0 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 }, { -1, -4, 15, 56, 54, 13, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 13, 54, 56, 15, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 }, { 0, -4, 10, 52, 58, 17, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 }, { -1, -4, 8, 50, 60, 20, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 }, { 0, -4, 6, 47, 61, 23, -3, -2 },
+ { 0, -4, 5, 46, 62, 24, -3, -2 }, { 0, -4, 5, 45, 62, 25, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 }, { 0, -4, 3, 42, 63, 28, -2, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 2, 39, 63, 31, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }, { 0, -3, 1, 36, 64, 34, -1, -3 }
};
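
Each row above is one 8-tap kernel for one of the 32 subpel phases (SUBPEL_BITS = 5), and every row sums to 128 = 1 << FILTER_BITS, so outputs are normalized by a rounded shift of 7. A minimal sketch of applying one row, mirroring the tap loop in interpolate() further down (boundary clamping omitted; names are illustrative):

  /* k points at one kernel row; src points at the integer pel. */
  static uint8_t apply_kernel(const int16_t *k, const uint8_t *src) {
    int t, sum = 0;
    for (t = 0; t < 8; ++t) sum += k[t] * src[t - 3];  /* taps at -3 .. +4 */
    sum = (sum + 64) >> 7;  /* ROUND_POWER_OF_TWO(sum, FILTER_BITS) */
    return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));  /* clip_pixel */
  }
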
// Filters for interpolation (0.625-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = {
- {-1, -8, 33, 80, 33, -8, -1, 0},
- {-1, -8, 30, 80, 35, -8, -1, 1},
- {-1, -8, 28, 80, 37, -7, -2, 1},
- {0, -8, 26, 79, 39, -7, -2, 1},
- {0, -8, 24, 79, 41, -7, -2, 1},
- {0, -8, 22, 78, 43, -6, -2, 1},
- {0, -8, 20, 78, 45, -5, -3, 1},
- {0, -8, 18, 77, 48, -5, -3, 1},
- {0, -8, 16, 76, 50, -4, -3, 1},
- {0, -8, 15, 75, 52, -3, -4, 1},
- {0, -7, 13, 74, 54, -3, -4, 1},
- {0, -7, 11, 73, 56, -2, -4, 1},
- {0, -7, 10, 71, 58, -1, -4, 1},
- {1, -7, 8, 70, 60, 0, -5, 1},
- {1, -6, 6, 68, 62, 1, -5, 1},
- {1, -6, 5, 67, 63, 2, -5, 1},
- {1, -6, 4, 65, 65, 4, -6, 1},
- {1, -5, 2, 63, 67, 5, -6, 1},
- {1, -5, 1, 62, 68, 6, -6, 1},
- {1, -5, 0, 60, 70, 8, -7, 1},
- {1, -4, -1, 58, 71, 10, -7, 0},
- {1, -4, -2, 56, 73, 11, -7, 0},
- {1, -4, -3, 54, 74, 13, -7, 0},
- {1, -4, -3, 52, 75, 15, -8, 0},
- {1, -3, -4, 50, 76, 16, -8, 0},
- {1, -3, -5, 48, 77, 18, -8, 0},
- {1, -3, -5, 45, 78, 20, -8, 0},
- {1, -2, -6, 43, 78, 22, -8, 0},
- {1, -2, -7, 41, 79, 24, -8, 0},
- {1, -2, -7, 39, 79, 26, -8, 0},
- {1, -2, -7, 37, 80, 28, -8, -1},
- {1, -1, -8, 35, 80, 30, -8, -1},
+ { -1, -8, 33, 80, 33, -8, -1, 0 }, { -1, -8, 30, 80, 35, -8, -1, 1 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 }, { 0, -8, 26, 79, 39, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 }, { 0, -8, 22, 78, 43, -6, -2, 1 },
+ { 0, -8, 20, 78, 45, -5, -3, 1 }, { 0, -8, 18, 77, 48, -5, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 }, { 0, -8, 15, 75, 52, -3, -4, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 }, { 0, -7, 11, 73, 56, -2, -4, 1 },
+ { 0, -7, 10, 71, 58, -1, -4, 1 }, { 1, -7, 8, 70, 60, 0, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 }, { 1, -6, 5, 67, 63, 2, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 }, { 1, -5, 2, 63, 67, 5, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 }, { 1, -5, 0, 60, 70, 8, -7, 1 },
+ { 1, -4, -1, 58, 71, 10, -7, 0 }, { 1, -4, -2, 56, 73, 11, -7, 0 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 }, { 1, -4, -3, 52, 75, 15, -8, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 }, { 1, -3, -5, 48, 77, 18, -8, 0 },
+ { 1, -3, -5, 45, 78, 20, -8, 0 }, { 1, -2, -6, 43, 78, 22, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 }, { 1, -2, -7, 39, 79, 26, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }, { 1, -1, -8, 35, 80, 30, -8, -1 },
};
// Filters for interpolation (0.75-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = {
- {2, -11, 25, 96, 25, -11, 2, 0},
- {2, -11, 22, 96, 28, -11, 2, 0},
- {2, -10, 19, 95, 31, -11, 2, 0},
- {2, -10, 17, 95, 34, -12, 2, 0},
- {2, -9, 14, 94, 37, -12, 2, 0},
- {2, -8, 12, 93, 40, -12, 1, 0},
- {2, -8, 9, 92, 43, -12, 1, 1},
- {2, -7, 7, 91, 46, -12, 1, 0},
- {2, -7, 5, 90, 49, -12, 1, 0},
- {2, -6, 3, 88, 52, -12, 0, 1},
- {2, -5, 1, 86, 55, -12, 0, 1},
- {2, -5, -1, 84, 58, -11, 0, 1},
- {2, -4, -2, 82, 61, -11, -1, 1},
- {2, -4, -4, 80, 64, -10, -1, 1},
- {1, -3, -5, 77, 67, -9, -1, 1},
- {1, -3, -6, 75, 70, -8, -2, 1},
- {1, -2, -7, 72, 72, -7, -2, 1},
- {1, -2, -8, 70, 75, -6, -3, 1},
- {1, -1, -9, 67, 77, -5, -3, 1},
- {1, -1, -10, 64, 80, -4, -4, 2},
- {1, -1, -11, 61, 82, -2, -4, 2},
- {1, 0, -11, 58, 84, -1, -5, 2},
- {1, 0, -12, 55, 86, 1, -5, 2},
- {1, 0, -12, 52, 88, 3, -6, 2},
- {0, 1, -12, 49, 90, 5, -7, 2},
- {0, 1, -12, 46, 91, 7, -7, 2},
- {1, 1, -12, 43, 92, 9, -8, 2},
- {0, 1, -12, 40, 93, 12, -8, 2},
- {0, 2, -12, 37, 94, 14, -9, 2},
- {0, 2, -12, 34, 95, 17, -10, 2},
- {0, 2, -11, 31, 95, 19, -10, 2},
- {0, 2, -11, 28, 96, 22, -11, 2}
+ { 2, -11, 25, 96, 25, -11, 2, 0 }, { 2, -11, 22, 96, 28, -11, 2, 0 },
+ { 2, -10, 19, 95, 31, -11, 2, 0 }, { 2, -10, 17, 95, 34, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 }, { 2, -8, 12, 93, 40, -12, 1, 0 },
+ { 2, -8, 9, 92, 43, -12, 1, 1 }, { 2, -7, 7, 91, 46, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 }, { 2, -6, 3, 88, 52, -12, 0, 1 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 }, { 2, -5, -1, 84, 58, -11, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 }, { 2, -4, -4, 80, 64, -10, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 }, { 1, -3, -6, 75, 70, -8, -2, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 }, { 1, -2, -8, 70, 75, -6, -3, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 }, { 1, -1, -10, 64, 80, -4, -4, 2 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 }, { 1, 0, -11, 58, 84, -1, -5, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 }, { 1, 0, -12, 52, 88, 3, -6, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 }, { 0, 1, -12, 46, 91, 7, -7, 2 },
+ { 1, 1, -12, 43, 92, 9, -8, 2 }, { 0, 1, -12, 40, 93, 12, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 }, { 0, 2, -12, 34, 95, 17, -10, 2 },
+ { 0, 2, -11, 31, 95, 19, -10, 2 }, { 0, 2, -11, 28, 96, 22, -11, 2 }
};
// Filters for interpolation (0.875-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = {
- {3, -8, 13, 112, 13, -8, 3, 0},
- {3, -7, 10, 112, 17, -9, 3, -1},
- {2, -6, 7, 111, 21, -9, 3, -1},
- {2, -5, 4, 111, 24, -10, 3, -1},
- {2, -4, 1, 110, 28, -11, 3, -1},
- {1, -3, -1, 108, 32, -12, 4, -1},
- {1, -2, -3, 106, 36, -13, 4, -1},
- {1, -1, -6, 105, 40, -14, 4, -1},
- {1, -1, -7, 102, 44, -14, 4, -1},
- {1, 0, -9, 100, 48, -15, 4, -1},
- {1, 1, -11, 97, 53, -16, 4, -1},
- {0, 1, -12, 95, 57, -16, 4, -1},
- {0, 2, -13, 91, 61, -16, 4, -1},
- {0, 2, -14, 88, 65, -16, 4, -1},
- {0, 3, -15, 84, 69, -17, 4, 0},
- {0, 3, -16, 81, 73, -16, 3, 0},
- {0, 3, -16, 77, 77, -16, 3, 0},
- {0, 3, -16, 73, 81, -16, 3, 0},
- {0, 4, -17, 69, 84, -15, 3, 0},
- {-1, 4, -16, 65, 88, -14, 2, 0},
- {-1, 4, -16, 61, 91, -13, 2, 0},
- {-1, 4, -16, 57, 95, -12, 1, 0},
- {-1, 4, -16, 53, 97, -11, 1, 1},
- {-1, 4, -15, 48, 100, -9, 0, 1},
- {-1, 4, -14, 44, 102, -7, -1, 1},
- {-1, 4, -14, 40, 105, -6, -1, 1},
- {-1, 4, -13, 36, 106, -3, -2, 1},
- {-1, 4, -12, 32, 108, -1, -3, 1},
- {-1, 3, -11, 28, 110, 1, -4, 2},
- {-1, 3, -10, 24, 111, 4, -5, 2},
- {-1, 3, -9, 21, 111, 7, -6, 2},
- {-1, 3, -9, 17, 112, 10, -7, 3}
+ { 3, -8, 13, 112, 13, -8, 3, 0 }, { 3, -7, 10, 112, 17, -9, 3, -1 },
+ { 2, -6, 7, 111, 21, -9, 3, -1 }, { 2, -5, 4, 111, 24, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -11, 3, -1 }, { 1, -3, -1, 108, 32, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 }, { 1, -1, -6, 105, 40, -14, 4, -1 },
+ { 1, -1, -7, 102, 44, -14, 4, -1 }, { 1, 0, -9, 100, 48, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 }, { 0, 1, -12, 95, 57, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 }, { 0, 2, -14, 88, 65, -16, 4, -1 },
+ { 0, 3, -15, 84, 69, -17, 4, 0 }, { 0, 3, -16, 81, 73, -16, 3, 0 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 }, { 0, 3, -16, 73, 81, -16, 3, 0 },
+ { 0, 4, -17, 69, 84, -15, 3, 0 }, { -1, 4, -16, 65, 88, -14, 2, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 }, { -1, 4, -16, 57, 95, -12, 1, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 }, { -1, 4, -15, 48, 100, -9, 0, 1 },
+ { -1, 4, -14, 44, 102, -7, -1, 1 }, { -1, 4, -14, 40, 105, -6, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 }, { -1, 4, -12, 32, 108, -1, -3, 1 },
+ { -1, 3, -11, 28, 110, 1, -4, 2 }, { -1, 3, -10, 24, 111, 4, -5, 2 },
+ { -1, 3, -9, 21, 111, 7, -6, 2 }, { -1, 3, -9, 17, 112, 10, -7, 3 }
};
// Filters for interpolation (full-band) - no filtering for integer pixels
static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = {
- {0, 0, 0, 128, 0, 0, 0, 0},
- {0, 1, -3, 128, 3, -1, 0, 0},
- {-1, 2, -6, 127, 7, -2, 1, 0},
- {-1, 3, -9, 126, 12, -4, 1, 0},
- {-1, 4, -12, 125, 16, -5, 1, 0},
- {-1, 4, -14, 123, 20, -6, 2, 0},
- {-1, 5, -15, 120, 25, -8, 2, 0},
- {-1, 5, -17, 118, 30, -9, 3, -1},
- {-1, 6, -18, 114, 35, -10, 3, -1},
- {-1, 6, -19, 111, 41, -12, 3, -1},
- {-1, 6, -20, 107, 46, -13, 4, -1},
- {-1, 6, -21, 103, 52, -14, 4, -1},
- {-1, 6, -21, 99, 57, -16, 5, -1},
- {-1, 6, -21, 94, 63, -17, 5, -1},
- {-1, 6, -20, 89, 68, -18, 5, -1},
- {-1, 6, -20, 84, 73, -19, 6, -1},
- {-1, 6, -20, 79, 79, -20, 6, -1},
- {-1, 6, -19, 73, 84, -20, 6, -1},
- {-1, 5, -18, 68, 89, -20, 6, -1},
- {-1, 5, -17, 63, 94, -21, 6, -1},
- {-1, 5, -16, 57, 99, -21, 6, -1},
- {-1, 4, -14, 52, 103, -21, 6, -1},
- {-1, 4, -13, 46, 107, -20, 6, -1},
- {-1, 3, -12, 41, 111, -19, 6, -1},
- {-1, 3, -10, 35, 114, -18, 6, -1},
- {-1, 3, -9, 30, 118, -17, 5, -1},
- {0, 2, -8, 25, 120, -15, 5, -1},
- {0, 2, -6, 20, 123, -14, 4, -1},
- {0, 1, -5, 16, 125, -12, 4, -1},
- {0, 1, -4, 12, 126, -9, 3, -1},
- {0, 1, -2, 7, 127, -6, 2, -1},
- {0, 0, -1, 3, 128, -3, 1, 0}
+ { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 1, -3, 128, 3, -1, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 }, { -1, 3, -9, 126, 12, -4, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 }, { -1, 4, -14, 123, 20, -6, 2, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 }, { -1, 5, -17, 118, 30, -9, 3, -1 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 }, { -1, 6, -19, 111, 41, -12, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 }, { -1, 6, -21, 103, 52, -14, 4, -1 },
+ { -1, 6, -21, 99, 57, -16, 5, -1 }, { -1, 6, -21, 94, 63, -17, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 }, { -1, 6, -20, 84, 73, -19, 6, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 }, { -1, 6, -19, 73, 84, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 }, { -1, 5, -17, 63, 94, -21, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 6, -1 }, { -1, 4, -14, 52, 103, -21, 6, -1 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 }, { -1, 3, -12, 41, 111, -19, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 }, { -1, 3, -9, 30, 118, -17, 5, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 }, { 0, 2, -6, 20, 123, -14, 4, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 }, { 0, 1, -4, 12, 126, -9, 3, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }, { 0, 0, -1, 3, 128, -3, 1, 0 }
};
// Filters for factor of 2 downsampling.
-static const int16_t vp9_down2_symeven_half_filter[] = {56, 12, -3, -1};
-static const int16_t vp9_down2_symodd_half_filter[] = {64, 35, 0, -3};
+static const int16_t vp9_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t vp9_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
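
These half-filters store one side of a symmetric kernel: the even variant expands to the 8-tap [-1, -3, 12, 56, 56, 12, -3, -1] and the odd variant to the 7-tap [-3, 0, 35, 64, 35, 0, -3], each summing to 128. A sketch of one even-phase output pel, mirroring the paired-tap loops in down2_symeven() below (boundary clamping and the outer loop omitted; the rounding-term initialization is assumed to match the odd variant shown later):

  /* in points at source pel i; the output sits between in[0] and in[1]. */
  static uint8_t down2_even_pel(const uint8_t *in) {
    static const int16_t half[4] = { 56, 12, -3, -1 };
    int j, sum = 1 << 6;  /* rounding term, 1 << (FILTER_BITS - 1) */
    for (j = 0; j < 4; ++j) sum += (in[-j] + in[1 + j]) * half[j];
    sum >>= 7;  /* FILTER_BITS */
    return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
  }
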
static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
int outlength16 = outlength * 16;
@@ -232,11 +152,14 @@
static void interpolate(const uint8_t *const input, int inlength,
uint8_t *output, int outlength) {
- const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) /
- outlength;
- const int64_t offset = inlength > outlength ?
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
- -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+ const int64_t delta =
+ (((uint64_t)inlength << 32) + outlength / 2) / outlength;
+ const int64_t offset =
+ inlength > outlength
+ ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+ outlength
+ : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+ outlength;
uint8_t *optr = output;
int x, x1, x2, sum, k, int_pel, sub_pel;
int64_t y;
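
Here delta is the source step per output pel in Q32 fixed point (inlength / outlength, rounded), and offset recenters the output grid on the input. For example, with inlength = 4 and outlength = 2, delta is exactly 2.0 and offset is +0.5, so the two outputs sample source positions 0.5 and 2.5. The loop bodies split the running position y into an integer pel and a 5-bit subpel phase; a sketch of that split (the real code does it inline on y):

  /* Sketch: recover the integer pel and kernel phase from a Q32 position. */
  static void split_position(int64_t y, int *int_pel, int *sub_pel) {
    *int_pel = (int)(y >> 32);             /* INTERP_PRECISION_BITS */
    *sub_pel = (int)(y >> (32 - 5)) & 31;  /* SUBPEL_BITS / SUBPEL_MASK */
  }
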
@@ -253,8 +176,8 @@
x1 = x;
x = outlength - 1;
y = delta * x + offset;
- while ((y >> INTERP_PRECISION_BITS) +
- (int64_t)(INTERP_TAPS / 2) >= inlength) {
+ while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+ inlength) {
x--;
y -= delta;
}
@@ -268,8 +191,8 @@
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k) {
const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
- sum += filter[k] * input[(pk < 0 ? 0 :
- (pk >= inlength ? inlength - 1 : pk))];
+ sum += filter[k] *
+ input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
}
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
@@ -282,9 +205,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
- 0 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+ ? 0
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
// Middle part.
@@ -306,9 +229,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
- inlength ? inlength - 1 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+ ? inlength - 1
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
}
@@ -332,7 +255,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -362,7 +285,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[i - j] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -388,7 +311,7 @@
for (j = 1; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -417,7 +340,7 @@
int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
for (j = 1; j < filter_len_half; ++j) {
sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -427,8 +350,7 @@
static int get_down2_length(int length, int steps) {
int s;
- for (s = 0; s < steps; ++s)
- length = (length + 1) >> 1;
+ for (s = 0; s < steps; ++s) length = (length + 1) >> 1;
return length;
}
@@ -442,11 +364,8 @@
return steps;
}
-static void resize_multistep(const uint8_t *const input,
- int length,
- uint8_t *output,
- int olength,
- uint8_t *otmp) {
+static void resize_multistep(const uint8_t *const input, int length,
+ uint8_t *output, int olength, uint8_t *otmp) {
int steps;
if (length == olength) {
memcpy(output, input, sizeof(output[0]) * length);
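
The multistep strategy, sketched under the assumption that get_down2_steps() above counts how many clean halvings fit before undershooting the target: halve with the down2 filters while a halved length still covers olength, then let interpolate() (or the plain copy for equal lengths) handle the final, at-most-2x ratio.

  /* Illustrative step count only; the real logic is get_down2_steps(). */
  static int down2_steps_sketch(int length, int olength) {
    int steps = 0;
    while (((length + 1) >> 1) >= olength) {
      length = (length + 1) >> 1;  /* one factor-of-2 pass */
      ++steps;
    }
    return steps;  /* interpolate() then maps the remaining ratio */
  }
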
@@ -501,22 +420,16 @@
}
}
-void vp9_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
+void vp9_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
int out_stride) {
int i;
uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
- uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) *
- (width < height ? height : width));
+ uint8_t *tmpbuf =
+ (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width));
uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * height);
uint8_t *arrbuf2 = (uint8_t *)malloc(sizeof(uint8_t) * height2);
- if (intbuf == NULL || tmpbuf == NULL ||
- arrbuf == NULL || arrbuf2 == NULL)
+ if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL)
goto Error;
assert(width > 0);
assert(height > 0);
@@ -523,8 +436,8 @@
assert(width2 > 0);
assert(height2 > 0);
for (i = 0; i < height; ++i)
- resize_multistep(input + in_stride * i, width,
- intbuf + width2 * i, width2, tmpbuf);
+ resize_multistep(input + in_stride * i, width, intbuf + width2 * i, width2,
+ tmpbuf);
for (i = 0; i < width2; ++i) {
fill_col_to_arr(intbuf + i, width2, height, arrbuf);
resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf);
@@ -531,7 +444,7 @@
fill_arr_to_col(output + i, out_stride, height2, arrbuf2);
}
- Error:
+Error:
free(intbuf);
free(tmpbuf);
free(arrbuf);
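
vp9_resize_plane() is separable: every source row is resized into intbuf (width2 x height), then each column is gathered into arrbuf, resized, and scattered back out through fill_arr_to_col(). A usage sketch with illustrative dimensions, using the signature shown in vp9_resize.h below:

  /* Shrink a 1920x1080 plane to 1280x720 (strides equal widths here). */
  static void resize_example(const uint8_t *src, uint8_t *dst) {
    vp9_resize_plane(src, 1080, 1920, 1920, dst, 720, 1280, 1280);
  }
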
@@ -543,9 +456,12 @@
uint16_t *output, int outlength, int bd) {
const int64_t delta =
(((uint64_t)inlength << 32) + outlength / 2) / outlength;
- const int64_t offset = inlength > outlength ?
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
- -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+ const int64_t offset =
+ inlength > outlength
+ ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+ outlength
+ : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+ outlength;
uint16_t *optr = output;
int x, x1, x2, sum, k, int_pel, sub_pel;
int64_t y;
@@ -562,8 +478,8 @@
x1 = x;
x = outlength - 1;
y = delta * x + offset;
- while ((y >> INTERP_PRECISION_BITS) +
- (int64_t)(INTERP_TAPS / 2) >= inlength) {
+ while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+ inlength) {
x--;
y -= delta;
}
@@ -578,7 +494,7 @@
for (k = 0; k < INTERP_TAPS; ++k) {
const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
sum += filter[k] *
- input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
+ input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
}
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
@@ -591,9 +507,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] *
- input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
- 0 : int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+ ? 0
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
// Middle part.
@@ -615,9 +531,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
- inlength ? inlength - 1 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+ ? inlength - 1
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
}
@@ -641,7 +557,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -671,7 +587,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[i - j] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -680,7 +596,7 @@
}
static void highbd_down2_symodd(const uint16_t *const input, int length,
- uint16_t *output, int bd) {
+ uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half - 1.
static const int16_t *filter = vp9_down2_symodd_half_filter;
const int filter_len_half = sizeof(vp9_down2_symodd_half_filter) / 2;
@@ -697,7 +613,7 @@
for (j = 1; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -726,7 +642,7 @@
int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
for (j = 1; j < filter_len_half; ++j) {
sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -734,12 +650,9 @@
}
}
-static void highbd_resize_multistep(const uint16_t *const input,
- int length,
- uint16_t *output,
- int olength,
- uint16_t *otmp,
- int bd) {
+static void highbd_resize_multistep(const uint16_t *const input, int length,
+ uint16_t *output, int olength,
+ uint16_t *otmp, int bd) {
int steps;
if (length == olength) {
memcpy(output, input, sizeof(output[0]) * length);
@@ -796,23 +709,16 @@
}
}
-void vp9_highbd_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride,
- int bd) {
+void vp9_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd) {
int i;
uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
- uint16_t *tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) *
- (width < height ? height : width));
+ uint16_t *tmpbuf =
+ (uint16_t *)malloc(sizeof(uint16_t) * (width < height ? height : width));
uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * height);
uint16_t *arrbuf2 = (uint16_t *)malloc(sizeof(uint16_t) * height2);
- if (intbuf == NULL || tmpbuf == NULL ||
- arrbuf == NULL || arrbuf2 == NULL)
+ if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL)
goto Error;
for (i = 0; i < height; ++i) {
highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width,
@@ -820,13 +726,12 @@
}
for (i = 0; i < width2; ++i) {
highbd_fill_col_to_arr(intbuf + i, width2, height, arrbuf);
- highbd_resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf,
- bd);
+ highbd_resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf, bd);
highbd_fill_arr_to_col(CONVERT_TO_SHORTPTR(output + i), out_stride, height2,
arrbuf2);
}
- Error:
+Error:
free(intbuf);
free(tmpbuf);
free(arrbuf);
@@ -834,96 +739,82 @@
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-void vp9_resize_frame420(const uint8_t *const y,
- int y_stride,
+void vp9_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp9_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp9_resize_plane(u, height / 2, width / 2, uv_stride,
- ou, oheight / 2, owidth / 2, ouv_stride);
- vp9_resize_plane(v, height / 2, width / 2, uv_stride,
- ov, oheight / 2, owidth / 2, ouv_stride);
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp9_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride);
+ vp9_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride);
}
void vp9_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp9_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp9_resize_plane(u, height, width / 2, uv_stride,
- ou, oheight, owidth / 2, ouv_stride);
- vp9_resize_plane(v, height, width / 2, uv_stride,
- ov, oheight, owidth / 2, ouv_stride);
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp9_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+ ouv_stride);
+ vp9_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+ ouv_stride);
}
void vp9_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp9_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp9_resize_plane(u, height, width, uv_stride,
- ou, oheight, owidth, ouv_stride);
- vp9_resize_plane(v, height, width, uv_stride,
- ov, oheight, owidth, ouv_stride);
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp9_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride);
+ vp9_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride);
}
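
The three wrappers differ only in how the chroma planes scale with the subsampling format; the luma call is identical in each:

  /* 420: u, v planes are (height/2, width/2) -> (oheight/2, owidth/2)
     422: u, v planes are (height,   width/2) -> (oheight,   owidth/2)
     444: u, v planes are (height,   width)   -> (oheight,   owidth)   */
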
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_resize_frame420(const uint8_t *const y,
- int y_stride,
+void vp9_highbd_resize_frame420(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp9_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp9_highbd_resize_plane(u, height / 2, width / 2, uv_stride,
- ou, oheight / 2, owidth / 2, ouv_stride, bd);
- vp9_highbd_resize_plane(v, height / 2, width / 2, uv_stride,
- ov, oheight / 2, owidth / 2, ouv_stride, bd);
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp9_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride, bd);
+ vp9_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride, bd);
}
void vp9_highbd_resize_frame422(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp9_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp9_highbd_resize_plane(u, height, width / 2, uv_stride,
- ou, oheight, owidth / 2, ouv_stride, bd);
- vp9_highbd_resize_plane(v, height, width / 2, uv_stride,
- ov, oheight, owidth / 2, ouv_stride, bd);
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp9_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+ owidth / 2, ouv_stride, bd);
+ vp9_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+ owidth / 2, ouv_stride, bd);
}
void vp9_highbd_resize_frame444(const uint8_t *const y, int y_stride,
const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp9_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp9_highbd_resize_plane(u, height, width, uv_stride,
- ou, oheight, owidth, ouv_stride, bd);
- vp9_highbd_resize_plane(v, height, width, uv_stride,
- ov, oheight, owidth, ouv_stride, bd);
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp9_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride, bd);
+ vp9_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride, bd);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
--- a/vp9/encoder/vp9_resize.h
+++ b/vp9/encoder/vp9_resize.h
@@ -18,116 +18,51 @@
extern "C" {
#endif
-void vp9_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
+void vp9_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
int out_stride);
-void vp9_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
-void vp9_resize_frame422(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
-void vp9_resize_frame444(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
+void vp9_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void vp9_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void vp9_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride,
- int bd);
-void vp9_highbd_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-void vp9_highbd_resize_frame422(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-void vp9_highbd_resize_frame444(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+void vp9_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd);
+void vp9_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void vp9_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void vp9_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+#endif // CONFIG_VP9_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP9_ENCODER_VP9_RESIZE_H_
+#endif // VP9_ENCODER_VP9_RESIZE_H_
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <limits.h>
#include "vpx_mem/vpx_mem.h"
@@ -31,8 +30,7 @@
seg->update_data = 0;
}
-void vp9_set_segment_data(struct segmentation *seg,
- signed char *feature_data,
+void vp9_set_segment_data(struct segmentation *seg, signed char *feature_data,
unsigned char abs_delta) {
seg->abs_delta = abs_delta;
@@ -75,13 +73,11 @@
const int c4567 = c45 + c67;
// Cost the top node of the tree
- int cost = c0123 * vp9_cost_zero(probs[0]) +
- c4567 * vp9_cost_one(probs[0]);
+ int cost = c0123 * vp9_cost_zero(probs[0]) + c4567 * vp9_cost_one(probs[0]);
// Cost subsequent levels
if (c0123 > 0) {
- cost += c01 * vp9_cost_zero(probs[1]) +
- c23 * vp9_cost_one(probs[1]);
+ cost += c01 * vp9_cost_zero(probs[1]) + c23 * vp9_cost_one(probs[1]);
if (c01 > 0)
cost += segcounts[0] * vp9_cost_zero(probs[3]) +
@@ -92,8 +88,7 @@
}
if (c4567 > 0) {
- cost += c45 * vp9_cost_zero(probs[2]) +
- c67 * vp9_cost_one(probs[2]);
+ cost += c45 * vp9_cost_zero(probs[2]) + c67 * vp9_cost_one(probs[2]);
if (c45 > 0)
cost += segcounts[4] * vp9_cost_zero(probs[5]) +
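
The costing above walks a 3-level binary tree over the eight segment ids: each internal node contributes (blocks taking its 0-branch) x vp9_cost_zero(p) plus (blocks taking its 1-branch) x vp9_cost_one(p). Per block, the path costs read directly off the probs[] indexing, for example:

  cost(segment 0) = cost_zero(probs[0]) + cost_zero(probs[1]) + cost_zero(probs[3])
  cost(segment 7) = cost_one(probs[0])  + cost_one(probs[2])  + cost_one(probs[6])
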
@@ -110,12 +105,11 @@
const TileInfo *tile, MODE_INFO **mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
- int *t_unpred_seg_counts,
- int bw, int bh, int mi_row, int mi_col) {
+ int *t_unpred_seg_counts, int bw, int bh, int mi_row,
+ int mi_col) {
int segment_id;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
xd->mi = mi;
segment_id = xd->mi[0]->segment_id;
@@ -129,8 +123,8 @@
if (cm->frame_type != KEY_FRAME) {
const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
// Test to see if the segment id matches the predicted value.
- const int pred_segment_id = get_segment_id(cm, cm->last_frame_seg_map,
- bsize, mi_row, mi_col);
+ const int pred_segment_id =
+ get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
const int pred_flag = pred_segment_id == segment_id;
const int pred_context = vp9_get_pred_context_seg_id(xd);
@@ -140,8 +134,7 @@
temporal_predictor_count[pred_context][pred_flag]++;
// Update the "unpredicted" segment count
- if (!pred_flag)
- t_unpred_seg_counts[segment_id]++;
+ if (!pred_flag) t_unpred_seg_counts[segment_id]++;
}
}
@@ -149,15 +142,13 @@
const TileInfo *tile, MODE_INFO **mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
- int *t_unpred_seg_counts,
- int mi_row, int mi_col,
+ int *t_unpred_seg_counts, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
const int mis = cm->mi_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
bw = num_8x8_blocks_wide_lookup[mi[0]->sb_type];
bh = num_8x8_blocks_high_lookup[mi[0]->sb_type];
@@ -174,9 +165,9 @@
} else if (bw < bs && bh == bs) {
count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
- count_segs(cm, xd, tile, mi + hbs,
- no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts,
- hbs, bs, mi_row, mi_col + hbs);
+ count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+ mi_col + hbs);
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
@@ -187,9 +178,8 @@
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc],
- no_pred_segcounts, temporal_predictor_count,
- t_unpred_seg_counts,
+ count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
@@ -230,8 +220,8 @@
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += 8, mi += 8)
count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
- temporal_predictor_count, t_unpred_seg_counts,
- mi_row, mi_col, BLOCK_64X64);
+ temporal_predictor_count, t_unpred_seg_counts, mi_row,
+ mi_col, BLOCK_64X64);
}
}
--- a/vp9/encoder/vp9_segmentation.h
+++ b/vp9/encoder/vp9_segmentation.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_SEGMENTATION_H_
#define VP9_ENCODER_VP9_SEGMENTATION_H_
@@ -22,11 +21,9 @@
void vp9_enable_segmentation(struct segmentation *seg);
void vp9_disable_segmentation(struct segmentation *seg);
-void vp9_disable_segfeature(struct segmentation *seg,
- int segment_id,
+void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id);
-void vp9_clear_segdata(struct segmentation *seg,
- int segment_id,
+void vp9_clear_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id);
// The values given for each segment can be either deltas (from the default
--- a/vp9/encoder/vp9_skin_detection.c
+++ b/vp9/encoder/vp9_skin_detection.c
@@ -18,11 +18,14 @@
#define MODEL_MODE 1
// Fixed-point skin color model parameters.
-static const int skin_mean[5][2] = {
- {7463, 9614}, {6400, 10240}, {7040, 10240}, {8320, 9280}, {6800, 9614}};
-static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
-static const int skin_threshold[6] = {1570636, 1400000, 800000, 800000, 800000,
- 800000}; // q18
+static const int skin_mean[5][2] = { { 7463, 9614 },
+ { 6400, 10240 },
+ { 7040, 10240 },
+ { 8320, 9280 },
+ { 6800, 9614 } };
+static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16
+static const int skin_threshold[6] = { 1570636, 1400000, 800000,
+ 800000, 800000, 800000 }; // q18
// Thresholds on luminance.
static const int y_low = 40;
@@ -41,10 +44,9 @@
const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
- const int skin_diff = skin_inv_cov[0] * cb_diff_q2 +
- skin_inv_cov[1] * cbcr_diff_q2 +
- skin_inv_cov[2] * cbcr_diff_q2 +
- skin_inv_cov[3] * cr_diff_q2;
+ const int skin_diff =
+ skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
return skin_diff;
}
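
The sum above is a fixed-point Mahalanobis-style quadratic form: with dcb and dcr the offsets of (cb, cr) from the model mean for the chosen cluster, it evaluates [dcb dcr] * InvCov * [dcb dcr]^T. In floating point (illustrative only; the real code stays in the q-scaled integer domain shown above):

  static double skin_distance(double dcb, double dcr, const int icov[4]) {
    return icov[0] * dcb * dcb + (icov[1] + icov[2]) * dcb * dcr +
           icov[3] * dcr * dcr;
  }
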
@@ -58,20 +60,18 @@
} else {
int i = 0;
// Exit on grey.
- if (cb == 128 && cr == 128)
- return 0;
+ if (cb == 128 && cr == 128) return 0;
// Exit on very strong cb.
- if (cb > 150 && cr < 110)
- return 0;
+ if (cb > 150 && cr < 110) return 0;
for (; i < 5; i++) {
int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
if (skin_color_diff < skin_threshold[i + 1]) {
- if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2))
- return 0;
- else if (motion == 0 &&
- skin_color_diff > (skin_threshold[i + 1] >> 1))
- return 0;
- else
+ if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2))
+ return 0;
+ else if (motion == 0 &&
+ skin_color_diff > (skin_threshold[i + 1] >> 1))
+ return 0;
+ else
return 1;
}
      // Exit if difference is much larger than the threshold.
@@ -100,13 +100,11 @@
const uint8_t ysource = y[y_height_shift * stride + y_width_shift];
const uint8_t usource = u[uv_height_shift * strideuv + uv_width_shift];
const uint8_t vsource = v[uv_height_shift * strideuv + uv_width_shift];
- if (consec_zeromv > 25 && curr_motion_magn == 0)
- motion = 0;
+ if (consec_zeromv > 25 && curr_motion_magn == 0) motion = 0;
return vp9_skin_pixel(ysource, usource, vsource, motion);
}
}
-
#ifdef OUTPUT_YUV_SKINMAP
// For viewing skin map on input source.
void vp9_compute_skin_map(VP9_COMP *const cpi, FILE *yuv_skinmap_file) {
@@ -129,11 +127,11 @@
int mode_filter = 0;
YV12_BUFFER_CONFIG skinmap;
memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
- if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
- VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) {
- vpx_free_frame_buffer(&skinmap);
- return;
+ if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y, VP9_ENC_BORDER_IN_PIXELS,
+ cm->byte_alignment)) {
+ vpx_free_frame_buffer(&skinmap);
+ return;
}
memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
y = skinmap.y_buffer;
@@ -153,11 +151,11 @@
uint8_t usource2 = src_u[(uvpos + 1) * src_uvstride + uvpos];
uint8_t vsource2 = src_v[(uvpos + 1) * src_uvstride + uvpos];
uint8_t ysource3 = src_y[ypos * src_ystride + (ypos + 1)];
- uint8_t usource3 = src_u[uvpos * src_uvstride + (uvpos + 1)];
- uint8_t vsource3 = src_v[uvpos * src_uvstride + (uvpos + 1)];
+ uint8_t usource3 = src_u[uvpos * src_uvstride + (uvpos + 1)];
+ uint8_t vsource3 = src_v[uvpos * src_uvstride + (uvpos + 1)];
uint8_t ysource4 = src_y[(ypos + 1) * src_ystride + (ypos + 1)];
- uint8_t usource4 = src_u[(uvpos + 1) * src_uvstride + (uvpos + 1)];
- uint8_t vsource4 = src_v[(uvpos + 1) * src_uvstride + (uvpos + 1)];
+ uint8_t usource4 = src_u[(uvpos + 1) * src_uvstride + (uvpos + 1)];
+ uint8_t vsource4 = src_v[(uvpos + 1) * src_uvstride + (uvpos + 1)];
ysource = (ysource + ysource2 + ysource3 + ysource4) >> 2;
usource = (usource + usource2 + usource3 + usource4) >> 2;
vsource = (vsource + vsource2 + vsource3 + vsource4) >> 2;
@@ -172,16 +170,15 @@
if (y_bsize == 8)
consec_zeromv = cpi->consec_zero_mv[bl_index];
else
- consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index],
- VPXMIN(cpi->consec_zero_mv[bl_index1],
- VPXMIN(cpi->consec_zero_mv[bl_index2],
- cpi->consec_zero_mv[bl_index3])));
- if (y_bsize == 16)
- block_size = BLOCK_16X16;
- is_skin = vp9_compute_skin_block(src_y, src_u, src_v, src_ystride,
- src_uvstride, block_size,
- consec_zeromv,
- 0);
+ consec_zeromv =
+ VPXMIN(cpi->consec_zero_mv[bl_index],
+ VPXMIN(cpi->consec_zero_mv[bl_index1],
+ VPXMIN(cpi->consec_zero_mv[bl_index2],
+ cpi->consec_zero_mv[bl_index3])));
+ if (y_bsize == 16) block_size = BLOCK_16X16;
+ is_skin =
+ vp9_compute_skin_block(src_y, src_u, src_v, src_ystride,
+ src_uvstride, block_size, consec_zeromv, 0);
}
for (i = 0; i < y_bsize; i++) {
for (j = 0; j < y_bsize; j++) {
--- a/vp9/encoder/vp9_speed_features.c
+++ b/vp9/encoder/vp9_speed_features.c
@@ -16,21 +16,23 @@
#include "vpx_dsp/vpx_dsp_common.h"
 // Mesh search patterns for various speed settings
-static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] =
- {{64, 4}, {28, 2}, {15, 1}, {7, 1}};
+static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
+ { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 }
+};
#define MAX_MESH_SPEED 5 // Max speed setting for mesh motion method
-static MESH_PATTERN good_quality_mesh_patterns[MAX_MESH_SPEED + 1]
- [MAX_MESH_STEP] =
- {{{64, 8}, {28, 4}, {15, 1}, {7, 1}},
- {{64, 8}, {28, 4}, {15, 1}, {7, 1}},
- {{64, 8}, {14, 2}, {7, 1}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
+static MESH_PATTERN
+ good_quality_mesh_patterns[MAX_MESH_SPEED + 1][MAX_MESH_STEP] = {
+ { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+ { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+ { { 64, 8 }, { 14, 2 }, { 7, 1 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
};
-static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] =
- {50, 25, 15, 5, 1, 1};
+static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = {
+ 50, 25, 15, 5, 1, 1
+};
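
Each mesh pattern entry pairs a search range with a sampling interval: one stage scans a window of +/- range around the current best motion vector, sampling every `interval` pels, and hands its winner to the next, finer stage. A sketch of a single stage (the real exhaustive search lives in the motion search code; cost() stands in for the SAD metric):

  typedef int (*mv_cost_fn)(int row, int col);

  static void mesh_stage(int cr, int cc, int range, int interval,
                         mv_cost_fn cost, int *br, int *bc) {
    int r, c, best = cost(cr, cc);
    *br = cr;
    *bc = cc;
    for (r = -range; r <= range; r += interval)
      for (c = -range; c <= range; c += interval) {
        const int v = cost(cr + r, cc + c);
        if (v < best) { best = v; *br = cr + r; *bc = cc + c; }
      }
  }
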
// Intra only frames, golden frames (except alt ref overlays) and
// alt ref frames tend to be coded at a higher than ambient quality
@@ -67,8 +69,8 @@
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->partition_search_breakout_dist_thr = (1 << 23);
} else {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
@@ -78,8 +80,8 @@
if (speed >= 2) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->adaptive_pred_interp_filter = 0;
sf->partition_search_breakout_dist_thr = (1 << 24);
sf->partition_search_breakout_rate_thr = 120;
@@ -147,7 +149,7 @@
}
sf->use_square_only_threshold = BLOCK_4X4;
- sf->less_rectangular_check = 1;
+ sf->less_rectangular_check = 1;
sf->use_rd_breakout = 1;
sf->adaptive_motion_search = 1;
@@ -167,17 +169,17 @@
}
if (speed >= 2) {
- sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_boosted(cpi) ? USE_FULL_RD : USE_LARGESTALL;
// Reference masking is not supported in dynamic scaling mode.
sf->reference_masking = cpi->oxcf.resize_mode != RESIZE_DYNAMIC ? 1 : 0;
- sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
- FLAG_SKIP_INTRA_DIRMISMATCH |
- FLAG_SKIP_INTRA_BESTINTER |
- FLAG_SKIP_COMP_BESTINTRA |
- FLAG_SKIP_INTRA_LOWVAR;
+ sf->mode_search_skip_flags =
+ (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_INTRA_LOWVAR;
sf->disable_filter_search_var_thresh = 100;
sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
@@ -186,8 +188,8 @@
if (speed >= 3) {
sf->use_square_partition_only = !frame_is_intra_only(cm);
- sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED;
sf->adaptive_pred_interp_filter = 0;
sf->adaptive_mode_search = 1;
@@ -234,13 +236,14 @@
}
static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi,
- SPEED_FEATURES *sf, int speed) {
+ SPEED_FEATURES *sf,
+ int speed) {
VP9_COMMON *const cm = &cpi->common;
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
}
@@ -248,8 +251,8 @@
if (speed >= 2) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
}
@@ -264,13 +267,13 @@
}
if (speed >= 7) {
- sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ?
- 800 : 300;
+ sf->encode_breakout_thresh =
+ (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
}
}
-static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf,
- int speed, vp9e_tune_content content) {
+static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, int speed,
+ vp9e_tune_content content) {
VP9_COMMON *const cm = &cpi->common;
const int is_keyframe = cm->frame_type == KEY_FRAME;
const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
@@ -285,8 +288,8 @@
sf->quant_coeff_opt = 0;
sf->use_square_partition_only = !frame_is_intra_only(cm);
sf->less_rectangular_check = 1;
- sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
sf->use_rd_breakout = 1;
@@ -300,11 +303,11 @@
}
if (speed >= 2) {
- sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
- FLAG_SKIP_INTRA_DIRMISMATCH |
- FLAG_SKIP_INTRA_BESTINTER |
- FLAG_SKIP_COMP_BESTINTRA |
- FLAG_SKIP_INTRA_LOWVAR;
+ sf->mode_search_skip_flags =
+ (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_INTRA_LOWVAR;
sf->adaptive_pred_interp_filter = 2;
// Reference masking only enabled for 1 spatial layer, and if none of the
@@ -315,15 +318,14 @@
(cpi->external_resize == 1 ||
cpi->oxcf.resize_mode == RESIZE_DYNAMIC)) {
MV_REFERENCE_FRAME ref_frame;
- static const int flag_list[4] =
- {0, VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG};
+ static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
if (yv12 != NULL && (cpi->ref_frame_flags & flag_list[ref_frame])) {
const struct scale_factors *const scale_fac =
&cm->frame_refs[ref_frame - 1].sf;
- if (vp9_is_scaled(scale_fac))
- sf->reference_masking = 0;
+ if (vp9_is_scaled(scale_fac)) sf->reference_masking = 0;
}
}
}
@@ -360,8 +362,8 @@
sf->use_fast_coef_costing = 0;
sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX;
sf->adjust_partitioning_from_last_frame =
- cm->last_frame_type != cm->frame_type || (0 ==
- (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
+ cm->last_frame_type != cm->frame_type ||
+ (0 == (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
sf->mv.subpel_force_stop = 1;
for (i = 0; i < TX_SIZES; i++) {
sf->intra_y_mode_mask[i] = INTRA_DC_H_V;
@@ -381,11 +383,12 @@
if (speed >= 5) {
sf->use_quant_fp = !is_keyframe;
- sf->auto_min_max_partition_size = is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX
- : STRICT_NEIGHBORING_MIN_MAX;
+ sf->auto_min_max_partition_size =
+ is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX;
sf->default_max_partition_size = BLOCK_32X32;
sf->default_min_partition_size = BLOCK_8X8;
- sf->force_frame_boost = is_keyframe ||
+ sf->force_frame_boost =
+ is_keyframe ||
(frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1);
sf->max_delta_qindex = is_keyframe ? 20 : 15;
sf->partition_search_type = REFERENCE_PARTITION;
@@ -404,8 +407,7 @@
sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH;
sf->tx_size_search_method = is_keyframe ? USE_LARGESTALL : USE_TX_8X8;
sf->simple_model_rd_from_var = 1;
- if (cpi->oxcf.rc_mode == VPX_VBR)
- sf->mv.search_method = NSTEP;
+ if (cpi->oxcf.rc_mode == VPX_VBR) sf->mv.search_method = NSTEP;
if (!is_keyframe) {
int i;
@@ -562,8 +564,7 @@
sf->mode_skip_start = MAX_MODES; // Mode index at which the mode skip mask is set
sf->schedule_mode_search = 0;
sf->use_nonrd_pick_mode = 0;
- for (i = 0; i < BLOCK_SIZES; ++i)
- sf->inter_mode_mask[i] = INTER_ALL;
+ for (i = 0; i < BLOCK_SIZES; ++i) sf->inter_mode_mask[i] = INTER_ALL;
sf->max_intra_bsize = BLOCK_64X64;
sf->reuse_inter_pred_sby = 0;
// This setting only takes effect when partition_search_type is set
@@ -614,8 +615,7 @@
sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1;
for (i = 0; i < MAX_MESH_STEP; ++i) {
- sf->mesh_patterns[i].range =
- good_quality_mesh_patterns[speed][i].range;
+ sf->mesh_patterns[i].range = good_quality_mesh_patterns[speed][i].range;
sf->mesh_patterns[i].interval =
good_quality_mesh_patterns[speed][i].interval;
}
@@ -623,8 +623,7 @@
// Slow quant, dct and trellis not worthwhile for first pass
// so make sure they are always turned off.
- if (oxcf->pass == 1)
- sf->optimize_coefficients = 0;
+ if (oxcf->pass == 1) sf->optimize_coefficients = 0;
// No recode for 1 pass.
if (oxcf->pass == 0) {
--- a/vp9/encoder/vp9_speed_features.h
+++ b/vp9/encoder/vp9_speed_features.h
@@ -18,17 +18,14 @@
#endif
enum {
- INTRA_ALL = (1 << DC_PRED) |
- (1 << V_PRED) | (1 << H_PRED) |
- (1 << D45_PRED) | (1 << D135_PRED) |
- (1 << D117_PRED) | (1 << D153_PRED) |
- (1 << D207_PRED) | (1 << D63_PRED) |
- (1 << TM_PRED),
- INTRA_DC = (1 << DC_PRED),
- INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
- INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
- INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) |
- (1 << H_PRED)
+ INTRA_ALL = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED) | (1 << D45_PRED) |
+ (1 << D135_PRED) | (1 << D117_PRED) | (1 << D153_PRED) |
+ (1 << D207_PRED) | (1 << D63_PRED) | (1 << TM_PRED),
+ INTRA_DC = (1 << DC_PRED),
+ INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
+ INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
+ INTRA_DC_TM_H_V =
+ (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED)
};
enum {
@@ -42,20 +39,15 @@
};
enum {
- DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) |
- (1 << THR_COMP_LA) |
- (1 << THR_ALTR) |
- (1 << THR_GOLD) |
- (1 << THR_LAST),
+ DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+ (1 << THR_ALTR) | (1 << THR_GOLD) | (1 << THR_LAST),
- DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
+ DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
- DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
+ DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
- LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) |
- (1 << THR_COMP_LA) |
- (1 << THR_ALTR) |
- (1 << THR_GOLD)
+ LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+ (1 << THR_ALTR) | (1 << THR_GOLD)
};
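These enum values are bitmasks over the encoder's mode/reference indices: setting bit (1 << THR_x) in sf->disable_split_mask is meant to suppress split-partition search for that reference combination. A hedged sketch of the membership test such a mask supports -- the THR_* indices below are illustrative placeholders, not the encoder's real ordering:

#include <stdio.h>

/* Placeholder indices; the real THR_* constants come from the
   encoder's mode-search ordering, which is not shown in this hunk. */
enum { THR_LAST, THR_GOLD, THR_ALTR, THR_COMP_LA, THR_COMP_GA, THR_INTRA };

enum {
  DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
                            (1 << THR_ALTR) | (1 << THR_GOLD) |
                            (1 << THR_LAST),
  DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT
};

int main(void) {
  const int mask = DISABLE_ALL_INTER_SPLIT;
  /* A split search for index i proceeds only if bit i is clear. */
  printf("intra split allowed: %d\n", !(mask & (1 << THR_INTRA))); /* 1 */
  printf("last  split allowed: %d\n", !(mask & (1 << THR_LAST)));  /* 0 */
  return 0;
}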
typedef enum {
--- a/vp9/encoder/vp9_subexp.c
+++ b/vp9/encoder/vp9_subexp.c
@@ -15,22 +15,20 @@
#include "vp9/encoder/vp9_subexp.h"
static const uint8_t update_bits[255] = {
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 0,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 0,
};
#define MIN_DELP_BITS 5
@@ -48,23 +46,23 @@
static const uint8_t map_table[MAX_PROB - 1] = {
// generated by:
// map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM);
- 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33,
- 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74,
- 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88,
- 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116,
- 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
- 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
- 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
- 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171,
- 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185,
- 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199,
- 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213,
- 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
- 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
- 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
+ 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
};
v--;
m--;
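split_index itself is outside this hunk; the reconstruction below (with MODULUS_PARAM taken to be 13, inferred from the stride at which the small indices 0..19 recur in the table) reproduces the entries above, e.g. position 0 -> 20 and position 6 -> 0:

#include <assert.h>
#include <stdio.h>

#define MAX_PROB 255
#define MODULUS_PARAM 13 /* assumption: matches the 13-entry stride above */

/* Reconstructed from the table: every modulus-th slot, offset by
   modulus/2, receives the small indices 0..19; the remaining slots
   are renumbered consecutively starting at 20. */
static int split_index(int i, int n, int modulus) {
  int max1 = (n - 1 - modulus / 2) / modulus + 1;
  if (i % modulus == modulus / 2)
    i = i / modulus;
  else
    i = max1 + i - (i + modulus - modulus / 2) / modulus;
  return i;
}

int main(void) {
  /* Spot checks against the table above. */
  assert(split_index(0, MAX_PROB - 1, MODULUS_PARAM) == 20);
  assert(split_index(6, MAX_PROB - 1, MODULUS_PARAM) == 0);
  assert(split_index(19, MAX_PROB - 1, MODULUS_PARAM) == 1);
  printf("split_index reconstruction matches the spot checks\n");
  return 0;
}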
@@ -115,9 +113,8 @@
encode_term_subexp(w, delp);
}
-int vp9_prob_diff_update_savings_search(const unsigned int *ct,
- vpx_prob oldp, vpx_prob *bestp,
- vpx_prob upd) {
+int vp9_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+ vpx_prob *bestp, vpx_prob upd) {
const int old_b = cost_branch256(ct, oldp);
int bestsavings = 0;
vpx_prob newp, bestnewp = oldp;
@@ -141,8 +138,7 @@
int vp9_prob_diff_update_savings_search_model(const unsigned int *ct,
const vpx_prob oldp,
- vpx_prob *bestp,
- vpx_prob upd,
+ vpx_prob *bestp, vpx_prob upd,
int stepsize) {
int i, old_b, new_b, update_b, savings, bestsavings;
int newp;
@@ -185,8 +181,8 @@
const unsigned int ct[2]) {
const vpx_prob upd = DIFF_UPDATE_PROB;
vpx_prob newp = get_binary_prob(ct[0], ct[1]);
- const int savings = vp9_prob_diff_update_savings_search(ct, *oldp, &newp,
- upd);
+ const int savings =
+ vp9_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
assert(newp >= 1);
if (savings > 0) {
vpx_write(w, 1, upd);
--- a/vp9/encoder/vp9_subexp.h
+++ b/vp9/encoder/vp9_subexp.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_SUBEXP_H_
#define VP9_ENCODER_VP9_SUBEXP_H_
@@ -20,21 +19,18 @@
struct vpx_writer;
-void vp9_write_prob_diff_update(struct vpx_writer *w,
- vpx_prob newp, vpx_prob oldp);
+void vp9_write_prob_diff_update(struct vpx_writer *w, vpx_prob newp,
+ vpx_prob oldp);
void vp9_cond_prob_diff_update(struct vpx_writer *w, vpx_prob *oldp,
const unsigned int ct[2]);
-int vp9_prob_diff_update_savings_search(const unsigned int *ct,
- vpx_prob oldp, vpx_prob *bestp,
- vpx_prob upd);
+int vp9_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+ vpx_prob *bestp, vpx_prob upd);
-
int vp9_prob_diff_update_savings_search_model(const unsigned int *ct,
const vpx_prob oldp,
- vpx_prob *bestp,
- vpx_prob upd,
+ vpx_prob *bestp, vpx_prob upd,
int stepsize);
#ifdef __cplusplus
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -16,7 +16,7 @@
#include "vp9/encoder/vp9_extend.h"
#include "vpx_dsp/vpx_dsp_common.h"
-#define SMALL_FRAME_WIDTH 32
+#define SMALL_FRAME_WIDTH 32
#define SMALL_FRAME_HEIGHT 16
void vp9_init_layer_context(VP9_COMP *const cpi) {
@@ -36,8 +36,7 @@
svc->scaled_temp_is_alloc = 0;
svc->scaled_one_half = 0;
svc->current_superframe = 0;
- for (i = 0; i < REF_FRAMES; ++i)
- svc->ref_frame_index[i] = -1;
+ for (i = 0; i < REF_FRAMES; ++i) svc->ref_frame_index[i] = -1;
for (sl = 0; sl < oxcf->ss_number_layers; ++sl) {
cpi->svc.ext_frame_flags[sl] = 0;
cpi->svc.ext_lst_fb_idx[sl] = 0;
@@ -46,16 +45,14 @@
}
if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.pass == 2) {
- if (vpx_realloc_frame_buffer(&cpi->svc.empty_frame.img,
- SMALL_FRAME_WIDTH, SMALL_FRAME_HEIGHT,
- cpi->common.subsampling_x,
+ if (vpx_realloc_frame_buffer(&cpi->svc.empty_frame.img, SMALL_FRAME_WIDTH,
+ SMALL_FRAME_HEIGHT, cpi->common.subsampling_x,
cpi->common.subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cpi->common.use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS,
- cpi->common.byte_alignment,
- NULL, NULL, NULL))
+ cpi->common.byte_alignment, NULL, NULL, NULL))
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate empty frame for multiple frame "
"contexts");
@@ -97,10 +94,10 @@
lc->target_bandwidth = oxcf->layer_target_bitrate[layer];
lrc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
lrc->last_q[INTER_FRAME] = oxcf->best_allowed_q;
- lrc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
- lrc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
+ lrc->avg_frame_qindex[KEY_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+ lrc->avg_frame_qindex[INTER_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
if (oxcf->ss_enable_auto_arf[sl])
lc->alt_ref_idx = alt_ref_idx++;
else
@@ -108,8 +105,8 @@
lc->gold_ref_idx = INVALID_IDX;
}
- lrc->buffer_level = oxcf->starting_buffer_level_ms *
- lc->target_bandwidth / 1000;
+ lrc->buffer_level =
+ oxcf->starting_buffer_level_ms * lc->target_bandwidth / 1000;
lrc->bits_off_target = lrc->buffer_level;
// Initialize the cyclic refresh parameters. If spatial layers are used
@@ -116,8 +113,7 @@
// (i.e., ss_number_layers > 1), these need to be updated per spatial
// layer.
// Cyclic refresh is only applied on base temporal layer.
- if (oxcf->ss_number_layers > 1 &&
- tl == 0) {
+ if (oxcf->ss_number_layers > 1 && tl == 0) {
size_t last_coded_q_map_size;
size_t consec_zero_mv_size;
VP9_COMMON *const cm = &cpi->common;
@@ -125,8 +121,8 @@
CHECK_MEM_ERROR(cm, lc->map,
vpx_malloc(mi_rows * mi_cols * sizeof(*lc->map)));
memset(lc->map, 0, mi_rows * mi_cols);
- last_coded_q_map_size = mi_rows * mi_cols *
- sizeof(*lc->last_coded_q_map);
+ last_coded_q_map_size =
+ mi_rows * mi_cols * sizeof(*lc->last_coded_q_map);
CHECK_MEM_ERROR(cm, lc->last_coded_q_map,
vpx_malloc(last_coded_q_map_size));
assert(MAXQ <= 255);
@@ -140,8 +136,8 @@
}
// Still have extra buffer for base layer golden frame
- if (!(svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR)
- && alt_ref_idx < REF_FRAMES)
+ if (!(svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) &&
+ alt_ref_idx < REF_FRAMES)
svc->layer_context[0].gold_ref_idx = alt_ref_idx;
}
@@ -162,11 +158,12 @@
oxcf->layer_target_bitrate[layer];
}
- layer = LAYER_IDS_TO_IDX(sl, ((oxcf->ts_number_layers - 1) < 0 ?
- 0 : (oxcf->ts_number_layers - 1)), oxcf->ts_number_layers);
- spatial_layer_target =
- svc->layer_context[layer].target_bandwidth =
- oxcf->layer_target_bitrate[layer];
+ layer = LAYER_IDS_TO_IDX(
+ sl,
+ ((oxcf->ts_number_layers - 1) < 0 ? 0 : (oxcf->ts_number_layers - 1)),
+ oxcf->ts_number_layers);
+ spatial_layer_target = svc->layer_context[layer].target_bandwidth =
+ oxcf->layer_target_bitrate[layer];
for (tl = 0; tl < oxcf->ts_number_layers; ++tl) {
LAYER_CONTEXT *const lc =
@@ -214,8 +211,8 @@
(int64_t)(rc->optimal_buffer_level * bitrate_alloc);
lrc->maximum_buffer_size =
(int64_t)(rc->maximum_buffer_size * bitrate_alloc);
- lrc->bits_off_target = VPXMIN(lrc->bits_off_target,
- lrc->maximum_buffer_size);
+ lrc->bits_off_target =
+ VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size);
lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size);
// Update framerate-related quantities.
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) {
@@ -235,12 +232,12 @@
static LAYER_CONTEXT *get_layer_context(VP9_COMP *const cpi) {
if (is_one_pass_cbr_svc(cpi))
return &cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers + cpi->svc.temporal_layer_id];
+ cpi->svc.number_temporal_layers +
+ cpi->svc.temporal_layer_id];
else
- return (cpi->svc.number_temporal_layers > 1 &&
- cpi->oxcf.rc_mode == VPX_CBR) ?
- &cpi->svc.layer_context[cpi->svc.temporal_layer_id] :
- &cpi->svc.layer_context[cpi->svc.spatial_layer_id];
+ return (cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR)
+ ? &cpi->svc.layer_context[cpi->svc.temporal_layer_id]
+ : &cpi->svc.layer_context[cpi->svc.spatial_layer_id];
}
void vp9_update_temporal_layer_framerate(VP9_COMP *const cpi) {
@@ -250,7 +247,7 @@
RATE_CONTROL *const lrc = &lc->rc;
// Index into spatial+temporal arrays.
const int st_idx = svc->spatial_layer_id * svc->number_temporal_layers +
- svc->temporal_layer_id;
+ svc->temporal_layer_id;
const int tl = svc->temporal_layer_id;
lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[tl];
@@ -277,10 +274,11 @@
lc->framerate = framerate;
lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate);
- lrc->min_frame_bandwidth = (int)(lrc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmin_section / 100);
+ lrc->min_frame_bandwidth =
+ (int)(lrc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
lrc->max_frame_bandwidth = (int)(((int64_t)lrc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmax_section) / 100);
+ oxcf->two_pass_vbrmax_section) /
+ 100);
vp9_rc_set_gf_interval_range(cpi, lrc);
}
@@ -309,8 +307,7 @@
// For spatial-svc, allow cyclic-refresh to be applied on the spatial layers,
// for the base temporal layer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cpi->svc.number_spatial_layers > 1 &&
- cpi->svc.temporal_layer_id == 0) {
+ cpi->svc.number_spatial_layers > 1 && cpi->svc.temporal_layer_id == 0) {
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
signed char *temp = cr->map;
uint8_t *temp2 = cr->last_coded_q_map;
@@ -337,8 +334,7 @@
// For spatial-svc, allow cyclic-refresh to be applied on the spatial layers,
// for the base temporal layer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cpi->svc.number_spatial_layers > 1 &&
- cpi->svc.temporal_layer_id == 0) {
+ cpi->svc.number_spatial_layers > 1 && cpi->svc.temporal_layer_id == 0) {
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
signed char *temp = lc->map;
uint8_t *temp2 = lc->last_coded_q_map;
@@ -380,20 +376,19 @@
}
int vp9_is_upper_layer_key_frame(const VP9_COMP *const cpi) {
- return is_two_pass_svc(cpi) &&
- cpi->svc.spatial_layer_id > 0 &&
+ return is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0 &&
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers +
- cpi->svc.temporal_layer_id].is_key_frame;
+ cpi->svc.number_temporal_layers +
+ cpi->svc.temporal_layer_id]
+ .is_key_frame;
}
static void get_layer_resolution(const int width_org, const int height_org,
- const int num, const int den,
- int *width_out, int *height_out) {
+ const int num, const int den, int *width_out,
+ int *height_out) {
int w, h;
- if (width_out == NULL || height_out == NULL || den == 0)
- return;
+ if (width_out == NULL || height_out == NULL || den == 0) return;
w = width_org * num / den;
h = height_org * num / den;
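To make the num/den arithmetic concrete: a spatial layer with scaling factor 1/2 over a 1280x720 source comes out at 640x360. A self-contained copy of just the lines shown above (any rounding the full function applies lies outside this hunk):

#include <stdio.h>

/* Minimal restatement of the arithmetic shown in the hunk. */
static void layer_resolution_sketch(int width_org, int height_org,
                                    int num, int den,
                                    int *width_out, int *height_out) {
  if (width_out == NULL || height_out == NULL || den == 0) return;
  *width_out = width_org * num / den;
  *height_out = height_org * num / den;
}

int main(void) {
  int w, h;
  layer_resolution_sketch(1280, 720, 1, 2, &w, &h);
  printf("half-resolution layer: %dx%d\n", w, h); /* 640x360 */
  return 0;
}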
@@ -415,10 +410,13 @@
spatial_id = cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode;
frame_num_within_temporal_struct =
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers].current_video_frame_in_layer % 4;
+ cpi->svc.number_temporal_layers]
+ .current_video_frame_in_layer %
+ 4;
temporal_id = cpi->svc.temporal_layer_id =
- (frame_num_within_temporal_struct & 1) ? 2 :
- (frame_num_within_temporal_struct >> 1);
+ (frame_num_within_temporal_struct & 1)
+ ? 2
+ : (frame_num_within_temporal_struct >> 1);
cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame =
cpi->ext_refresh_alt_ref_frame = 0;
if (!temporal_id) {
@@ -465,7 +463,7 @@
if (spatial_id == cpi->svc.number_spatial_layers - 1) { // top layer
cpi->ext_refresh_frame_flags_pending = 1;
if (!spatial_id)
- cpi->ref_frame_flags = VP9_LAST_FLAG;
+ cpi->ref_frame_flags = VP9_LAST_FLAG;
else
cpi->ref_frame_flags = VP9_LAST_FLAG | VP9_GOLD_FLAG;
} else if (!spatial_id) {
@@ -486,7 +484,7 @@
cpi->lst_fb_idx = spatial_id - 1;
cpi->gld_fb_idx = spatial_id;
} else {
- cpi->gld_fb_idx = spatial_id - 1;
+ cpi->gld_fb_idx = spatial_id - 1;
}
} else {
cpi->gld_fb_idx = 0;
@@ -515,9 +513,11 @@
spatial_id = cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode;
temporal_id = cpi->svc.temporal_layer_id =
cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers].current_video_frame_in_layer & 1;
+ cpi->svc.number_temporal_layers]
+ .current_video_frame_in_layer &
+ 1;
cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame =
- cpi->ext_refresh_alt_ref_frame = 0;
+ cpi->ext_refresh_alt_ref_frame = 0;
if (!temporal_id) {
cpi->ext_refresh_frame_flags_pending = 1;
cpi->ext_refresh_last_frame = 1;
@@ -548,7 +548,7 @@
cpi->lst_fb_idx = spatial_id - 1;
cpi->gld_fb_idx = spatial_id;
} else {
- cpi->gld_fb_idx = spatial_id - 1;
+ cpi->gld_fb_idx = spatial_id - 1;
}
} else {
cpi->gld_fb_idx = 0;
@@ -567,8 +567,8 @@
VP9_COMP *const cpi) {
int spatial_id;
spatial_id = cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode;
- cpi->ext_refresh_last_frame =
- cpi->ext_refresh_golden_frame = cpi->ext_refresh_alt_ref_frame = 0;
+ cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame =
+ cpi->ext_refresh_alt_ref_frame = 0;
cpi->ext_refresh_frame_flags_pending = 1;
cpi->ext_refresh_last_frame = 1;
if (!spatial_id) {
@@ -586,7 +586,7 @@
cpi->lst_fb_idx = spatial_id - 1;
cpi->gld_fb_idx = spatial_id;
} else {
- cpi->gld_fb_idx = spatial_id - 1;
+ cpi->gld_fb_idx = spatial_id - 1;
}
} else {
cpi->gld_fb_idx = 0;
@@ -596,20 +596,19 @@
int vp9_one_pass_cbr_svc_start_layer(VP9_COMP *const cpi) {
int width = 0, height = 0;
LAYER_CONTEXT *lc = NULL;
- if (cpi->svc.number_spatial_layers > 1)
- cpi->svc.use_base_mv = 1;
+ if (cpi->svc.number_spatial_layers > 1) cpi->svc.use_base_mv = 1;
cpi->svc.force_zero_mode_spatial_ref = 1;
if (cpi->svc.temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0212) {
set_flags_and_fb_idx_for_temporal_mode3(cpi);
} else if (cpi->svc.temporal_layering_mode ==
- VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) {
+ VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) {
set_flags_and_fb_idx_for_temporal_mode_noLayering(cpi);
} else if (cpi->svc.temporal_layering_mode ==
- VP9E_TEMPORAL_LAYERING_MODE_0101) {
+ VP9E_TEMPORAL_LAYERING_MODE_0101) {
set_flags_and_fb_idx_for_temporal_mode2(cpi);
} else if (cpi->svc.temporal_layering_mode ==
- VP9E_TEMPORAL_LAYERING_MODE_BYPASS) {
+ VP9E_TEMPORAL_LAYERING_MODE_BYPASS) {
// In the BYPASS/flexible mode, the encoder is relying on the application
// to specify, for each spatial layer, the flags and buffer indices for the
// layering.
@@ -633,7 +632,7 @@
cpi->svc.rc_drop_superframe = 0;
lc = &cpi->svc.layer_context[cpi->svc.spatial_layer_id *
- cpi->svc.number_temporal_layers +
+ cpi->svc.number_temporal_layers +
cpi->svc.temporal_layer_id];
// Setting the worst/best_quality via the encoder control: SET_SVC_PARAMETERS,
@@ -641,12 +640,12 @@
if (cpi->svc.temporal_layering_mode != VP9E_TEMPORAL_LAYERING_MODE_BYPASS) {
RATE_CONTROL *const lrc = &lc->rc;
lrc->worst_quality = vp9_quantizer_to_qindex(lc->max_q);
- lrc->best_quality = vp9_quantizer_to_qindex(lc->min_q);
+ lrc->best_quality = vp9_quantizer_to_qindex(lc->min_q);
}
get_layer_resolution(cpi->oxcf.width, cpi->oxcf.height,
- lc->scaling_factor_num, lc->scaling_factor_den,
- &width, &height);
+ lc->scaling_factor_num, lc->scaling_factor_den, &width,
+ &height);
if (vp9_set_size_literal(cpi, width, height) != 0)
return VPX_CODEC_INVALID_PARAM;
@@ -677,8 +676,8 @@
cpi->lst_fb_idx = cpi->svc.spatial_layer_id;
if (cpi->svc.spatial_layer_id == 0)
- cpi->gld_fb_idx = (lc->gold_ref_idx >= 0) ?
- lc->gold_ref_idx : cpi->lst_fb_idx;
+ cpi->gld_fb_idx =
+ (lc->gold_ref_idx >= 0) ? lc->gold_ref_idx : cpi->lst_fb_idx;
else
cpi->gld_fb_idx = cpi->svc.spatial_layer_id - 1;
@@ -692,8 +691,7 @@
} else {
if (cpi->oxcf.ss_enable_auto_arf[cpi->svc.spatial_layer_id]) {
cpi->alt_fb_idx = lc->alt_ref_idx;
- if (!lc->has_alt_frame)
- cpi->ref_frame_flags &= (~VP9_ALT_FLAG);
+ if (!lc->has_alt_frame) cpi->ref_frame_flags &= (~VP9_ALT_FLAG);
} else {
// Find a proper alt_fb_idx for layers that don't have alt ref frame
if (cpi->svc.spatial_layer_id == 0) {
@@ -714,8 +712,8 @@
}
get_layer_resolution(cpi->oxcf.width, cpi->oxcf.height,
- lc->scaling_factor_num, lc->scaling_factor_den,
- &width, &height);
+ lc->scaling_factor_num, lc->scaling_factor_den, &width,
+ &height);
// Workaround for multiple frame contexts. In some frames we can't use prev_mi
// since its previous frame could be changed during decoding time. The idea is
@@ -740,11 +738,10 @@
cpi->common.show_frame = 0;
cpi->ref_frame_flags = 0;
cpi->common.frame_type = INTER_FRAME;
- cpi->lst_fb_idx =
- cpi->gld_fb_idx = cpi->alt_fb_idx = SMALL_FRAME_FB_IDX;
+ cpi->lst_fb_idx = cpi->gld_fb_idx = cpi->alt_fb_idx =
+ SMALL_FRAME_FB_IDX;
- if (cpi->svc.encode_intra_empty_frame != 0)
- cpi->common.intra_only = 1;
+ if (cpi->svc.encode_intra_empty_frame != 0) cpi->common.intra_only = 1;
width = SMALL_FRAME_WIDTH;
height = SMALL_FRAME_HEIGHT;
@@ -794,12 +791,9 @@
for (tl = 0; tl < oxcf->ts_number_layers; ++tl) {
int layer = LAYER_IDS_TO_IDX(sl, tl, oxcf->ts_number_layers);
LAYER_CONTEXT *const lc = &svc->layer_context[layer];
- if (lc->map)
- vpx_free(lc->map);
- if (lc->last_coded_q_map)
- vpx_free(lc->last_coded_q_map);
- if (lc->consec_zero_mv)
- vpx_free(lc->consec_zero_mv);
+ if (lc->map) vpx_free(lc->map);
+ if (lc->last_coded_q_map) vpx_free(lc->last_coded_q_map);
+ if (lc->consec_zero_mv) vpx_free(lc->consec_zero_mv);
}
}
}
@@ -820,9 +814,9 @@
set_flags_and_fb_idx_for_temporal_mode3(cpi);
} else if (svc->temporal_layering_mode ==
VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) {
- set_flags_and_fb_idx_for_temporal_mode_noLayering(cpi);
+ set_flags_and_fb_idx_for_temporal_mode_noLayering(cpi);
} else if (svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0101) {
- set_flags_and_fb_idx_for_temporal_mode2(cpi);
+ set_flags_and_fb_idx_for_temporal_mode2(cpi);
}
vp9_update_temporal_layer_framerate(cpi);
vp9_restore_layer_context(cpi);
--- a/vp9/encoder/vp9_svc_layercontext.h
+++ b/vp9/encoder/vp9_svc_layercontext.h
@@ -35,7 +35,7 @@
int is_key_frame;
int frames_from_key_frame;
FRAME_TYPE last_frame_type;
- struct lookahead_entry *alt_ref_source;
+ struct lookahead_entry *alt_ref_source;
int alt_ref_idx;
int gold_ref_idx;
int has_alt_frame;
@@ -60,11 +60,7 @@
int rc_drop_superframe;
// Workaround for multiple frame contexts
- enum {
- ENCODED = 0,
- ENCODING,
- NEED_TO_ENCODE
- }encode_empty_frame_state;
+ enum { ENCODED = 0, ENCODING, NEED_TO_ENCODE } encode_empty_frame_state;
struct lookahead_entry empty_frame;
int encode_intra_empty_frame;
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -31,18 +31,10 @@
static int fixed_divide[512];
-static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
- uint8_t *y_mb_ptr,
- uint8_t *u_mb_ptr,
- uint8_t *v_mb_ptr,
- int stride,
- int uv_block_width,
- int uv_block_height,
- int mv_row,
- int mv_col,
- uint8_t *pred,
- struct scale_factors *scale,
- int x, int y) {
+static void temporal_filter_predictors_mb_c(
+ MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
+ int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
+ uint8_t *pred, struct scale_factors *scale, int x, int y) {
const int which_mv = 0;
const MV mv = { mv_row, mv_col };
const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP_SHARP];
@@ -59,56 +51,33 @@
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp9_highbd_build_inter_predictor(y_mb_ptr, stride,
- &pred[0], 16,
- &mv,
- scale,
- 16, 16,
- which_mv,
- kernel, MV_PRECISION_Q3, x, y, xd->bd);
+ vp9_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale,
+ 16, 16, which_mv, kernel, MV_PRECISION_Q3,
+ x, y, xd->bd);
- vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride,
- &pred[256], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y, xd->bd);
+ vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+ uv_block_width, &mv, scale, uv_block_width,
+ uv_block_height, which_mv, kernel,
+ mv_precision_uv, x, y, xd->bd);
- vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride,
- &pred[512], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y, xd->bd);
+ vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+ uv_block_width, &mv, scale, uv_block_width,
+ uv_block_height, which_mv, kernel,
+ mv_precision_uv, x, y, xd->bd);
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
(void)xd;
- vp9_build_inter_predictor(y_mb_ptr, stride,
- &pred[0], 16,
- &mv,
- scale,
- 16, 16,
- which_mv,
- kernel, MV_PRECISION_Q3, x, y);
+ vp9_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+ which_mv, kernel, MV_PRECISION_Q3, x, y);
- vp9_build_inter_predictor(u_mb_ptr, uv_stride,
- &pred[256], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y);
+ vp9_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, kernel, mv_precision_uv, x, y);
- vp9_build_inter_predictor(v_mb_ptr, uv_stride,
- &pred[512], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y);
+ vp9_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, kernel, mv_precision_uv, x, y);
}
void vp9_temporal_filter_init(void) {
@@ -115,18 +84,13 @@
int i;
fixed_divide[0] = 0;
- for (i = 1; i < 512; ++i)
- fixed_divide[i] = 0x80000 / i;
+ for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i;
}
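fixed_divide is a Q19 reciprocal table (0x80000 == 1 << 19): it lets the filter's output stage turn a per-pixel division by the accumulated weight count into a multiply and shift. A sketch of that use, assuming the usual round-to-nearest add of count/2 at the call site (the call site itself is not in this hunk):

#include <assert.h>
#include <stdint.h>

static int fixed_divide_sketch[512];

static void init_fixed_divide(void) {
  int i;
  fixed_divide_sketch[0] = 0;
  for (i = 1; i < 512; ++i) fixed_divide_sketch[i] = 0x80000 / i;
}

/* (x + d/2) * floor(2^19 / d) >> 19 approximates rounded x / d; the
   operands the filter produces are small enough for this to hold. */
static unsigned int div_by_count(unsigned int accumulator, uint16_t count) {
  unsigned int v = (accumulator + (count >> 1)) *
                   (unsigned int)fixed_divide_sketch[count];
  return v >> 19;
}

int main(void) {
  init_fixed_divide();
  assert(div_by_count(4800, 6) == 800);
  assert(div_by_count(3, 3) == 1);
  return 0;
}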
-void vp9_temporal_filter_apply_c(uint8_t *frame1,
- unsigned int stride,
- uint8_t *frame2,
- unsigned int block_width,
- unsigned int block_height,
- int strength,
- int filter_weight,
- unsigned int *accumulator,
+void vp9_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+ uint8_t *frame2, unsigned int block_width,
+ unsigned int block_height, int strength,
+ int filter_weight, unsigned int *accumulator,
uint16_t *count) {
unsigned int i, j, k;
int modifier;
@@ -146,10 +110,10 @@
int row = (int)i + idy;
int col = (int)j + idx;
- if (row >= 0 && row < (int)block_height &&
- col >= 0 && col < (int)block_width) {
+ if (row >= 0 && row < (int)block_height && col >= 0 &&
+ col < (int)block_width) {
int diff = frame1[byte + idy * (int)stride + idx] -
- frame2[idy * (int)block_width + idx];
+ frame2[idy * (int)block_width + idx];
diff_sse[index] = diff * diff;
++index;
}
@@ -159,8 +123,7 @@
assert(index > 0);
modifier = 0;
- for (idx = 0; idx < 9; ++idx)
- modifier += diff_sse[idx];
+ for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
modifier *= 3;
modifier /= index;
@@ -167,11 +130,10 @@
++frame2;
- modifier += rounding;
+ modifier += rounding;
modifier >>= strength;
- if (modifier > 16)
- modifier = 16;
+ if (modifier > 16) modifier = 16;
modifier = 16 - modifier;
modifier *= filter_weight;
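Put together, the block above computes a per-pixel blending weight: average the (up to nine) squared neighborhood differences, scale by 3, round, shift by `strength`, clamp to 16, invert, then scale by the per-MB filter_weight. A compact restatement, assuming the usual rounding term 1 << (strength - 1), whose definition sits outside this hunk:

#include <stdio.h>

static int filter_modifier_sketch(const int diff_sse[9], int index,
                                  int strength, int filter_weight) {
  /* Assumed rounding term; its definition is outside this hunk. */
  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
  int modifier = 0, idx;
  for (idx = 0; idx < index; ++idx) modifier += diff_sse[idx];
  modifier *= 3;
  modifier /= index; /* average over the valid neighborhood samples */
  modifier += rounding;
  modifier >>= strength;
  if (modifier > 16) modifier = 16;
  return (16 - modifier) * filter_weight;
}

int main(void) {
  const int zeros[9] = { 0 };
  /* A perfectly matching 3x3 neighborhood keeps the full weight. */
  printf("%d\n", filter_modifier_sketch(zeros, 9, 6, 2)); /* 32 */
  return 0;
}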
@@ -187,15 +149,10 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
- unsigned int stride,
- uint8_t *frame2_8,
- unsigned int block_width,
- unsigned int block_height,
- int strength,
- int filter_weight,
- unsigned int *accumulator,
- uint16_t *count) {
+void vp9_highbd_temporal_filter_apply_c(
+ uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
+ unsigned int block_width, unsigned int block_height, int strength,
+ int filter_weight, unsigned int *accumulator, uint16_t *count) {
uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
unsigned int i, j, k;
@@ -214,10 +171,10 @@
int row = (int)i + idy;
int col = (int)j + idx;
- if (row >= 0 && row < (int)block_height &&
- col >= 0 && col < (int)block_width) {
+ if (row >= 0 && row < (int)block_height && col >= 0 &&
+ col < (int)block_width) {
int diff = frame1[byte + idy * (int)stride + idx] -
- frame2[idy * (int)block_width + idx];
+ frame2[idy * (int)block_width + idx];
diff_sse[index] = diff * diff;
++index;
}
@@ -226,8 +183,7 @@
assert(index > 0);
modifier = 0;
- for (idx = 0; idx < 9; ++idx)
- modifier += diff_sse[idx];
+ for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
modifier *= 3;
modifier /= index;
@@ -236,8 +192,7 @@
modifier += rounding;
modifier >>= strength;
- if (modifier > 16)
- modifier = 16;
+ if (modifier > 16) modifier = 16;
modifier = 16 - modifier;
modifier *= filter_weight;
@@ -268,7 +223,7 @@
uint32_t sse;
int cost_list[5];
- MV best_ref_mv1 = {0, 0};
+ MV best_ref_mv1 = { 0, 0 };
MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
@@ -295,15 +250,11 @@
mv_sf->search_method = old_search_method;
// Ignore mv costing by sending NULL pointer instead of cost array
- bestsme = cpi->find_fractional_mv_step(x, ref_mv,
- &best_ref_mv1,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[BLOCK_16X16],
- 0, mv_sf->subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- NULL, NULL,
- &distortion, &sse, NULL, 0, 0);
+ bestsme = cpi->find_fractional_mv_step(
+ x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], 0,
+ mv_sf->subpel_iters_per_step, cond_cost_list(cpi, cost_list), NULL, NULL,
+ &distortion, &sse, NULL, 0, 0);
// Restore input state
x->plane[0].src = src;
@@ -314,8 +265,7 @@
static void temporal_filter_iterate_c(VP9_COMP *cpi,
YV12_BUFFER_CONFIG **frames,
- int frame_count,
- int alt_ref_index,
+ int frame_count, int alt_ref_index,
int strength,
struct scale_factors *scale) {
int byte;
@@ -332,17 +282,17 @@
YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
uint8_t *dst1, *dst2;
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
- DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
uint8_t *predictor;
#else
- DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
#endif
const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
- const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
+ const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
// Save input state
- uint8_t* input_buffer[MAX_MB_PLANE];
+ uint8_t *input_buffer[MAX_MB_PLANE];
int i;
#if CONFIG_VP9_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -352,8 +302,7 @@
}
#endif
- for (i = 0; i < MAX_MB_PLANE; i++)
- input_buffer[i] = mbd->plane[i].pre[0].buf;
+ for (i = 0; i < MAX_MB_PLANE; i++) input_buffer[i] = mbd->plane[i].pre[0].buf;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
// Source frames are extended to 16 pixels. This is different than
@@ -368,8 +317,8 @@
// To keep the mv in play for both Y and UV planes the max that it
// can be on a border is therefore 16 - (2*VP9_INTERP_EXTEND+1).
cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
- cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16)
- + (17 - 2 * VP9_INTERP_EXTEND);
+ cpi->td.mb.mv_row_max =
+ ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VP9_INTERP_EXTEND);
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
int i, j, k;
@@ -379,15 +328,14 @@
memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
- cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
- + (17 - 2 * VP9_INTERP_EXTEND);
+ cpi->td.mb.mv_col_max =
+ ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VP9_INTERP_EXTEND);
for (frame = 0; frame < frame_count; frame++) {
- const int thresh_low = 10000;
+ const int thresh_low = 10000;
const int thresh_high = 20000;
- if (frames[frame] == NULL)
- continue;
+ if (frames[frame] == NULL) continue;
mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
@@ -396,29 +344,24 @@
filter_weight = 2;
} else {
// Find best match in this frame by MC
- int err = temporal_filter_find_matching_mb_c(cpi,
- frames[alt_ref_index]->y_buffer + mb_y_offset,
- frames[frame]->y_buffer + mb_y_offset,
- frames[frame]->y_stride);
+ int err = temporal_filter_find_matching_mb_c(
+ cpi, frames[alt_ref_index]->y_buffer + mb_y_offset,
+ frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride);
// Assign a higher weight to the matching MB if its error
// score is lower. If MC is not applied, the default behavior
// is to weight all MBs equally.
- filter_weight = err < thresh_low
- ? 2 : err < thresh_high ? 1 : 0;
+ filter_weight = err < thresh_low ? 2 : err < thresh_high ? 1 : 0;
}
if (filter_weight != 0) {
// Construct the predictors
- temporal_filter_predictors_mb_c(mbd,
- frames[frame]->y_buffer + mb_y_offset,
+ temporal_filter_predictors_mb_c(
+ mbd, frames[frame]->y_buffer + mb_y_offset,
frames[frame]->u_buffer + mb_uv_offset,
- frames[frame]->v_buffer + mb_uv_offset,
- frames[frame]->y_stride,
- mb_uv_width, mb_uv_height,
- mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
- mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
- predictor, scale,
+ frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
+ mb_uv_width, mb_uv_height, mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
mb_col * 16, mb_row * 16);
#if CONFIG_VP9_HIGHBITDEPTH
@@ -425,57 +368,45 @@
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int adj_strength = strength + 2 * (mbd->bd - 8);
// Apply the filter (YUV)
- vp9_highbd_temporal_filter_apply_c(f->y_buffer + mb_y_offset,
- f->y_stride,
- predictor, 16, 16, adj_strength,
- filter_weight,
- accumulator, count);
- vp9_highbd_temporal_filter_apply_c(f->u_buffer + mb_uv_offset,
- f->uv_stride, predictor + 256,
- mb_uv_width, mb_uv_height,
- adj_strength, filter_weight,
- accumulator + 256, count + 256);
- vp9_highbd_temporal_filter_apply_c(f->v_buffer + mb_uv_offset,
- f->uv_stride, predictor + 512,
- mb_uv_width, mb_uv_height,
- adj_strength, filter_weight,
- accumulator + 512, count + 512);
+ vp9_highbd_temporal_filter_apply_c(
+ f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
+ adj_strength, filter_weight, accumulator, count);
+ vp9_highbd_temporal_filter_apply_c(
+ f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+ mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+ accumulator + 256, count + 256);
+ vp9_highbd_temporal_filter_apply_c(
+ f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+ mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+ accumulator + 512, count + 512);
} else {
// Apply the filter (YUV)
vp9_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16,
- strength, filter_weight,
- accumulator, count);
- vp9_temporal_filter_apply_c(f->u_buffer + mb_uv_offset,
- f->uv_stride,
- predictor + 256,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 256,
- count + 256);
- vp9_temporal_filter_apply_c(f->v_buffer + mb_uv_offset,
- f->uv_stride,
- predictor + 512,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 512,
- count + 512);
+ predictor, 16, 16, strength,
+ filter_weight, accumulator, count);
+ vp9_temporal_filter_apply_c(
+ f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+ mb_uv_width, mb_uv_height, strength, filter_weight,
+ accumulator + 256, count + 256);
+ vp9_temporal_filter_apply_c(
+ f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+ mb_uv_width, mb_uv_height, strength, filter_weight,
+ accumulator + 512, count + 512);
}
#else
// Apply the filter (YUV)
// TODO(jingning): Need SIMD optimization for this.
vp9_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16,
- strength, filter_weight,
- accumulator, count);
+ predictor, 16, 16, strength,
+ filter_weight, accumulator, count);
vp9_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
- predictor + 256,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 256,
- count + 256);
+ predictor + 256, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 256, count + 256);
vp9_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
- predictor + 512,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 512,
- count + 512);
+ predictor + 512, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 512, count + 512);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
@@ -630,13 +561,11 @@
}
// Restore input state
- for (i = 0; i < MAX_MB_PLANE; i++)
- mbd->plane[i].pre[0].buf = input_buffer[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) mbd->plane[i].pre[0].buf = input_buffer[i];
}
// Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP9_COMP *cpi,
- int distance, int group_boost,
+static void adjust_arnr_filter(VP9_COMP *cpi, int distance, int group_boost,
int *arnr_frames, int *arnr_strength) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const int frames_after_arf =
@@ -647,8 +576,7 @@
// Context dependent two pass adjustment to strength.
if (oxcf->pass == 2) {
- base_strength =
- oxcf->arnr_strength + cpi->twopass.arnr_strength_adjustment;
+ base_strength = oxcf->arnr_strength + cpi->twopass.arnr_strength_adjustment;
// Clip to allowed range.
base_strength = VPXMIN(6, VPXMAX(0, base_strength));
} else {
@@ -656,17 +584,14 @@
}
// Define the forward and backwards filter limits for this arnr group.
- if (frames_fwd > frames_after_arf)
- frames_fwd = frames_after_arf;
- if (frames_fwd > distance)
- frames_fwd = distance;
+ if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+ if (frames_fwd > distance) frames_fwd = distance;
frames_bwd = frames_fwd;
// For even length filter there is one more frame backward
// than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
- if (frames_bwd < distance)
- frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
+ if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
// Set the baseline active filter size.
frames = frames_bwd + 1 + frames_fwd;
@@ -673,17 +598,16 @@
// Adjust the strength based on active max q.
if (cpi->common.current_video_frame > 1)
- q = ((int)vp9_convert_qindex_to_q(
- cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth));
+ q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+ cpi->common.bit_depth));
else
- q = ((int)vp9_convert_qindex_to_q(
- cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth));
+ q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+ cpi->common.bit_depth));
if (q > 16) {
strength = base_strength;
} else {
strength = base_strength - ((16 - q) / 2);
- if (strength < 0)
- strength = 0;
+ if (strength < 0) strength = 0;
}
// Adjust number of frames in filter and strength based on gf boost level.
@@ -719,7 +643,7 @@
int frames_to_blur_backward;
int frames_to_blur_forward;
struct scale_factors sf;
- YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
// Apply context specific adjustments to the arnr filter parameters.
adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
@@ -730,8 +654,8 @@
// Set up frame pointers; NULL indicates a frame not included in the filter.
for (frame = 0; frame < frames_to_blur; ++frame) {
const int which_buffer = start_frame - frame;
- struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
- which_buffer);
+ struct lookahead_entry *buf =
+ vp9_lookahead_peek(cpi->lookahead, which_buffer);
frames[frames_to_blur - 1 - frame] = &buf->img;
}
@@ -744,16 +668,13 @@
int frame_used = 0;
#if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(
- &sf,
- get_frame_new_buffer(cm)->y_crop_width,
+ &sf, get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height,
get_frame_new_buffer(cm)->y_crop_width,
- get_frame_new_buffer(cm)->y_crop_height,
- cm->use_highbitdepth);
+ get_frame_new_buffer(cm)->y_crop_height, cm->use_highbitdepth);
#else
vp9_setup_scale_factors_for_frame(
- &sf,
- get_frame_new_buffer(cm)->y_crop_width,
+ &sf, get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height,
get_frame_new_buffer(cm)->y_crop_width,
get_frame_new_buffer(cm)->y_crop_height);
@@ -763,14 +684,13 @@
if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
if (vpx_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
- cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
+ cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_ENC_BORDER_IN_PIXELS,
- cm->byte_alignment,
- NULL, NULL, NULL)) {
+ cm->byte_alignment, NULL, NULL, NULL)) {
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to reallocate alt_ref_buffer");
}
@@ -783,20 +703,16 @@
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
} else {
- // ARF is produced at the native frame size and resized when coded.
+// ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
- vp9_setup_scale_factors_for_frame(&sf,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- cm->use_highbitdepth);
+ vp9_setup_scale_factors_for_frame(
+ &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+ frames[0]->y_crop_width, frames[0]->y_crop_height,
+ cm->use_highbitdepth);
#else
- vp9_setup_scale_factors_for_frame(&sf,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height);
+ vp9_setup_scale_factors_for_frame(
+ &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+ frames[0]->y_crop_width, frames[0]->y_crop_height);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -24,31 +24,30 @@
#include "vp9/encoder/vp9_tokenize.h"
static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
- {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49},
- {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33},
- {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17},
- {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1},
- {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21},
- {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9},
- {8, 7}, {8, 5}, {8, 3}, {8, 1},
- {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1},
- {6, 7}, {6, 5}, {6, 3}, {6, 1}, {5, 3}, {5, 1},
- {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0},
- {1, 0}, {2, 0}, {3, 0}, {4, 0},
- {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6},
- {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14},
- {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12},
- {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24},
- {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2},
- {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16},
- {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28},
- {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40},
- {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52},
- {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62}
+ { 9, 63 }, { 9, 61 }, { 9, 59 }, { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 },
+ { 9, 49 }, { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, { 9, 37 },
+ { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, { 9, 27 }, { 9, 25 }, { 9, 23 },
+ { 9, 21 }, { 9, 19 }, { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 },
+ { 9, 7 }, { 9, 5 }, { 9, 3 }, { 9, 1 }, { 8, 31 }, { 8, 29 }, { 8, 27 },
+ { 8, 25 }, { 8, 23 }, { 8, 21 }, { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 },
+ { 8, 11 }, { 8, 9 }, { 8, 7 }, { 8, 5 }, { 8, 3 }, { 8, 1 }, { 7, 15 },
+ { 7, 13 }, { 7, 11 }, { 7, 9 }, { 7, 7 }, { 7, 5 }, { 7, 3 }, { 7, 1 },
+ { 6, 7 }, { 6, 5 }, { 6, 3 }, { 6, 1 }, { 5, 3 }, { 5, 1 }, { 4, 1 },
+ { 3, 1 }, { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 4, 0 }, { 5, 0 }, { 5, 2 }, { 6, 0 }, { 6, 2 }, { 6, 4 }, { 6, 6 },
+ { 7, 0 }, { 7, 2 }, { 7, 4 }, { 7, 6 }, { 7, 8 }, { 7, 10 }, { 7, 12 },
+ { 7, 14 }, { 8, 0 }, { 8, 2 }, { 8, 4 }, { 8, 6 }, { 8, 8 }, { 8, 10 },
+ { 8, 12 }, { 8, 14 }, { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 },
+ { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 }, { 9, 2 }, { 9, 4 }, { 9, 6 },
+ { 9, 8 }, { 9, 10 }, { 9, 12 }, { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 },
+ { 9, 22 }, { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, { 9, 34 },
+ { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
+ { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
};
-const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens +
- (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens))
- / 2;
+const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens =
+ dct_cat_lt_10_value_tokens +
+ (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
+ 2;
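The pointer arithmetic above aims vp9_dct_cat_lt_10_value_tokens at the table's middle entry, { 0, 0 }, so a signed coefficient value can index it directly: negative values land in the sign-set first half, positive values in the second half. A self-contained miniature of the same centering trick:

#include <assert.h>
#include <stdint.h>

typedef struct { int16_t token; int16_t extra; } TOKENVALUE_SKETCH;

/* Tiny centered table for values -2..2, mirroring the layout above:
   the low bit of `extra` is the sign, set on the negative side. */
static const TOKENVALUE_SKETCH small_tokens[] = {
  { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 }, { 2, 0 }
};
static const TOKENVALUE_SKETCH *centered =
    small_tokens + (sizeof(small_tokens) / sizeof(*small_tokens)) / 2;

int main(void) {
  assert(centered[0].token == 0);  /* value  0 -> zero token */
  assert(centered[1].extra == 0);  /* value +1 -> sign clear */
  assert(centered[-1].extra == 1); /* value -1 -> sign set   */
  return 0;
}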
// The corresponding costs of the extrabits for the tokens in the above table
// are stored in the table below. The values are obtained from looking up the
// entry for the specified extrabits in the table corresponding to the token
@@ -55,31 +54,24 @@
// (as defined in cost element vp9_extra_bits)
// e.g. {9, 63} maps to cat5_cost[63 >> 1], {1, 1} maps to sign_cost[1 >> 1]
static const int dct_cat_lt_10_value_cost[] = {
- 3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531,
- 3432, 3409, 3363, 3340, 3282, 3259, 3213, 3190,
- 3136, 3113, 3067, 3044, 2986, 2963, 2917, 2894,
- 2795, 2772, 2726, 2703, 2645, 2622, 2576, 2553,
- 3197, 3116, 3058, 2977, 2881, 2800,
- 2742, 2661, 2615, 2534, 2476, 2395,
- 2299, 2218, 2160, 2079,
- 2566, 2427, 2334, 2195, 2023, 1884, 1791, 1652,
- 1893, 1696, 1453, 1256, 1229, 864,
- 512, 512, 512, 512, 0,
- 512, 512, 512, 512,
- 864, 1229, 1256, 1453, 1696, 1893,
- 1652, 1791, 1884, 2023, 2195, 2334, 2427, 2566,
- 2079, 2160, 2218, 2299, 2395, 2476, 2534, 2615,
- 2661, 2742, 2800, 2881, 2977, 3058, 3116, 3197,
- 2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795,
- 2894, 2917, 2963, 2986, 3044, 3067, 3113, 3136,
- 3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432,
- 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773,
+ 3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531, 3432, 3409, 3363, 3340, 3282,
+ 3259, 3213, 3190, 3136, 3113, 3067, 3044, 2986, 2963, 2917, 2894, 2795, 2772,
+ 2726, 2703, 2645, 2622, 2576, 2553, 3197, 3116, 3058, 2977, 2881, 2800, 2742,
+ 2661, 2615, 2534, 2476, 2395, 2299, 2218, 2160, 2079, 2566, 2427, 2334, 2195,
+ 2023, 1884, 1791, 1652, 1893, 1696, 1453, 1256, 1229, 864, 512, 512, 512,
+ 512, 0, 512, 512, 512, 512, 864, 1229, 1256, 1453, 1696, 1893, 1652,
+ 1791, 1884, 2023, 2195, 2334, 2427, 2566, 2079, 2160, 2218, 2299, 2395, 2476,
+ 2534, 2615, 2661, 2742, 2800, 2881, 2977, 3058, 3116, 3197, 2553, 2576, 2622,
+ 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963, 2986, 3044, 3067, 3113, 3136,
+ 3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432, 3531, 3554, 3600, 3623, 3681,
+ 3704, 3750, 3773,
};
-const int *vp9_dct_cat_lt_10_value_cost = dct_cat_lt_10_value_cost +
- (sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost))
- / 2;
+const int *vp9_dct_cat_lt_10_value_cost =
+ dct_cat_lt_10_value_cost +
+ (sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost)) / 2;
// Array indices are identical to previously-existing CONTEXT_NODE indices
+/* clang-format off */
const vpx_tree_index vp9_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-EOB_TOKEN, 2, // 0 = EOB
-ZERO_TOKEN, 4, // 1 = ZERO
@@ -93,226 +85,229 @@
-CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 9 = CAT_THREE
-CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 10 = CAT_FIVE
};
+/* clang-format on */
-static const int16_t zero_cost[] = {0};
-static const int16_t sign_cost[1] = {512};
-static const int16_t cat1_cost[1 << 1] = {864, 1229};
-static const int16_t cat2_cost[1 << 2] = {1256, 1453, 1696, 1893};
-static const int16_t cat3_cost[1 << 3] = {1652, 1791, 1884, 2023,
- 2195, 2334, 2427, 2566};
-static const int16_t cat4_cost[1 << 4] = {2079, 2160, 2218, 2299, 2395, 2476,
- 2534, 2615, 2661, 2742, 2800, 2881,
- 2977, 3058, 3116, 3197};
+static const int16_t zero_cost[] = { 0 };
+static const int16_t sign_cost[1] = { 512 };
+static const int16_t cat1_cost[1 << 1] = { 864, 1229 };
+static const int16_t cat2_cost[1 << 2] = { 1256, 1453, 1696, 1893 };
+static const int16_t cat3_cost[1 << 3] = { 1652, 1791, 1884, 2023,
+ 2195, 2334, 2427, 2566 };
+static const int16_t cat4_cost[1 << 4] = { 2079, 2160, 2218, 2299, 2395, 2476,
+ 2534, 2615, 2661, 2742, 2800, 2881,
+ 2977, 3058, 3116, 3197 };
static const int16_t cat5_cost[1 << 5] = {
- 2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963,
- 2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
- 3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773};
+ 2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963,
+ 2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
+ 3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
+};
const int16_t vp9_cat6_low_cost[256] = {
- 3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552,
- 3574, 3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763,
- 3810, 3822, 3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008,
- 4030, 4042, 4053, 4065, 4112, 4124, 4135, 4147, 4169, 4181, 4192, 4204,
- 4266, 4278, 4289, 4301, 4323, 4335, 4346, 4358, 4405, 4417, 4428, 4440,
- 4462, 4474, 4485, 4497, 4253, 4265, 4276, 4288, 4310, 4322, 4333, 4345,
- 4392, 4404, 4415, 4427, 4449, 4461, 4472, 4484, 4546, 4558, 4569, 4581,
- 4603, 4615, 4626, 4638, 4685, 4697, 4708, 4720, 4742, 4754, 4765, 4777,
- 4848, 4860, 4871, 4883, 4905, 4917, 4928, 4940, 4987, 4999, 5010, 5022,
- 5044, 5056, 5067, 5079, 5141, 5153, 5164, 5176, 5198, 5210, 5221, 5233,
- 5280, 5292, 5303, 5315, 5337, 5349, 5360, 5372, 4988, 5000, 5011, 5023,
- 5045, 5057, 5068, 5080, 5127, 5139, 5150, 5162, 5184, 5196, 5207, 5219,
- 5281, 5293, 5304, 5316, 5338, 5350, 5361, 5373, 5420, 5432, 5443, 5455,
- 5477, 5489, 5500, 5512, 5583, 5595, 5606, 5618, 5640, 5652, 5663, 5675,
- 5722, 5734, 5745, 5757, 5779, 5791, 5802, 5814, 5876, 5888, 5899, 5911,
- 5933, 5945, 5956, 5968, 6015, 6027, 6038, 6050, 6072, 6084, 6095, 6107,
- 5863, 5875, 5886, 5898, 5920, 5932, 5943, 5955, 6002, 6014, 6025, 6037,
- 6059, 6071, 6082, 6094, 6156, 6168, 6179, 6191, 6213, 6225, 6236, 6248,
- 6295, 6307, 6318, 6330, 6352, 6364, 6375, 6387, 6458, 6470, 6481, 6493,
- 6515, 6527, 6538, 6550, 6597, 6609, 6620, 6632, 6654, 6666, 6677, 6689,
- 6751, 6763, 6774, 6786, 6808, 6820, 6831, 6843, 6890, 6902, 6913, 6925,
- 6947, 6959, 6970, 6982};
+ 3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
+ 3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
+ 3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
+ 4065, 4112, 4124, 4135, 4147, 4169, 4181, 4192, 4204, 4266, 4278, 4289, 4301,
+ 4323, 4335, 4346, 4358, 4405, 4417, 4428, 4440, 4462, 4474, 4485, 4497, 4253,
+ 4265, 4276, 4288, 4310, 4322, 4333, 4345, 4392, 4404, 4415, 4427, 4449, 4461,
+ 4472, 4484, 4546, 4558, 4569, 4581, 4603, 4615, 4626, 4638, 4685, 4697, 4708,
+ 4720, 4742, 4754, 4765, 4777, 4848, 4860, 4871, 4883, 4905, 4917, 4928, 4940,
+ 4987, 4999, 5010, 5022, 5044, 5056, 5067, 5079, 5141, 5153, 5164, 5176, 5198,
+ 5210, 5221, 5233, 5280, 5292, 5303, 5315, 5337, 5349, 5360, 5372, 4988, 5000,
+ 5011, 5023, 5045, 5057, 5068, 5080, 5127, 5139, 5150, 5162, 5184, 5196, 5207,
+ 5219, 5281, 5293, 5304, 5316, 5338, 5350, 5361, 5373, 5420, 5432, 5443, 5455,
+ 5477, 5489, 5500, 5512, 5583, 5595, 5606, 5618, 5640, 5652, 5663, 5675, 5722,
+ 5734, 5745, 5757, 5779, 5791, 5802, 5814, 5876, 5888, 5899, 5911, 5933, 5945,
+ 5956, 5968, 6015, 6027, 6038, 6050, 6072, 6084, 6095, 6107, 5863, 5875, 5886,
+ 5898, 5920, 5932, 5943, 5955, 6002, 6014, 6025, 6037, 6059, 6071, 6082, 6094,
+ 6156, 6168, 6179, 6191, 6213, 6225, 6236, 6248, 6295, 6307, 6318, 6330, 6352,
+ 6364, 6375, 6387, 6458, 6470, 6481, 6493, 6515, 6527, 6538, 6550, 6597, 6609,
+ 6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
+ 6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
+};
const int vp9_cat6_high_cost[64] = {
- 88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305,
- 8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889,
- 9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666,
- 5829, 6305, 8468, 6726, 8889, 9365, 11528, 7244, 9407, 9883, 12046,
- 10304, 12467, 12943, 15106, 7244, 9407, 9883, 12046, 10304, 12467, 12943,
- 15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684};
+ 88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305,
+ 8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889,
+ 9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666,
+ 5829, 6305, 8468, 6726, 8889, 9365, 11528, 7244, 9407, 9883, 12046,
+ 10304, 12467, 12943, 15106, 7244, 9407, 9883, 12046, 10304, 12467, 12943,
+ 15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
+};
#if CONFIG_VP9_HIGHBITDEPTH
const int vp9_cat6_high10_high_cost[256] = {
- 94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311,
- 8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895,
- 9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672,
- 5835, 6311, 8474, 6732, 8895, 9371, 11534, 7250, 9413, 9889, 12052,
- 10310, 12473, 12949, 15112, 7250, 9413, 9889, 12052, 10310, 12473, 12949,
- 15112, 10828, 12991, 13467, 15630, 13888, 16051, 16527, 18690, 4187, 6350,
- 6826, 8989, 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825,
- 12988, 13464, 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627,
- 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404,
- 12567, 10825, 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566,
- 17042, 19205, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921,
- 17084, 17560, 19723, 17981, 20144, 20620, 22783, 4187, 6350, 6826, 8989,
- 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, 12988, 13464,
- 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, 11343, 13506,
- 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, 12567, 10825,
- 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205,
- 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, 17084, 17560,
- 19723, 17981, 20144, 20620, 22783, 8280, 10443, 10919, 13082, 11340, 13503,
- 13979, 16142, 11858, 14021, 14497, 16660, 14918, 17081, 17557, 19720, 11858,
- 14021, 14497, 16660, 14918, 17081, 17557, 19720, 15436, 17599, 18075, 20238,
- 18496, 20659, 21135, 23298, 11858, 14021, 14497, 16660, 14918, 17081, 17557,
- 19720, 15436, 17599, 18075, 20238, 18496, 20659, 21135, 23298, 15436, 17599,
- 18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
- 24237, 24713, 26876};
+ 94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311,
+ 8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895,
+ 9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672,
+ 5835, 6311, 8474, 6732, 8895, 9371, 11534, 7250, 9413, 9889, 12052,
+ 10310, 12473, 12949, 15112, 7250, 9413, 9889, 12052, 10310, 12473, 12949,
+ 15112, 10828, 12991, 13467, 15630, 13888, 16051, 16527, 18690, 4187, 6350,
+ 6826, 8989, 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825,
+ 12988, 13464, 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627,
+ 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404,
+ 12567, 10825, 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566,
+ 17042, 19205, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921,
+ 17084, 17560, 19723, 17981, 20144, 20620, 22783, 4187, 6350, 6826, 8989,
+ 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, 12988, 13464,
+ 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, 11343, 13506,
+ 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, 12567, 10825,
+ 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205,
+ 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, 17084, 17560,
+ 19723, 17981, 20144, 20620, 22783, 8280, 10443, 10919, 13082, 11340, 13503,
+ 13979, 16142, 11858, 14021, 14497, 16660, 14918, 17081, 17557, 19720, 11858,
+ 14021, 14497, 16660, 14918, 17081, 17557, 19720, 15436, 17599, 18075, 20238,
+ 18496, 20659, 21135, 23298, 11858, 14021, 14497, 16660, 14918, 17081, 17557,
+ 19720, 15436, 17599, 18075, 20238, 18496, 20659, 21135, 23298, 15436, 17599,
+ 18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
+ 24237, 24713, 26876
+};
const int vp9_cat6_high12_high_cost[1024] = {
- 100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317,
- 8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901,
- 9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678,
- 5841, 6317, 8480, 6738, 8901, 9377, 11540, 7256, 9419, 9895, 12058,
- 10316, 12479, 12955, 15118, 7256, 9419, 9895, 12058, 10316, 12479, 12955,
- 15118, 10834, 12997, 13473, 15636, 13894, 16057, 16533, 18696, 4193, 6356,
- 6832, 8995, 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831,
- 12994, 13470, 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633,
- 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410,
- 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
- 17048, 19211, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927,
- 17090, 17566, 19729, 17987, 20150, 20626, 22789, 4193, 6356, 6832, 8995,
- 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470,
- 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512,
- 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831,
- 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211,
- 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566,
- 19729, 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509,
- 13985, 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864,
- 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244,
- 18502, 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
- 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605,
- 18081, 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080,
- 24243, 24719, 26882, 4193, 6356, 6832, 8995, 7253, 9416, 9892, 12055,
- 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 7771, 9934, 10410,
- 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
- 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349,
- 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, 13512, 13988, 16151,
- 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, 17987, 20150, 20626,
- 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027,
- 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924,
- 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
- 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081,
- 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665,
- 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 8286,
- 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666,
- 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
- 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027,
- 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
- 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
- 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018,
- 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180,
- 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535,
- 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759,
- 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
- 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276,
- 25752, 27915, 26173, 28336, 28812, 30975, 4193, 6356, 6832, 8995, 7253,
- 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633,
- 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988,
- 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994,
- 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349,
- 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729,
- 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985,
- 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027,
- 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
- 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
- 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081,
- 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243,
- 24719, 26882, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864,
- 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666,
- 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141,
- 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605,
- 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502,
- 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882,
- 12379, 14542, 15018, 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596,
- 20759, 19017, 21180, 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180,
- 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957,
- 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
- 22595, 24758, 25234, 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
- 27397, 23113, 25276, 25752, 27915, 26173, 28336, 28812, 30975, 8286, 10449,
- 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, 14924,
- 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
- 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, 14503,
- 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665,
- 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 19020,
- 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, 17181,
- 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
- 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698,
- 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017,
- 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397,
- 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752,
- 27915, 26173, 28336, 28812, 30975, 12379, 14542, 15018, 17181, 15439, 17602,
- 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 15957,
- 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
- 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
- 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 19535, 21698,
- 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, 27915, 26173,
- 28336, 28812, 30975, 16472, 18635, 19111, 21274, 19532, 21695, 22171, 24334,
- 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 20050, 22213, 22689,
- 24852, 23110, 25273, 25749, 27912, 23628, 25791, 26267, 28430, 26688, 28851,
- 29327, 31490, 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 23628,
- 25791, 26267, 28430, 26688, 28851, 29327, 31490, 23628, 25791, 26267, 28430,
- 26688, 28851, 29327, 31490, 27206, 29369, 29845, 32008, 30266, 32429, 32905,
- 35068};
+ 100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317,
+ 8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901,
+ 9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678,
+ 5841, 6317, 8480, 6738, 8901, 9377, 11540, 7256, 9419, 9895, 12058,
+ 10316, 12479, 12955, 15118, 7256, 9419, 9895, 12058, 10316, 12479, 12955,
+ 15118, 10834, 12997, 13473, 15636, 13894, 16057, 16533, 18696, 4193, 6356,
+ 6832, 8995, 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831,
+ 12994, 13470, 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633,
+ 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410,
+ 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
+ 17048, 19211, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927,
+ 17090, 17566, 19729, 17987, 20150, 20626, 22789, 4193, 6356, 6832, 8995,
+ 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470,
+ 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512,
+ 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831,
+ 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211,
+ 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566,
+ 19729, 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509,
+ 13985, 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864,
+ 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244,
+ 18502, 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
+ 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605,
+ 18081, 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080,
+ 24243, 24719, 26882, 4193, 6356, 6832, 8995, 7253, 9416, 9892, 12055,
+ 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 7771, 9934, 10410,
+ 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
+ 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349,
+ 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, 13512, 13988, 16151,
+ 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, 17987, 20150, 20626,
+ 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027,
+ 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924,
+ 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
+ 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081,
+ 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665,
+ 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 8286,
+ 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666,
+ 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
+ 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027,
+ 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
+ 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
+ 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018,
+ 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180,
+ 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535,
+ 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759,
+ 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
+ 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276,
+ 25752, 27915, 26173, 28336, 28812, 30975, 4193, 6356, 6832, 8995, 7253,
+ 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633,
+ 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988,
+ 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994,
+ 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349,
+ 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729,
+ 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985,
+ 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027,
+ 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
+ 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
+ 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081,
+ 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243,
+ 24719, 26882, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864,
+ 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666,
+ 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141,
+ 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605,
+ 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502,
+ 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882,
+ 12379, 14542, 15018, 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596,
+ 20759, 19017, 21180, 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180,
+ 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957,
+ 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
+ 22595, 24758, 25234, 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
+ 27397, 23113, 25276, 25752, 27915, 26173, 28336, 28812, 30975, 8286, 10449,
+ 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, 14924,
+ 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
+ 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, 14503,
+ 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665,
+ 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 19020,
+ 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, 17181,
+ 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
+ 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698,
+ 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017,
+ 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397,
+ 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752,
+ 27915, 26173, 28336, 28812, 30975, 12379, 14542, 15018, 17181, 15439, 17602,
+ 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 15957,
+ 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
+ 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
+ 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 19535, 21698,
+ 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, 27915, 26173,
+ 28336, 28812, 30975, 16472, 18635, 19111, 21274, 19532, 21695, 22171, 24334,
+ 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 20050, 22213, 22689,
+ 24852, 23110, 25273, 25749, 27912, 23628, 25791, 26267, 28430, 26688, 28851,
+ 29327, 31490, 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 23628,
+ 25791, 26267, 28430, 26688, 28851, 29327, 31490, 23628, 25791, 26267, 28430,
+ 26688, 28851, 29327, 31490, 27206, 29369, 29845, 32008, 30266, 32429, 32905,
+ 35068
+};
#endif
const vp9_extra_bit vp9_extra_bits[ENTROPY_TOKENS] = {
- {0, 0, 0, zero_cost}, // ZERO_TOKEN
- {0, 0, 1, sign_cost}, // ONE_TOKEN
- {0, 0, 2, sign_cost}, // TWO_TOKEN
- {0, 0, 3, sign_cost}, // THREE_TOKEN
- {0, 0, 4, sign_cost}, // FOUR_TOKEN
- {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CATEGORY1_TOKEN
- {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CATEGORY2_TOKEN
- {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CATEGORY3_TOKEN
- {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CATEGORY4_TOKEN
- {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CATEGORY5_TOKEN
- {vp9_cat6_prob, 14, CAT6_MIN_VAL, 0}, // CATEGORY6_TOKEN
- {0, 0, 0, zero_cost} // EOB_TOKEN
+ { 0, 0, 0, zero_cost }, // ZERO_TOKEN
+ { 0, 0, 1, sign_cost }, // ONE_TOKEN
+ { 0, 0, 2, sign_cost }, // TWO_TOKEN
+ { 0, 0, 3, sign_cost }, // THREE_TOKEN
+ { 0, 0, 4, sign_cost }, // FOUR_TOKEN
+ { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
+ { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
+ { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
+ { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
+ { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
+ { vp9_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
+ { 0, 0, 0, zero_cost } // EOB_TOKEN
};
#if CONFIG_VP9_HIGHBITDEPTH
const vp9_extra_bit vp9_extra_bits_high10[ENTROPY_TOKENS] = {
- {0, 0, 0, zero_cost}, // ZERO
- {0, 0, 1, sign_cost}, // ONE
- {0, 0, 2, sign_cost}, // TWO
- {0, 0, 3, sign_cost}, // THREE
- {0, 0, 4, sign_cost}, // FOUR
- {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
- {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
- {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
- {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
- {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
- {vp9_cat6_prob_high12 + 2, 16, CAT6_MIN_VAL, 0}, // CAT6
- {0, 0, 0, zero_cost} // EOB
+ { 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 4, sign_cost }, // FOUR
+ { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { vp9_cat6_prob_high12 + 2, 16, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, zero_cost } // EOB
};
const vp9_extra_bit vp9_extra_bits_high12[ENTROPY_TOKENS] = {
- {0, 0, 0, zero_cost}, // ZERO
- {0, 0, 1, sign_cost}, // ONE
- {0, 0, 2, sign_cost}, // TWO
- {0, 0, 3, sign_cost}, // THREE
- {0, 0, 4, sign_cost}, // FOUR
- {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
- {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
- {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
- {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
- {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
- {vp9_cat6_prob_high12, 18, CAT6_MIN_VAL, 0}, // CAT6
- {0, 0, 0, zero_cost} // EOB
+ { 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 4, sign_cost }, // FOUR
+ { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { vp9_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, zero_cost } // EOB
};
#endif
const struct vp9_token vp9_coef_encodings[ENTROPY_TOKENS] = {
- {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7},
- {125, 7}, {126, 7}, {127, 7}, {0, 1}
+ { 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 },
+ { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
};
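The { value, len } pairs above are derived from vp9_coef_tree rather than maintained by hand; vp9_tokens_from_tree (declared in vp9_treewriter.h below) fills them in. A hedged sketch of that derivation, appending one bit per branch taken (types simplified, tree layout as sketched earlier):

struct token_sketch { int value; int len; };

static void tree_to_tokens(const int8_t *tree, int i, int value, int len,
                           struct token_sketch *out) {
  int b;
  for (b = 0; b < 2; ++b) {
    const int8_t j = tree[i + b];
    const int v = (value << 1) | b; /* append this branch's bit */
    if (j <= 0) { /* leaf: -j is the token index */
      out[-j].value = v;
      out[-j].len = len + 1;
    } else {
      tree_to_tokens(tree, j, v, len + 1, out);
    }
  }
}

Seeding with tree_to_tokens(tree, 0, 0, 0, out) reproduces, e.g., { 0, 1 } for EOB_TOKEN and { 2, 2 } for ZERO_TOKEN from the table above.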
-
struct tokenize_b_args {
VP9_COMP *cpi;
ThreadData *td;
@@ -320,16 +315,15 @@
};
static void set_entropy_context_b(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct tokenize_b_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
+ struct tokenize_b_args *const args = arg;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
- vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0,
- col, row);
+ vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, col, row);
}
static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
@@ -344,8 +338,7 @@
static INLINE void add_token_no_extra(TOKENEXTRA **t,
const vpx_prob *context_tree,
- int16_t token,
- unsigned int *counts) {
+ int16_t token, unsigned int *counts) {
(*t)->context_tree = context_tree;
(*t)->token = token;
(*t)++;
@@ -353,9 +346,8 @@
}
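Both add_token variants share one convention: the write cursor is passed by address, stored through, and post-incremented, so a run of calls builds the token stream in order and tokenize_b can publish the advanced cursor back through *tp at the end. A minimal sketch of the idiom with illustrative types:

struct rec_sketch { int token; };

static void append_rec(struct rec_sketch **cursor, int token) {
  (*cursor)->token = token;
  (*cursor)++; /* leave the cursor one past the record just written */
}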
static void tokenize_b(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct tokenize_b_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+ struct tokenize_b_args *const args = arg;
VP9_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
@@ -367,7 +359,7 @@
MODE_INFO *mi = xd->mi[0];
int pt; /* near block/prev token context index */
int c;
- TOKENEXTRA *t = *tp; /* store tokens starting here */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
int eob = p->eobs[block];
const PLANE_TYPE type = get_plane_type(plane);
const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
@@ -408,8 +400,7 @@
vp9_get_token_extra(v, &token, &extra);
- add_token(&t, coef_probs[band[c]][pt], token, extra,
- counts[band[c]][pt]);
+ add_token(&t, coef_probs[band[c]][pt], token, extra, counts[band[c]][pt]);
token_cache[scan[c]] = vp9_pt_energy_class[token];
++c;
@@ -432,8 +423,7 @@
};
static void is_skippable(int plane, int block, int row, int col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *argv) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) {
struct is_skippable_args *args = argv;
(void)plane;
(void)plane_bsize;
@@ -447,7 +437,7 @@
// vp9_foreach_transform_block() and simplify is_skippable().
int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
- struct is_skippable_args args = {x->plane[plane].eobs, &result};
+ struct is_skippable_args args = { x->plane[plane].eobs, &result };
vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
&args);
return result;
@@ -458,28 +448,28 @@
void *argv) {
struct is_skippable_args *args = argv;
int eobs = (tx_size == TX_4X4) ? 3 : 10;
- (void) plane;
- (void) plane_bsize;
- (void) row;
- (void) col;
+ (void)plane;
+ (void)plane_bsize;
+ (void)row;
+ (void)col;
*(args->skippable) |= (args->eobs[block] > eobs);
}
int vp9_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 0;
- struct is_skippable_args args = {x->plane[plane].eobs, &result};
+ struct is_skippable_args args = { x->plane[plane].eobs, &result };
vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
has_high_freq_coeff, &args);
return result;
}
-void vp9_tokenize_sb(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- int dry_run, int seg_skip, BLOCK_SIZE bsize) {
+void vp9_tokenize_sb(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t, int dry_run,
+ int seg_skip, BLOCK_SIZE bsize) {
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mi = xd->mi[0];
const int ctx = vp9_get_skip_context(xd);
- struct tokenize_b_args arg = {cpi, td, t};
+ struct tokenize_b_args arg = { cpi, td, t };
if (seg_skip) {
assert(mi->skip);
@@ -486,8 +476,7 @@
}
if (mi->skip) {
- if (!dry_run && !seg_skip)
- ++td->counts->skip[ctx][1];
+ if (!dry_run && !seg_skip) ++td->counts->skip[ctx][1];
reset_skip_context(xd, bsize);
return;
}
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -20,15 +20,14 @@
extern "C" {
#endif
-#define EOSB_TOKEN 127 // Not signalled, encoder only
+#define EOSB_TOKEN 127 // Not signalled, encoder only
#if CONFIG_VP9_HIGHBITDEPTH
- typedef int32_t EXTRABIT;
+typedef int32_t EXTRABIT;
#else
- typedef int16_t EXTRABIT;
+typedef int16_t EXTRABIT;
#endif
-
typedef struct {
int16_t token;
EXTRABIT extra;
@@ -84,19 +83,19 @@
const int *cat6_high_table) {
if (token != CATEGORY6_TOKEN)
return vp9_extra_bits[token].cost[extrabits >> 1];
- return vp9_cat6_low_cost[(extrabits >> 1) & 0xff]
- + cat6_high_table[extrabits >> 9];
+ return vp9_cat6_low_cost[(extrabits >> 1) & 0xff] +
+ cat6_high_table[extrabits >> 9];
}
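A worked instance of the CAT6 split above (extrabits value illustrative): the sign travels in bit 0, so extrabits >> 1 is the magnitude field, whose low 8 bits select the low-cost entry while the remaining high bits select the per-bit-depth high-cost entry.

/* extrabits == 0x2A5: sign bit 1, magnitude field 0x2A5 >> 1 == 0x152.
   low index:  (0x2A5 >> 1) & 0xff == 0x52
   high index:  0x2A5 >> 9         == 0x1   (same as magnitude >> 8)
   cost == vp9_cat6_low_cost[0x52] + cat6_high_table[0x1] */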
#if CONFIG_VP9_HIGHBITDEPTH
-static INLINE const int* vp9_get_high_cost_table(int bit_depth) {
+static INLINE const int *vp9_get_high_cost_table(int bit_depth) {
return bit_depth == 8 ? vp9_cat6_high_cost
- : (bit_depth == 10 ? vp9_cat6_high10_high_cost :
- vp9_cat6_high12_high_cost);
+ : (bit_depth == 10 ? vp9_cat6_high10_high_cost
+ : vp9_cat6_high12_high_cost);
}
#else
-static INLINE const int* vp9_get_high_cost_table(int bit_depth) {
- (void) bit_depth;
+static INLINE const int *vp9_get_high_cost_table(int bit_depth) {
+ (void)bit_depth;
return vp9_cat6_high_cost;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -114,8 +113,7 @@
*extra = vp9_dct_cat_lt_10_value_tokens[v].extra;
}
static INLINE int16_t vp9_get_token(int v) {
- if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL)
- return 10;
+ if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
return vp9_dct_cat_lt_10_value_tokens[v].token;
}
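The two inlines above pair up: magnitudes below the CAT6 threshold resolve through the midpoint-indexed token table (a negative v is a legal index there), while anything at or past CAT6_MIN_VAL short-circuits to 10, the CATEGORY6_TOKEN index. A hedged call sketch:

int16_t token;
EXTRABIT extra;
/* -9 should land in a small category (CAT2, assuming the usual minimum of
   7), so this reads dct_cat_lt_10_value_tokens[-9] via the midpoint
   pointer. */
vp9_get_token_extra(-9, &token, &extra);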
--- a/vp9/encoder/vp9_treewriter.h
+++ b/vp9/encoder/vp9_treewriter.h
@@ -18,8 +18,8 @@
#endif
void vp9_tree_probs_from_distribution(vpx_tree tree,
- unsigned int branch_ct[ /* n - 1 */ ][2],
- const unsigned int num_events[ /* n */ ]);
+ unsigned int branch_ct[/* n - 1 */][2],
+ const unsigned int num_events[/* n */]);
struct vp9_token {
int value;
@@ -26,7 +26,7 @@
int len;
};
-void vp9_tokens_from_tree(struct vp9_token*, const vpx_tree_index *);
+void vp9_tokens_from_tree(struct vp9_token *, const vpx_tree_index *);
static INLINE void vp9_write_tree(vpx_writer *w, const vpx_tree_index *tree,
const vpx_prob *probs, int bits, int len,
--- a/vp9/encoder/x86/vp9_dct_intrin_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_intrin_sse2.c
@@ -78,8 +78,8 @@
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
__m128i u[4], v[4];
- u[0]=_mm_unpacklo_epi16(in[0], in[1]);
- u[1]=_mm_unpacklo_epi16(in[3], in[2]);
+ u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+ u[1] = _mm_unpacklo_epi16(in[3], in[2]);
v[0] = _mm_add_epi16(u[0], u[1]);
v[1] = _mm_sub_epi16(u[0], u[1]);
@@ -151,14 +151,12 @@
transpose_4x4(in);
}
-void vp9_fht4x4_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[4];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct4x4_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_4x4(input, in, stride);
fadst4_sse2(in);
@@ -177,21 +175,18 @@
fadst4_sse2(in);
write_buffer_4x4(output, in);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+ int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
__m128i zero;
int pass;
// Constants
@@ -208,14 +203,14 @@
const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
// Load input
- __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
- __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
- __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
- __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
- __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
- __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
- __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
- __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
__m128i *in[8];
int index = 0;
@@ -469,9 +464,9 @@
// Setup global values
{
- round = _mm_load_si128((const __m128i*)round_ptr);
- quant = _mm_load_si128((const __m128i*)quant_ptr);
- dequant = _mm_load_si128((const __m128i*)dequant_ptr);
+ round = _mm_load_si128((const __m128i *)round_ptr);
+ quant = _mm_load_si128((const __m128i *)quant_ptr);
+ dequant = _mm_load_si128((const __m128i *)dequant_ptr);
}
{
@@ -503,15 +498,15 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
dequant = _mm_unpackhi_epi64(dequant, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -524,8 +519,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
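The "add one to convert" comment leans on a mask trick used throughout these kernels: each nzero_coeff lane is all-ones (numerically -1) exactly where the quantized coefficient is nonzero, so subtracting the mask increments only those lanes, turning 0-based scan indices into counts that a later horizontal max reduces to the eob. One lane, scalar:

static int16_t index_to_count(int16_t iscan, int16_t nz_mask) {
  return iscan - nz_mask; /* iscan + 1 where nz_mask == -1, else unchanged */
}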
@@ -568,14 +563,14 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -588,8 +583,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -615,10 +610,10 @@
}
} else {
do {
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
n_coeffs += 8 * 2;
} while (n_coeffs < 0);
*eob_ptr = 0;
@@ -628,14 +623,14 @@
// load 8x8 array
static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
int stride) {
- in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
- in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
- in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
- in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
- in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
- in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
- in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
- in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
in[0] = _mm_slli_epi16(in[0], 2);
in[1] = _mm_slli_epi16(in[1], 2);
@@ -930,14 +925,14 @@
__m128i in0, in1, in2, in3, in4, in5, in6, in7;
// properly aligned for butterfly input
- in0 = in[7];
- in1 = in[0];
- in2 = in[5];
- in3 = in[2];
- in4 = in[3];
- in5 = in[4];
- in6 = in[1];
- in7 = in[6];
+ in0 = in[7];
+ in1 = in[0];
+ in2 = in[5];
+ in3 = in[2];
+ in4 = in[3];
+ in5 = in[4];
+ in6 = in[1];
+ in7 = in[6];
// column transformation
// stage 1
@@ -1135,14 +1130,12 @@
array_transpose_8x8(in, in);
}
-void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[8];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct8x8_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_8x8(input, in, stride);
fadst8_sse2(in);
@@ -1164,13 +1157,11 @@
right_shift_8x8(in, 1);
write_buffer_8x8(output, in, 8);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
-static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *in0,
__m128i *in1, int stride) {
// load first 8 columns
load_buffer_8x8(input, in0, stride);
@@ -1530,13 +1521,13 @@
v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
- in[1] = _mm_packs_epi32(v[0], v[1]);
- in[9] = _mm_packs_epi32(v[2], v[3]);
- in[5] = _mm_packs_epi32(v[4], v[5]);
+ in[1] = _mm_packs_epi32(v[0], v[1]);
+ in[9] = _mm_packs_epi32(v[2], v[3]);
+ in[5] = _mm_packs_epi32(v[4], v[5]);
in[13] = _mm_packs_epi32(v[6], v[7]);
- in[3] = _mm_packs_epi32(v[8], v[9]);
+ in[3] = _mm_packs_epi32(v[8], v[9]);
in[11] = _mm_packs_epi32(v[10], v[11]);
- in[7] = _mm_packs_epi32(v[12], v[13]);
+ in[7] = _mm_packs_epi32(v[12], v[13]);
in[15] = _mm_packs_epi32(v[14], v[15]);
}
@@ -2022,14 +2013,12 @@
array_transpose_16x16(in0, in1);
}
-void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in0[16], in1[16];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct16x16_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_16x16(input, in0, in1, stride);
fadst16_sse2(in0, in1);
@@ -2051,8 +2040,6 @@
fadst16_sse2(in0, in1);
write_buffer_16x16(output, in0, in1, 16);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
--- a/vp9/encoder/x86/vp9_dct_ssse3.c
+++ b/vp9/encoder/x86/vp9_dct_ssse3.c
@@ -15,16 +15,12 @@
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
-void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr,
- int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+void vp9_fdct8x8_quant_ssse3(
+ const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+ const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+ int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
__m128i zero;
int pass;
// Constants
@@ -42,14 +38,14 @@
const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
// Load input
- __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
- __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
- __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
- __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
- __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
- __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
- __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
- __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
__m128i *in[8];
int index = 0;
@@ -298,9 +294,9 @@
// Setup global values
{
- round = _mm_load_si128((const __m128i*)round_ptr);
- quant = _mm_load_si128((const __m128i*)quant_ptr);
- dequant = _mm_load_si128((const __m128i*)dequant_ptr);
+ round = _mm_load_si128((const __m128i *)round_ptr);
+ quant = _mm_load_si128((const __m128i *)quant_ptr);
+ dequant = _mm_load_si128((const __m128i *)dequant_ptr);
}
{
@@ -332,15 +328,15 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
dequant = _mm_unpackhi_epi64(dequant, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -353,8 +349,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -388,7 +384,7 @@
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
- _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+ _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
if (nzflag) {
qcoeff0 = _mm_adds_epi16(qcoeff0, round);
@@ -402,20 +398,20 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
} else {
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
}
}
@@ -429,8 +425,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -456,10 +452,10 @@
}
} else {
do {
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
n_coeffs += 8 * 2;
} while (n_coeffs < 0);
*eob_ptr = 0;
--- a/vp9/encoder/x86/vp9_denoiser_sse2.c
+++ b/vp9/encoder/x86/vp9_denoiser_sse2.c
@@ -37,17 +37,11 @@
}
// Denoise a 16x1 vector.
-static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig,
- const uint8_t *mc_running_avg_y,
- uint8_t *running_avg_y,
- const __m128i *k_0,
- const __m128i *k_4,
- const __m128i *k_8,
- const __m128i *k_16,
- const __m128i *l3,
- const __m128i *l32,
- const __m128i *l21,
- __m128i acc_diff) {
+static INLINE __m128i vp9_denoiser_16x1_sse2(
+ const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+ const __m128i *k_0, const __m128i *k_4, const __m128i *k_8,
+ const __m128i *k_16, const __m128i *l3, const __m128i *l32,
+ const __m128i *l21, __m128i acc_diff) {
// Calculate differences
const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
const __m128i v_mc_running_avg_y =
@@ -69,7 +63,7 @@
__m128i adj2 = _mm_and_si128(mask2, *l32);
const __m128i adj1 = _mm_and_si128(mask1, *l21);
const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
- __m128i adj, padj, nadj;
+ __m128i adj, padj, nadj;
// Combine the adjustments and get absolute adjustments.
adj2 = _mm_add_epi8(adj2, adj1);
@@ -95,9 +89,8 @@
// Denoise a 16x1 vector with a weaker filter.
static INLINE __m128i vp9_denoiser_adj_16x1_sse2(
- const uint8_t *sig, const uint8_t *mc_running_avg_y,
- uint8_t *running_avg_y, const __m128i k_0,
- const __m128i k_delta, __m128i acc_diff) {
+ const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+ const __m128i k_0, const __m128i k_delta, __m128i acc_diff) {
__m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
// Calculate differences.
const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
@@ -108,8 +101,7 @@
// Obtain the sign. FF if diff is negative.
const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
// Clamp absolute difference to delta to get the adjustment.
- const __m128i adj =
- _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+ const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
// Restore the sign and get positive and negative adjustments.
__m128i padj, nadj;
padj = _mm_andnot_si128(diff_sign, adj);
@@ -126,14 +118,17 @@
}
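Both 16x1 kernels implement, sixteen lanes at a time, a banded step toward the motion-compensated running average. A hedged scalar model follows; the bands mirror the usual VP9 C reference filter, so treat the exact thresholds and the 3/4/6 adjustments as assumptions:

static int denoise_adjust(int diff, int shift_inc) {
  const int a = diff < 0 ? -diff : diff;
  int adj;
  if (a <= 3 + shift_inc) adj = a; /* small diff: apply it in full */
  else if (a < 8)  adj = 3;
  else if (a < 16) adj = 4;
  else             adj = 6;        /* clamped band, cf. k_16 above */
  return diff < 0 ? -adj : adj;
}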
// Denoise 8x8 and 8x16 blocks.
-static int vp9_denoiser_NxM_sse2_small(
- const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y,
- int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride,
- int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) {
+static int vp9_denoiser_NxM_sse2_small(const uint8_t *sig, int sig_stride,
+ const uint8_t *mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t *running_avg_y, int avg_y_stride,
+ int increase_denoising, BLOCK_SIZE bs,
+ int motion_magnitude, int width) {
int sum_diff_thresh, r, sum_diff = 0;
- const int shift_inc = (increase_denoising &&
- motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
- 1 : 0;
+ const int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
__m128i acc_diff = _mm_setzero_si128();
const __m128i k_0 = _mm_setzero_si128();
@@ -153,15 +148,13 @@
memcpy(sig_buffer[r], sig, width);
memcpy(sig_buffer[r] + width, sig + sig_stride, width);
memcpy(mc_running_buffer[r], mc_running_avg_y, width);
- memcpy(mc_running_buffer[r] + width,
- mc_running_avg_y + mc_avg_y_stride, width);
+ memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride,
+ width);
memcpy(running_buffer[r], running_avg_y, width);
memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
- acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
- mc_running_buffer[r],
- running_buffer[r],
- &k_0, &k_4, &k_8, &k_16,
- &l3, &l32, &l21, acc_diff);
+ acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], mc_running_buffer[r],
+ running_buffer[r], &k_0, &k_4, &k_8,
+ &k_16, &l3, &l32, &l21, acc_diff);
memcpy(running_avg_y, running_buffer[r], width);
memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
// Update pointers for next iteration.
@@ -184,8 +177,8 @@
// The delta is set by the excess of absolute pixel diff over the
// threshold.
- const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
- num_pels_log2_lookup[bs]) + 1;
+ const int delta =
+ ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
const __m128i k_delta = _mm_set1_epi8(delta);
@@ -192,11 +185,11 @@
running_avg_y -= avg_y_stride * (b_height << 1);
for (r = 0; r < b_height; ++r) {
acc_diff = vp9_denoiser_adj_16x1_sse2(
- sig_buffer[r], mc_running_buffer[r], running_buffer[r],
- k_0, k_delta, acc_diff);
+ sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_0,
+ k_delta, acc_diff);
memcpy(running_avg_y, running_buffer[r], width);
- memcpy(running_avg_y + avg_y_stride,
- running_buffer[r] + width, width);
+ memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width,
+ width);
// Update pointers for next iteration.
running_avg_y += (avg_y_stride << 1);
}
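To make the delta arithmetic concrete (numbers illustrative): for an 8x16 block, num_pels_log2_lookup[bs] is 7, so with sum_diff == 900 and sum_diff_thresh == 640:

/* delta = ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1
         = ((900 - 640) >> 7) + 1
         = (260 >> 7) + 1 = 2 + 1 = 3,
   which passes the delta < 4 gate, so the weaker pass reruns the rows
   with k_delta = _mm_set1_epi8(3). */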
@@ -216,14 +209,14 @@
static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride,
const uint8_t *mc_running_avg_y,
int mc_avg_y_stride,
- uint8_t *running_avg_y,
- int avg_y_stride,
+ uint8_t *running_avg_y, int avg_y_stride,
int increase_denoising, BLOCK_SIZE bs,
int motion_magnitude) {
int sum_diff_thresh, r, c, sum_diff = 0;
- const int shift_inc = (increase_denoising &&
- motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
- 1 : 0;
+ const int shift_inc =
+ (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+ ? 1
+ : 0;
__m128i acc_diff[4][4];
const __m128i k_0 = _mm_setzero_si128();
const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
@@ -248,9 +241,9 @@
for (r = 0; r < b_height; ++r) {
for (c = 0; c < b_width_shift4; ++c) {
- acc_diff[c][r>>4] = vp9_denoiser_16x1_sse2(
- sig, mc_running_avg_y, running_avg_y, &k_0, &k_4,
- &k_8, &k_16, &l3, &l32, &l21, acc_diff[c][r>>4]);
+ acc_diff[c][r >> 4] = vp9_denoiser_16x1_sse2(
+ sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, &k_8, &k_16, &l3,
+ &l32, &l21, acc_diff[c][r >> 4]);
// Update pointers for next iteration.
sig += 16;
mc_running_avg_y += 16;
@@ -259,7 +252,7 @@
if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
for (c = 0; c < b_width_shift4; ++c) {
- sum_diff += sum_diff_16x1(acc_diff[c][r>>4]);
+ sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
}
}
@@ -272,8 +265,8 @@
{
sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
if (abs(sum_diff) > sum_diff_thresh) {
- const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
- num_pels_log2_lookup[bs]) + 1;
+ const int delta =
+ ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
@@ -284,9 +277,9 @@
sum_diff = 0;
for (r = 0; r < b_height; ++r) {
for (c = 0; c < b_width_shift4; ++c) {
- acc_diff[c][r>>4] = vp9_denoiser_adj_16x1_sse2(
- sig, mc_running_avg_y, running_avg_y, k_0,
- k_delta, acc_diff[c][r>>4]);
+ acc_diff[c][r >> 4] =
+ vp9_denoiser_adj_16x1_sse2(sig, mc_running_avg_y, running_avg_y,
+ k_0, k_delta, acc_diff[c][r >> 4]);
// Update pointers for next iteration.
sig += 16;
mc_running_avg_y += 16;
@@ -295,7 +288,7 @@
if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
for (c = 0; c < b_width_shift4; ++c) {
- sum_diff += sum_diff_16x1(acc_diff[c][r>>4]);
+ sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
}
}
sig = sig - b_width + sig_stride;
@@ -314,27 +307,21 @@
}
int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride,
- const uint8_t *mc_avg,
- int mc_avg_stride,
+ const uint8_t *mc_avg, int mc_avg_stride,
uint8_t *avg, int avg_stride,
- int increase_denoising,
- BLOCK_SIZE bs,
+ int increase_denoising, BLOCK_SIZE bs,
int motion_magnitude) {
// Rank by frequency of the block type to have an early termination.
if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 ||
bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 ||
bs == BLOCK_32X64 || bs == BLOCK_64X32) {
- return vp9_denoiser_NxM_sse2_big(sig, sig_stride,
- mc_avg, mc_avg_stride,
- avg, avg_stride,
- increase_denoising,
- bs, motion_magnitude);
+ return vp9_denoiser_NxM_sse2_big(sig, sig_stride, mc_avg, mc_avg_stride,
+ avg, avg_stride, increase_denoising, bs,
+ motion_magnitude);
} else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) {
- return vp9_denoiser_NxM_sse2_small(sig, sig_stride,
- mc_avg, mc_avg_stride,
- avg, avg_stride,
- increase_denoising,
- bs, motion_magnitude, 8);
+ return vp9_denoiser_NxM_sse2_small(sig, sig_stride, mc_avg, mc_avg_stride,
+ avg, avg_stride, increase_denoising, bs,
+ motion_magnitude, 8);
} else {
return COPY_BLOCK;
}
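
For reference, the second-pass adjustment that both denoiser variants above implement reduces to a small scalar rule: once the accumulated block difference exceeds the threshold, the excess (normalized by the block's pixel count) becomes a per-pixel delta, applied only while it stays below 4. A minimal scalar sketch follows; the helper name and the bare num_pels_log2 parameter are illustrative stand-ins, not libvpx API.

#include <stdlib.h>

/* Sketch of the delta rule used by vp9_denoiser_NxM_sse2_small/_big:
 * num_pels_log2 stands in for num_pels_log2_lookup[bs]. */
static int denoiser_adj_delta(int sum_diff, int sum_diff_thresh,
                              int num_pels_log2) {
  if (abs(sum_diff) <= sum_diff_thresh) return 0;
  {
    const int delta =
        ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2) + 1;
    return delta < 4 ? delta : 0; /* deltas of 4 or more are not applied */
  }
}
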
--- a/vp9/encoder/x86/vp9_diamond_search_sad_avx.c
+++ b/vp9/encoder/x86/vp9_diamond_search_sad_avx.c
@@ -9,7 +9,7 @@
*/
#if defined(_MSC_VER)
-# include <intrin.h>
+#include <intrin.h>
#endif
#include <emmintrin.h>
#include <smmintrin.h>
@@ -19,11 +19,11 @@
#include "vpx_ports/mem.h"
#ifdef __GNUC__
-# define LIKELY(v) __builtin_expect(v, 1)
-# define UNLIKELY(v) __builtin_expect(v, 0)
+#define LIKELY(v) __builtin_expect(v, 1)
+#define UNLIKELY(v) __builtin_expect(v, 0)
#else
-# define LIKELY(v) (v)
-# define UNLIKELY(v) (v)
+#define LIKELY(v) (v)
+#define UNLIKELY(v) (v)
#endif
static INLINE int_mv pack_int_mv(int16_t row, int16_t col) {
@@ -40,19 +40,19 @@
return mv.as_int == 0 ? 0 : 1;
}
-static INLINE int mv_cost(const int_mv mv,
- const int *joint_cost, int *const comp_cost[2]) {
- return joint_cost[get_mv_joint(mv)] +
- comp_cost[0][mv.as_mv.row] + comp_cost[1][mv.as_mv.col];
+static INLINE int mv_cost(const int_mv mv, const int *joint_cost,
+ int *const comp_cost[2]) {
+ return joint_cost[get_mv_joint(mv)] + comp_cost[0][mv.as_mv.row] +
+ comp_cost[1][mv.as_mv.col];
}
static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref,
int sad_per_bit) {
- const int_mv diff = pack_int_mv(mv.as_mv.row - ref->row,
- mv.as_mv.col - ref->col);
- return ROUND_POWER_OF_TWO((unsigned)mv_cost(diff, x->nmvjointsadcost,
- x->nmvsadcost) *
- sad_per_bit, VP9_PROB_COST_SHIFT);
+ const int_mv diff =
+ pack_int_mv(mv.as_mv.row - ref->row, mv.as_mv.col - ref->col);
+ return ROUND_POWER_OF_TWO(
+ (unsigned)mv_cost(diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit,
+ VP9_PROB_COST_SHIFT);
}
/*****************************************************************************
@@ -71,10 +71,9 @@
* which does not rely on these properties. *
*****************************************************************************/
int vp9_diamond_search_sad_avx(const MACROBLOCK *x,
- const search_site_config *cfg,
- MV *ref_mv, MV *best_mv, int search_param,
- int sad_per_bit, int *num00,
- const vp9_variance_fn_ptr_t *fn_ptr,
+ const search_site_config *cfg, MV *ref_mv,
+ MV *best_mv, int search_param, int sad_per_bit,
+ int *num00, const vp9_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
const int_mv maxmv = pack_int_mv(x->mv_row_max, x->mv_col_max);
const __m128i v_max_mv_w = _mm_set1_epi32(maxmv.as_int);
@@ -91,12 +90,12 @@
// 0 = initial step (MAX_FIRST_STEP) pel
// 1 = (MAX_FIRST_STEP/2) pel,
// 2 = (MAX_FIRST_STEP/4) pel...
- const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param];
+ const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param];
const intptr_t *ss_os = &cfg->ss_os[cfg->searches_per_step * search_param];
const int tot_steps = cfg->total_steps - search_param;
- const int_mv fcenter_mv = pack_int_mv(center_mv->row >> 3,
- center_mv->col >> 3);
+ const int_mv fcenter_mv =
+ pack_int_mv(center_mv->row >> 3, center_mv->col >> 3);
const __m128i vfcmv = _mm_set1_epi32(fcenter_mv.as_int);
const int ref_row = clamp(ref_mv->row, minmv.as_mv.row, maxmv.as_mv.row);
@@ -109,8 +108,8 @@
const int what_stride = x->plane[0].src.stride;
const int in_what_stride = x->e_mbd.plane[0].pre[0].stride;
const uint8_t *const what = x->plane[0].src.buf;
- const uint8_t *const in_what = x->e_mbd.plane[0].pre[0].buf +
- ref_row * in_what_stride + ref_col;
+ const uint8_t *const in_what =
+ x->e_mbd.plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;
// Work out the start point for the search
const uint8_t *best_address = in_what;
@@ -181,10 +180,9 @@
__m128i v_bo10_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 0]);
__m128i v_bo32_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 2]);
// Set the ones falling outside to zero
- v_bo10_q = _mm_and_si128(v_bo10_q,
- _mm_cvtepi32_epi64(v_inside_d));
- v_bo32_q = _mm_and_si128(v_bo32_q,
- _mm_unpackhi_epi32(v_inside_d, v_inside_d));
+ v_bo10_q = _mm_and_si128(v_bo10_q, _mm_cvtepi32_epi64(v_inside_d));
+ v_bo32_q =
+ _mm_and_si128(v_bo32_q, _mm_unpackhi_epi32(v_inside_d, v_inside_d));
// Compute the candidate addresses
v_blocka[0] = _mm_add_epi64(v_ba_q, v_bo10_q);
v_blocka[1] = _mm_add_epi64(v_ba_q, v_bo32_q);
@@ -195,9 +193,8 @@
#endif
}
- fn_ptr->sdx4df(what, what_stride,
- (const uint8_t **)&v_blocka[0], in_what_stride,
- (uint32_t*)&v_sad_d);
+ fn_ptr->sdx4df(what, what_stride, (const uint8_t **)&v_blocka[0],
+ in_what_stride, (uint32_t *)&v_sad_d);
// Look up the component cost of the residual motion vector
{
@@ -226,11 +223,10 @@
// Now add in the joint cost
{
- const __m128i v_sel_d = _mm_cmpeq_epi32(v_diff_mv_w,
- _mm_setzero_si128());
- const __m128i v_joint_cost_d = _mm_blendv_epi8(v_joint_cost_1_d,
- v_joint_cost_0_d,
- v_sel_d);
+ const __m128i v_sel_d =
+ _mm_cmpeq_epi32(v_diff_mv_w, _mm_setzero_si128());
+ const __m128i v_joint_cost_d =
+ _mm_blendv_epi8(v_joint_cost_1_d, v_joint_cost_0_d, v_sel_d);
v_cost_d = _mm_add_epi32(v_cost_d, v_joint_cost_d);
}
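
The scalar arithmetic that mvsad_err_cost mirrors is worth spelling out: the residual MV's bit cost (joint cost plus per-component costs) is scaled by the per-bit SAD weight, then rounded back into the SAD domain. ROUND_POWER_OF_TWO is the usual round-to-nearest right shift from vpx_dsp, repeated here for reference; the wrapper function is an illustrative sketch, not code from this patch.

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* Scale a motion-vector bit cost by the per-bit SAD weight, rounding to
 * nearest on the way back down (prob_cost_shift plays the role of
 * VP9_PROB_COST_SHIFT). */
static unsigned scaled_mv_err_cost(unsigned mv_cost_bits,
                                   unsigned sad_per_bit,
                                   int prob_cost_shift) {
  return ROUND_POWER_OF_TWO(mv_cost_bits * sad_per_bit, prob_cost_shift);
}
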
--- a/vp9/encoder/x86/vp9_error_intrin_avx2.c
+++ b/vp9/encoder/x86/vp9_error_intrin_avx2.c
@@ -13,10 +13,8 @@
#include "./vp9_rtcd.h"
#include "vpx/vpx_integer.h"
-int64_t vp9_block_error_avx2(const int16_t *coeff,
- const int16_t *dqcoeff,
- intptr_t block_size,
- int64_t *ssz) {
+int64_t vp9_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
__m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
__m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
__m256i sse_reg_64hi, ssz_reg_64hi;
@@ -29,7 +27,7 @@
sse_reg = _mm256_set1_epi16(0);
ssz_reg = _mm256_set1_epi16(0);
- for (i = 0 ; i < block_size ; i+= 16) {
+ for (i = 0; i < block_size; i += 16) {
// load 32 bytes from coeff and dqcoeff
coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
@@ -66,8 +64,8 @@
_mm256_extractf128_si256(ssz_reg, 1));
// store the results
- _mm_storel_epi64((__m128i*)(&sse), sse_reg128);
- _mm_storel_epi64((__m128i*)(ssz), ssz_reg128);
+ _mm_storel_epi64((__m128i *)(&sse), sse_reg128);
+ _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
return sse;
}
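
vp9_block_error_avx2 keeps two running sums per 16-coefficient batch: the squared quantization error it returns and the squared source energy it writes through ssz. Its contract is easiest to see in scalar form; the sketch below mirrors the C reference's behavior and is included here only for comparison.

#include <stdint.h>

/* Returns the sum of squared differences between coeff and dqcoeff and
 * writes the sum of squared source coefficients to *ssz. */
static int64_t block_error_scalar(const int16_t *coeff,
                                  const int16_t *dqcoeff,
                                  intptr_t block_size, int64_t *ssz) {
  int64_t sse = 0, sqc = 0;
  intptr_t i;
  for (i = 0; i < block_size; ++i) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    sse += diff * diff;
    sqc += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqc;
  return sse;
}
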
--- a/vp9/encoder/x86/vp9_frame_scale_ssse3.c
+++ b/vp9/encoder/x86/vp9_frame_scale_ssse3.c
@@ -19,8 +19,8 @@
YV12_BUFFER_CONFIG *dst);
static void downsample_2_to_1_ssse3(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- int w, int h) {
+ uint8_t *dst, ptrdiff_t dst_stride, int w,
+ int h) {
const __m128i mask = _mm_set1_epi16(0x00FF);
const int max_width = w & ~15;
int y;
@@ -27,7 +27,7 @@
for (y = 0; y < h; ++y) {
int x;
for (x = 0; x < max_width; x += 16) {
- const __m128i a = _mm_loadu_si128((const __m128i *)(src + x * 2 + 0));
+ const __m128i a = _mm_loadu_si128((const __m128i *)(src + x * 2 + 0));
const __m128i b = _mm_loadu_si128((const __m128i *)(src + x * 2 + 16));
const __m128i a_and = _mm_and_si128(a, mask);
const __m128i b_and = _mm_and_si128(b, mask);
@@ -34,8 +34,7 @@
const __m128i c = _mm_packus_epi16(a_and, b_and);
_mm_storeu_si128((__m128i *)(dst + x), c);
}
- for (; x < w; ++x)
- dst[x] = src[x * 2];
+ for (; x < w; ++x) dst[x] = src[x * 2];
src += src_stride * 2;
dst += dst_stride;
}
@@ -47,9 +46,8 @@
const __m128i *const g, const __m128i *const h) {
const __m128i coeffs_ab =
_mm_set_epi8(6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1);
- const __m128i coeffs_cd =
- _mm_set_epi8(78, -19, 78, -19, 78, -19, 78, -19, 78, -19, 78, -19,
- 78, -19, 78, -19);
+ const __m128i coeffs_cd = _mm_set_epi8(78, -19, 78, -19, 78, -19, 78, -19, 78,
+ -19, 78, -19, 78, -19, 78, -19);
const __m128i const64_x16 = _mm_set1_epi16(64);
const __m128i ab = _mm_unpacklo_epi8(*a, *b);
const __m128i cd = _mm_unpacklo_epi8(*c, *d);
@@ -88,8 +86,8 @@
}
static void upsample_1_to_2_ssse3(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- int dst_w, int dst_h) {
+ uint8_t *dst, ptrdiff_t dst_stride, int dst_w,
+ int dst_h) {
dst_w /= 2;
dst_h /= 2;
{
@@ -116,7 +114,7 @@
int x;
eight_tap_row_ssse3(src + src_stride * 4 - 3, tmp7, dst_w);
for (x = 0; x < max_width; x += 8) {
- const __m128i A = _mm_loadl_epi64((const __m128i *)(src + x));
+ const __m128i A = _mm_loadl_epi64((const __m128i *)(src + x));
const __m128i B = _mm_loadl_epi64((const __m128i *)(tmp3 + x));
const __m128i AB = _mm_unpacklo_epi8(A, B);
__m128i C, D, CD;
@@ -179,23 +177,23 @@
const int dst_uv_h = dst_h / 2;
if (dst_w * 2 == src_w && dst_h * 2 == src_h) {
- downsample_2_to_1_ssse3(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride, dst_w, dst_h);
- downsample_2_to_1_ssse3(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride, dst_uv_w, dst_uv_h);
- downsample_2_to_1_ssse3(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride, dst_uv_w, dst_uv_h);
+ downsample_2_to_1_ssse3(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h);
+ downsample_2_to_1_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ downsample_2_to_1_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
vpx_extend_frame_borders(dst);
} else if (dst_w == src_w * 2 && dst_h == src_h * 2) {
// The upsample() supports widths up to 1920 * 2. If greater, fall back
// to vp9_scale_and_extend_frame_c().
- if (dst_w/2 <= 1920) {
- upsample_1_to_2_ssse3(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride, dst_w, dst_h);
- upsample_1_to_2_ssse3(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride, dst_uv_w, dst_uv_h);
- upsample_1_to_2_ssse3(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride, dst_uv_w, dst_uv_h);
+ if (dst_w / 2 <= 1920) {
+ upsample_1_to_2_ssse3(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, dst_w, dst_h);
+ upsample_1_to_2_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
+ upsample_1_to_2_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, dst_uv_w, dst_uv_h);
vpx_extend_frame_borders(dst);
} else {
vp9_scale_and_extend_frame_c(src, dst);
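
The SSSE3 2:1 downsample above is point sampling: the 0x00FF mask plus packus keeps the even-indexed byte of every pair, sixteen at a time, and the scalar tail loop (dst[x] = src[x * 2]) handles the remaining columns. A full scalar equivalent, as a sketch of what the vector body computes:

#include <stddef.h>
#include <stdint.h>

static void downsample_2_to_1_c(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride, int w,
                                int h) {
  int x, y;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) dst[x] = src[x * 2]; /* even columns only */
    src += src_stride * 2;                       /* even rows only */
    dst += dst_stride;
  }
}
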
--- a/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c
+++ b/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c
@@ -23,41 +23,41 @@
const int shift = 2 * (bps - 8);
const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
- for (i = 0; i < block_size; i+=8) {
+ for (i = 0; i < block_size; i += 8) {
// Load the data into xmm registers
- __m128i mm_coeff = _mm_load_si128((__m128i*) (coeff + i));
- __m128i mm_coeff2 = _mm_load_si128((__m128i*) (coeff + i + 4));
- __m128i mm_dqcoeff = _mm_load_si128((__m128i*) (dqcoeff + i));
- __m128i mm_dqcoeff2 = _mm_load_si128((__m128i*) (dqcoeff + i + 4));
+ __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i));
+ __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4));
+ __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i));
+ __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4));
// Check if any values require more than 15 bit
max = _mm_set1_epi32(0x3fff);
min = _mm_set1_epi32(0xffffc000);
cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),
- _mm_cmplt_epi32(mm_coeff, min));
+ _mm_cmplt_epi32(mm_coeff, min));
cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),
- _mm_cmplt_epi32(mm_coeff2, min));
+ _mm_cmplt_epi32(mm_coeff2, min));
cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),
- _mm_cmplt_epi32(mm_dqcoeff, min));
+ _mm_cmplt_epi32(mm_dqcoeff, min));
cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),
- _mm_cmplt_epi32(mm_dqcoeff2, min));
- test = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(cmp0, cmp1),
- _mm_or_si128(cmp2, cmp3)));
+ _mm_cmplt_epi32(mm_dqcoeff2, min));
+ test = _mm_movemask_epi8(
+ _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)));
if (!test) {
- __m128i mm_diff, error_sse2, sqcoeff_sse2;;
+ __m128i mm_diff, error_sse2, sqcoeff_sse2;
mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2);
mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2);
mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff);
error_sse2 = _mm_madd_epi16(mm_diff, mm_diff);
sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff);
- _mm_storeu_si128((__m128i*)temp, error_sse2);
+ _mm_storeu_si128((__m128i *)temp, error_sse2);
error = error + temp[0] + temp[1] + temp[2] + temp[3];
- _mm_storeu_si128((__m128i*)temp, sqcoeff_sse2);
+ _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2);
sqcoeff += temp[0] + temp[1] + temp[2] + temp[3];
} else {
for (j = 0; j < 8; j++) {
const int64_t diff = coeff[i + j] - dqcoeff[i + j];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j];
}
}
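
The range test above exists because the fast path packs the 32-bit high-bit-depth coefficients down to 16 bits (_mm_packs_epi32) before _mm_madd_epi16, and that is only safe when every value fits in 15 signed bits so the packed products cannot overflow the 32-bit accumulators. A scalar sketch of the guard (the helper is illustrative, not libvpx API):

#include <stdint.h>

/* Nonzero when all n values lie in [-0x4000, 0x3fff]; otherwise the
 * routine takes the widening scalar fallback loop instead. */
static int fits_15_signed_bits(const int32_t *v, int n) {
  int i;
  for (i = 0; i < n; ++i)
    if (v[i] > 0x3fff || v[i] < -0x4000) return 0;
  return 1;
}
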
--- a/vp9/encoder/x86/vp9_quantize_sse2.c
+++ b/vp9/encoder/x86/vp9_quantize_sse2.c
@@ -14,14 +14,13 @@
#include "./vp9_rtcd.h"
#include "vpx/vpx_integer.h"
-void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+void vp9_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr,
+ const int16_t *iscan_ptr) {
__m128i zero;
__m128i thr;
int16_t nzflag;
@@ -44,9 +43,9 @@
// Setup global values
{
- round = _mm_load_si128((const __m128i*)round_ptr);
- quant = _mm_load_si128((const __m128i*)quant_ptr);
- dequant = _mm_load_si128((const __m128i*)dequant_ptr);
+ round = _mm_load_si128((const __m128i *)round_ptr);
+ quant = _mm_load_si128((const __m128i *)quant_ptr);
+ dequant = _mm_load_si128((const __m128i *)dequant_ptr);
}
{
@@ -54,8 +53,8 @@
__m128i qcoeff0, qcoeff1;
__m128i qtmp0, qtmp1;
// Do DC and first 15 AC
- coeff0 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs));
- coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1);
+ coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs));
+ coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1);
// Poor man's sign extract
coeff0_sign = _mm_srai_epi16(coeff0, 15);
@@ -78,15 +77,15 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
dequant = _mm_unpackhi_epi64(dequant, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -99,8 +98,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -121,8 +120,8 @@
__m128i qcoeff0, qcoeff1;
__m128i qtmp0, qtmp1;
- coeff0 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs));
- coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1);
+ coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs));
+ coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1);
// Poor man's sign extract
coeff0_sign = _mm_srai_epi16(coeff0, 15);
@@ -133,7 +132,7 @@
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
- _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+ _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
if (nzflag) {
qcoeff0 = _mm_adds_epi16(qcoeff0, round);
@@ -147,20 +146,20 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
} else {
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
}
}
@@ -174,8 +173,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -200,10 +199,10 @@
}
} else {
do {
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
n_coeffs += 8 * 2;
} while (n_coeffs < 0);
*eob_ptr = 0;
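
Throughout vp9_quantize_fp_sse2, the "poor man's sign extract" idiom does the heavy lifting: an arithmetic shift by 15 turns each lane into 0 or -1, (x ^ s) - s then yields |x|, and the same two operations restore the sign after rounding and multiplication. A scalar sketch of the idiom (note that |INT16_MIN| is not representable, a corner the SIMD path tolerates via its saturating adds):

#include <stdint.h>

static int16_t sign_mask_s16(int16_t x) {
  return (int16_t)(x >> 15); /* 0 if x >= 0, -1 if x < 0 */
}

static int16_t abs_via_sign_s16(int16_t x) {
  const int16_t s = sign_mask_s16(x);
  return (int16_t)((x ^ s) - s); /* two's-complement absolute value */
}

static int16_t restore_sign_s16(int16_t mag, int16_t s) {
  return (int16_t)((mag ^ s) - s); /* negates when s == -1 */
}
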