shithub: libvpx

Download patch

ref: 0d9cc0a9f0fdd7b8f08fce2d54d81de0ea5a9942
parent: b6a3062d8181c48b8056458f12950bb6fd08628f
author: Dmitry Kovalev <dkovalev@google.com>
date: Thu Feb 28 08:18:02 EST 2013

Code cleanup.

Removing redundant 'extern' keyword, better formatting, code
simplification.

Change-Id: I132fea14f08c706ee9ea147d19464d03f833f25b

--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -45,7 +45,7 @@
 int enc_debug = 0;
 #endif
 
-extern void select_interp_filter_type(VP9_COMP *cpi);
+void vp9_select_interp_filter_type(VP9_COMP *cpi);
 
 static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
                               int output_enabled, int mb_row, int mb_col);
@@ -100,7 +100,7 @@
    */
   act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
                           &sse);
-  act = act << 4;
+  act <<= 4;
 
   /* If the region is flat, lower the activity some more. */
   if (act < 8 << 12)
@@ -882,13 +882,10 @@
   /* Find best coding mode & reconstruct the MB so it is available
    * as a predictor for MBs that follow in the SB */
   if (cm->frame_type == KEY_FRAME) {
-    vp9_rd_pick_intra_mode_sb64(cpi, x,
-                                totalrate,
-                                totaldist);
+    vp9_rd_pick_intra_mode_sb64(cpi, x, totalrate, totaldist);
 
     /* Save the coding context */
-    vpx_memcpy(&x->sb64_context.mic, xd->mode_info_context,
-               sizeof(MODE_INFO));
+    vpx_memcpy(&x->sb64_context.mic, xd->mode_info_context, sizeof(MODE_INFO));
   } else {
     vp9_rd_pick_inter_mode_sb64(cpi, x, mb_row, mb_col, totalrate, totaldist);
   }
@@ -1221,9 +1218,7 @@
   vpx_memset(cm->above_context, 0,
              sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
 
-  xd->fullpixel_mask = 0xffffffff;
-  if (cm->full_pixel)
-    xd->fullpixel_mask = 0xfffffff8;
+  xd->fullpixel_mask = cm->full_pixel ? 0xfffffff8 : 0xffffffff;
 }
 
 static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
@@ -1421,9 +1416,8 @@
   int x, y;
 
   for (y = 0; y < ymbs; y++) {
-    for (x = 0; x < xmbs; x++) {
+    for (x = 0; x < xmbs; x++)
       mi[y * mis + x].mbmi.txfm_size = txfm_size;
-    }
   }
 }
 
@@ -1678,7 +1672,7 @@
 
     // Update interpolation filter strategy for next frame.
     if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
-      select_interp_filter_type(cpi);
+      vp9_select_interp_filter_type(cpi);
   } else {
     encode_frame_internal(cpi);
   }
@@ -1690,27 +1684,23 @@
   int i;
 
   for (r = 0; r < 4; r++) {
-    for (c = 0; c < 4; c++) {
+    for (c = 0; c < 4; c++)
       x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
-    }
   }
 
   for (r = 0; r < 2; r++) {
-    for (c = 0; c < 2; c++) {
+    for (c = 0; c < 2; c++)
       x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
-    }
   }
 
 
   for (r = 0; r < 2; r++) {
-    for (c = 0; c < 2; c++) {
+    for (c = 0; c < 2; c++)
       x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
-    }
   }
 
-  for (i = 0; i < 24; i++) {
+  for (i = 0; i < 24; i++)
     x->block[i].coeff = x->coeff + i * 16;
-  }
 }
 
 void vp9_build_block_offsets(MACROBLOCK *x) {
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -17,8 +17,6 @@
 #include "vp9/encoder/vp9_encodeintra.h"
 
 int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
-  int i;
-  int intra_pred_var = 0;
   MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
   (void) cpi;
 
@@ -29,6 +27,8 @@
 
     vp9_encode_intra16x16mby(x);
   } else {
+    int i;
+
     for (i = 0; i < 16; i++) {
       x->e_mbd.block[i].bmi.as_mode.first = B_DC_PRED;
       vp9_encode_intra4x4block(x, i);
@@ -35,9 +35,7 @@
     }
   }
 
-  intra_pred_var = vp9_get_mb_ss(x->src_diff);
-
-  return intra_pred_var;
+  return vp9_get_mb_ss(x->src_diff);
 }
 
 void vp9_encode_intra4x4block(MACROBLOCK *x, int ib) {
@@ -71,7 +69,6 @@
 
   for (i = 0; i < 16; i++)
     vp9_encode_intra4x4block(mb, i);
-  return;
 }
 
 void vp9_encode_intra16x16mby(MACROBLOCK *x) {
@@ -83,24 +80,28 @@
 
   vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
 
-  if (tx_size == TX_16X16) {
-    vp9_transform_mby_16x16(x);
-    vp9_quantize_mby_16x16(x);
-    if (x->optimize)
-      vp9_optimize_mby_16x16(x);
-    vp9_inverse_transform_mby_16x16(xd);
-  } else if (tx_size == TX_8X8) {
-    vp9_transform_mby_8x8(x);
-    vp9_quantize_mby_8x8(x);
-    if (x->optimize)
-      vp9_optimize_mby_8x8(x);
-    vp9_inverse_transform_mby_8x8(xd);
-  } else {
-    vp9_transform_mby_4x4(x);
-    vp9_quantize_mby_4x4(x);
-    if (x->optimize)
-      vp9_optimize_mby_4x4(x);
-    vp9_inverse_transform_mby_4x4(xd);
+  switch (tx_size) {
+    case TX_16X16:
+      vp9_transform_mby_16x16(x);
+      vp9_quantize_mby_16x16(x);
+      if (x->optimize)
+        vp9_optimize_mby_16x16(x);
+      vp9_inverse_transform_mby_16x16(xd);
+      break;
+    case TX_8X8:
+      vp9_transform_mby_8x8(x);
+      vp9_quantize_mby_8x8(x);
+      if (x->optimize)
+        vp9_optimize_mby_8x8(x);
+      vp9_inverse_transform_mby_8x8(xd);
+      break;
+    default:
+      vp9_transform_mby_4x4(x);
+      vp9_quantize_mby_4x4(x);
+      if (x->optimize)
+        vp9_optimize_mby_4x4(x);
+      vp9_inverse_transform_mby_4x4(xd);
+      break;
   }
 
   vp9_recon_mby(xd);
@@ -115,19 +116,22 @@
   vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
                     xd->predictor, x->src.uv_stride);
 
-  if (tx_size == TX_4X4) {
-    vp9_transform_mbuv_4x4(x);
-    vp9_quantize_mbuv_4x4(x);
-    if (x->optimize)
-      vp9_optimize_mbuv_4x4(x);
-    vp9_inverse_transform_mbuv_4x4(xd);
-  } else /* 16x16 or 8x8 */ {
-    vp9_transform_mbuv_8x8(x);
-    vp9_quantize_mbuv_8x8(x);
-    if (x->optimize)
-      vp9_optimize_mbuv_8x8(x);
-    vp9_inverse_transform_mbuv_8x8(xd);
-  }
+  switch (tx_size) {
+    case TX_4X4:
+      vp9_transform_mbuv_4x4(x);
+      vp9_quantize_mbuv_4x4(x);
+      if (x->optimize)
+        vp9_optimize_mbuv_4x4(x);
+      vp9_inverse_transform_mbuv_4x4(xd);
+      break;
+    default:  // 16x16 or 8x8
+      vp9_transform_mbuv_8x8(x);
+      vp9_quantize_mbuv_8x8(x);
+      if (x->optimize)
+        vp9_optimize_mbuv_8x8(x);
+      vp9_inverse_transform_mbuv_8x8(xd);
+      break;
+  }
 
   vp9_recon_intra_mbuv(xd);
 }
@@ -190,16 +194,13 @@
 }
 
 void vp9_encode_intra8x8mby(MACROBLOCK *x) {
-  int i, ib;
+  int i;
 
-  for (i = 0; i < 4; i++) {
-    ib = vp9_i8x8_block[i];
-    vp9_encode_intra8x8(x, ib);
-  }
+  for (i = 0; i < 4; i++)
+    vp9_encode_intra8x8(x, vp9_i8x8_block[i]);
 }
 
-static void encode_intra_uv4x4(MACROBLOCK *x, int ib,
-                               int mode) {
+static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) {
   BLOCKD *b = &x->e_mbd.block[ib];
   BLOCK *be = &x->block[ib];
 
@@ -216,17 +217,13 @@
 }
 
 void vp9_encode_intra8x8mbuv(MACROBLOCK *x) {
-  int i, ib, mode;
-  BLOCKD *b;
+  int i;
 
   for (i = 0; i < 4; i++) {
-    ib = vp9_i8x8_block[i];
-    b = &x->e_mbd.block[ib];
-    mode = b->bmi.as_mode.first;
+    BLOCKD *b = &x->e_mbd.block[vp9_i8x8_block[i]];
+    int mode = b->bmi.as_mode.first;
 
-    /*u */
-    encode_intra_uv4x4(x, i + 16, mode);
-    /*v */
-    encode_intra_uv4x4(x, i + 20, mode);
+    encode_intra_uv4x4(x, i + 16, mode);  // u
+    encode_intra_uv4x4(x, i + 20, mode);  // v
   }
 }
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -29,9 +29,8 @@
   int r, c;
 
   for (r = 0; r < 4; r++) {
-    for (c = 0; c < 4; c++) {
+    for (c = 0; c < 4; c++)
       diff_ptr[c] = src_ptr[c] - pred_ptr[c];
-    }
 
     diff_ptr += pitch;
     pred_ptr += pitch;
@@ -47,9 +46,9 @@
   int r, c;
 
   for (r = 0; r < 8; r++) {
-    for (c = 0; c < 8; c++) {
+    for (c = 0; c < 8; c++)
       diff_ptr[c] = src_ptr[c] - pred_ptr[c];
-    }
+
     diff_ptr += pitch;
     pred_ptr += pitch;
     src_ptr  += src_stride;
@@ -65,9 +64,8 @@
   int r, c;
 
   for (r = 0; r < 8; r++) {
-    for (c = 0; c < 8; c++) {
+    for (c = 0; c < 8; c++)
       udiff[c] = usrc[c] - upred[c];
-    }
 
     udiff += 8;
     upred += dst_stride;
@@ -98,9 +96,8 @@
   int r, c;
 
   for (r = 0; r < 16; r++) {
-    for (c = 0; c < 16; c++) {
+    for (c = 0; c < 16; c++)
       diff[c] = src[c] - pred[c];
-    }
 
     diff += 16;
     pred += dst_stride;
@@ -113,9 +110,8 @@
   int r, c;
 
   for (r = 0; r < 32; r++) {
-    for (c = 0; c < 32; c++) {
+    for (c = 0; c < 32; c++)
       diff[c] = src[c] - pred[c];
-    }
 
     diff += 32;
     pred += dst_stride;
@@ -132,9 +128,8 @@
   int r, c;
 
   for (r = 0; r < 16; r++) {
-    for (c = 0; c < 16; c++) {
+    for (c = 0; c < 16; c++)
       udiff[c] = usrc[c] - upred[c];
-    }
 
     udiff += 16;
     upred += dst_stride;
@@ -142,9 +137,8 @@
   }
 
   for (r = 0; r < 16; r++) {
-    for (c = 0; c < 16; c++) {
+    for (c = 0; c < 16; c++)
       vdiff[c] = vsrc[c] - vpred[c];
-    }
 
     vdiff += 16;
     vpred += dst_stride;
@@ -176,12 +170,10 @@
     if (tx_type != DCT_DCT) {
       vp9_short_fht4x4(b->src_diff, b->coeff, 16, tx_type);
     } else if (!(i & 1) && get_tx_type_4x4(xd, &xd->block[i + 1]) == DCT_DCT) {
-      x->fwd_txm8x4(&x->block[i].src_diff[0],
-                           &x->block[i].coeff[0], 32);
+      x->fwd_txm8x4(x->block[i].src_diff, x->block[i].coeff, 32);
       i++;
     } else {
-      x->fwd_txm4x4(&x->block[i].src_diff[0],
-                           &x->block[i].coeff[0], 32);
+      x->fwd_txm4x4(x->block[i].src_diff, x->block[i].coeff, 32);
     }
   }
 }
@@ -189,10 +181,8 @@
 void vp9_transform_mbuv_4x4(MACROBLOCK *x) {
   int i;
 
-  for (i = 16; i < 24; i += 2) {
-    x->fwd_txm8x4(&x->block[i].src_diff[0],
-                         &x->block[i].coeff[0], 16);
-  }
+  for (i = 16; i < 24; i += 2)
+    x->fwd_txm8x4(x->block[i].src_diff, x->block[i].coeff, 16);
 }
 
 static void transform_mb_4x4(MACROBLOCK *x) {
@@ -211,8 +201,7 @@
     if (tx_type != DCT_DCT) {
       vp9_short_fht8x8(b->src_diff, b->coeff, 16, tx_type);
     } else {
-      x->fwd_txm8x8(&x->block[i].src_diff[0],
-                           &x->block[i].coeff[0], 32);
+      x->fwd_txm8x8(x->block[i].src_diff, x->block[i].coeff, 32);
     }
   }
   for (i = 2; i < 11; i += 8) {
@@ -221,8 +210,7 @@
     if (tx_type != DCT_DCT) {
       vp9_short_fht8x8(b->src_diff, (b + 2)->coeff, 16, tx_type);
     } else {
-      x->fwd_txm8x8(&x->block[i].src_diff[0],
-                           &x->block[i + 2].coeff[0], 32);
+      x->fwd_txm8x8(x->block[i].src_diff, x->block[i + 2].coeff, 32);
     }
   }
 }
@@ -230,10 +218,8 @@
 void vp9_transform_mbuv_8x8(MACROBLOCK *x) {
   int i;
 
-  for (i = 16; i < 24; i += 4) {
-    x->fwd_txm8x8(&x->block[i].src_diff[0],
-                         &x->block[i].coeff[0], 16);
-  }
+  for (i = 16; i < 24; i += 4)
+    x->fwd_txm8x8(x->block[i].src_diff, x->block[i].coeff, 16);
 }
 
 void vp9_transform_mb_8x8(MACROBLOCK *x) {
@@ -249,8 +235,7 @@
   if (tx_type != DCT_DCT) {
     vp9_short_fht16x16(b->src_diff, b->coeff, 16, tx_type);
   } else {
-    x->fwd_txm16x16(&x->block[0].src_diff[0],
-                           &x->block[0].coeff[0], 32);
+    x->fwd_txm16x16(x->block[0].src_diff, x->block[0].coeff, 32);
   }
 }
 
@@ -267,10 +252,8 @@
 void vp9_transform_sbuv_16x16(MACROBLOCK *x) {
   SUPERBLOCK * const x_sb = &x->sb_coeff_data;
   vp9_clear_system_state();
-  x->fwd_txm16x16(x_sb->src_diff + 1024,
-                         x_sb->coeff + 1024, 32);
-  x->fwd_txm16x16(x_sb->src_diff + 1280,
-                         x_sb->coeff + 1280, 32);
+  x->fwd_txm16x16(x_sb->src_diff + 1024, x_sb->coeff + 1024, 32);
+  x->fwd_txm16x16(x_sb->src_diff + 1280, x_sb->coeff + 1280, 32);
 }
 
 #define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -29,8 +29,7 @@
   BLOCKD *d = &xd->block[0];
   vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
   unsigned int best_err;
-  int step_param;
 
   int tmp_col_min = x->mv_col_min;
   int tmp_col_max = x->mv_col_max;
   int tmp_row_min = x->mv_row_min;
@@ -38,11 +38,8 @@
   int_mv ref_full;
 
   // Further step/diamond searches as necessary
-  if (cpi->Speed < 8) {
-    step_param = cpi->sf.first_step + ((cpi->Speed > 5) ? 1 : 0);
-  } else {
-    step_param = cpi->sf.first_step + 2;
-  }
+  int step_param = cpi->sf.first_step +
+      (cpi->Speed < 8 ? (cpi->Speed > 5 ? 1 : 0) : 2);
 
   vp9_clamp_mv_min_max(x, ref_mv);
 
@@ -438,10 +435,7 @@
   vpx_free(arf_not_zz);
 }
 
-void vp9_update_mbgraph_stats
-(
-  VP9_COMP *cpi
-) {
+void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
   int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
   YV12_BUFFER_CONFIG *golden_ref =
--- a/vp9/encoder/vp9_mbgraph.h
+++ b/vp9/encoder/vp9_mbgraph.h
@@ -11,6 +11,6 @@
 #ifndef VP9_ENCODER_VP9_MBGRAPH_H_
 #define VP9_ENCODER_VP9_MBGRAPH_H_
 
-extern void vp9_update_mbgraph_stats(VP9_COMP *cpi);
+void vp9_update_mbgraph_stats(VP9_COMP *cpi);
 
 #endif  // VP9_ENCODER_VP9_MBGRAPH_H_
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -2538,7 +2538,7 @@
 
 }
 
-void select_interp_filter_type(VP9_COMP *cpi) {
+void vp9_select_interp_filter_type(VP9_COMP *cpi) {
   int i;
   int high_filter_index = 0;
   unsigned int thresh;