shithub: libvpx

ref: bd990cad72ecdc864ebfd4470dbbd747974f3aa9
parent: cbe62b9c2d2b006aba52c8eebe7d842e59166fe4
author: Johann <johannkoenig@google.com>
date: Wed Aug 23 11:27:25 EDT 2017

quantize x86: dedup some parts

Change-Id: I9f95f47bc7ecbb7980f21cbc3a91f699624141af
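
For orientation, a minimal sketch of how the shared helpers introduced by this patch compose into a single 16-coefficient quantize pass. The function name quantize_16_sketch and the plain int16_t coefficient arrays are illustrative assumptions only; the real vpx_quantize_b_* functions additionally split the DC/AC values across lanes (the _mm_unpackhi_epi64() calls), store through load_tran_low()/store_tran_low(), take an all-zero early-out, and, in the 32x32 variants, halve zbin/round and "divide" the dqcoeff by 2.

#include <emmintrin.h>

#include "vpx_dsp/x86/quantize_x86.h"

static void quantize_16_sketch(const int16_t *coeff_ptr,
                               const int16_t *zbin_ptr,
                               const int16_t *round_ptr,
                               const int16_t *quant_ptr,
                               const int16_t *quant_shift_ptr,
                               const int16_t *dequant_ptr,
                               const int16_t *iscan_ptr, int16_t *qcoeff_ptr,
                               int16_t *dqcoeff_ptr, uint16_t *eob_ptr) {
  const __m128i zero = _mm_setzero_si128();
  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1, sign0, sign1, qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1, dqcoeff0, dqcoeff1, eob;

  // One call replaces the five _mm_load_si128()s (plus the zbin - 1
  // adjustment) that each quantize function used to open-code.
  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
                dequant_ptr, &dequant, quant_shift_ptr, &shift);

  coeff0 = _mm_load_si128((const __m128i *)coeff_ptr);
  coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + 8));

  // "Poor man's abs()" as in the SSE2 path: xor with the sign mask, then
  // subtract the sign mask.
  sign0 = _mm_srai_epi16(coeff0, 15);
  sign1 = _mm_srai_epi16(coeff1, 15);
  qcoeff0 = invert_sign_sse2(coeff0, sign0);
  qcoeff1 = invert_sign_sse2(coeff1, sign1);

  // zbin was already decremented by load_b_values(), so cmpgt acts as >=.
  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  // Round, high-multiply by quant, add back, high-multiply by shift.
  calculate_qcoeff(&qcoeff0, round, quant, shift);
  calculate_qcoeff(&qcoeff1, round, quant, shift);

  // Reapply the signs and zero out coefficients below the zbin threshold.
  qcoeff0 = _mm_and_si128(invert_sign_sse2(qcoeff0, sign0), cmp_mask0);
  qcoeff1 = _mm_and_si128(invert_sign_sse2(qcoeff1, sign1), cmp_mask1);
  _mm_store_si128((__m128i *)qcoeff_ptr, qcoeff0);
  _mm_store_si128((__m128i *)(qcoeff_ptr + 8), qcoeff1);

  dqcoeff0 = calculate_dqcoeff(qcoeff0, dequant);
  dqcoeff1 = calculate_dqcoeff(qcoeff1, dequant);
  _mm_store_si128((__m128i *)dqcoeff_ptr, dqcoeff0);
  _mm_store_si128((__m128i *)(dqcoeff_ptr + 8), dqcoeff1);

  eob = scan_for_eob(&dqcoeff0, &dqcoeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
                     zero);
  *eob_ptr = (uint16_t)accumulate_eob(eob);
}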

--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -279,6 +279,7 @@
 DSP_SRCS-yes            += quantize.c
 DSP_SRCS-yes            += quantize.h
 
+DSP_SRCS-$(HAVE_SSE2)   += x86/quantize_x86.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/quantize_sse2.c
 DSP_SRCS-$(HAVE_SSSE3)  += x86/quantize_ssse3.c
 DSP_SRCS-$(HAVE_AVX)    += x86/quantize_avx.c
--- a/vpx_dsp/x86/quantize_avx.c
+++ b/vpx_dsp/x86/quantize_avx.c
@@ -17,6 +17,7 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 #include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
+#include "vpx_dsp/x86/quantize_x86.h"
 
 void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                         int skip_block, const int16_t *zbin_ptr,
@@ -34,9 +35,7 @@
   __m128i qcoeff0, qcoeff1;
   __m128i cmp_mask0, cmp_mask1;
   __m128i all_zero;
-  __m128i qtmp0, qtmp1;
-  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
-  __m128i eob = zero, eob0, eob1;
+  __m128i eob = zero, eob0;
 
   (void)scan_ptr;
   (void)skip_block;
@@ -44,15 +43,8 @@
 
   *eob_ptr = 0;
 
-  // Setup global values.
-  zbin = _mm_load_si128((const __m128i *)zbin_ptr);
-  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
-  // it is a strict "greater" comparison.
-  zbin = _mm_sub_epi16(zbin, _mm_set1_epi16(1));
-  round = _mm_load_si128((const __m128i *)round_ptr);
-  quant = _mm_load_si128((const __m128i *)quant_ptr);
-  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
-  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
+  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
+                dequant_ptr, &dequant, quant_shift_ptr, &shift);
 
   // Do DC and first 15 AC.
   coeff0 = load_tran_low(coeff_ptr);
@@ -81,20 +73,11 @@
     shift = _mm_unpackhi_epi64(shift, shift);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
   } else {
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
     round = _mm_unpackhi_epi64(round, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
-
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
     quant = _mm_unpackhi_epi64(quant, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
     shift = _mm_unpackhi_epi64(shift, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
     // Reinsert signs
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
@@ -107,24 +90,15 @@
     store_tran_low(qcoeff0, qcoeff_ptr);
     store_tran_low(qcoeff1, qcoeff_ptr + 8);
 
-    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+    coeff0 = calculate_dqcoeff(qcoeff0, dequant);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
-    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+    coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
     store_tran_low(coeff0, dqcoeff_ptr);
     store_tran_low(coeff1, dqcoeff_ptr + 8);
 
-    // Scan for eob.
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
-    // Add one to convert from indices to counts
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob = _mm_max_epi16(eob, eob1);
+    eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
+                       zero);
   }
 
   // AC only loop.
@@ -149,18 +123,9 @@
       continue;
     }
 
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
-
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
     qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
 
@@ -170,35 +135,18 @@
     store_tran_low(qcoeff0, qcoeff_ptr + index);
     store_tran_low(qcoeff1, qcoeff_ptr + index + 8);
 
-    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
-    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+    coeff0 = calculate_dqcoeff(qcoeff0, dequant);
+    coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
     store_tran_low(coeff0, dqcoeff_ptr + index);
     store_tran_low(coeff1, dqcoeff_ptr + index + 8);
 
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob0 = _mm_max_epi16(eob0, eob1);
+    eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
+                        index, zero);
     eob = _mm_max_epi16(eob, eob0);
   }
 
-  // Accumulate eob.
-  {
-    __m128i eob_shuffled;
-    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    *eob_ptr = _mm_extract_epi16(eob, 1);
-  }
+  *eob_ptr = accumulate_eob(eob);
 }
 
 void vpx_quantize_b_32x32_avx(
@@ -217,9 +165,7 @@
   __m128i qcoeff0, qcoeff1;
   __m128i cmp_mask0, cmp_mask1;
   __m128i all_zero;
-  __m128i qtmp0, qtmp1;
-  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
-  __m128i eob = zero, eob0, eob1;
+  __m128i eob = zero, eob0;
 
   (void)scan_ptr;
   (void)n_coeffs;
@@ -226,8 +172,6 @@
   (void)skip_block;
   assert(!skip_block);
 
-  *eob_ptr = 0;
-
   // Setup global values.
   // The 32x32 halves zbin and round.
   zbin = _mm_load_si128((const __m128i *)zbin_ptr);
@@ -255,7 +199,7 @@
   qcoeff1 = _mm_abs_epi16(coeff1);
 
   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
-  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
+  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
 
   all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
@@ -272,26 +216,17 @@
     shift = _mm_unpackhi_epi64(shift, shift);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
   } else {
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
     round = _mm_unpackhi_epi64(round, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
-
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
     quant = _mm_unpackhi_epi64(quant, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
     shift = _mm_unpackhi_epi64(shift, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    // Reinsert signs
+    // Reinsert signs.
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
     qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
 
-    // Mask out zbin threshold coeffs
+    // Mask out zbin threshold coeffs.
     qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
     qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);
 
@@ -304,9 +239,9 @@
     coeff0 = _mm_abs_epi16(qcoeff0);
     coeff1 = _mm_abs_epi16(qcoeff1);
 
-    coeff0 = _mm_mullo_epi16(coeff0, dequant);
+    coeff0 = calculate_dqcoeff(coeff0, dequant);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
-    coeff1 = _mm_mullo_epi16(coeff1, dequant);
+    coeff1 = calculate_dqcoeff(coeff1, dequant);
 
     // "Divide" by 2.
     coeff0 = _mm_srli_epi16(coeff0, 1);
@@ -318,17 +253,8 @@
     store_tran_low(coeff0, dqcoeff_ptr);
     store_tran_low(coeff1, dqcoeff_ptr + 8);
 
-    // Scan for eob.
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
-    // Add one to convert from indices to counts
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob = _mm_max_epi16(eob, eob1);
+    eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
+                       zero);
   }
 
   // AC only loop.
@@ -353,18 +279,9 @@
       continue;
     }
 
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
-
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
     qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
 
@@ -377,8 +294,8 @@
     coeff0 = _mm_abs_epi16(qcoeff0);
     coeff1 = _mm_abs_epi16(qcoeff1);
 
-    coeff0 = _mm_mullo_epi16(coeff0, dequant);
-    coeff1 = _mm_mullo_epi16(coeff1, dequant);
+    coeff0 = calculate_dqcoeff(coeff0, dequant);
+    coeff1 = calculate_dqcoeff(coeff1, dequant);
 
     coeff0 = _mm_srli_epi16(coeff0, 1);
     coeff1 = _mm_srli_epi16(coeff1, 1);
@@ -389,27 +306,10 @@
     store_tran_low(coeff0, dqcoeff_ptr + index);
     store_tran_low(coeff1, dqcoeff_ptr + index + 8);
 
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob0 = _mm_max_epi16(eob0, eob1);
+    eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
+                        index, zero);
     eob = _mm_max_epi16(eob, eob0);
   }
 
-  // Accumulate eob.
-  {
-    __m128i eob_shuffled;
-    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    *eob_ptr = _mm_extract_epi16(eob, 1);
-  }
+  *eob_ptr = accumulate_eob(eob);
 }
--- a/vpx_dsp/x86/quantize_sse2.c
+++ b/vpx_dsp/x86/quantize_sse2.c
@@ -15,6 +15,7 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 #include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
+#include "vpx_dsp/x86/quantize_x86.h"
 
 void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                          int skip_block, const int16_t *zbin_ptr,
@@ -30,9 +31,7 @@
   __m128i coeff0, coeff1, coeff0_sign, coeff1_sign;
   __m128i qcoeff0, qcoeff1;
   __m128i cmp_mask0, cmp_mask1;
-  __m128i qtmp0, qtmp1;
-  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
-  __m128i eob, eob0, eob1;
+  __m128i eob, eob0;
 
   (void)scan_ptr;
   (void)skip_block;
@@ -39,12 +38,8 @@
   assert(!skip_block);
 
   // Setup global values.
-  zbin = _mm_load_si128((const __m128i *)zbin_ptr);
-  round = _mm_load_si128((const __m128i *)round_ptr);
-  quant = _mm_load_si128((const __m128i *)quant_ptr);
-  zbin = _mm_sub_epi16(zbin, _mm_set1_epi16(1));
-  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
-  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
+  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
+                dequant_ptr, &dequant, quant_shift_ptr, &shift);
 
   // Do DC and first 15 AC.
   coeff0 = load_tran_low(coeff_ptr);
@@ -53,35 +48,24 @@
   // Poor man's abs().
   coeff0_sign = _mm_srai_epi16(coeff0, 15);
   coeff1_sign = _mm_srai_epi16(coeff1, 15);
-  qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
-  qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
-  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
-  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+  qcoeff0 = invert_sign_sse2(coeff0, coeff0_sign);
+  qcoeff1 = invert_sign_sse2(coeff1, coeff1_sign);
 
   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
 
-  qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-  round = _mm_unpackhi_epi64(round, round);
-  qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+  calculate_qcoeff(&qcoeff0, round, quant, shift);
 
-  qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+  round = _mm_unpackhi_epi64(round, round);
   quant = _mm_unpackhi_epi64(quant, quant);
-  qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-  qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-  qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-  qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
   shift = _mm_unpackhi_epi64(shift, shift);
-  qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
 
+  calculate_qcoeff(&qcoeff1, round, quant, shift);
+
   // Reinsert signs
-  qcoeff0 = _mm_xor_si128(qcoeff0, coeff0_sign);
-  qcoeff1 = _mm_xor_si128(qcoeff1, coeff1_sign);
-  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
-  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+  qcoeff0 = invert_sign_sse2(qcoeff0, coeff0_sign);
+  qcoeff1 = invert_sign_sse2(qcoeff1, coeff1_sign);
 
   // Mask out zbin threshold coeffs
   qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
@@ -90,24 +74,15 @@
   store_tran_low(qcoeff0, qcoeff_ptr);
   store_tran_low(qcoeff1, qcoeff_ptr + 8);
 
-  coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+  coeff0 = calculate_dqcoeff(qcoeff0, dequant);
   dequant = _mm_unpackhi_epi64(dequant, dequant);
-  coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+  coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
   store_tran_low(coeff0, dqcoeff_ptr);
   store_tran_low(coeff1, dqcoeff_ptr + 8);
 
-  // Scan for eob.
-  zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-  zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-  iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
-  iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
-  // Add one to convert from indices to counts
-  iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-  iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-  eob = _mm_andnot_si128(zero_coeff0, iscan0);
-  eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-  eob = _mm_max_epi16(eob, eob1);
+  eob =
+      scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0, zero);
 
   // AC only loop.
   while (index < n_coeffs) {
@@ -116,31 +91,18 @@
 
     coeff0_sign = _mm_srai_epi16(coeff0, 15);
     coeff1_sign = _mm_srai_epi16(coeff1, 15);
-    qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
-    qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
-    qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
-    qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+    qcoeff0 = invert_sign_sse2(coeff0, coeff0_sign);
+    qcoeff1 = invert_sign_sse2(coeff1, coeff1_sign);
 
     cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
     cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
 
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+    qcoeff0 = invert_sign_sse2(qcoeff0, coeff0_sign);
+    qcoeff1 = invert_sign_sse2(qcoeff1, coeff1_sign);
 
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
-
-    qcoeff0 = _mm_xor_si128(qcoeff0, coeff0_sign);
-    qcoeff1 = _mm_xor_si128(qcoeff1, coeff1_sign);
-    qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
-    qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
-
     qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
     qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);
 
@@ -147,35 +109,18 @@
     store_tran_low(qcoeff0, qcoeff_ptr + index);
     store_tran_low(qcoeff1, qcoeff_ptr + index + 8);
 
-    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
-    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+    coeff0 = calculate_dqcoeff(qcoeff0, dequant);
+    coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
     store_tran_low(coeff0, dqcoeff_ptr + index);
     store_tran_low(coeff1, dqcoeff_ptr + index + 8);
 
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob0 = _mm_max_epi16(eob0, eob1);
+    eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
+                        index, zero);
     eob = _mm_max_epi16(eob, eob0);
 
     index += 16;
   }
 
-  // Accumulate eob.
-  {
-    __m128i eob_shuffled;
-    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    *eob_ptr = _mm_extract_epi16(eob, 1);
-  }
+  *eob_ptr = accumulate_eob(eob);
 }
--- a/vpx_dsp/x86/quantize_ssse3.c
+++ b/vpx_dsp/x86/quantize_ssse3.c
@@ -14,6 +14,7 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 #include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
+#include "vpx_dsp/x86/quantize_x86.h"
 
 void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                           int skip_block, const int16_t *zbin_ptr,
@@ -23,29 +24,20 @@
                           const int16_t *dequant_ptr, uint16_t *eob_ptr,
                           const int16_t *scan_ptr, const int16_t *iscan_ptr) {
   const __m128i zero = _mm_setzero_si128();
-  intptr_t index = 16;
+  int index = 16;
 
   __m128i zbin, round, quant, dequant, shift;
   __m128i coeff0, coeff1;
   __m128i qcoeff0, qcoeff1;
   __m128i cmp_mask0, cmp_mask1;
-  __m128i qtmp0, qtmp1;
-  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
-  __m128i eob, eob0, eob1;
+  __m128i eob, eob0;
 
   (void)scan_ptr;
   (void)skip_block;
   assert(!skip_block);
 
-  // Setup global values.
-  zbin = _mm_load_si128((const __m128i *)zbin_ptr);
-  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
-  // it is a strict "greater" comparison.
-  zbin = _mm_sub_epi16(zbin, _mm_set1_epi16(1));
-  round = _mm_load_si128((const __m128i *)round_ptr);
-  quant = _mm_load_si128((const __m128i *)quant_ptr);
-  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
-  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
+  load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,
+                dequant_ptr, &dequant, quant_shift_ptr, &shift);
 
   // Do DC and first 15 AC.
   coeff0 = load_tran_low(coeff_ptr);
@@ -58,20 +50,11 @@
   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
 
-  qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+  calculate_qcoeff(&qcoeff0, round, quant, shift);
   round = _mm_unpackhi_epi64(round, round);
-  qcoeff1 = _mm_adds_epi16(qcoeff1, round);
-
-  qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
   quant = _mm_unpackhi_epi64(quant, quant);
-  qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-  qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-  qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-  qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
   shift = _mm_unpackhi_epi64(shift, shift);
-  qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+  calculate_qcoeff(&qcoeff1, round, quant, shift);
 
   // Reinsert signs
   qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
@@ -84,24 +67,15 @@
   store_tran_low(qcoeff0, qcoeff_ptr);
   store_tran_low(qcoeff1, qcoeff_ptr + 8);
 
-  coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+  coeff0 = calculate_dqcoeff(qcoeff0, dequant);
   dequant = _mm_unpackhi_epi64(dequant, dequant);
-  coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+  coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
   store_tran_low(coeff0, dqcoeff_ptr);
   store_tran_low(coeff1, dqcoeff_ptr + 8);
 
-  // Scan for eob.
-  zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-  zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-  iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
-  iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
-  // Add one to convert from indices to counts
-  iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-  iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-  eob = _mm_andnot_si128(zero_coeff0, iscan0);
-  eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-  eob = _mm_max_epi16(eob, eob1);
+  eob =
+      scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0, zero);
 
   // AC only loop.
   while (index < n_coeffs) {
@@ -114,18 +88,9 @@
     cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
     cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
 
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
-
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
     qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
 
@@ -135,37 +100,20 @@
     store_tran_low(qcoeff0, qcoeff_ptr + index);
     store_tran_low(qcoeff1, qcoeff_ptr + index + 8);
 
-    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
-    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+    coeff0 = calculate_dqcoeff(qcoeff0, dequant);
+    coeff1 = calculate_dqcoeff(qcoeff1, dequant);
 
     store_tran_low(coeff0, dqcoeff_ptr + index);
     store_tran_low(coeff1, dqcoeff_ptr + index + 8);
 
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob0 = _mm_max_epi16(eob0, eob1);
+    eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
+                        index, zero);
     eob = _mm_max_epi16(eob, eob0);
 
     index += 16;
   }
 
-  // Accumulate eob.
-  {
-    __m128i eob_shuffled;
-    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    *eob_ptr = _mm_extract_epi16(eob, 1);
-  }
+  *eob_ptr = accumulate_eob(eob);
 }
 
 void vpx_quantize_b_32x32_ssse3(
@@ -176,7 +124,7 @@
     const int16_t *scan_ptr, const int16_t *iscan_ptr) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i one = _mm_set1_epi16(1);
-  intptr_t index = 16;
+  int index;
 
   __m128i zbin, round, quant, dequant, shift;
   __m128i coeff0, coeff1;
@@ -183,9 +131,7 @@
   __m128i qcoeff0, qcoeff1;
   __m128i cmp_mask0, cmp_mask1;
   __m128i all_zero;
-  __m128i qtmp0, qtmp1;
-  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
-  __m128i eob = zero, eob0, eob1;
+  __m128i eob = zero, eob0;
 
   (void)scan_ptr;
   (void)n_coeffs;
@@ -236,7 +182,7 @@
     _mm_store_si128((__m128i *)(qcoeff_ptr + 12), zero);
     _mm_store_si128((__m128i *)(dqcoeff_ptr + 4), zero);
     _mm_store_si128((__m128i *)(dqcoeff_ptr + 12), zero);
-#endif
+#endif  // CONFIG_VP9_HIGHBITDEPTH
 
     round = _mm_unpackhi_epi64(round, round);
     quant = _mm_unpackhi_epi64(quant, quant);
@@ -243,20 +189,11 @@
     shift = _mm_unpackhi_epi64(shift, shift);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
   } else {
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
     round = _mm_unpackhi_epi64(round, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
-
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
     quant = _mm_unpackhi_epi64(quant, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
     shift = _mm_unpackhi_epi64(shift, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
     // Reinsert signs.
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
@@ -275,9 +212,9 @@
     coeff0 = _mm_abs_epi16(qcoeff0);
     coeff1 = _mm_abs_epi16(qcoeff1);
 
-    coeff0 = _mm_mullo_epi16(coeff0, dequant);
+    coeff0 = calculate_dqcoeff(coeff0, dequant);
     dequant = _mm_unpackhi_epi64(dequant, dequant);
-    coeff1 = _mm_mullo_epi16(coeff1, dequant);
+    coeff1 = calculate_dqcoeff(coeff1, dequant);
 
     // "Divide" by 2.
     coeff0 = _mm_srli_epi16(coeff0, 1);
@@ -289,17 +226,8 @@
     store_tran_low(coeff0, dqcoeff_ptr);
     store_tran_low(coeff1, dqcoeff_ptr + 8);
 
-    // Scan for eob.
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
-    // Add one to convert from indices to counts.
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob = _mm_max_epi16(eob, eob1);
+    eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
+                       zero);
   }
 
   // AC only loop.
@@ -324,22 +252,13 @@
       _mm_store_si128((__m128i *)(qcoeff_ptr + index + 12), zero);
       _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 4), zero);
       _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 12), zero);
-#endif
+#endif  // CONFIG_VP9_HIGHBITDEPTH
       continue;
     }
 
-    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
-    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+    calculate_qcoeff(&qcoeff0, round, quant, shift);
+    calculate_qcoeff(&qcoeff1, round, quant, shift);
 
-    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
-    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
-
-    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
-    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
-
-    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
-    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
-
     qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
     qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
 
@@ -352,8 +271,8 @@
     coeff0 = _mm_abs_epi16(qcoeff0);
     coeff1 = _mm_abs_epi16(qcoeff1);
 
-    coeff0 = _mm_mullo_epi16(coeff0, dequant);
-    coeff1 = _mm_mullo_epi16(coeff1, dequant);
+    coeff0 = calculate_dqcoeff(coeff0, dequant);
+    coeff1 = calculate_dqcoeff(coeff1, dequant);
 
     coeff0 = _mm_srli_epi16(coeff0, 1);
     coeff1 = _mm_srli_epi16(coeff1, 1);
@@ -364,26 +283,10 @@
     store_tran_low(coeff0, dqcoeff_ptr + index);
     store_tran_low(coeff1, dqcoeff_ptr + index + 8);
 
-    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
-    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
-    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
-    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
-    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
-    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
-    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
-    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
-    eob0 = _mm_max_epi16(eob0, eob1);
+    eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
+                        index, zero);
     eob = _mm_max_epi16(eob, eob0);
   }
 
-  {
-    __m128i eob_shuffled;
-    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
-    eob = _mm_max_epi16(eob, eob_shuffled);
-    *eob_ptr = _mm_extract_epi16(eob, 1);
-  }
+  *eob_ptr = accumulate_eob(eob);
 }
--- /dev/null
+++ b/vpx_dsp/x86/quantize_x86.h
@@ -1,0 +1,79 @@
+/*
+ *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
+
+static INLINE void load_b_values(const int16_t *zbin_ptr, __m128i *zbin,
+                                 const int16_t *round_ptr, __m128i *round,
+                                 const int16_t *quant_ptr, __m128i *quant,
+                                 const int16_t *dequant_ptr, __m128i *dequant,
+                                 const int16_t *shift_ptr, __m128i *shift) {
+  *zbin = _mm_load_si128((const __m128i *)zbin_ptr);
+  *round = _mm_load_si128((const __m128i *)round_ptr);
+  *quant = _mm_load_si128((const __m128i *)quant_ptr);
+  *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));
+  *dequant = _mm_load_si128((const __m128i *)dequant_ptr);
+  *shift = _mm_load_si128((const __m128i *)shift_ptr);
+}
+
+// With ssse3 and later, abs() and sign() are preferred.
+static INLINE __m128i invert_sign_sse2(__m128i a, __m128i sign) {
+  a = _mm_xor_si128(a, sign);
+  return _mm_sub_epi16(a, sign);
+}
+
+static INLINE void calculate_qcoeff(__m128i *coeff, const __m128i round,
+                                    const __m128i quant, const __m128i shift) {
+  __m128i tmp, qcoeff;
+  qcoeff = _mm_adds_epi16(*coeff, round);
+  tmp = _mm_mulhi_epi16(qcoeff, quant);
+  qcoeff = _mm_add_epi16(tmp, qcoeff);
+  *coeff = _mm_mulhi_epi16(qcoeff, shift);
+}
+
+static INLINE __m128i calculate_dqcoeff(__m128i qcoeff, __m128i dequant) {
+  return _mm_mullo_epi16(qcoeff, dequant);
+}
+
+// Scan 16 values for eob reference in scan_ptr. Use masks (-1) from comparing
+// to zbin to add 1 to the index in 'scan'.
+static INLINE __m128i scan_for_eob(__m128i *coeff0, __m128i *coeff1,
+                                   const __m128i zbin_mask0,
+                                   const __m128i zbin_mask1,
+                                   const int16_t *scan_ptr, const int index,
+                                   const __m128i zero) {
+  const __m128i zero_coeff0 = _mm_cmpeq_epi16(*coeff0, zero);
+  const __m128i zero_coeff1 = _mm_cmpeq_epi16(*coeff1, zero);
+  __m128i scan0 = _mm_load_si128((const __m128i *)(scan_ptr + index));
+  __m128i scan1 = _mm_load_si128((const __m128i *)(scan_ptr + index + 8));
+  __m128i eob0, eob1;
+  // Add one to convert from indices to counts
+  scan0 = _mm_sub_epi16(scan0, zbin_mask0);
+  scan1 = _mm_sub_epi16(scan1, zbin_mask1);
+  eob0 = _mm_andnot_si128(zero_coeff0, scan0);
+  eob1 = _mm_andnot_si128(zero_coeff1, scan1);
+  return _mm_max_epi16(eob0, eob1);
+}
+
+static INLINE int16_t accumulate_eob(__m128i eob) {
+  __m128i eob_shuffled;
+  eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  return _mm_extract_epi16(eob, 1);
+}