shithub: libvpx

ref: 9e03eedf625c6248f04d1f055f2ca60caac1c877
parent: 492d52b9cc549f0d8d8d6f370425a0cee16237eb
parent: c7e4917e974e30f292ca83f744b8c8bee417b461
author: Linfeng Zhang <linfengz@google.com>
date: Tue Jun 20 20:38:24 EDT 2017

Merge changes Ib26dd515,Ie60dabc3

* changes:
  Clean 8x8 idct x86 optimization
  Remove vpx_idct8x8_64_add_ssse3()
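
The second change removes the full 64-coefficient 8x8 SSSE3 inverse transform; SSSE3-capable machines now fall back to the SSE2 version, which the first change refactors around shared load/round/store helpers. A minimal usage sketch of the public entry point (reconstruct_block is an illustrative caller, not part of the patch; tran_low_t is int16_t only when CONFIG_VP9_HIGHBITDEPTH is off):

    #include <stdint.h>
    #include "./vpx_dsp_rtcd.h"
    #include "vpx_dsp/vpx_dsp_common.h"

    /* Illustrative caller: inverse-transform 64 dequantized coefficients
     * and add the residual onto the pixels already in dest. */
    static void reconstruct_block(const tran_low_t coeffs[64], uint8_t *dest,
                                  int stride) {
      vpx_idct8x8_64_add(coeffs, dest, stride); /* RTCD picks the kernel */
    }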

--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -739,7 +739,7 @@
     !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&vpx_fdct8x8_ssse3,
-                                                     &vpx_idct8x8_64_add_ssse3,
+                                                     &vpx_idct8x8_64_add_sse2,
                                                      0, VPX_BITS_8)));
 #endif
 
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -744,8 +744,6 @@
              &wrapper<vpx_idct32x32_135_add_ssse3>, TX_32X32, 135, 8, 1),
   make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_34_add_c>,
              &wrapper<vpx_idct32x32_34_add_ssse3>, TX_32X32, 34, 8, 1),
-  make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
-             &wrapper<vpx_idct8x8_64_add_ssse3>, TX_8X8, 64, 8, 1),
   make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_12_add_c>,
              &wrapper<vpx_idct8x8_12_add_ssse3>, TX_8X8, 12, 8, 1)
 };
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -584,7 +584,7 @@
   # Note that there are more specializations appended when CONFIG_VP9_HIGHBITDEPTH is off.
   specialize qw/vpx_idct4x4_16_add neon sse2/;
   specialize qw/vpx_idct4x4_1_add neon sse2/;
-  specialize qw/vpx_idct8x8_64_add neon sse2 ssse3/;
+  specialize qw/vpx_idct8x8_64_add neon sse2/;
   specialize qw/vpx_idct8x8_12_add neon sse2 ssse3/;
   specialize qw/vpx_idct8x8_1_add neon sse2/;
   specialize qw/vpx_idct16x16_256_add neon sse2/;
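
Each specialize line in vpx_dsp_rtcd_defs.pl tells the RTCD generator which optimized implementations exist for a function; dropping ssse3 here means the generated dispatcher never installs an SSSE3 pointer for vpx_idct8x8_64_add. Roughly, and this is a hand-written approximation of the generated vpx_dsp_rtcd.h rather than its actual output:

    /* Approximation of the generated runtime dispatch (illustrative only). */
    void (*vpx_idct8x8_64_add)(const tran_low_t *input, uint8_t *dest,
                               int stride) = vpx_idct8x8_64_add_c;

    static void setup_rtcd_internal(void) {
      const int flags = x86_simd_caps(); /* vpx_ports/x86.h */
      if (flags & HAS_SSE2) vpx_idct8x8_64_add = vpx_idct8x8_64_add_sse2;
      /* No HAS_SSSE3 branch any more: SSSE3 machines keep the SSE2 kernel. */
    }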
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -219,57 +219,18 @@
 
 void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
-  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
-
   __m128i in[8];
   int i;
 
   // Load input data.
-  in[0] = load_input_data(input);
-  in[1] = load_input_data(input + 8 * 1);
-  in[2] = load_input_data(input + 8 * 2);
-  in[3] = load_input_data(input + 8 * 3);
-  in[4] = load_input_data(input + 8 * 4);
-  in[5] = load_input_data(input + 8 * 5);
-  in[6] = load_input_data(input + 8 * 6);
-  in[7] = load_input_data(input + 8 * 7);
+  load_buffer_8x8(input, in);
 
   // 2-D
   for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-    transpose_16bit_8x8(in, in);
-
-    // 4-stage 1D idct8x8
-    idct8(in, in);
+    idct8_sse2(in);
   }
 
-  // Final rounding and shift
-  in[0] = _mm_adds_epi16(in[0], final_rounding);
-  in[1] = _mm_adds_epi16(in[1], final_rounding);
-  in[2] = _mm_adds_epi16(in[2], final_rounding);
-  in[3] = _mm_adds_epi16(in[3], final_rounding);
-  in[4] = _mm_adds_epi16(in[4], final_rounding);
-  in[5] = _mm_adds_epi16(in[5], final_rounding);
-  in[6] = _mm_adds_epi16(in[6], final_rounding);
-  in[7] = _mm_adds_epi16(in[7], final_rounding);
-
-  in[0] = _mm_srai_epi16(in[0], 5);
-  in[1] = _mm_srai_epi16(in[1], 5);
-  in[2] = _mm_srai_epi16(in[2], 5);
-  in[3] = _mm_srai_epi16(in[3], 5);
-  in[4] = _mm_srai_epi16(in[4], 5);
-  in[5] = _mm_srai_epi16(in[5], 5);
-  in[6] = _mm_srai_epi16(in[6], 5);
-  in[7] = _mm_srai_epi16(in[7], 5);
-
-  recon_and_store(dest + 0 * stride, in[0]);
-  recon_and_store(dest + 1 * stride, in[1]);
-  recon_and_store(dest + 2 * stride, in[2]);
-  recon_and_store(dest + 3 * stride, in[3]);
-  recon_and_store(dest + 4 * stride, in[4]);
-  recon_and_store(dest + 5 * stride, in[5]);
-  recon_and_store(dest + 6 * stride, in[6]);
-  recon_and_store(dest + 7 * stride, in[7]);
+  write_buffer_8x8(in, dest, stride);
 }
 
 void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
@@ -505,7 +466,6 @@
 void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   const __m128i zero = _mm_setzero_si128();
-  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
   const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
@@ -575,33 +535,7 @@
   in[4] = in[5] = in[6] = in[7] = zero;
 
   idct8(in, in);
-  // Final rounding and shift
-  in[0] = _mm_adds_epi16(in[0], final_rounding);
-  in[1] = _mm_adds_epi16(in[1], final_rounding);
-  in[2] = _mm_adds_epi16(in[2], final_rounding);
-  in[3] = _mm_adds_epi16(in[3], final_rounding);
-  in[4] = _mm_adds_epi16(in[4], final_rounding);
-  in[5] = _mm_adds_epi16(in[5], final_rounding);
-  in[6] = _mm_adds_epi16(in[6], final_rounding);
-  in[7] = _mm_adds_epi16(in[7], final_rounding);
-
-  in[0] = _mm_srai_epi16(in[0], 5);
-  in[1] = _mm_srai_epi16(in[1], 5);
-  in[2] = _mm_srai_epi16(in[2], 5);
-  in[3] = _mm_srai_epi16(in[3], 5);
-  in[4] = _mm_srai_epi16(in[4], 5);
-  in[5] = _mm_srai_epi16(in[5], 5);
-  in[6] = _mm_srai_epi16(in[6], 5);
-  in[7] = _mm_srai_epi16(in[7], 5);
-
-  recon_and_store(dest + 0 * stride, in[0]);
-  recon_and_store(dest + 1 * stride, in[1]);
-  recon_and_store(dest + 2 * stride, in[2]);
-  recon_and_store(dest + 3 * stride, in[3]);
-  recon_and_store(dest + 4 * stride, in[4]);
-  recon_and_store(dest + 5 * stride, in[5]);
-  recon_and_store(dest + 6 * stride, in[6]);
-  recon_and_store(dest + 7 * stride, in[7]);
+  write_buffer_8x8(in, dest, stride);
 }
 
 #define IDCT16                                                               \
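
Both SSE2 paths now share the same prologue and epilogue. The per-pass body deleted above (an 8x8 transpose followed by the 4-stage 1-D idct8) is presumably what the new idct8_sse2() helper wraps; its definition is not part of this patch, so the following is a sketch reconstructed from the removed code, not the actual helper:

    #include "vpx_dsp/x86/inv_txfm_sse2.h"
    #include "vpx_dsp/x86/transpose_sse2.h"

    /* Sketch of the loop body idct8_sse2() presumably encapsulates. */
    static INLINE void idct8_sse2_sketch(__m128i *const in) {
      transpose_16bit_8x8(in, in); /* switch between row and column passes */
      idct8(in, in);               /* 4-stage 1-D 8-point inverse DCT */
    }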
--- a/vpx_dsp/x86/inv_txfm_sse2.h
+++ b/vpx_dsp/x86/inv_txfm_sse2.h
@@ -99,6 +99,18 @@
 #endif
 }
 
+static INLINE void load_buffer_8x8(const tran_low_t *const input,
+                                   __m128i *const in) {
+  in[0] = load_input_data(input + 0 * 8);
+  in[1] = load_input_data(input + 1 * 8);
+  in[2] = load_input_data(input + 2 * 8);
+  in[3] = load_input_data(input + 3 * 8);
+  in[4] = load_input_data(input + 4 * 8);
+  in[5] = load_input_data(input + 5 * 8);
+  in[6] = load_input_data(input + 6 * 8);
+  in[7] = load_input_data(input + 7 * 8);
+}
+
 static INLINE void load_buffer_8x16(const tran_low_t *const input,
                                     __m128i *const in) {
   in[0] = load_input_data(input + 0 * 16);
@@ -129,7 +141,41 @@
   _mm_storel_epi64((__m128i *)(dest), d0);
 }
 
-static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
+static INLINE void write_buffer_8x8(const __m128i *const in,
+                                    uint8_t *const dest, const int stride) {
+  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+  __m128i t[8];
+  // Final rounding and shift
+  t[0] = _mm_adds_epi16(in[0], final_rounding);
+  t[1] = _mm_adds_epi16(in[1], final_rounding);
+  t[2] = _mm_adds_epi16(in[2], final_rounding);
+  t[3] = _mm_adds_epi16(in[3], final_rounding);
+  t[4] = _mm_adds_epi16(in[4], final_rounding);
+  t[5] = _mm_adds_epi16(in[5], final_rounding);
+  t[6] = _mm_adds_epi16(in[6], final_rounding);
+  t[7] = _mm_adds_epi16(in[7], final_rounding);
+
+  t[0] = _mm_srai_epi16(t[0], 5);
+  t[1] = _mm_srai_epi16(t[1], 5);
+  t[2] = _mm_srai_epi16(t[2], 5);
+  t[3] = _mm_srai_epi16(t[3], 5);
+  t[4] = _mm_srai_epi16(t[4], 5);
+  t[5] = _mm_srai_epi16(t[5], 5);
+  t[6] = _mm_srai_epi16(t[6], 5);
+  t[7] = _mm_srai_epi16(t[7], 5);
+
+  recon_and_store(dest + 0 * stride, t[0]);
+  recon_and_store(dest + 1 * stride, t[1]);
+  recon_and_store(dest + 2 * stride, t[2]);
+  recon_and_store(dest + 3 * stride, t[3]);
+  recon_and_store(dest + 4 * stride, t[4]);
+  recon_and_store(dest + 5 * stride, t[5]);
+  recon_and_store(dest + 6 * stride, t[6]);
+  recon_and_store(dest + 7 * stride, t[7]);
+}
+
+static INLINE void write_buffer_8x16(uint8_t *const dest, __m128i *const in,
+                                     const int stride) {
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   // Final rounding and shift
   in[0] = _mm_adds_epi16(in[0], final_rounding);
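
The final rounding in write_buffer_8x8() computes a rounded division by 32, the output scale of the 2-D 8x8 inverse transform: add 2^4, then arithmetic-shift right by 5 (write_buffer_8x16's 1 << 5 constant plays the same role for its larger scale). A scalar model of the step, ignoring the saturation that _mm_adds_epi16 provides:

    #include <assert.h>

    /* Scalar model of the SIMD epilogue: round(x / 32), ties toward +inf. */
    static int round_shift5(int x) { return (x + 16) >> 5; }

    int main(void) {
      assert(round_shift5(31) == 1);  /* 31/32 = 0.97 -> 1 */
      assert(round_shift5(47) == 1);  /* 47/32 = 1.47 -> 1 */
      assert(round_shift5(48) == 2);  /* 48/32 = 1.5  -> 2 */
      assert(round_shift5(-16) == 0); /* -0.5 rounds up to 0 */
      return 0;
    }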
--- a/vpx_dsp/x86/inv_txfm_ssse3.c
+++ b/vpx_dsp/x86/inv_txfm_ssse3.c
@@ -15,211 +15,10 @@
 #include "vpx_dsp/x86/transpose_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"
 
-void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
-                              int stride) {
-  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
-  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
-  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
-  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
-  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
-  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
-  const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
-  const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
-  const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
-  const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
-
-  __m128i in[8];
-  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
-  __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
-  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
-  int i;
-
-  // Load input data.
-  in[0] = load_input_data(input);
-  in[1] = load_input_data(input + 8 * 1);
-  in[2] = load_input_data(input + 8 * 2);
-  in[3] = load_input_data(input + 8 * 3);
-  in[4] = load_input_data(input + 8 * 4);
-  in[5] = load_input_data(input + 8 * 5);
-  in[6] = load_input_data(input + 8 * 6);
-  in[7] = load_input_data(input + 8 * 7);
-
-  // 2-D
-  for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-    transpose_16bit_8x8(in, in);
-
-    // 4-stage 1D idct8x8
-    {
-      /* Stage1 */
-      {
-        const __m128i lo_17 = _mm_unpacklo_epi16(in[1], in[7]);
-        const __m128i hi_17 = _mm_unpackhi_epi16(in[1], in[7]);
-        const __m128i lo_35 = _mm_unpacklo_epi16(in[3], in[5]);
-        const __m128i hi_35 = _mm_unpackhi_epi16(in[3], in[5]);
-
-        {
-          tmp0 = _mm_madd_epi16(lo_17, stg1_0);
-          tmp1 = _mm_madd_epi16(hi_17, stg1_0);
-          tmp2 = _mm_madd_epi16(lo_17, stg1_1);
-          tmp3 = _mm_madd_epi16(hi_17, stg1_1);
-          tmp4 = _mm_madd_epi16(lo_35, stg1_2);
-          tmp5 = _mm_madd_epi16(hi_35, stg1_2);
-          tmp6 = _mm_madd_epi16(lo_35, stg1_3);
-          tmp7 = _mm_madd_epi16(hi_35, stg1_3);
-
-          tmp0 = _mm_add_epi32(tmp0, rounding);
-          tmp1 = _mm_add_epi32(tmp1, rounding);
-          tmp2 = _mm_add_epi32(tmp2, rounding);
-          tmp3 = _mm_add_epi32(tmp3, rounding);
-          tmp4 = _mm_add_epi32(tmp4, rounding);
-          tmp5 = _mm_add_epi32(tmp5, rounding);
-          tmp6 = _mm_add_epi32(tmp6, rounding);
-          tmp7 = _mm_add_epi32(tmp7, rounding);
-
-          tmp0 = _mm_srai_epi32(tmp0, 14);
-          tmp1 = _mm_srai_epi32(tmp1, 14);
-          tmp2 = _mm_srai_epi32(tmp2, 14);
-          tmp3 = _mm_srai_epi32(tmp3, 14);
-          tmp4 = _mm_srai_epi32(tmp4, 14);
-          tmp5 = _mm_srai_epi32(tmp5, 14);
-          tmp6 = _mm_srai_epi32(tmp6, 14);
-          tmp7 = _mm_srai_epi32(tmp7, 14);
-
-          stp1_4 = _mm_packs_epi32(tmp0, tmp1);
-          stp1_7 = _mm_packs_epi32(tmp2, tmp3);
-          stp1_5 = _mm_packs_epi32(tmp4, tmp5);
-          stp1_6 = _mm_packs_epi32(tmp6, tmp7);
-        }
-      }
-
-      /* Stage2 */
-      {
-        const __m128i lo_26 = _mm_unpacklo_epi16(in[2], in[6]);
-        const __m128i hi_26 = _mm_unpackhi_epi16(in[2], in[6]);
-
-        {
-          tmp0 = _mm_unpacklo_epi16(in[0], in[4]);
-          tmp1 = _mm_unpackhi_epi16(in[0], in[4]);
-
-          tmp2 = _mm_madd_epi16(tmp0, stk2_0);
-          tmp3 = _mm_madd_epi16(tmp1, stk2_0);
-          tmp4 = _mm_madd_epi16(tmp0, stk2_1);
-          tmp5 = _mm_madd_epi16(tmp1, stk2_1);
-
-          tmp2 = _mm_add_epi32(tmp2, rounding);
-          tmp3 = _mm_add_epi32(tmp3, rounding);
-          tmp4 = _mm_add_epi32(tmp4, rounding);
-          tmp5 = _mm_add_epi32(tmp5, rounding);
-
-          tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-          tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-          tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-          tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-
-          stp2_0 = _mm_packs_epi32(tmp2, tmp3);
-          stp2_1 = _mm_packs_epi32(tmp4, tmp5);
-
-          tmp0 = _mm_madd_epi16(lo_26, stg2_2);
-          tmp1 = _mm_madd_epi16(hi_26, stg2_2);
-          tmp2 = _mm_madd_epi16(lo_26, stg2_3);
-          tmp3 = _mm_madd_epi16(hi_26, stg2_3);
-
-          tmp0 = _mm_add_epi32(tmp0, rounding);
-          tmp1 = _mm_add_epi32(tmp1, rounding);
-          tmp2 = _mm_add_epi32(tmp2, rounding);
-          tmp3 = _mm_add_epi32(tmp3, rounding);
-
-          tmp0 = _mm_srai_epi32(tmp0, 14);
-          tmp1 = _mm_srai_epi32(tmp1, 14);
-          tmp2 = _mm_srai_epi32(tmp2, 14);
-          tmp3 = _mm_srai_epi32(tmp3, 14);
-
-          stp2_2 = _mm_packs_epi32(tmp0, tmp1);
-          stp2_3 = _mm_packs_epi32(tmp2, tmp3);
-        }
-
-        stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
-        stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
-        stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
-        stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
-      }
-
-      /* Stage3 */
-      {
-        stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
-        stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
-        stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
-        stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
-
-        tmp0 = _mm_unpacklo_epi16(stp2_6, stp2_5);
-        tmp1 = _mm_unpackhi_epi16(stp2_6, stp2_5);
-
-        tmp2 = _mm_madd_epi16(tmp0, stk2_1);
-        tmp3 = _mm_madd_epi16(tmp1, stk2_1);
-        tmp4 = _mm_madd_epi16(tmp0, stk2_0);
-        tmp5 = _mm_madd_epi16(tmp1, stk2_0);
-
-        tmp2 = _mm_add_epi32(tmp2, rounding);
-        tmp3 = _mm_add_epi32(tmp3, rounding);
-        tmp4 = _mm_add_epi32(tmp4, rounding);
-        tmp5 = _mm_add_epi32(tmp5, rounding);
-
-        tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-        tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-        tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
-        tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
-
-        stp1_5 = _mm_packs_epi32(tmp2, tmp3);
-        stp1_6 = _mm_packs_epi32(tmp4, tmp5);
-      }
-
-      /* Stage4  */
-      in[0] = _mm_add_epi16(stp1_0, stp2_7);
-      in[1] = _mm_add_epi16(stp1_1, stp1_6);
-      in[2] = _mm_add_epi16(stp1_2, stp1_5);
-      in[3] = _mm_add_epi16(stp1_3, stp2_4);
-      in[4] = _mm_sub_epi16(stp1_3, stp2_4);
-      in[5] = _mm_sub_epi16(stp1_2, stp1_5);
-      in[6] = _mm_sub_epi16(stp1_1, stp1_6);
-      in[7] = _mm_sub_epi16(stp1_0, stp2_7);
-    }
-  }
-
-  // Final rounding and shift
-  in[0] = _mm_adds_epi16(in[0], final_rounding);
-  in[1] = _mm_adds_epi16(in[1], final_rounding);
-  in[2] = _mm_adds_epi16(in[2], final_rounding);
-  in[3] = _mm_adds_epi16(in[3], final_rounding);
-  in[4] = _mm_adds_epi16(in[4], final_rounding);
-  in[5] = _mm_adds_epi16(in[5], final_rounding);
-  in[6] = _mm_adds_epi16(in[6], final_rounding);
-  in[7] = _mm_adds_epi16(in[7], final_rounding);
-
-  in[0] = _mm_srai_epi16(in[0], 5);
-  in[1] = _mm_srai_epi16(in[1], 5);
-  in[2] = _mm_srai_epi16(in[2], 5);
-  in[3] = _mm_srai_epi16(in[3], 5);
-  in[4] = _mm_srai_epi16(in[4], 5);
-  in[5] = _mm_srai_epi16(in[5], 5);
-  in[6] = _mm_srai_epi16(in[6], 5);
-  in[7] = _mm_srai_epi16(in[7], 5);
-
-  recon_and_store(dest + 0 * stride, in[0]);
-  recon_and_store(dest + 1 * stride, in[1]);
-  recon_and_store(dest + 2 * stride, in[2]);
-  recon_and_store(dest + 3 * stride, in[3]);
-  recon_and_store(dest + 4 * stride, in[4]);
-  recon_and_store(dest + 5 * stride, in[5]);
-  recon_and_store(dest + 6 * stride, in[6]);
-  recon_and_store(dest + 7 * stride, in[7]);
-}
-
 void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                               int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
-  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
   const __m128i stg1_0 = pair_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
   const __m128i stg1_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
   const __m128i stg1_2 = pair_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
@@ -348,33 +147,7 @@
   in[6] = _mm_sub_epi16(stp1_1, stp1_6);
   in[7] = _mm_sub_epi16(stp1_0, stp2_7);
 
-  // Final rounding and shift
-  in[0] = _mm_adds_epi16(in[0], final_rounding);
-  in[1] = _mm_adds_epi16(in[1], final_rounding);
-  in[2] = _mm_adds_epi16(in[2], final_rounding);
-  in[3] = _mm_adds_epi16(in[3], final_rounding);
-  in[4] = _mm_adds_epi16(in[4], final_rounding);
-  in[5] = _mm_adds_epi16(in[5], final_rounding);
-  in[6] = _mm_adds_epi16(in[6], final_rounding);
-  in[7] = _mm_adds_epi16(in[7], final_rounding);
-
-  in[0] = _mm_srai_epi16(in[0], 5);
-  in[1] = _mm_srai_epi16(in[1], 5);
-  in[2] = _mm_srai_epi16(in[2], 5);
-  in[3] = _mm_srai_epi16(in[3], 5);
-  in[4] = _mm_srai_epi16(in[4], 5);
-  in[5] = _mm_srai_epi16(in[5], 5);
-  in[6] = _mm_srai_epi16(in[6], 5);
-  in[7] = _mm_srai_epi16(in[7], 5);
-
-  recon_and_store(dest + 0 * stride, in[0]);
-  recon_and_store(dest + 1 * stride, in[1]);
-  recon_and_store(dest + 2 * stride, in[2]);
-  recon_and_store(dest + 3 * stride, in[3]);
-  recon_and_store(dest + 4 * stride, in[4]);
-  recon_and_store(dest + 5 * stride, in[5]);
-  recon_and_store(dest + 6 * stride, in[6]);
-  recon_and_store(dest + 7 * stride, in[7]);
+  write_buffer_8x8(in, dest, stride);
 }
 
 // Only do addition and subtraction butterfly, size = 16, 32
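
vpx_idct8x8_12_add_ssse3() survives the cleanup because it serves a different case: only the first 12 coefficients in scan order can be nonzero, so half of the butterfly work is skipped. Callers pick a variant from the end-of-block position; below is a sketch of that selection, modeled on the eob-based dispatch in the VP9 decoder (vp9/common/vp9_idct.c), with the thresholds as recalled rather than quoted from this tree:

    /* Illustrative eob-based selection between the 8x8 iDCT variants. */
    static void idct8x8_add_sketch(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob) {
      if (eob == 1)       /* DC-only block */
        vpx_idct8x8_1_add(input, dest, stride);
      else if (eob <= 12) /* nonzeros confined to low frequencies */
        vpx_idct8x8_12_add(input, dest, stride);
      else
        vpx_idct8x8_64_add(input, dest, stride);
    }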