shithub: libvpx

ref: d757d7e998d2e247a6fea424e8cb67e5f2b17951
parent: f63eb66ecd9e0128a0775658b25bd259460ad3b8
parent: 6cc76ec73f02c4c6e78cd77f73b09ea385915f34
author: James Zern <jzern@google.com>
date: Wed Nov 23 22:31:25 EST 2016

Merge changes Icc4ead05,Ib019964b,I3b5fd3b3,Ieedadee2

* changes:
  Update vpx_idct4x4_16_add_neon() to pass SingleExtremeCoeff test
  Refine 8-bit 4x4 idct NEON intrinsics
  Add idct speed test.
  Update partial_idct_test.cc to support high bitdepth
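
The last change threads bit depth through the test's parameter tuple. For context, a sketch of the signature split the harness must bridge (the names follow vpx_dsp conventions; the declarations here are illustrative, not part of the patch):

    /* 8-bit variant: writes uint8_t pixels directly. */
    void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride);

    /* High bitdepth variant: takes a bit depth, and dest8 is a
     * CONVERT_TO_BYTEPTR()-wrapped uint16_t buffer. */
    void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd);

This is why the patch below defines Pixel and InvTxfmFunc per configuration and funnels every transform call through a single Exec() helper.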

--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -25,15 +25,28 @@
 #include "vp9/common/vp9_blockd.h"
 #include "vp9/common/vp9_scan.h"
 #include "vpx/vpx_integer.h"
+#include "vpx_ports/vpx_timer.h"
 
 using libvpx_test::ACMRandom;
 
 namespace {
+
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef uint16_t Pixel;
+typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                            int bd);
+#else   // !CONFIG_VP9_HIGHBITDEPTH
+typedef uint8_t Pixel;
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int,
+                        int>
     PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
+const int kCountTestBlock = 1000;
 
 // https://bugs.chromium.org/p/webm/issues/detail?id=1332
 // The functions specified do not pass with INT16_MIN/MAX. They fail at the
@@ -52,16 +65,11 @@
 
 int16_t MinSupportedCoeff(InvTxfmFunc a) {
   (void)a;
-#if !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
+    !CONFIG_EMULATE_HARDWARE
   if (a == vpx_idct8x8_64_add_ssse3 || a == vpx_idct8x8_12_add_ssse3) {
     return -23625 + 1;
   }
-#elif HAVE_NEON
-  if (a == vpx_idct4x4_16_add_neon) {
-    return std::numeric_limits<int16_t>::min() + 1;
-  }
-#endif
 #endif  // !CONFIG_EMULATE_HARDWARE
   return std::numeric_limits<int16_t>::min();
 }
@@ -70,11 +78,14 @@
  public:
   virtual ~PartialIDctTest() {}
   virtual void SetUp() {
+    rnd_.Reset(ACMRandom::DeterministicSeed());
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
     partial_itxfm_ = GET_PARAM(2);
     tx_size_ = GET_PARAM(3);
     last_nonzero_ = GET_PARAM(4);
+    bit_depth_ = GET_PARAM(5);
+    mask_ = (1 << bit_depth_) - 1;
 
     switch (tx_size_) {
       case TX_4X4: size_ = 4; break;
@@ -83,14 +94,26 @@
       case TX_32X32: size_ = 32; break;
       default: FAIL() << "Wrong Size!"; break;
     }
-    block_size_ = size_ * size_;
 
+    // Randomize stride_ to a value less than or equal to 1024
+    stride_ = rnd_(1024) + 1;
+    if (stride_ < size_) {
+      stride_ = size_;
+    }
+    // Align stride_ to 16 if it's bigger than 16.
+    if (stride_ > 16) {
+      stride_ &= ~15;
+    }
+
+    input_block_size_ = size_ * size_;
+    output_block_size_ = size_ * stride_;
+
     input_block_ = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(*input_block_) * block_size_));
-    output_block_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(*output_block_) * block_size_));
-    output_block_ref_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(*output_block_ref_) * block_size_));
+        vpx_memalign(16, sizeof(*input_block_) * input_block_size_));
+    output_block_ = reinterpret_cast<Pixel *>(
+        vpx_memalign(16, sizeof(*output_block_) * output_block_size_));
+    output_block_ref_ = reinterpret_cast<Pixel *>(
+        vpx_memalign(16, sizeof(*output_block_ref_) * output_block_size_));
   }
 
   virtual void TearDown() {
@@ -103,44 +126,76 @@
     libvpx_test::ClearSystemState();
   }
 
+  void InitMem() {
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
+    for (int j = 0; j < output_block_size_; ++j) {
+      output_block_[j] = output_block_ref_[j] = rnd_.Rand16() & mask_;
+    }
+  }
+
+  void InitInput() {
+    const int max_coeff = 32766 / 4;
+    int max_energy_leftover = max_coeff * max_coeff;
+    for (int j = 0; j < last_nonzero_; ++j) {
+      int16_t coeff = static_cast<int16_t>(sqrt(1.0 * max_energy_leftover) *
+                                           (rnd_.Rand16() - 32768) / 65536);
+      max_energy_leftover -= coeff * coeff;
+      if (max_energy_leftover < 0) {
+        max_energy_leftover = 0;
+        coeff = 0;
+      }
+      input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = coeff;
+    }
+  }
+
+  void Exec(InvTxfmFunc func, void *out) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    func(input_block_, CONVERT_TO_BYTEPTR(out), stride_, bit_depth_);
+#else
+    func(input_block_, reinterpret_cast<uint8_t *>(out), stride_);
+#endif
+  }
+
  protected:
   int last_nonzero_;
   TX_SIZE tx_size_;
   tran_low_t *input_block_;
-  uint8_t *output_block_;
-  uint8_t *output_block_ref_;
+  Pixel *output_block_;
+  Pixel *output_block_ref_;
   int size_;
-  int block_size_;
+  int stride_;
+  int input_block_size_;
+  int output_block_size_;
+  int bit_depth_;
+  int mask_;
   FwdTxfmFunc ftxfm_;
   InvTxfmFunc full_itxfm_;
   InvTxfmFunc partial_itxfm_;
+  ACMRandom rnd_;
 };
 
 TEST_P(PartialIDctTest, RunQuantCheck) {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-
-  const int count_test_block = 1000;
-
   DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]);
 
-  for (int i = 0; i < count_test_block; ++i) {
-    // clear out destination buffer
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
-    memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-    memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
+  for (int i = 0; i < kCountTestBlock; ++i) {
+    InitMem();
 
     ACMRandom rnd(ACMRandom::DeterministicSeed());
 
-    for (int i = 0; i < count_test_block; ++i) {
-      // Initialize a test block with input range [-255, 255].
-      if (i == 0) {
-        for (int j = 0; j < block_size_; ++j) input_extreme_block[j] = 255;
-      } else if (i == 1) {
-        for (int j = 0; j < block_size_; ++j) input_extreme_block[j] = -255;
+    for (int j = 0; j < kCountTestBlock; ++j) {
+      // Initialize a test block with input range [-mask_, mask_].
+      if (j == 0) {
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = mask_;
+        }
+      } else if (j == 1) {
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = -mask_;
+        }
       } else {
-        for (int j = 0; j < block_size_; ++j) {
-          input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = rnd.Rand8() % 2 ? mask_ : -mask_;
         }
       }
 
@@ -148,259 +203,356 @@
 
       // quantization with maximum allowed step sizes
       input_block_[0] = (output_ref_block[0] / 1336) * 1336;
-      for (int j = 1; j < last_nonzero_; ++j) {
-        input_block_[vp9_default_scan_orders[tx_size_].scan[j]] =
-            (output_ref_block[j] / 1828) * 1828;
+      for (int k = 1; k < last_nonzero_; ++k) {
+        input_block_[vp9_default_scan_orders[tx_size_].scan[k]] =
+            (output_ref_block[k] / 1828) * 1828;
       }
     }
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
-
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
         << "Error: partial inverse transform produces different results";
   }
 }
 
 TEST_P(PartialIDctTest, ResultsMatch) {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-  const int count_test_block = 1000;
-  const int max_coeff = 32766 / 4;
-  for (int i = 0; i < count_test_block; ++i) {
-    // clear out destination buffer
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
-    memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-    memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
-    int max_energy_leftover = max_coeff * max_coeff;
-    for (int j = 0; j < last_nonzero_; ++j) {
-      int16_t coeff = static_cast<int16_t>(sqrt(1.0 * max_energy_leftover) *
-                                           (rnd.Rand16() - 32768) / 65536);
-      max_energy_leftover -= coeff * coeff;
-      if (max_energy_leftover < 0) {
-        max_energy_leftover = 0;
-        coeff = 0;
-      }
-      input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = coeff;
-    }
+  for (int i = 0; i < kCountTestBlock; ++i) {
+    InitMem();
+    InitInput();
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
 
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
         << "Error: partial inverse transform produces different results";
   }
 }
 
 TEST_P(PartialIDctTest, AddOutputBlock) {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
-  const int count_test_block = 10;
-  for (int i = 0; i < count_test_block; ++i) {
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
+  for (int i = 0; i < kCountTestBlock; ++i) {
+    InitMem();
     for (int j = 0; j < last_nonzero_; ++j) {
       input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = 10;
     }
 
-    for (int j = 0; j < block_size_; ++j) {
-      output_block_[j] = output_block_ref_[j] = rnd.Rand8();
-    }
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
-
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
         << "Error: Transform results are not correctly added to output.";
   }
 }
 
 TEST_P(PartialIDctTest, SingleExtremeCoeff) {
-  ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int16_t max_coeff = MaxSupportedCoeff(partial_itxfm_);
   const int16_t min_coeff = MinSupportedCoeff(partial_itxfm_);
   for (int i = 0; i < last_nonzero_; ++i) {
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
     // Run once for min and once for max.
     for (int j = 0; j < 2; ++j) {
       const int coeff = j ? min_coeff : max_coeff;
 
-      memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-      memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
+      memset(output_block_, 0, sizeof(*output_block_) * output_block_size_);
+      memset(output_block_ref_, 0,
+             sizeof(*output_block_ref_) * output_block_size_);
       input_block_[vp9_default_scan_orders[tx_size_].scan[i]] = coeff;
 
-      ASM_REGISTER_STATE_CHECK(
-          full_itxfm_(input_block_, output_block_ref_, size_));
-      ASM_REGISTER_STATE_CHECK(
-          partial_itxfm_(input_block_, output_block_, size_));
+      ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+      ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
 
       ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                          sizeof(*output_block_) * block_size_))
+                          sizeof(*output_block_) * output_block_size_))
           << "Error: Fails with single coeff of " << coeff << " at " << i
           << ".";
     }
   }
 }
+
+TEST_P(PartialIDctTest, DISABLED_Speed) {
+  // Keep runtime stable with transform size.
+  const int kCountSpeedTestBlock = 500000000 / input_block_size_;
+  InitMem();
+  InitInput();
+
+  for (int i = 0; i < kCountSpeedTestBlock; ++i) {
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+  }
+  vpx_usec_timer timer;
+  vpx_usec_timer_start(&timer);
+  for (int i = 0; i < kCountSpeedTestBlock; ++i) {
+    Exec(partial_itxfm_, output_block_);
+  }
+  libvpx_test::ClearSystemState();
+  vpx_usec_timer_mark(&timer);
+  const int elapsed_time =
+      static_cast<int>(vpx_usec_timer_elapsed(&timer) / 1000);
+  printf("idct%dx%d_%d (bitdepth %d) time: %5d ms ", size_, size_,
+         last_nonzero_, bit_depth_, elapsed_time);
+
+  ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
+                      sizeof(*output_block_) * output_block_size_))
+      << "Error: partial inverse transform produces different results";
+}
+
 using std::tr1::make_tuple;
 
+#if CONFIG_VP9_HIGHBITDEPTH
+
 INSTANTIATE_TEST_CASE_P(
     C, PartialIDctTest,
+    ::testing::Values(
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 12),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 12),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 12)));
+
+#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+    SSE2, PartialIDctTest,
+    ::testing::Values(
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 12)));
+#endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+
+#else  // !CONFIG_VP9_HIGHBITDEPTH
+
+INSTANTIATE_TEST_CASE_P(
+    C, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_c, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_c, TX_32X32, 1024, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_c, TX_32X32, 135),
+                                 &vpx_idct32x32_135_add_c, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_c, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_c, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_c, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_c, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_c, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_c, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_c, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_c, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_c, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_c, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_c, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_c, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_c, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_c, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_c, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_c, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_c, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_c, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_c, TX_4X4, 1)));
+                                 &vpx_idct4x4_1_add_c, TX_4X4, 1, 8)));
 
 #if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
-#if CONFIG_VP9_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     NEON, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
+                                 &vpx_idct32x32_1024_add_neon, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
-#else   // !CONFIG_VP9_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
-    NEON, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_neon, TX_32X32, 1024),
+                                 &vpx_idct32x32_135_add_neon, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_neon, TX_32X32, 135),
+                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34),
-                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_neon, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_neon, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1, 8)));
 #endif  // HAVE_NEON && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
 // 32x32_135_ is implemented using the 1024 version.
 INSTANTIATE_TEST_CASE_P(
     SSE2, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 135),
+                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 135,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_sse2, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_sse2, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_sse2, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_sse2, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_sse2, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_sse2, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1, 8)));
+#endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
-    !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSSE3_64, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_ssse3, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_ssse3, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_ssse3, TX_32X32, 135),
+                                 &vpx_idct32x32_135_add_ssse3, TX_32X32, 135,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_ssse3, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_ssse3, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_ssse3, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_ssse3, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12)));
-#endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH &&
-        // !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12, 8)));
+#endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_EMULATE_HARDWARE
 // 32x32_135_ is implemented using the 1024 version.
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 135),
+                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_msa, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_msa, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_msa, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_msa, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10),
+                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_msa, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_msa, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1)));
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1, 8)));
+#endif  // HAVE_MSA && !CONFIG_EMULATE_HARDWARE
+
+#endif  // CONFIG_VP9_HIGHBITDEPTH
 
 }  // namespace
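
A note on the InitInput() helper added above: it fills the block in scan order while spending a fixed "energy" budget of (32766/4)^2, which bounds the total coefficient energy so the transform intermediates stay in range. A standalone sketch of that generator, assuming a rand16() source that returns uniform values in [0, 65535]:

    #include <math.h>
    #include <stdint.h>

    static void init_input_sketch(int16_t *coeffs, const int16_t *scan,
                                  int last_nonzero, uint16_t (*rand16)(void)) {
      const int max_coeff = 32766 / 4;
      int energy_left = max_coeff * max_coeff;  /* total energy budget */
      int j;
      for (j = 0; j < last_nonzero; ++j) {
        /* Scale a uniform value in [-0.5, 0.5) by the remaining amplitude. */
        int16_t c = (int16_t)(sqrt(1.0 * energy_left) *
                              ((int)rand16() - 32768) / 65536);
        energy_left -= c * c;   /* spend the budget */
        if (energy_left < 0) {  /* defensive clamp; |c| <= sqrt(budget) / 2 */
          energy_left = 0;
          c = 0;
        }
        coeffs[scan[j]] = c;    /* place coefficients in scan order */
      }
    }
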
--- a/vpx_dsp/arm/idct4x4_1_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_1_add_neon.c
@@ -9,39 +9,33 @@
  */
 
 #include <arm_neon.h>
+#include <assert.h>
 
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/inv_txfm.h"
-#include "vpx_ports/mem.h"
 
 void vpx_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest,
                             int dest_stride) {
-  uint8x8_t d6u8;
-  uint32x2_t d2u32 = vdup_n_u32(0);
-  uint16x8_t q8u16;
-  int16x8_t q0s16;
-  uint8_t *d1, *d2;
-  int16_t i, a1;
-  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
-  out = dct_const_round_shift(out * cospi_16_64);
-  a1 = ROUND_POWER_OF_TWO(out, 4);
+  int i;
+  const int16_t out0 = dct_const_round_shift((int16_t)input[0] * cospi_16_64);
+  const int16_t out1 = dct_const_round_shift(out0 * cospi_16_64);
+  const int16_t a1 = ROUND_POWER_OF_TWO(out1, 4);
+  const int16x8_t dc = vdupq_n_s16(a1);
+  uint32x2_t d = vdup_n_u32(0);
+  uint16x8_t a;
+  uint8x8_t b;
 
-  q0s16 = vdupq_n_s16(a1);
+  assert(!((intptr_t)dest % sizeof(uint32_t)));
+  assert(!(dest_stride % sizeof(uint32_t)));
 
-  // dc_only_idct_add
-  d1 = d2 = dest;
   for (i = 0; i < 2; i++) {
-    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0);
-    d1 += dest_stride;
-    d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1);
-    d1 += dest_stride;
-
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16), vreinterpret_u8_u32(d2u32));
-    d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-
-    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0);
-    d2 += dest_stride;
-    vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
-    d2 += dest_stride;
+    d = vld1_lane_u32((const uint32_t *)dest, d, 0);
+    d = vld1_lane_u32((const uint32_t *)(dest + dest_stride), d, 1);
+    a = vaddw_u8(vreinterpretq_u16_s16(dc), vreinterpret_u8_u32(d));
+    b = vqmovun_s16(vreinterpretq_s16_u16(a));
+    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(b), 0);
+    dest += dest_stride;
+    vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(b), 1);
+    dest += dest_stride;
   }
 }
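
The rewrite above drops the old pointer juggling: a DC-only block adds one rounded constant, a1, to every pixel, so the loop only needs the running dest pointer, and the new assert()s document that the 32-bit lane loads/stores require dest and dest_stride to be 4-byte aligned. In scalar terms (a sketch, not the shipped code; dct_const_round_shift() and cospi_16_64 are restated so it is self-contained):

    #include <stdint.h>

    #define DCT_CONST_ROUND_SHIFT(x) (((x) + (1 << 13)) >> 14)
    #define COSPI_16_64 11585

    static uint8_t clip_u8(int v) {
      return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    static void idct4x4_1_add_sketch(int16_t dc, uint8_t *dest, int stride) {
      const int16_t out0 = (int16_t)DCT_CONST_ROUND_SHIFT(dc * COSPI_16_64);
      const int16_t out1 = (int16_t)DCT_CONST_ROUND_SHIFT(out0 * COSPI_16_64);
      const int16_t a1 = (int16_t)((out1 + 8) >> 4);  /* round to pixel domain */
      int r, c;
      for (r = 0; r < 4; ++r, dest += stride)
        for (c = 0; c < 4; ++c)
          dest[c] = clip_u8(dest[c] + a1);
    }
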
--- a/vpx_dsp/arm/idct4x4_add_neon.asm
+++ b/vpx_dsp/arm/idct4x4_add_neon.asm
@@ -72,16 +72,15 @@
     ; do the transform on transposed rows
 
     ; stage 1
-    vadd.s16  d23, d16, d18         ; (input[0] + input[2])
-    vsub.s16  d24, d16, d18         ; (input[0] - input[2])
-
     vmull.s16 q15, d17, d22         ; input[1] * cospi_24_64
     vmull.s16 q1,  d17, d20         ; input[1] * cospi_8_64
 
     ; (input[0] + input[2]) * cospi_16_64;
     ; (input[0] - input[2]) * cospi_16_64;
-    vmull.s16 q13, d23, d21
-    vmull.s16 q14, d24, d21
+    vmull.s16 q8,  d16, d21
+    vmull.s16 q14, d18, d21
+    vadd.s32  q13, q8,  q14
+    vsub.s32  q14, q8,  q14
 
     ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
     ; input[1] * cospi_8_64  + input[3] * cospi_24_64;
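
This is the assembly-side twin of the SingleExtremeCoeff fix: the removed code formed input[0] +/- input[2] as a 16-bit sum, which wraps for coefficients near INT16_MIN/INT16_MAX, and only widened afterwards through the multiply. The replacement multiplies each input by cospi_16_64 into 32-bit accumulators first and does the add/sub there, where the sums fit comfortably. With in0 and in2 as the int16_t inputs, the reordering in scalar terms (a sketch of the ordering only):

    /* before the patch: the 16-bit sum may wrap for extreme inputs */
    const int16_t s        = (int16_t)(in0 + in2);
    const int32_t t_before = s * 11585;  /* cospi_16_64 */

    /* after the patch: widen through the multiply, then add in 32 bits */
    const int32_t p0      = (int32_t)in0 * 11585;
    const int32_t p2      = (int32_t)in2 * 11585;
    const int32_t t_after = p0 + p2;  /* |p0| + |p2| < 2^31: cannot wrap */
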
--- a/vpx_dsp/arm/idct4x4_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_add_neon.c
@@ -9,139 +9,89 @@
  */
 
 #include <arm_neon.h>
+#include <assert.h>
 
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/arm/idct_neon.h"
+#include "vpx_dsp/arm/transpose_neon.h"
 #include "vpx_dsp/txfm_common.h"
 
+static INLINE void idct4x4_16_kernel(const int16x4_t cospis, int16x8_t *a0,
+                                     int16x8_t *a1) {
+  int16x4_t b0, b1, b2, b3;
+  int32x4_t c0, c1, c2, c3;
+  int16x8_t d0, d1;
+
+  transpose_s16_4x4q(a0, a1);
+  b0 = vget_low_s16(*a0);
+  b1 = vget_high_s16(*a0);
+  b2 = vget_low_s16(*a1);
+  b3 = vget_high_s16(*a1);
+  c0 = vmull_lane_s16(b0, cospis, 2);
+  c2 = vmull_lane_s16(b1, cospis, 2);
+  c1 = vsubq_s32(c0, c2);
+  c0 = vaddq_s32(c0, c2);
+  c2 = vmull_lane_s16(b2, cospis, 3);
+  c3 = vmull_lane_s16(b2, cospis, 1);
+  c2 = vmlsl_lane_s16(c2, b3, cospis, 1);
+  c3 = vmlal_lane_s16(c3, b3, cospis, 3);
+  b0 = vrshrn_n_s32(c0, 14);
+  b1 = vrshrn_n_s32(c1, 14);
+  b2 = vrshrn_n_s32(c2, 14);
+  b3 = vrshrn_n_s32(c3, 14);
+  d0 = vcombine_s16(b0, b1);
+  d1 = vcombine_s16(b3, b2);
+  *a0 = vaddq_s16(d0, d1);
+  *a1 = vsubq_s16(d0, d1);
+}
+
 void vpx_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
                              int dest_stride) {
-  uint8x8_t d26u8, d27u8;
-  uint32x2_t d26u32, d27u32;
-  uint16x8_t q8u16, q9u16;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
-  int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
-  int16x8_t q8s16, q9s16, q13s16, q14s16;
-  int32x4_t q1s32, q13s32, q14s32, q15s32;
-  int16x4x2_t d0x2s16, d1x2s16;
-  int32x4x2_t q0x2s32;
-  uint8_t *d;
+  DECLARE_ALIGNED(16, static const int16_t, cospi[4]) = {
+    0, (int16_t)cospi_8_64, (int16_t)cospi_16_64, (int16_t)cospi_24_64
+  };
+  const uint8_t *dst = dest;
+  const int16x4_t cospis = vld1_s16(cospi);
+  uint32x2_t dest01_u32 = vdup_n_u32(0);
+  uint32x2_t dest32_u32 = vdup_n_u32(0);
+  int16x8_t a0, a1;
+  uint8x8_t d01, d32;
+  uint16x8_t d01_u16, d32_u16;
 
-  d26u32 = d27u32 = vdup_n_u32(0);
+  assert(!((intptr_t)dest % sizeof(uint32_t)));
+  assert(!(dest_stride % sizeof(uint32_t)));
 
-  q8s16 = load_tran_low_to_s16(input);
-  q9s16 = load_tran_low_to_s16(input + 8);
+  // Rows
+  a0 = load_tran_low_to_s16(input);
+  a1 = load_tran_low_to_s16(input + 8);
+  idct4x4_16_kernel(cospis, &a0, &a1);
 
-  d16s16 = vget_low_s16(q8s16);
-  d17s16 = vget_high_s16(q8s16);
-  d18s16 = vget_low_s16(q9s16);
-  d19s16 = vget_high_s16(q9s16);
+  // Columns
+  a1 = vcombine_s16(vget_high_s16(a1), vget_low_s16(a1));
+  idct4x4_16_kernel(cospis, &a0, &a1);
+  a0 = vrshrq_n_s16(a0, 4);
+  a1 = vrshrq_n_s16(a1, 4);
 
-  d0x2s16 = vtrn_s16(d16s16, d17s16);
-  d1x2s16 = vtrn_s16(d18s16, d19s16);
-  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
-  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
+  dest01_u32 = vld1_lane_u32((const uint32_t *)dst, dest01_u32, 0);
+  dst += dest_stride;
+  dest01_u32 = vld1_lane_u32((const uint32_t *)dst, dest01_u32, 1);
+  dst += dest_stride;
+  dest32_u32 = vld1_lane_u32((const uint32_t *)dst, dest32_u32, 1);
+  dst += dest_stride;
+  dest32_u32 = vld1_lane_u32((const uint32_t *)dst, dest32_u32, 0);
 
-  d20s16 = vdup_n_s16((int16_t)cospi_8_64);
-  d21s16 = vdup_n_s16((int16_t)cospi_16_64);
+  d01_u16 =
+      vaddw_u8(vreinterpretq_u16_s16(a0), vreinterpret_u8_u32(dest01_u32));
+  d32_u16 =
+      vaddw_u8(vreinterpretq_u16_s16(a1), vreinterpret_u8_u32(dest32_u32));
+  d01 = vqmovun_s16(vreinterpretq_s16_u16(d01_u16));
+  d32 = vqmovun_s16(vreinterpretq_s16_u16(d32_u16));
 
-  q0x2s32 =
-      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
-  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-
-  d22s16 = vdup_n_s16((int16_t)cospi_24_64);
-
-  // stage 1
-  d23s16 = vadd_s16(d16s16, d18s16);
-  d24s16 = vsub_s16(d16s16, d18s16);
-
-  q15s32 = vmull_s16(d17s16, d22s16);
-  q1s32 = vmull_s16(d17s16, d20s16);
-  q13s32 = vmull_s16(d23s16, d21s16);
-  q14s32 = vmull_s16(d24s16, d21s16);
-
-  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
-  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);
-
-  d26s16 = vqrshrn_n_s32(q13s32, 14);
-  d27s16 = vqrshrn_n_s32(q14s32, 14);
-  d29s16 = vqrshrn_n_s32(q15s32, 14);
-  d28s16 = vqrshrn_n_s32(q1s32, 14);
-  q13s16 = vcombine_s16(d26s16, d27s16);
-  q14s16 = vcombine_s16(d28s16, d29s16);
-
-  // stage 2
-  q8s16 = vaddq_s16(q13s16, q14s16);
-  q9s16 = vsubq_s16(q13s16, q14s16);
-
-  d16s16 = vget_low_s16(q8s16);
-  d17s16 = vget_high_s16(q8s16);
-  d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
-  d19s16 = vget_low_s16(q9s16);
-
-  d0x2s16 = vtrn_s16(d16s16, d17s16);
-  d1x2s16 = vtrn_s16(d18s16, d19s16);
-  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
-  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);
-
-  q0x2s32 =
-      vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
-  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
-  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
-
-  // do the transform on columns
-  // stage 1
-  d23s16 = vadd_s16(d16s16, d18s16);
-  d24s16 = vsub_s16(d16s16, d18s16);
-
-  q15s32 = vmull_s16(d17s16, d22s16);
-  q1s32 = vmull_s16(d17s16, d20s16);
-  q13s32 = vmull_s16(d23s16, d21s16);
-  q14s32 = vmull_s16(d24s16, d21s16);
-
-  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
-  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);
-
-  d26s16 = vqrshrn_n_s32(q13s32, 14);
-  d27s16 = vqrshrn_n_s32(q14s32, 14);
-  d29s16 = vqrshrn_n_s32(q15s32, 14);
-  d28s16 = vqrshrn_n_s32(q1s32, 14);
-  q13s16 = vcombine_s16(d26s16, d27s16);
-  q14s16 = vcombine_s16(d28s16, d29s16);
-
-  // stage 2
-  q8s16 = vaddq_s16(q13s16, q14s16);
-  q9s16 = vsubq_s16(q13s16, q14s16);
-
-  q8s16 = vrshrq_n_s16(q8s16, 4);
-  q9s16 = vrshrq_n_s16(q9s16, 4);
-
-  d = dest;
-  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
-  d += dest_stride;
-  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
-  d += dest_stride;
-  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
-  d += dest_stride;
-  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);
-
-  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
-  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));
-
-  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-
-  d = dest;
-  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
-  d += dest_stride;
-  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
-  d += dest_stride;
-  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
-  d += dest_stride;
-  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d01), 0);
+  dest += dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d01), 1);
+  dest += dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d32), 1);
+  dest += dest_stride;
+  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d32), 0);
 }
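
The rewritten intrinsics fold what used to be two hand-unrolled passes into idct4x4_16_kernel(): transpose, then one 4-point butterfly per pass, with the widening multiplies done before the add/sub so extreme coefficients survive (the same ordering as the assembly change above). A scalar reference of that butterfly (a sketch; the constants are libvpx's cospi_8_64, cospi_16_64 and cospi_24_64):

    #include <stdint.h>

    #define DCT_CONST_ROUND_SHIFT(x) (((x) + (1 << 13)) >> 14)
    #define COSPI_8_64 15137
    #define COSPI_16_64 11585
    #define COSPI_24_64 6270

    static void idct4_sketch(const int16_t in[4], int16_t out[4]) {
      /* Widen before the add/sub, mirroring vmull_lane_s16 + vaddq_s32. */
      const int32_t p0 = in[0] * (int32_t)COSPI_16_64;
      const int32_t p2 = in[2] * (int32_t)COSPI_16_64;
      const int16_t s0 = (int16_t)DCT_CONST_ROUND_SHIFT(p0 + p2);
      const int16_t s1 = (int16_t)DCT_CONST_ROUND_SHIFT(p0 - p2);
      const int16_t s2 = (int16_t)DCT_CONST_ROUND_SHIFT(
          in[1] * COSPI_24_64 - in[3] * COSPI_8_64);
      const int16_t s3 = (int16_t)DCT_CONST_ROUND_SHIFT(
          in[1] * COSPI_8_64 + in[3] * COSPI_24_64);
      out[0] = (int16_t)(s0 + s3);
      out[1] = (int16_t)(s1 + s2);
      out[2] = (int16_t)(s1 - s2);
      out[3] = (int16_t)(s0 - s3);
    }

Loading the four constants into one int16x4_t also lets each multiply pick its constant with vmull_lane_s16/vmlal_lane_s16 instead of the three vdup_n_s16 broadcasts the old code needed.
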
--- a/vpx_dsp/arm/transpose_neon.h
+++ b/vpx_dsp/arm/transpose_neon.h
@@ -110,6 +110,37 @@
   *a3 = vreinterpret_s16_s32(c1.val[1]);
 }
 
+static INLINE void transpose_s16_4x4q(int16x8_t *a0, int16x8_t *a1) {
+  // Swap 32 bit elements. Goes from:
+  // a0: 00 01 02 03  10 11 12 13
+  // a1: 20 21 22 23  30 31 32 33
+  // to:
+  // b0.val[0]: 00 01 20 21  10 11 30 31
+  // b0.val[1]: 02 03 22 23  12 13 32 33
+
+  const int32x4x2_t b0 =
+      vtrnq_s32(vreinterpretq_s32_s16(*a0), vreinterpretq_s32_s16(*a1));
+
+  // Swap 64 bit elements resulting in:
+  // c0.val[0]: 00 01 20 21  02 03 22 23
+  // c0.val[1]: 10 11 30 31  12 13 32 33
+
+  const int32x4_t c0 =
+      vcombine_s32(vget_low_s32(b0.val[0]), vget_low_s32(b0.val[1]));
+  const int32x4_t c1 =
+      vcombine_s32(vget_high_s32(b0.val[0]), vget_high_s32(b0.val[1]));
+
+  // Swap 16 bit elements resulting in:
+  // d0.val[0]: 00 10 20 30  02 12 22 32
+  // d0.val[1]: 01 11 21 31  03 13 23 33
+
+  const int16x8x2_t d0 =
+      vtrnq_s16(vreinterpretq_s16_s32(c0), vreinterpretq_s16_s32(c1));
+
+  *a0 = d0.val[0];
+  *a1 = d0.val[1];
+}
+
 static INLINE void transpose_u16_4x4q(uint16x8_t *a0, uint16x8_t *a1) {
   // Swap 32 bit elements. Goes from:
   // a0: 00 01 02 03  10 11 12 13
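
The new transpose_s16_4x4q() treats its two arguments as one packed 4x4 block: on input a0 holds rows 0|1 and a1 rows 2|3, and per the lane traces above the output packs columns 0|2 into a0 and columns 1|3 into a1. A scalar model of that exact repacking (a sketch):

    #include <stdint.h>

    static void transpose_s16_4x4q_sketch(int16_t a0[8], int16_t a1[8]) {
      int16_t m[4][4], t0[8], t1[8];
      int i;
      for (i = 0; i < 8; ++i) {  /* unpack: a0 = row0|row1, a1 = row2|row3 */
        m[i / 4][i % 4] = a0[i];
        m[2 + i / 4][i % 4] = a1[i];
      }
      for (i = 0; i < 4; ++i) {  /* repack: a0 = col0|col2, a1 = col1|col3 */
        t0[i] = m[i][0];
        t0[4 + i] = m[i][2];
        t1[i] = m[i][1];
        t1[4 + i] = m[i][3];
      }
      for (i = 0; i < 8; ++i) {
        a0[i] = t0[i];
        a1[i] = t1[i];
      }
    }

This even/odd column split is what lets idct4x4_16_kernel() feed (col0, col2) straight into the cospi_16_64 pair and (col1, col3) into the cospi_8_64/cospi_24_64 pair without further shuffling.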