shithub: libvpx

ref: 9b187954df6044fcc533b1ad43761f3f8cdf8b1a
parent: 3c47a0dc6f0c04f0e537f98155ab7476c1f32be7
author: Linfeng Zhang <linfengz@google.com>
date: Tue Dec 27 11:28:53 EST 2016

Add high bitdepth 8x8 idct NEON intrinsics

BUG=webm:1301

Change-Id: I56e3bc3aab9214e2debac93796389a7194991084
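
In outline: the patch adds NEON implementations of vpx_highbd_idct8x8_64_add,
vpx_highbd_idct8x8_12_add and vpx_highbd_idct8x8_1_add; moves the existing bd8
8x8 kernels out of idct8x8_add_neon.c into idct_neon.h so the high-bitdepth
entry points can reuse them when bd == 8 (the 12-coefficient kernels pick up a
step2[0] -> step2[1] slot rename in the move); adds a 32-bit cospi table plus
32-bit transpose helpers; and wires the new functions into the test, build and
rtcd files. All of the fixed-point multiplies below round by
DCT_CONST_BITS == 14. A scalar sketch of that common operation (my naming,
illustration only, not part of the patch):

static inline int32_t mul_cospi_round(int64_t x, int32_t cospi) {
  /* per-lane equivalent of vrshrq_n_s32(vmulq_lane_s32(x, c), 14) and of
   * vrshrn_n_s64(vmull_lane_s32(x, c), 14): dct_const_round_shift() in C */
  return (int32_t)((x * cospi + (1 << 13)) >> 14);
}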

--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -446,6 +446,30 @@
 #if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
 const PartialInvTxfmParam neon_partial_idct_tests[] = {
 #if CONFIG_VP9_HIGHBITDEPTH
+  make_tuple(&vpx_highbd_fdct8x8_c,
+             &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
+             &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 8, 2),
+  make_tuple(
+      &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
+      &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 10, 2),
+  make_tuple(
+      &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
+      &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 12, 2),
+  make_tuple(&vpx_highbd_fdct8x8_c,
+             &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
+             &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 8, 2),
+  make_tuple(
+      &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
+      &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 10, 2),
+  make_tuple(
+      &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
+      &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 12, 2),
+  make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
+             &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 8, 2),
+  make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
+             &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 10, 2),
+  make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
+             &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 12, 2),
   make_tuple(&vpx_highbd_fdct4x4_c,
              &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
              &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 8, 2),
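
As I read the harness, the tuple fields are: forward transform, C reference
inverse, inverse under test, transform size, number of coefficients kept for
the partial idct, bit depth, and bytes per pixel (2 for high bitdepth). So the
block above covers the 64-, 12- and 1-coefficient 8x8 paths at 8, 10 and 12
bits, each checked bit-exactly against the C code. A condensed, hypothetical
version of what runs per tuple (the real test randomizes inputs over many
trials and zeroes coefficients in scan order):

#include <assert.h>
#include <stdint.h>

typedef int32_t tran_low_t; /* assumption: high-bitdepth build */
typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride,
                            int bd);

static void check_tuple(FwdTxfmFunc fwd, InvTxfmFunc ref, InvTxfmFunc opt,
                        const int16_t *src, tran_low_t coeff[64],
                        uint8_t *ref_dst, uint8_t *opt_dst, int stride,
                        int last_nonzero, int bd) {
  int i;
  fwd(src, coeff, 8);                                /* forward 8x8 */
  for (i = last_nonzero; i < 64; ++i) coeff[i] = 0;  /* partial-idct input */
  ref(coeff, ref_dst, stride, bd);                   /* C reference */
  opt(coeff, opt_dst, stride, bd);                   /* NEON under test */
  for (i = 0; i < 8 * stride * 2; ++i)               /* 16-bit pixels */
    assert(ref_dst[i] == opt_dst[i]);
}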
--- /dev/null
+++ b/vpx_dsp/arm/highbd_idct8x8_add_neon.c
@@ -1,0 +1,614 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/arm/idct_neon.h"
+#include "vpx_dsp/arm/transpose_neon.h"
+#include "vpx_dsp/inv_txfm.h"
+
+static INLINE void highbd_idct8x8_1_add_kernel(uint16_t **dest,
+                                               const int stride,
+                                               const int16x8_t res,
+                                               const int16x8_t max) {
+  const uint16x8_t a = vld1q_u16(*dest);
+  const int16x8_t b = vaddq_s16(res, vreinterpretq_s16_u16(a));
+  const int16x8_t c = vminq_s16(b, max);
+  const uint16x8_t d = vqshluq_n_s16(c, 0);
+  vst1q_u16(*dest, d);
+  *dest += stride;
+}
+
+void vpx_highbd_idct8x8_1_add_neon(const tran_low_t *input, uint8_t *dest8,
+                                   int stride, int bd) {
+  const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
+  const tran_low_t out0 =
+      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  const tran_low_t out1 =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
+  const int16_t a1 = ROUND_POWER_OF_TWO(out1, 5);
+  const int16x8_t dc = vdupq_n_s16(a1);
+  uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+  highbd_idct8x8_1_add_kernel(&dest, stride, dc, max);
+}
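
The scalar equivalent of the DC-only function above, mirroring
vpx_highbd_idct8x8_1_add_c (sketch for clarity; HIGHBD_WRAPLOW,
dct_const_round_shift and ROUND_POWER_OF_TWO as in vpx_dsp/inv_txfm.h): both
1-D passes collapse to a multiply by cospi_16_64, and the resulting DC value
is added to all 64 pixels.

static void highbd_idct8x8_1_add_sketch(const tran_low_t *input,
                                        uint16_t *dest, int stride, int bd) {
  const int32_t max = (1 << bd) - 1;
  tran_low_t out =
      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
  int16_t a1;
  int r, c;
  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
  a1 = (int16_t)ROUND_POWER_OF_TWO(out, 5);
  for (r = 0; r < 8; ++r, dest += stride) {
    for (c = 0; c < 8; ++c) {
      const int32_t v = dest[c] + a1;  /* clip_pixel_add in the C version */
      dest[c] = (uint16_t)(v < 0 ? 0 : (v > max ? max : v));
    }
  }
}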
+
+static INLINE void idct8x8_12_half1d_bd10(
+    const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+    int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+    int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+    int32x4_t *const io7) {
+  int32x4_t step1[8], step2[8];
+
+  transpose_s32_4x4(io0, io1, io2, io3);
+
+  // stage 1
+  step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
+  step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
+  step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
+  step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
+  step1[4] = vrshrq_n_s32(step1[4], 14);
+  step1[5] = vrshrq_n_s32(step1[5], 14);
+  step1[6] = vrshrq_n_s32(step1[6], 14);
+  step1[7] = vrshrq_n_s32(step1[7], 14);
+
+  // stage 2
+  step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
+  step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
+  step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
+  step2[1] = vrshrq_n_s32(step2[1], 14);
+  step2[2] = vrshrq_n_s32(step2[2], 14);
+  step2[3] = vrshrq_n_s32(step2[3], 14);
+
+  step2[4] = vaddq_s32(step1[4], step1[5]);
+  step2[5] = vsubq_s32(step1[4], step1[5]);
+  step2[6] = vsubq_s32(step1[7], step1[6]);
+  step2[7] = vaddq_s32(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s32(step2[1], step2[3]);
+  step1[1] = vaddq_s32(step2[1], step2[2]);
+  step1[2] = vsubq_s32(step2[1], step2[2]);
+  step1[3] = vsubq_s32(step2[1], step2[3]);
+
+  step1[6] = vmulq_lane_s32(step2[6], vget_high_s32(cospis0), 0);
+  step1[5] = vmlsq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+  step1[6] = vmlaq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+  step1[5] = vrshrq_n_s32(step1[5], 14);
+  step1[6] = vrshrq_n_s32(step1[6], 14);
+
+  // stage 4
+  *io0 = vaddq_s32(step1[0], step2[7]);
+  *io1 = vaddq_s32(step1[1], step1[6]);
+  *io2 = vaddq_s32(step1[2], step1[5]);
+  *io3 = vaddq_s32(step1[3], step2[4]);
+  *io4 = vsubq_s32(step1[3], step2[4]);
+  *io5 = vsubq_s32(step1[2], step1[5]);
+  *io6 = vsubq_s32(step1[1], step1[6]);
+  *io7 = vsubq_s32(step1[0], step2[7]);
+}
+
+static INLINE void idct8x8_12_half1d_bd12(
+    const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+    int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+    int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+    int32x4_t *const io7) {
+  int32x2_t input_1l, input_1h, input_3l, input_3h;
+  int32x2_t step1l[2], step1h[2];
+  int32x4_t step1[8], step2[8];
+  int64x2_t t64[8];
+  int32x2_t t32[8];
+
+  transpose_s32_4x4(io0, io1, io2, io3);
+
+  // stage 1
+  input_1l = vget_low_s32(*io1);
+  input_1h = vget_high_s32(*io1);
+  input_3l = vget_low_s32(*io3);
+  input_3h = vget_high_s32(*io3);
+  step1l[0] = vget_low_s32(*io0);
+  step1h[0] = vget_high_s32(*io0);
+  step1l[1] = vget_low_s32(*io2);
+  step1h[1] = vget_high_s32(*io2);
+
+  t64[0] = vmull_lane_s32(input_1l, vget_high_s32(cospis1), 1);
+  t64[1] = vmull_lane_s32(input_1h, vget_high_s32(cospis1), 1);
+  t64[2] = vmull_lane_s32(input_3l, vget_high_s32(cospis1), 0);
+  t64[3] = vmull_lane_s32(input_3h, vget_high_s32(cospis1), 0);
+  t64[4] = vmull_lane_s32(input_3l, vget_low_s32(cospis1), 1);
+  t64[5] = vmull_lane_s32(input_3h, vget_low_s32(cospis1), 1);
+  t64[6] = vmull_lane_s32(input_1l, vget_low_s32(cospis1), 0);
+  t64[7] = vmull_lane_s32(input_1h, vget_low_s32(cospis1), 0);
+  t32[0] = vrshrn_n_s64(t64[0], 14);
+  t32[1] = vrshrn_n_s64(t64[1], 14);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  t32[4] = vrshrn_n_s64(t64[4], 14);
+  t32[5] = vrshrn_n_s64(t64[5], 14);
+  t32[6] = vrshrn_n_s64(t64[6], 14);
+  t32[7] = vrshrn_n_s64(t64[7], 14);
+  step1[4] = vcombine_s32(t32[0], t32[1]);
+  step1[5] = vcombine_s32(t32[2], t32[3]);
+  step1[6] = vcombine_s32(t32[4], t32[5]);
+  step1[7] = vcombine_s32(t32[6], t32[7]);
+
+  // stage 2
+  t64[2] = vmull_lane_s32(step1l[0], vget_high_s32(cospis0), 0);
+  t64[3] = vmull_lane_s32(step1h[0], vget_high_s32(cospis0), 0);
+  t64[4] = vmull_lane_s32(step1l[1], vget_high_s32(cospis0), 1);
+  t64[5] = vmull_lane_s32(step1h[1], vget_high_s32(cospis0), 1);
+  t64[6] = vmull_lane_s32(step1l[1], vget_low_s32(cospis0), 1);
+  t64[7] = vmull_lane_s32(step1h[1], vget_low_s32(cospis0), 1);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  t32[4] = vrshrn_n_s64(t64[4], 14);
+  t32[5] = vrshrn_n_s64(t64[5], 14);
+  t32[6] = vrshrn_n_s64(t64[6], 14);
+  t32[7] = vrshrn_n_s64(t64[7], 14);
+  step2[1] = vcombine_s32(t32[2], t32[3]);
+  step2[2] = vcombine_s32(t32[4], t32[5]);
+  step2[3] = vcombine_s32(t32[6], t32[7]);
+
+  step2[4] = vaddq_s32(step1[4], step1[5]);
+  step2[5] = vsubq_s32(step1[4], step1[5]);
+  step2[6] = vsubq_s32(step1[7], step1[6]);
+  step2[7] = vaddq_s32(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s32(step2[1], step2[3]);
+  step1[1] = vaddq_s32(step2[1], step2[2]);
+  step1[2] = vsubq_s32(step2[1], step2[2]);
+  step1[3] = vsubq_s32(step2[1], step2[3]);
+
+  t64[2] = vmull_lane_s32(vget_low_s32(step2[6]), vget_high_s32(cospis0), 0);
+  t64[3] = vmull_lane_s32(vget_high_s32(step2[6]), vget_high_s32(cospis0), 0);
+  t64[0] =
+      vmlsl_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+  t64[1] = vmlsl_lane_s32(t64[3], vget_high_s32(step2[5]),
+                          vget_high_s32(cospis0), 0);
+  t64[2] =
+      vmlal_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+  t64[3] = vmlal_lane_s32(t64[3], vget_high_s32(step2[5]),
+                          vget_high_s32(cospis0), 0);
+  t32[0] = vrshrn_n_s64(t64[0], 14);
+  t32[1] = vrshrn_n_s64(t64[1], 14);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  step1[5] = vcombine_s32(t32[0], t32[1]);
+  step1[6] = vcombine_s32(t32[2], t32[3]);
+
+  // stage 4
+  *io0 = vaddq_s32(step1[0], step2[7]);
+  *io1 = vaddq_s32(step1[1], step1[6]);
+  *io2 = vaddq_s32(step1[2], step1[5]);
+  *io3 = vaddq_s32(step1[3], step2[4]);
+  *io4 = vsubq_s32(step1[3], step2[4]);
+  *io5 = vsubq_s32(step1[2], step1[5]);
+  *io6 = vsubq_s32(step1[1], step1[6]);
+  *io7 = vsubq_s32(step1[0], step2[7]);
+}
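
idct8x8_12_half1d_bd10 and idct8x8_12_half1d_bd12 compute the same 1-D
butterfly over a 4-wide slice; the only difference is accumulator width. The
apparent rationale: at bd == 10 a coefficient times a 14-bit cospi constant
still fits a 32-bit accumulator, so vmulq_lane_s32/vrshrq_n_s32 suffice, while
at bd == 12 the products can overflow 32 bits, so the code widens with
vmull_lane_s32 and narrows back via vrshrn_n_s64. For orientation, stage 1 of
the full 8-point butterfly in scalar form (sketch; constants and structure as
in idct8_c in vpx_dsp/inv_txfm.c — the 12-coefficient variants above drop the
x5/x7 terms because those inputs are known to be zero):

#define ROUND14(x) ((int32_t)(((x) + (1 << 13)) >> 14))

static void idct8_stage1_sketch(const int64_t x[8], int32_t step1[8]) {
  step1[4] = ROUND14(x[1] * 3196 - x[7] * 16069);  /* cospi_28, cospi_4 */
  step1[5] = ROUND14(x[5] * 13623 - x[3] * 9102);  /* cospi_12, cospi_20 */
  step1[6] = ROUND14(x[5] * 9102 + x[3] * 13623);  /* cospi_20, cospi_12 */
  step1[7] = ROUND14(x[1] * 16069 + x[7] * 3196);  /* cospi_4, cospi_28 */
}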
+
+static INLINE void highbd_add8x8(int16x8_t a0, int16x8_t a1, int16x8_t a2,
+                                 int16x8_t a3, int16x8_t a4, int16x8_t a5,
+                                 int16x8_t a6, int16x8_t a7, uint16_t *dest,
+                                 const int stride, const int bd) {
+  const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
+  const uint16_t *dst = dest;
+  uint16x8_t d0, d1, d2, d3, d4, d5, d6, d7;
+  uint16x8_t d0_u16, d1_u16, d2_u16, d3_u16, d4_u16, d5_u16, d6_u16, d7_u16;
+  int16x8_t d0_s16, d1_s16, d2_s16, d3_s16, d4_s16, d5_s16, d6_s16, d7_s16;
+
+  d0 = vld1q_u16(dst);
+  dst += stride;
+  d1 = vld1q_u16(dst);
+  dst += stride;
+  d2 = vld1q_u16(dst);
+  dst += stride;
+  d3 = vld1q_u16(dst);
+  dst += stride;
+  d4 = vld1q_u16(dst);
+  dst += stride;
+  d5 = vld1q_u16(dst);
+  dst += stride;
+  d6 = vld1q_u16(dst);
+  dst += stride;
+  d7 = vld1q_u16(dst);
+
+  d0_s16 = vqaddq_s16(a0, vreinterpretq_s16_u16(d0));
+  d1_s16 = vqaddq_s16(a1, vreinterpretq_s16_u16(d1));
+  d2_s16 = vqaddq_s16(a2, vreinterpretq_s16_u16(d2));
+  d3_s16 = vqaddq_s16(a3, vreinterpretq_s16_u16(d3));
+  d4_s16 = vqaddq_s16(a4, vreinterpretq_s16_u16(d4));
+  d5_s16 = vqaddq_s16(a5, vreinterpretq_s16_u16(d5));
+  d6_s16 = vqaddq_s16(a6, vreinterpretq_s16_u16(d6));
+  d7_s16 = vqaddq_s16(a7, vreinterpretq_s16_u16(d7));
+
+  d0_s16 = vminq_s16(d0_s16, max);
+  d1_s16 = vminq_s16(d1_s16, max);
+  d2_s16 = vminq_s16(d2_s16, max);
+  d3_s16 = vminq_s16(d3_s16, max);
+  d4_s16 = vminq_s16(d4_s16, max);
+  d5_s16 = vminq_s16(d5_s16, max);
+  d6_s16 = vminq_s16(d6_s16, max);
+  d7_s16 = vminq_s16(d7_s16, max);
+  d0_u16 = vqshluq_n_s16(d0_s16, 0);
+  d1_u16 = vqshluq_n_s16(d1_s16, 0);
+  d2_u16 = vqshluq_n_s16(d2_s16, 0);
+  d3_u16 = vqshluq_n_s16(d3_s16, 0);
+  d4_u16 = vqshluq_n_s16(d4_s16, 0);
+  d5_u16 = vqshluq_n_s16(d5_s16, 0);
+  d6_u16 = vqshluq_n_s16(d6_s16, 0);
+  d7_u16 = vqshluq_n_s16(d7_s16, 0);
+
+  vst1q_u16(dest, d0_u16);
+  dest += stride;
+  vst1q_u16(dest, d1_u16);
+  dest += stride;
+  vst1q_u16(dest, d2_u16);
+  dest += stride;
+  vst1q_u16(dest, d3_u16);
+  dest += stride;
+  vst1q_u16(dest, d4_u16);
+  dest += stride;
+  vst1q_u16(dest, d5_u16);
+  dest += stride;
+  vst1q_u16(dest, d6_u16);
+  dest += stride;
+  vst1q_u16(dest, d7_u16);
+}
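
The store-side clamp in highbd_add8x8: saturating-add the residual to the
pixels (vqaddq_s16), cap at (1 << bd) - 1 with vminq_s16, then
vqshluq_n_s16(x, 0) — a saturating "shift left by 0" into unsigned — which
maps all negative lanes to 0 while converting to uint16. Scalar equivalent
(my sketch):

static inline uint16_t clamp_add(uint16_t pixel, int16_t residual, int bd) {
  const int32_t max = (1 << bd) - 1;
  int32_t v = (int32_t)pixel + residual;  /* vqaddq_s16 */
  if (v > max) v = max;                   /* vminq_s16 */
  return (uint16_t)(v < 0 ? 0 : v);       /* vqshluq_n_s16(x, 0) */
}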
+
+void vpx_highbd_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
+  uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+  int32x4_t a0 = vld1q_s32(input);
+  int32x4_t a1 = vld1q_s32(input + 8);
+  int32x4_t a2 = vld1q_s32(input + 16);
+  int32x4_t a3 = vld1q_s32(input + 24);
+  int16x8_t c0, c1, c2, c3, c4, c5, c6, c7;
+
+  if (bd == 8) {
+    const int16x8_t cospis = vld1q_s16(kCospi);
+    const int16x8_t cospisd = vaddq_s16(cospis, cospis);
+    const int16x4_t cospis0 = vget_low_s16(cospis);     // cospi 0, 8, 16, 24
+    const int16x4_t cospisd0 = vget_low_s16(cospisd);   // doubled 0, 8, 16, 24
+    const int16x4_t cospisd1 = vget_high_s16(cospisd);  // doubled 4, 12, 20, 28
+    int16x4_t b0 = vmovn_s32(a0);
+    int16x4_t b1 = vmovn_s32(a1);
+    int16x4_t b2 = vmovn_s32(a2);
+    int16x4_t b3 = vmovn_s32(a3);
+    int16x4_t b4, b5, b6, b7;
+
+    idct8x8_12_pass1_bd8(cospis0, cospisd0, cospisd1, &b0, &b1, &b2, &b3, &b4,
+                         &b5, &b6, &b7);
+    idct8x8_12_pass2_bd8(cospis0, cospisd0, cospisd1, b0, b1, b2, b3, b4, b5,
+                         b6, b7, &c0, &c1, &c2, &c3, &c4, &c5, &c6, &c7);
+    c0 = vrshrq_n_s16(c0, 5);
+    c1 = vrshrq_n_s16(c1, 5);
+    c2 = vrshrq_n_s16(c2, 5);
+    c3 = vrshrq_n_s16(c3, 5);
+    c4 = vrshrq_n_s16(c4, 5);
+    c5 = vrshrq_n_s16(c5, 5);
+    c6 = vrshrq_n_s16(c6, 5);
+    c7 = vrshrq_n_s16(c7, 5);
+  } else {
+    const int32x4_t cospis0 = vld1q_s32(kCospi32);      // cospi 0, 8, 16, 24
+    const int32x4_t cospis1 = vld1q_s32(kCospi32 + 4);  // cospi 4, 12, 20, 28
+    int32x4_t a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15;
+
+    if (bd == 10) {
+      idct8x8_12_half1d_bd10(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5,
+                             &a6, &a7);
+      idct8x8_12_half1d_bd10(cospis0, cospis1, &a0, &a1, &a2, &a3, &a8, &a9,
+                             &a10, &a11);
+      idct8x8_12_half1d_bd10(cospis0, cospis1, &a4, &a5, &a6, &a7, &a12, &a13,
+                             &a14, &a15);
+    } else {
+      idct8x8_12_half1d_bd12(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5,
+                             &a6, &a7);
+      idct8x8_12_half1d_bd12(cospis0, cospis1, &a0, &a1, &a2, &a3, &a8, &a9,
+                             &a10, &a11);
+      idct8x8_12_half1d_bd12(cospis0, cospis1, &a4, &a5, &a6, &a7, &a12, &a13,
+                             &a14, &a15);
+    }
+    c0 = vcombine_s16(vrshrn_n_s32(a0, 5), vrshrn_n_s32(a4, 5));
+    c1 = vcombine_s16(vrshrn_n_s32(a1, 5), vrshrn_n_s32(a5, 5));
+    c2 = vcombine_s16(vrshrn_n_s32(a2, 5), vrshrn_n_s32(a6, 5));
+    c3 = vcombine_s16(vrshrn_n_s32(a3, 5), vrshrn_n_s32(a7, 5));
+    c4 = vcombine_s16(vrshrn_n_s32(a8, 5), vrshrn_n_s32(a12, 5));
+    c5 = vcombine_s16(vrshrn_n_s32(a9, 5), vrshrn_n_s32(a13, 5));
+    c6 = vcombine_s16(vrshrn_n_s32(a10, 5), vrshrn_n_s32(a14, 5));
+    c7 = vcombine_s16(vrshrn_n_s32(a11, 5), vrshrn_n_s32(a15, 5));
+  }
+  highbd_add8x8(c0, c1, c2, c3, c4, c5, c6, c7, dest, stride, bd);
+}
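
Dispatch in vpx_highbd_idct8x8_12_add_neon: bd == 8 narrows to int16 and
reuses the bd8 kernels now living in idct_neon.h; 10- and 12-bit content takes
the 32-bit paths above. Note that only the top-left 4x4 quadrant is loaded
(input + 0, 8, 16, 24), which is exactly the guarantee the 12-coefficient
shortcut relies on. Both branches finish identically, removing the 2-D
transform's 2^5 gain with a rounding shift before highbd_add8x8; per lane
(sketch):

static inline int16_t round5(int32_t x) {
  return (int16_t)((x + 16) >> 5);  /* == vrshrn_n_s32(v, 5) per lane */
}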
+
+static INLINE void idct8x8_64_half1d_bd10(
+    const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+    int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+    int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+    int32x4_t *const io7) {
+  int32x4_t step1[8], step2[8];
+
+  transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
+
+  // stage 1
+  step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
+  step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
+  step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
+  step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
+
+  step1[4] = vmlsq_lane_s32(step1[4], *io7, vget_low_s32(cospis1), 0);
+  step1[5] = vmlaq_lane_s32(step1[5], *io5, vget_low_s32(cospis1), 1);
+  step1[6] = vmlsq_lane_s32(step1[6], *io5, vget_high_s32(cospis1), 0);
+  step1[7] = vmlaq_lane_s32(step1[7], *io7, vget_high_s32(cospis1), 1);
+
+  step1[4] = vrshrq_n_s32(step1[4], 14);
+  step1[5] = vrshrq_n_s32(step1[5], 14);
+  step1[6] = vrshrq_n_s32(step1[6], 14);
+  step1[7] = vrshrq_n_s32(step1[7], 14);
+
+  // stage 2
+  step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
+  step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
+  step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
+
+  step2[0] = vmlaq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
+  step2[1] = vmlsq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
+  step2[2] = vmlsq_lane_s32(step2[2], *io6, vget_low_s32(cospis0), 1);
+  step2[3] = vmlaq_lane_s32(step2[3], *io6, vget_high_s32(cospis0), 1);
+
+  step2[0] = vrshrq_n_s32(step2[0], 14);
+  step2[1] = vrshrq_n_s32(step2[1], 14);
+  step2[2] = vrshrq_n_s32(step2[2], 14);
+  step2[3] = vrshrq_n_s32(step2[3], 14);
+
+  step2[4] = vaddq_s32(step1[4], step1[5]);
+  step2[5] = vsubq_s32(step1[4], step1[5]);
+  step2[6] = vsubq_s32(step1[7], step1[6]);
+  step2[7] = vaddq_s32(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s32(step2[0], step2[3]);
+  step1[1] = vaddq_s32(step2[1], step2[2]);
+  step1[2] = vsubq_s32(step2[1], step2[2]);
+  step1[3] = vsubq_s32(step2[0], step2[3]);
+
+  step1[6] = vmulq_lane_s32(step2[6], vget_high_s32(cospis0), 0);
+  step1[5] = vmlsq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+  step1[6] = vmlaq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+  step1[5] = vrshrq_n_s32(step1[5], 14);
+  step1[6] = vrshrq_n_s32(step1[6], 14);
+
+  // stage 4
+  *io0 = vaddq_s32(step1[0], step2[7]);
+  *io1 = vaddq_s32(step1[1], step1[6]);
+  *io2 = vaddq_s32(step1[2], step1[5]);
+  *io3 = vaddq_s32(step1[3], step2[4]);
+  *io4 = vsubq_s32(step1[3], step2[4]);
+  *io5 = vsubq_s32(step1[2], step1[5]);
+  *io6 = vsubq_s32(step1[1], step1[6]);
+  *io7 = vsubq_s32(step1[0], step2[7]);
+}
+
+static INLINE void idct8x8_64_half1d_bd12(
+    const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+    int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+    int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+    int32x4_t *const io7) {
+  int32x2_t input_1l, input_1h, input_3l, input_3h, input_5l, input_5h,
+      input_7l, input_7h;
+  int32x2_t step1l[4], step1h[4];
+  int32x4_t step1[8], step2[8];
+  int64x2_t t64[8];
+  int32x2_t t32[8];
+
+  transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
+
+  // stage 1
+  input_1l = vget_low_s32(*io1);
+  input_1h = vget_high_s32(*io1);
+  input_3l = vget_low_s32(*io3);
+  input_3h = vget_high_s32(*io3);
+  input_5l = vget_low_s32(*io5);
+  input_5h = vget_high_s32(*io5);
+  input_7l = vget_low_s32(*io7);
+  input_7h = vget_high_s32(*io7);
+  step1l[0] = vget_low_s32(*io0);
+  step1h[0] = vget_high_s32(*io0);
+  step1l[1] = vget_low_s32(*io2);
+  step1h[1] = vget_high_s32(*io2);
+  step1l[2] = vget_low_s32(*io4);
+  step1h[2] = vget_high_s32(*io4);
+  step1l[3] = vget_low_s32(*io6);
+  step1h[3] = vget_high_s32(*io6);
+
+  t64[0] = vmull_lane_s32(input_1l, vget_high_s32(cospis1), 1);
+  t64[1] = vmull_lane_s32(input_1h, vget_high_s32(cospis1), 1);
+  t64[2] = vmull_lane_s32(input_3l, vget_high_s32(cospis1), 0);
+  t64[3] = vmull_lane_s32(input_3h, vget_high_s32(cospis1), 0);
+  t64[4] = vmull_lane_s32(input_3l, vget_low_s32(cospis1), 1);
+  t64[5] = vmull_lane_s32(input_3h, vget_low_s32(cospis1), 1);
+  t64[6] = vmull_lane_s32(input_1l, vget_low_s32(cospis1), 0);
+  t64[7] = vmull_lane_s32(input_1h, vget_low_s32(cospis1), 0);
+  t64[0] = vmlsl_lane_s32(t64[0], input_7l, vget_low_s32(cospis1), 0);
+  t64[1] = vmlsl_lane_s32(t64[1], input_7h, vget_low_s32(cospis1), 0);
+  t64[2] = vmlal_lane_s32(t64[2], input_5l, vget_low_s32(cospis1), 1);
+  t64[3] = vmlal_lane_s32(t64[3], input_5h, vget_low_s32(cospis1), 1);
+  t64[4] = vmlsl_lane_s32(t64[4], input_5l, vget_high_s32(cospis1), 0);
+  t64[5] = vmlsl_lane_s32(t64[5], input_5h, vget_high_s32(cospis1), 0);
+  t64[6] = vmlal_lane_s32(t64[6], input_7l, vget_high_s32(cospis1), 1);
+  t64[7] = vmlal_lane_s32(t64[7], input_7h, vget_high_s32(cospis1), 1);
+  t32[0] = vrshrn_n_s64(t64[0], 14);
+  t32[1] = vrshrn_n_s64(t64[1], 14);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  t32[4] = vrshrn_n_s64(t64[4], 14);
+  t32[5] = vrshrn_n_s64(t64[5], 14);
+  t32[6] = vrshrn_n_s64(t64[6], 14);
+  t32[7] = vrshrn_n_s64(t64[7], 14);
+  step1[4] = vcombine_s32(t32[0], t32[1]);
+  step1[5] = vcombine_s32(t32[2], t32[3]);
+  step1[6] = vcombine_s32(t32[4], t32[5]);
+  step1[7] = vcombine_s32(t32[6], t32[7]);
+
+  // stage 2
+  t64[2] = vmull_lane_s32(step1l[0], vget_high_s32(cospis0), 0);
+  t64[3] = vmull_lane_s32(step1h[0], vget_high_s32(cospis0), 0);
+  t64[4] = vmull_lane_s32(step1l[1], vget_high_s32(cospis0), 1);
+  t64[5] = vmull_lane_s32(step1h[1], vget_high_s32(cospis0), 1);
+  t64[6] = vmull_lane_s32(step1l[1], vget_low_s32(cospis0), 1);
+  t64[7] = vmull_lane_s32(step1h[1], vget_low_s32(cospis0), 1);
+  t64[0] = vmlal_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
+  t64[1] = vmlal_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
+  t64[2] = vmlsl_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
+  t64[3] = vmlsl_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
+  t64[4] = vmlsl_lane_s32(t64[4], step1l[3], vget_low_s32(cospis0), 1);
+  t64[5] = vmlsl_lane_s32(t64[5], step1h[3], vget_low_s32(cospis0), 1);
+  t64[6] = vmlal_lane_s32(t64[6], step1l[3], vget_high_s32(cospis0), 1);
+  t64[7] = vmlal_lane_s32(t64[7], step1h[3], vget_high_s32(cospis0), 1);
+  t32[0] = vrshrn_n_s64(t64[0], 14);
+  t32[1] = vrshrn_n_s64(t64[1], 14);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  t32[4] = vrshrn_n_s64(t64[4], 14);
+  t32[5] = vrshrn_n_s64(t64[5], 14);
+  t32[6] = vrshrn_n_s64(t64[6], 14);
+  t32[7] = vrshrn_n_s64(t64[7], 14);
+  step2[0] = vcombine_s32(t32[0], t32[1]);
+  step2[1] = vcombine_s32(t32[2], t32[3]);
+  step2[2] = vcombine_s32(t32[4], t32[5]);
+  step2[3] = vcombine_s32(t32[6], t32[7]);
+
+  step2[4] = vaddq_s32(step1[4], step1[5]);
+  step2[5] = vsubq_s32(step1[4], step1[5]);
+  step2[6] = vsubq_s32(step1[7], step1[6]);
+  step2[7] = vaddq_s32(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s32(step2[0], step2[3]);
+  step1[1] = vaddq_s32(step2[1], step2[2]);
+  step1[2] = vsubq_s32(step2[1], step2[2]);
+  step1[3] = vsubq_s32(step2[0], step2[3]);
+
+  t64[2] = vmull_lane_s32(vget_low_s32(step2[6]), vget_high_s32(cospis0), 0);
+  t64[3] = vmull_lane_s32(vget_high_s32(step2[6]), vget_high_s32(cospis0), 0);
+  t64[0] =
+      vmlsl_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+  t64[1] = vmlsl_lane_s32(t64[3], vget_high_s32(step2[5]),
+                          vget_high_s32(cospis0), 0);
+  t64[2] =
+      vmlal_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+  t64[3] = vmlal_lane_s32(t64[3], vget_high_s32(step2[5]),
+                          vget_high_s32(cospis0), 0);
+  t32[0] = vrshrn_n_s64(t64[0], 14);
+  t32[1] = vrshrn_n_s64(t64[1], 14);
+  t32[2] = vrshrn_n_s64(t64[2], 14);
+  t32[3] = vrshrn_n_s64(t64[3], 14);
+  step1[5] = vcombine_s32(t32[0], t32[1]);
+  step1[6] = vcombine_s32(t32[2], t32[3]);
+
+  // stage 4
+  *io0 = vaddq_s32(step1[0], step2[7]);
+  *io1 = vaddq_s32(step1[1], step1[6]);
+  *io2 = vaddq_s32(step1[2], step1[5]);
+  *io3 = vaddq_s32(step1[3], step2[4]);
+  *io4 = vsubq_s32(step1[3], step2[4]);
+  *io5 = vsubq_s32(step1[2], step1[5]);
+  *io6 = vsubq_s32(step1[1], step1[6]);
+  *io7 = vsubq_s32(step1[0], step2[7]);
+}
+
+void vpx_highbd_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
+  uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+  int32x4_t a0 = vld1q_s32(input);
+  int32x4_t a1 = vld1q_s32(input + 4);
+  int32x4_t a2 = vld1q_s32(input + 8);
+  int32x4_t a3 = vld1q_s32(input + 12);
+  int32x4_t a4 = vld1q_s32(input + 16);
+  int32x4_t a5 = vld1q_s32(input + 20);
+  int32x4_t a6 = vld1q_s32(input + 24);
+  int32x4_t a7 = vld1q_s32(input + 28);
+  int32x4_t a8 = vld1q_s32(input + 32);
+  int32x4_t a9 = vld1q_s32(input + 36);
+  int32x4_t a10 = vld1q_s32(input + 40);
+  int32x4_t a11 = vld1q_s32(input + 44);
+  int32x4_t a12 = vld1q_s32(input + 48);
+  int32x4_t a13 = vld1q_s32(input + 52);
+  int32x4_t a14 = vld1q_s32(input + 56);
+  int32x4_t a15 = vld1q_s32(input + 60);
+  int16x8_t c0, c1, c2, c3, c4, c5, c6, c7;
+
+  if (bd == 8) {
+    const int16x8_t cospis = vld1q_s16(kCospi);
+    const int16x4_t cospis0 = vget_low_s16(cospis);   // cospi 0, 8, 16, 24
+    const int16x4_t cospis1 = vget_high_s16(cospis);  // cospi 4, 12, 20, 28
+    int16x8_t b0 = vcombine_s16(vmovn_s32(a0), vmovn_s32(a1));
+    int16x8_t b1 = vcombine_s16(vmovn_s32(a2), vmovn_s32(a3));
+    int16x8_t b2 = vcombine_s16(vmovn_s32(a4), vmovn_s32(a5));
+    int16x8_t b3 = vcombine_s16(vmovn_s32(a6), vmovn_s32(a7));
+    int16x8_t b4 = vcombine_s16(vmovn_s32(a8), vmovn_s32(a9));
+    int16x8_t b5 = vcombine_s16(vmovn_s32(a10), vmovn_s32(a11));
+    int16x8_t b6 = vcombine_s16(vmovn_s32(a12), vmovn_s32(a13));
+    int16x8_t b7 = vcombine_s16(vmovn_s32(a14), vmovn_s32(a15));
+
+    idct8x8_64_1d_bd8(cospis0, cospis1, &b0, &b1, &b2, &b3, &b4, &b5, &b6, &b7);
+    idct8x8_64_1d_bd8(cospis0, cospis1, &b0, &b1, &b2, &b3, &b4, &b5, &b6, &b7);
+
+    c0 = vrshrq_n_s16(b0, 5);
+    c1 = vrshrq_n_s16(b1, 5);
+    c2 = vrshrq_n_s16(b2, 5);
+    c3 = vrshrq_n_s16(b3, 5);
+    c4 = vrshrq_n_s16(b4, 5);
+    c5 = vrshrq_n_s16(b5, 5);
+    c6 = vrshrq_n_s16(b6, 5);
+    c7 = vrshrq_n_s16(b7, 5);
+  } else {
+    const int32x4_t cospis0 = vld1q_s32(kCospi32);      // cospi 0, 8, 16, 24
+    const int32x4_t cospis1 = vld1q_s32(kCospi32 + 4);  // cospi 4, 12, 20, 28
+
+    if (bd == 10) {
+      idct8x8_64_half1d_bd10(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5,
+                             &a6, &a7);
+      idct8x8_64_half1d_bd10(cospis0, cospis1, &a8, &a9, &a10, &a11, &a12, &a13,
+                             &a14, &a15);
+      idct8x8_64_half1d_bd10(cospis0, cospis1, &a0, &a8, &a1, &a9, &a2, &a10,
+                             &a3, &a11);
+      idct8x8_64_half1d_bd10(cospis0, cospis1, &a4, &a12, &a5, &a13, &a6, &a14,
+                             &a7, &a15);
+    } else {
+      idct8x8_64_half1d_bd12(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5,
+                             &a6, &a7);
+      idct8x8_64_half1d_bd12(cospis0, cospis1, &a8, &a9, &a10, &a11, &a12, &a13,
+                             &a14, &a15);
+      idct8x8_64_half1d_bd12(cospis0, cospis1, &a0, &a8, &a1, &a9, &a2, &a10,
+                             &a3, &a11);
+      idct8x8_64_half1d_bd12(cospis0, cospis1, &a4, &a12, &a5, &a13, &a6, &a14,
+                             &a7, &a15);
+    }
+    c0 = vcombine_s16(vrshrn_n_s32(a0, 5), vrshrn_n_s32(a4, 5));
+    c1 = vcombine_s16(vrshrn_n_s32(a8, 5), vrshrn_n_s32(a12, 5));
+    c2 = vcombine_s16(vrshrn_n_s32(a1, 5), vrshrn_n_s32(a5, 5));
+    c3 = vcombine_s16(vrshrn_n_s32(a9, 5), vrshrn_n_s32(a13, 5));
+    c4 = vcombine_s16(vrshrn_n_s32(a2, 5), vrshrn_n_s32(a6, 5));
+    c5 = vcombine_s16(vrshrn_n_s32(a10, 5), vrshrn_n_s32(a14, 5));
+    c6 = vcombine_s16(vrshrn_n_s32(a3, 5), vrshrn_n_s32(a7, 5));
+    c7 = vcombine_s16(vrshrn_n_s32(a11, 5), vrshrn_n_s32(a15, 5));
+  }
+  highbd_add8x8(c0, c1, c2, c3, c4, c5, c6, c7, dest, stride, bd);
+}
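
vpx_highbd_idct8x8_64_add_neon runs the 4-wide half-transform four times on
the 32-bit paths: once per four-row slice for the row pass, then twice more
for the column pass, where (as I read it) the shuffled argument order in the
second pair of calls compensates for the interleaving transpose_s32_8x4 leaves
behind. One usability note: in high-bitdepth builds dest8 is a disguised
uint16_t pointer. A hypothetical call site, using the CONVERT_TO_BYTEPTR
convention from vpx_dsp/vpx_dsp_common.h:

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/vpx_dsp_common.h"

void add_8x8_idct_highbd(const tran_low_t coeff[64], uint16_t *recon,
                         int stride, int bd) {
  vpx_highbd_idct8x8_64_add_neon(coeff, CONVERT_TO_BYTEPTR(recon), stride, bd);
}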
--- a/vpx_dsp/arm/idct8x8_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_add_neon.c
@@ -16,132 +16,6 @@
 #include "vpx_dsp/arm/transpose_neon.h"
 #include "vpx_dsp/txfm_common.h"
 
-static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
-                                     const int16x4_t cospis1,
-                                     int16x8_t *const io0, int16x8_t *const io1,
-                                     int16x8_t *const io2, int16x8_t *const io3,
-                                     int16x8_t *const io4, int16x8_t *const io5,
-                                     int16x8_t *const io6,
-                                     int16x8_t *const io7) {
-  int16x4_t input_1l, input_1h, input_3l, input_3h, input_5l, input_5h,
-      input_7l, input_7h;
-  int16x4_t step1l[4], step1h[4];
-  int16x8_t step1[8], step2[8];
-  int32x4_t t32[8];
-  int16x4_t t16[8];
-
-  transpose_s16_8x8(io0, io1, io2, io3, io4, io5, io6, io7);
-
-  // stage 1
-  input_1l = vget_low_s16(*io1);
-  input_1h = vget_high_s16(*io1);
-  input_3l = vget_low_s16(*io3);
-  input_3h = vget_high_s16(*io3);
-  input_5l = vget_low_s16(*io5);
-  input_5h = vget_high_s16(*io5);
-  input_7l = vget_low_s16(*io7);
-  input_7h = vget_high_s16(*io7);
-  step1l[0] = vget_low_s16(*io0);
-  step1h[0] = vget_high_s16(*io0);
-  step1l[1] = vget_low_s16(*io2);
-  step1h[1] = vget_high_s16(*io2);
-  step1l[2] = vget_low_s16(*io4);
-  step1h[2] = vget_high_s16(*io4);
-  step1l[3] = vget_low_s16(*io6);
-  step1h[3] = vget_high_s16(*io6);
-
-  t32[0] = vmull_lane_s16(input_1l, cospis1, 3);
-  t32[1] = vmull_lane_s16(input_1h, cospis1, 3);
-  t32[2] = vmull_lane_s16(input_3l, cospis1, 2);
-  t32[3] = vmull_lane_s16(input_3h, cospis1, 2);
-  t32[4] = vmull_lane_s16(input_3l, cospis1, 1);
-  t32[5] = vmull_lane_s16(input_3h, cospis1, 1);
-  t32[6] = vmull_lane_s16(input_1l, cospis1, 0);
-  t32[7] = vmull_lane_s16(input_1h, cospis1, 0);
-  t32[0] = vmlsl_lane_s16(t32[0], input_7l, cospis1, 0);
-  t32[1] = vmlsl_lane_s16(t32[1], input_7h, cospis1, 0);
-  t32[2] = vmlal_lane_s16(t32[2], input_5l, cospis1, 1);
-  t32[3] = vmlal_lane_s16(t32[3], input_5h, cospis1, 1);
-  t32[4] = vmlsl_lane_s16(t32[4], input_5l, cospis1, 2);
-  t32[5] = vmlsl_lane_s16(t32[5], input_5h, cospis1, 2);
-  t32[6] = vmlal_lane_s16(t32[6], input_7l, cospis1, 3);
-  t32[7] = vmlal_lane_s16(t32[7], input_7h, cospis1, 3);
-  t16[0] = vrshrn_n_s32(t32[0], 14);
-  t16[1] = vrshrn_n_s32(t32[1], 14);
-  t16[2] = vrshrn_n_s32(t32[2], 14);
-  t16[3] = vrshrn_n_s32(t32[3], 14);
-  t16[4] = vrshrn_n_s32(t32[4], 14);
-  t16[5] = vrshrn_n_s32(t32[5], 14);
-  t16[6] = vrshrn_n_s32(t32[6], 14);
-  t16[7] = vrshrn_n_s32(t32[7], 14);
-  step1[4] = vcombine_s16(t16[0], t16[1]);
-  step1[5] = vcombine_s16(t16[2], t16[3]);
-  step1[6] = vcombine_s16(t16[4], t16[5]);
-  step1[7] = vcombine_s16(t16[6], t16[7]);
-
-  // stage 2
-  t32[2] = vmull_lane_s16(step1l[0], cospis0, 2);
-  t32[3] = vmull_lane_s16(step1h[0], cospis0, 2);
-  t32[4] = vmull_lane_s16(step1l[1], cospis0, 3);
-  t32[5] = vmull_lane_s16(step1h[1], cospis0, 3);
-  t32[6] = vmull_lane_s16(step1l[1], cospis0, 1);
-  t32[7] = vmull_lane_s16(step1h[1], cospis0, 1);
-  t32[0] = vmlal_lane_s16(t32[2], step1l[2], cospis0, 2);
-  t32[1] = vmlal_lane_s16(t32[3], step1h[2], cospis0, 2);
-  t32[2] = vmlsl_lane_s16(t32[2], step1l[2], cospis0, 2);
-  t32[3] = vmlsl_lane_s16(t32[3], step1h[2], cospis0, 2);
-  t32[4] = vmlsl_lane_s16(t32[4], step1l[3], cospis0, 1);
-  t32[5] = vmlsl_lane_s16(t32[5], step1h[3], cospis0, 1);
-  t32[6] = vmlal_lane_s16(t32[6], step1l[3], cospis0, 3);
-  t32[7] = vmlal_lane_s16(t32[7], step1h[3], cospis0, 3);
-  t16[0] = vrshrn_n_s32(t32[0], 14);
-  t16[1] = vrshrn_n_s32(t32[1], 14);
-  t16[2] = vrshrn_n_s32(t32[2], 14);
-  t16[3] = vrshrn_n_s32(t32[3], 14);
-  t16[4] = vrshrn_n_s32(t32[4], 14);
-  t16[5] = vrshrn_n_s32(t32[5], 14);
-  t16[6] = vrshrn_n_s32(t32[6], 14);
-  t16[7] = vrshrn_n_s32(t32[7], 14);
-  step2[0] = vcombine_s16(t16[0], t16[1]);
-  step2[1] = vcombine_s16(t16[2], t16[3]);
-  step2[2] = vcombine_s16(t16[4], t16[5]);
-  step2[3] = vcombine_s16(t16[6], t16[7]);
-
-  step2[4] = vaddq_s16(step1[4], step1[5]);
-  step2[5] = vsubq_s16(step1[4], step1[5]);
-  step2[6] = vsubq_s16(step1[7], step1[6]);
-  step2[7] = vaddq_s16(step1[7], step1[6]);
-
-  // stage 3
-  step1[0] = vaddq_s16(step2[0], step2[3]);
-  step1[1] = vaddq_s16(step2[1], step2[2]);
-  step1[2] = vsubq_s16(step2[1], step2[2]);
-  step1[3] = vsubq_s16(step2[0], step2[3]);
-
-  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
-  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
-  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
-  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
-  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
-  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
-  t16[0] = vrshrn_n_s32(t32[0], 14);
-  t16[1] = vrshrn_n_s32(t32[1], 14);
-  t16[2] = vrshrn_n_s32(t32[2], 14);
-  t16[3] = vrshrn_n_s32(t32[3], 14);
-  step1[5] = vcombine_s16(t16[0], t16[1]);
-  step1[6] = vcombine_s16(t16[2], t16[3]);
-
-  // stage 4
-  *io0 = vaddq_s16(step1[0], step2[7]);
-  *io1 = vaddq_s16(step1[1], step1[6]);
-  *io2 = vaddq_s16(step1[2], step1[5]);
-  *io3 = vaddq_s16(step1[3], step2[4]);
-  *io4 = vsubq_s16(step1[3], step2[4]);
-  *io5 = vsubq_s16(step1[2], step1[5]);
-  *io6 = vsubq_s16(step1[1], step1[6]);
-  *io7 = vsubq_s16(step1[0], step2[7]);
-}
-
 static INLINE void add8x8(int16x8_t a0, int16x8_t a1, int16x8_t a2,
                           int16x8_t a3, int16x8_t a4, int16x8_t a5,
                           int16x8_t a6, int16x8_t a7, uint8_t *dest,
@@ -227,118 +101,6 @@
   idct8x8_64_1d_bd8(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);
   idct8x8_64_1d_bd8(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);
   add8x8(a0, a1, a2, a3, a4, a5, a6, a7, dest, stride);
-}
-
-static INLINE void idct8x8_12_pass1_bd8(
-    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
-    int16x4_t *const io0, int16x4_t *const io1, int16x4_t *const io2,
-    int16x4_t *const io3, int16x4_t *const io4, int16x4_t *const io5,
-    int16x4_t *const io6, int16x4_t *const io7) {
-  int16x4_t step1[8], step2[8];
-  int32x4_t t32[2];
-
-  transpose_s16_4x4d(io0, io1, io2, io3);
-
-  // stage 1
-  step1[4] = vqrdmulh_lane_s16(*io1, cospisd1, 3);
-  step1[5] = vqrdmulh_lane_s16(*io3, cospisd1, 2);
-  step1[6] = vqrdmulh_lane_s16(*io3, cospisd1, 1);
-  step1[7] = vqrdmulh_lane_s16(*io1, cospisd1, 0);
-
-  // stage 2
-  step2[0] = vqrdmulh_lane_s16(*io0, cospisd0, 2);
-  step2[2] = vqrdmulh_lane_s16(*io2, cospisd0, 3);
-  step2[3] = vqrdmulh_lane_s16(*io2, cospisd0, 1);
-
-  step2[4] = vadd_s16(step1[4], step1[5]);
-  step2[5] = vsub_s16(step1[4], step1[5]);
-  step2[6] = vsub_s16(step1[7], step1[6]);
-  step2[7] = vadd_s16(step1[7], step1[6]);
-
-  // stage 3
-  step1[0] = vadd_s16(step2[0], step2[3]);
-  step1[1] = vadd_s16(step2[0], step2[2]);
-  step1[2] = vsub_s16(step2[0], step2[2]);
-  step1[3] = vsub_s16(step2[0], step2[3]);
-
-  t32[1] = vmull_lane_s16(step2[6], cospis0, 2);
-  t32[0] = vmlsl_lane_s16(t32[1], step2[5], cospis0, 2);
-  t32[1] = vmlal_lane_s16(t32[1], step2[5], cospis0, 2);
-  step1[5] = vrshrn_n_s32(t32[0], 14);
-  step1[6] = vrshrn_n_s32(t32[1], 14);
-
-  // stage 4
-  *io0 = vadd_s16(step1[0], step2[7]);
-  *io1 = vadd_s16(step1[1], step1[6]);
-  *io2 = vadd_s16(step1[2], step1[5]);
-  *io3 = vadd_s16(step1[3], step2[4]);
-  *io4 = vsub_s16(step1[3], step2[4]);
-  *io5 = vsub_s16(step1[2], step1[5]);
-  *io6 = vsub_s16(step1[1], step1[6]);
-  *io7 = vsub_s16(step1[0], step2[7]);
-}
-
-static INLINE void idct8x8_12_pass2_bd8(
-    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
-    const int16x4_t input0, const int16x4_t input1, const int16x4_t input2,
-    const int16x4_t input3, const int16x4_t input4, const int16x4_t input5,
-    const int16x4_t input6, const int16x4_t input7, int16x8_t *const output0,
-    int16x8_t *const output1, int16x8_t *const output2,
-    int16x8_t *const output3, int16x8_t *const output4,
-    int16x8_t *const output5, int16x8_t *const output6,
-    int16x8_t *const output7) {
-  int16x8_t in[4];
-  int16x8_t step1[8], step2[8];
-  int32x4_t t32[8];
-  int16x4_t t16[8];
-
-  transpose_s16_4x8(input0, input1, input2, input3, input4, input5, input6,
-                    input7, &in[0], &in[1], &in[2], &in[3]);
-
-  // stage 1
-  step1[4] = vqrdmulhq_lane_s16(in[1], cospisd1, 3);
-  step1[5] = vqrdmulhq_lane_s16(in[3], cospisd1, 2);
-  step1[6] = vqrdmulhq_lane_s16(in[3], cospisd1, 1);
-  step1[7] = vqrdmulhq_lane_s16(in[1], cospisd1, 0);
-
-  // stage 2
-  step2[0] = vqrdmulhq_lane_s16(in[0], cospisd0, 2);
-  step2[2] = vqrdmulhq_lane_s16(in[2], cospisd0, 3);
-  step2[3] = vqrdmulhq_lane_s16(in[2], cospisd0, 1);
-
-  step2[4] = vaddq_s16(step1[4], step1[5]);
-  step2[5] = vsubq_s16(step1[4], step1[5]);
-  step2[6] = vsubq_s16(step1[7], step1[6]);
-  step2[7] = vaddq_s16(step1[7], step1[6]);
-
-  // stage 3
-  step1[0] = vaddq_s16(step2[0], step2[3]);
-  step1[1] = vaddq_s16(step2[0], step2[2]);
-  step1[2] = vsubq_s16(step2[0], step2[2]);
-  step1[3] = vsubq_s16(step2[0], step2[3]);
-
-  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
-  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
-  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
-  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
-  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
-  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
-  t16[0] = vrshrn_n_s32(t32[0], 14);
-  t16[1] = vrshrn_n_s32(t32[1], 14);
-  t16[2] = vrshrn_n_s32(t32[2], 14);
-  t16[3] = vrshrn_n_s32(t32[3], 14);
-  step1[5] = vcombine_s16(t16[0], t16[1]);
-  step1[6] = vcombine_s16(t16[2], t16[3]);
-
-  // stage 4
-  *output0 = vaddq_s16(step1[0], step2[7]);
-  *output1 = vaddq_s16(step1[1], step1[6]);
-  *output2 = vaddq_s16(step1[2], step1[5]);
-  *output3 = vaddq_s16(step1[3], step2[4]);
-  *output4 = vsubq_s16(step1[3], step2[4]);
-  *output5 = vsubq_s16(step1[2], step1[5]);
-  *output6 = vsubq_s16(step1[1], step1[6]);
-  *output7 = vsubq_s16(step1[0], step2[7]);
 }
 
 void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest,
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -24,6 +24,13 @@
   -9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */
 };
 
+DECLARE_ALIGNED(16, static const int32_t, kCospi32[8]) = {
+  16384 /*  cospi_0_64 */,  15137 /* cospi_8_64 */,
+  11585 /*  cospi_16_64 */, 6270 /* cospi_24_64 */,
+  16069 /*  cospi_4_64 */,  13623 /* cospi_12_64 */,
+  -9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */
+};
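
kCospi32 mirrors the 16-bit kCospi table above it: the entry for index k holds
round(2^14 * cos(k * pi / 64)). cospi_20_64 is stored negated in both tables
so the stage-1 butterflies can be written purely with multiply-accumulate/
-subtract lane forms (see the vmlaq/vmlsq uses in highbd_idct8x8_add_neon.c).
A standalone derivation check (illustration only, not part of the patch):

#include <math.h>
#include <stdio.h>

int main(void) {
  int k;
  for (k = 0; k <= 28; k += 4)  /* prints 16384, 16069, 15137, ... */
    printf("cospi_%d_64 = %.0f\n", k, round(16384 * cos(k * M_PI / 64)));
  return 0;  /* the table stores the k == 20 entry (9102) negated */
}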
+
 //------------------------------------------------------------------------------
 // Helper functions used to load tran_low_t into int16, narrowing if necessary.
 
@@ -209,6 +216,244 @@
   d1 = vcombine_s16(b3, b2);
   *a0 = vaddq_s16(d0, d1);
   *a1 = vsubq_s16(d0, d1);
+}
+
+static INLINE void idct8x8_12_pass1_bd8(
+    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
+    int16x4_t *const io0, int16x4_t *const io1, int16x4_t *const io2,
+    int16x4_t *const io3, int16x4_t *const io4, int16x4_t *const io5,
+    int16x4_t *const io6, int16x4_t *const io7) {
+  int16x4_t step1[8], step2[8];
+  int32x4_t t32[2];
+
+  transpose_s16_4x4d(io0, io1, io2, io3);
+
+  // stage 1
+  step1[4] = vqrdmulh_lane_s16(*io1, cospisd1, 3);
+  step1[5] = vqrdmulh_lane_s16(*io3, cospisd1, 2);
+  step1[6] = vqrdmulh_lane_s16(*io3, cospisd1, 1);
+  step1[7] = vqrdmulh_lane_s16(*io1, cospisd1, 0);
+
+  // stage 2
+  step2[1] = vqrdmulh_lane_s16(*io0, cospisd0, 2);
+  step2[2] = vqrdmulh_lane_s16(*io2, cospisd0, 3);
+  step2[3] = vqrdmulh_lane_s16(*io2, cospisd0, 1);
+
+  step2[4] = vadd_s16(step1[4], step1[5]);
+  step2[5] = vsub_s16(step1[4], step1[5]);
+  step2[6] = vsub_s16(step1[7], step1[6]);
+  step2[7] = vadd_s16(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vadd_s16(step2[1], step2[3]);
+  step1[1] = vadd_s16(step2[1], step2[2]);
+  step1[2] = vsub_s16(step2[1], step2[2]);
+  step1[3] = vsub_s16(step2[1], step2[3]);
+
+  t32[1] = vmull_lane_s16(step2[6], cospis0, 2);
+  t32[0] = vmlsl_lane_s16(t32[1], step2[5], cospis0, 2);
+  t32[1] = vmlal_lane_s16(t32[1], step2[5], cospis0, 2);
+  step1[5] = vrshrn_n_s32(t32[0], 14);
+  step1[6] = vrshrn_n_s32(t32[1], 14);
+
+  // stage 4
+  *io0 = vadd_s16(step1[0], step2[7]);
+  *io1 = vadd_s16(step1[1], step1[6]);
+  *io2 = vadd_s16(step1[2], step1[5]);
+  *io3 = vadd_s16(step1[3], step2[4]);
+  *io4 = vsub_s16(step1[3], step2[4]);
+  *io5 = vsub_s16(step1[2], step1[5]);
+  *io6 = vsub_s16(step1[1], step1[6]);
+  *io7 = vsub_s16(step1[0], step2[7]);
+}
+
+static INLINE void idct8x8_12_pass2_bd8(
+    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
+    const int16x4_t input0, const int16x4_t input1, const int16x4_t input2,
+    const int16x4_t input3, const int16x4_t input4, const int16x4_t input5,
+    const int16x4_t input6, const int16x4_t input7, int16x8_t *const output0,
+    int16x8_t *const output1, int16x8_t *const output2,
+    int16x8_t *const output3, int16x8_t *const output4,
+    int16x8_t *const output5, int16x8_t *const output6,
+    int16x8_t *const output7) {
+  int16x8_t in[4];
+  int16x8_t step1[8], step2[8];
+  int32x4_t t32[8];
+  int16x4_t t16[8];
+
+  transpose_s16_4x8(input0, input1, input2, input3, input4, input5, input6,
+                    input7, &in[0], &in[1], &in[2], &in[3]);
+
+  // stage 1
+  step1[4] = vqrdmulhq_lane_s16(in[1], cospisd1, 3);
+  step1[5] = vqrdmulhq_lane_s16(in[3], cospisd1, 2);
+  step1[6] = vqrdmulhq_lane_s16(in[3], cospisd1, 1);
+  step1[7] = vqrdmulhq_lane_s16(in[1], cospisd1, 0);
+
+  // stage 2
+  step2[1] = vqrdmulhq_lane_s16(in[0], cospisd0, 2);
+  step2[2] = vqrdmulhq_lane_s16(in[2], cospisd0, 3);
+  step2[3] = vqrdmulhq_lane_s16(in[2], cospisd0, 1);
+
+  step2[4] = vaddq_s16(step1[4], step1[5]);
+  step2[5] = vsubq_s16(step1[4], step1[5]);
+  step2[6] = vsubq_s16(step1[7], step1[6]);
+  step2[7] = vaddq_s16(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s16(step2[1], step2[3]);
+  step1[1] = vaddq_s16(step2[1], step2[2]);
+  step1[2] = vsubq_s16(step2[1], step2[2]);
+  step1[3] = vsubq_s16(step2[1], step2[3]);
+
+  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
+  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
+  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
+  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
+  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
+  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
+  t16[0] = vrshrn_n_s32(t32[0], 14);
+  t16[1] = vrshrn_n_s32(t32[1], 14);
+  t16[2] = vrshrn_n_s32(t32[2], 14);
+  t16[3] = vrshrn_n_s32(t32[3], 14);
+  step1[5] = vcombine_s16(t16[0], t16[1]);
+  step1[6] = vcombine_s16(t16[2], t16[3]);
+
+  // stage 4
+  *output0 = vaddq_s16(step1[0], step2[7]);
+  *output1 = vaddq_s16(step1[1], step1[6]);
+  *output2 = vaddq_s16(step1[2], step1[5]);
+  *output3 = vaddq_s16(step1[3], step2[4]);
+  *output4 = vsubq_s16(step1[3], step2[4]);
+  *output5 = vsubq_s16(step1[2], step1[5]);
+  *output6 = vsubq_s16(step1[1], step1[6]);
+  *output7 = vsubq_s16(step1[0], step2[7]);
+}
+
+static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
+                                     const int16x4_t cospis1,
+                                     int16x8_t *const io0, int16x8_t *const io1,
+                                     int16x8_t *const io2, int16x8_t *const io3,
+                                     int16x8_t *const io4, int16x8_t *const io5,
+                                     int16x8_t *const io6,
+                                     int16x8_t *const io7) {
+  int16x4_t input_1l, input_1h, input_3l, input_3h, input_5l, input_5h,
+      input_7l, input_7h;
+  int16x4_t step1l[4], step1h[4];
+  int16x8_t step1[8], step2[8];
+  int32x4_t t32[8];
+  int16x4_t t16[8];
+
+  transpose_s16_8x8(io0, io1, io2, io3, io4, io5, io6, io7);
+
+  // stage 1
+  input_1l = vget_low_s16(*io1);
+  input_1h = vget_high_s16(*io1);
+  input_3l = vget_low_s16(*io3);
+  input_3h = vget_high_s16(*io3);
+  input_5l = vget_low_s16(*io5);
+  input_5h = vget_high_s16(*io5);
+  input_7l = vget_low_s16(*io7);
+  input_7h = vget_high_s16(*io7);
+  step1l[0] = vget_low_s16(*io0);
+  step1h[0] = vget_high_s16(*io0);
+  step1l[1] = vget_low_s16(*io2);
+  step1h[1] = vget_high_s16(*io2);
+  step1l[2] = vget_low_s16(*io4);
+  step1h[2] = vget_high_s16(*io4);
+  step1l[3] = vget_low_s16(*io6);
+  step1h[3] = vget_high_s16(*io6);
+
+  t32[0] = vmull_lane_s16(input_1l, cospis1, 3);
+  t32[1] = vmull_lane_s16(input_1h, cospis1, 3);
+  t32[2] = vmull_lane_s16(input_3l, cospis1, 2);
+  t32[3] = vmull_lane_s16(input_3h, cospis1, 2);
+  t32[4] = vmull_lane_s16(input_3l, cospis1, 1);
+  t32[5] = vmull_lane_s16(input_3h, cospis1, 1);
+  t32[6] = vmull_lane_s16(input_1l, cospis1, 0);
+  t32[7] = vmull_lane_s16(input_1h, cospis1, 0);
+  t32[0] = vmlsl_lane_s16(t32[0], input_7l, cospis1, 0);
+  t32[1] = vmlsl_lane_s16(t32[1], input_7h, cospis1, 0);
+  t32[2] = vmlal_lane_s16(t32[2], input_5l, cospis1, 1);
+  t32[3] = vmlal_lane_s16(t32[3], input_5h, cospis1, 1);
+  t32[4] = vmlsl_lane_s16(t32[4], input_5l, cospis1, 2);
+  t32[5] = vmlsl_lane_s16(t32[5], input_5h, cospis1, 2);
+  t32[6] = vmlal_lane_s16(t32[6], input_7l, cospis1, 3);
+  t32[7] = vmlal_lane_s16(t32[7], input_7h, cospis1, 3);
+  t16[0] = vrshrn_n_s32(t32[0], 14);
+  t16[1] = vrshrn_n_s32(t32[1], 14);
+  t16[2] = vrshrn_n_s32(t32[2], 14);
+  t16[3] = vrshrn_n_s32(t32[3], 14);
+  t16[4] = vrshrn_n_s32(t32[4], 14);
+  t16[5] = vrshrn_n_s32(t32[5], 14);
+  t16[6] = vrshrn_n_s32(t32[6], 14);
+  t16[7] = vrshrn_n_s32(t32[7], 14);
+  step1[4] = vcombine_s16(t16[0], t16[1]);
+  step1[5] = vcombine_s16(t16[2], t16[3]);
+  step1[6] = vcombine_s16(t16[4], t16[5]);
+  step1[7] = vcombine_s16(t16[6], t16[7]);
+
+  // stage 2
+  t32[2] = vmull_lane_s16(step1l[0], cospis0, 2);
+  t32[3] = vmull_lane_s16(step1h[0], cospis0, 2);
+  t32[4] = vmull_lane_s16(step1l[1], cospis0, 3);
+  t32[5] = vmull_lane_s16(step1h[1], cospis0, 3);
+  t32[6] = vmull_lane_s16(step1l[1], cospis0, 1);
+  t32[7] = vmull_lane_s16(step1h[1], cospis0, 1);
+  t32[0] = vmlal_lane_s16(t32[2], step1l[2], cospis0, 2);
+  t32[1] = vmlal_lane_s16(t32[3], step1h[2], cospis0, 2);
+  t32[2] = vmlsl_lane_s16(t32[2], step1l[2], cospis0, 2);
+  t32[3] = vmlsl_lane_s16(t32[3], step1h[2], cospis0, 2);
+  t32[4] = vmlsl_lane_s16(t32[4], step1l[3], cospis0, 1);
+  t32[5] = vmlsl_lane_s16(t32[5], step1h[3], cospis0, 1);
+  t32[6] = vmlal_lane_s16(t32[6], step1l[3], cospis0, 3);
+  t32[7] = vmlal_lane_s16(t32[7], step1h[3], cospis0, 3);
+  t16[0] = vrshrn_n_s32(t32[0], 14);
+  t16[1] = vrshrn_n_s32(t32[1], 14);
+  t16[2] = vrshrn_n_s32(t32[2], 14);
+  t16[3] = vrshrn_n_s32(t32[3], 14);
+  t16[4] = vrshrn_n_s32(t32[4], 14);
+  t16[5] = vrshrn_n_s32(t32[5], 14);
+  t16[6] = vrshrn_n_s32(t32[6], 14);
+  t16[7] = vrshrn_n_s32(t32[7], 14);
+  step2[0] = vcombine_s16(t16[0], t16[1]);
+  step2[1] = vcombine_s16(t16[2], t16[3]);
+  step2[2] = vcombine_s16(t16[4], t16[5]);
+  step2[3] = vcombine_s16(t16[6], t16[7]);
+
+  step2[4] = vaddq_s16(step1[4], step1[5]);
+  step2[5] = vsubq_s16(step1[4], step1[5]);
+  step2[6] = vsubq_s16(step1[7], step1[6]);
+  step2[7] = vaddq_s16(step1[7], step1[6]);
+
+  // stage 3
+  step1[0] = vaddq_s16(step2[0], step2[3]);
+  step1[1] = vaddq_s16(step2[1], step2[2]);
+  step1[2] = vsubq_s16(step2[1], step2[2]);
+  step1[3] = vsubq_s16(step2[0], step2[3]);
+
+  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
+  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
+  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
+  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
+  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
+  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
+  t16[0] = vrshrn_n_s32(t32[0], 14);
+  t16[1] = vrshrn_n_s32(t32[1], 14);
+  t16[2] = vrshrn_n_s32(t32[2], 14);
+  t16[3] = vrshrn_n_s32(t32[3], 14);
+  step1[5] = vcombine_s16(t16[0], t16[1]);
+  step1[6] = vcombine_s16(t16[2], t16[3]);
+
+  // stage 4
+  *io0 = vaddq_s16(step1[0], step2[7]);
+  *io1 = vaddq_s16(step1[1], step1[6]);
+  *io2 = vaddq_s16(step1[2], step1[5]);
+  *io3 = vaddq_s16(step1[3], step2[4]);
+  *io4 = vsubq_s16(step1[3], step2[4]);
+  *io5 = vsubq_s16(step1[2], step1[5]);
+  *io6 = vsubq_s16(step1[1], step1[6]);
+  *io7 = vsubq_s16(step1[0], step2[7]);
 }
 
 #endif  // VPX_DSP_ARM_IDCT_NEON_H_
--- a/vpx_dsp/arm/transpose_neon.h
+++ b/vpx_dsp/arm/transpose_neon.h
@@ -21,7 +21,7 @@
 //
 // b0.val[0]: 00 01 02 03 16 17 18 19
 // b0.val[1]: 04 05 06 07 20 21 22 23
-static INLINE int16x8x2_t vpx_vtrnq_s64(int32x4_t a0, int32x4_t a1) {
+static INLINE int16x8x2_t vpx_vtrnq_s64_to_s16(int32x4_t a0, int32x4_t a1) {
   int16x8x2_t b0;
   b0.val[0] = vcombine_s16(vreinterpret_s16_s32(vget_low_s32(a0)),
                            vreinterpret_s16_s32(vget_low_s32(a1)));
@@ -37,7 +37,16 @@
   return b0;
 }
 
-static INLINE uint8x16x2_t vpx_vtrnq_u64(uint32x4_t a0, uint32x4_t a1) {
+static INLINE int64x2x2_t vpx_vtrnq_s64(int32x4_t a0, int32x4_t a1) {
+  int64x2x2_t b0;
+  b0.val[0] = vcombine_s64(vreinterpret_s64_s32(vget_low_s32(a0)),
+                           vreinterpret_s64_s32(vget_low_s32(a1)));
+  b0.val[1] = vcombine_s64(vreinterpret_s64_s32(vget_high_s32(a0)),
+                           vreinterpret_s64_s32(vget_high_s32(a1)));
+  return b0;
+}
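
Rename note: the old vpx_vtrnq_s64 actually returned int16x8x2_t, so it
becomes vpx_vtrnq_s64_to_s16 (likewise vpx_vtrnq_u64 -> vpx_vtrnq_u64_to_u8),
freeing the name for this genuine 64-bit transpose that the new 32-bit block
transposes below are built on. A standalone check of what it computes (assumes
an ARM toolchain with vpx_dsp/arm/transpose_neon.h on the include path):

#include <stdio.h>
#include "vpx_dsp/arm/transpose_neon.h"

int main(void) {
  const int32_t a[4] = { 0, 1, 2, 3 }, b[4] = { 4, 5, 6, 7 };
  const int64x2x2_t t = vpx_vtrnq_s64(vld1q_s32(a), vld1q_s32(b));
  int32_t out[8];
  int i;
  vst1q_s32(out + 0, vreinterpretq_s32_s64(t.val[0]));  /* 0 1 4 5 */
  vst1q_s32(out + 4, vreinterpretq_s32_s64(t.val[1]));  /* 2 3 6 7 */
  for (i = 0; i < 8; ++i) printf("%d ", out[i]);
  return 0;
}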
+
+static INLINE uint8x16x2_t vpx_vtrnq_u64_to_u8(uint32x4_t a0, uint32x4_t a1) {
   uint8x16x2_t b0;
   b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)),
                           vreinterpret_u8_u32(vget_low_u32(a1)));
@@ -270,8 +279,8 @@
                                      const int16x4_t a2, const int16x4_t a3,
                                      const int16x4_t a4, const int16x4_t a5,
                                      const int16x4_t a6, const int16x4_t a7,
-                                     int16x8_t *o0, int16x8_t *o1,
-                                     int16x8_t *o2, int16x8_t *o3) {
+                                     int16x8_t *const o0, int16x8_t *const o1,
+                                     int16x8_t *const o2, int16x8_t *const o3) {
   // Swap 16 bit elements. Goes from:
   // a0: 00 01 02 03
   // a1: 10 11 12 13
@@ -331,6 +340,59 @@
                      vreinterpret_s16_s32(c3.val[1]));
 }
 
+static INLINE void transpose_s32_4x8(int32x4_t *const a0, int32x4_t *const a1,
+                                     int32x4_t *const a2, int32x4_t *const a3,
+                                     int32x4_t *const a4, int32x4_t *const a5,
+                                     int32x4_t *const a6, int32x4_t *const a7) {
+  // Swap 32 bit elements. Goes from:
+  // a0: 00 01 02 03
+  // a1: 10 11 12 13
+  // a2: 20 21 22 23
+  // a3: 30 31 32 33
+  // a4: 40 41 42 43
+  // a5: 50 51 52 53
+  // a6: 60 61 62 63
+  // a7: 70 71 72 73
+  // to:
+  // b0.val[0]: 00 10 02 12
+  // b0.val[1]: 01 11 03 13
+  // b1.val[0]: 20 30 22 32
+  // b1.val[1]: 21 31 23 33
+  // b2.val[0]: 40 50 42 52
+  // b2.val[1]: 41 51 43 53
+  // b3.val[0]: 60 70 62 72
+  // b3.val[1]: 61 71 63 73
+
+  const int32x4x2_t b0 = vtrnq_s32(*a0, *a1);
+  const int32x4x2_t b1 = vtrnq_s32(*a2, *a3);
+  const int32x4x2_t b2 = vtrnq_s32(*a4, *a5);
+  const int32x4x2_t b3 = vtrnq_s32(*a6, *a7);
+
+  // Swap 64 bit elements resulting in:
+  // c0.val[0]: 00 10 20 30
+  // c0.val[1]: 02 12 22 32
+  // c1.val[0]: 01 11 21 31
+  // c1.val[1]: 03 13 23 33
+  // c2.val[0]: 40 50 60 70
+  // c2.val[1]: 42 52 62 72
+  // c3.val[0]: 41 51 61 71
+  // c3.val[1]: 43 53 63 73
+
+  const int64x2x2_t c0 = vpx_vtrnq_s64(b0.val[0], b1.val[0]);
+  const int64x2x2_t c1 = vpx_vtrnq_s64(b0.val[1], b1.val[1]);
+  const int64x2x2_t c2 = vpx_vtrnq_s64(b2.val[0], b3.val[0]);
+  const int64x2x2_t c3 = vpx_vtrnq_s64(b2.val[1], b3.val[1]);
+
+  *a0 = vreinterpretq_s32_s64(c0.val[0]);
+  *a1 = vreinterpretq_s32_s64(c2.val[0]);
+  *a2 = vreinterpretq_s32_s64(c1.val[0]);
+  *a3 = vreinterpretq_s32_s64(c3.val[0]);
+  *a4 = vreinterpretq_s32_s64(c0.val[1]);
+  *a5 = vreinterpretq_s32_s64(c2.val[1]);
+  *a6 = vreinterpretq_s32_s64(c1.val[1]);
+  *a7 = vreinterpretq_s32_s64(c3.val[1]);
+}
+
 static INLINE void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
                                     uint8x8_t *a3) {
   // Swap 8 bit elements. Goes from:
@@ -397,6 +459,59 @@
   *a3 = vreinterpretq_u16_u32(c1.val[1]);
 }
 
+static INLINE void transpose_s32_8x4(int32x4_t *const a0, int32x4_t *const a1,
+                                     int32x4_t *const a2, int32x4_t *const a3,
+                                     int32x4_t *const a4, int32x4_t *const a5,
+                                     int32x4_t *const a6, int32x4_t *const a7) {
+  // Swap 32 bit elements. Goes from:
+  // a0: 00 01 02 03
+  // a1: 04 05 06 07
+  // a2: 10 11 12 13
+  // a3: 14 15 16 17
+  // a4: 20 21 22 23
+  // a5: 24 25 26 27
+  // a6: 30 31 32 33
+  // a7: 34 35 36 37
+  // to:
+  // b0.val[0]: 00 10 02 12
+  // b0.val[1]: 01 11 03 13
+  // b1.val[0]: 04 14 06 16
+  // b1.val[1]: 05 15 07 17
+  // b2.val[0]: 20 30 22 32
+  // b2.val[1]: 21 31 23 33
+  // b3.val[0]: 24 34 26 36
+  // b3.val[1]: 25 35 27 37
+
+  const int32x4x2_t b0 = vtrnq_s32(*a0, *a2);
+  const int32x4x2_t b1 = vtrnq_s32(*a1, *a3);
+  const int32x4x2_t b2 = vtrnq_s32(*a4, *a6);
+  const int32x4x2_t b3 = vtrnq_s32(*a5, *a7);
+
+  // Swap 64 bit elements resulting in:
+  // c0.val[0]: 00 10 20 30
+  // c0.val[1]: 02 12 22 32
+  // c1.val[0]: 01 11 21 31
+  // c1.val[1]: 03 13 23 33
+  // c2.val[0]: 04 14 24 34
+  // c2.val[1]: 06 16 26 36
+  // c3.val[0]: 05 15 25 35
+  // c3.val[1]: 07 17 27 37
+
+  const int64x2x2_t c0 = vpx_vtrnq_s64(b0.val[0], b2.val[0]);
+  const int64x2x2_t c1 = vpx_vtrnq_s64(b0.val[1], b2.val[1]);
+  const int64x2x2_t c2 = vpx_vtrnq_s64(b1.val[0], b3.val[0]);
+  const int64x2x2_t c3 = vpx_vtrnq_s64(b1.val[1], b3.val[1]);
+
+  *a0 = vreinterpretq_s32_s64(c0.val[0]);
+  *a1 = vreinterpretq_s32_s64(c1.val[0]);
+  *a2 = vreinterpretq_s32_s64(c0.val[1]);
+  *a3 = vreinterpretq_s32_s64(c1.val[1]);
+  *a4 = vreinterpretq_s32_s64(c2.val[0]);
+  *a5 = vreinterpretq_s32_s64(c3.val[0]);
+  *a6 = vreinterpretq_s32_s64(c2.val[1]);
+  *a7 = vreinterpretq_s32_s64(c3.val[1]);
+}
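
transpose_s32_4x8 and transpose_s32_8x4 are the two rectangular transposes the
32-bit idct needs: a row of eight int32 spans two q registers, so the 8x8
transpose is handled in 4-wide slices. transpose_s32_8x4 treats the pairs
(a0,a1) ... (a6,a7) as four 8-wide rows and returns eight 4-wide columns. A
scalar reference for checking it (my sketch):

/* in[r][c] is the 4x8 matrix whose row r is stored across the register
 * pair (a(2r), a(2r+1)); out[c] is what the vector version leaves in
 * a0..a7. */
static void transpose_4x8_ref(const int32_t in[4][8], int32_t out[8][4]) {
  int r, c;
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 8; ++c) out[c][r] = in[r][c];
}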
+
 // Note: Using 'd' registers or 'q' registers has almost identical speed. We use
 // 'q' registers here to save some instructions.
 static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
@@ -509,10 +624,10 @@
   // d2.val[1]: 06 16 26 36 46 56 66 76
   // d3.val[0]: 03 13 23 33 43 53 63 73
   // d3.val[1]: 07 17 27 37 47 57 67 77
-  const int16x8x2_t d0 = vpx_vtrnq_s64(c0.val[0], c2.val[0]);
-  const int16x8x2_t d1 = vpx_vtrnq_s64(c1.val[0], c3.val[0]);
-  const int16x8x2_t d2 = vpx_vtrnq_s64(c0.val[1], c2.val[1]);
-  const int16x8x2_t d3 = vpx_vtrnq_s64(c1.val[1], c3.val[1]);
+  const int16x8x2_t d0 = vpx_vtrnq_s64_to_s16(c0.val[0], c2.val[0]);
+  const int16x8x2_t d1 = vpx_vtrnq_s64_to_s16(c1.val[0], c3.val[0]);
+  const int16x8x2_t d2 = vpx_vtrnq_s64_to_s16(c0.val[1], c2.val[1]);
+  const int16x8x2_t d3 = vpx_vtrnq_s64_to_s16(c1.val[1], c3.val[1]);
 
   *a0 = d0.val[0];
   *a1 = d1.val[0];
@@ -948,14 +1063,14 @@
   // e6.val[1]: 0E 1E 2E 3E 4E 5E 6E 7E  8E 9E AE BE CE DE EE FE
   // e7.val[0]: 07 17 27 37 47 57 67 77  87 97 A7 B7 C7 D7 E7 F7
   // e7.val[1]: 0F 1F 2F 3F 4F 5F 6F 7F  8F 9F AF BF CF DF EF FF
-  const uint8x16x2_t e0 = vpx_vtrnq_u64(d0.val[0], d4.val[0]);
-  const uint8x16x2_t e1 = vpx_vtrnq_u64(d2.val[0], d6.val[0]);
-  const uint8x16x2_t e2 = vpx_vtrnq_u64(d1.val[0], d5.val[0]);
-  const uint8x16x2_t e3 = vpx_vtrnq_u64(d3.val[0], d7.val[0]);
-  const uint8x16x2_t e4 = vpx_vtrnq_u64(d0.val[1], d4.val[1]);
-  const uint8x16x2_t e5 = vpx_vtrnq_u64(d2.val[1], d6.val[1]);
-  const uint8x16x2_t e6 = vpx_vtrnq_u64(d1.val[1], d5.val[1]);
-  const uint8x16x2_t e7 = vpx_vtrnq_u64(d3.val[1], d7.val[1]);
+  const uint8x16x2_t e0 = vpx_vtrnq_u64_to_u8(d0.val[0], d4.val[0]);
+  const uint8x16x2_t e1 = vpx_vtrnq_u64_to_u8(d2.val[0], d6.val[0]);
+  const uint8x16x2_t e2 = vpx_vtrnq_u64_to_u8(d1.val[0], d5.val[0]);
+  const uint8x16x2_t e3 = vpx_vtrnq_u64_to_u8(d3.val[0], d7.val[0]);
+  const uint8x16x2_t e4 = vpx_vtrnq_u64_to_u8(d0.val[1], d4.val[1]);
+  const uint8x16x2_t e5 = vpx_vtrnq_u64_to_u8(d2.val[1], d6.val[1]);
+  const uint8x16x2_t e6 = vpx_vtrnq_u64_to_u8(d1.val[1], d5.val[1]);
+  const uint8x16x2_t e7 = vpx_vtrnq_u64_to_u8(d3.val[1], d7.val[1]);
 
   // Output:
   // o0 : 00 10 20 30 40 50 60 70  80 90 A0 B0 C0 D0 E0 F0
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -216,6 +216,7 @@
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
 else  # CONFIG_VP9_HIGHBITDEPTH
 DSP_SRCS-$(HAVE_NEON)  += arm/highbd_idct4x4_add_neon.c
+DSP_SRCS-$(HAVE_NEON)  += arm/highbd_idct8x8_add_neon.c
 endif  # !CONFIG_VP9_HIGHBITDEPTH
 
 ifeq ($(HAVE_NEON_ASM),yes)
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -621,6 +621,7 @@
   specialize qw/vpx_highbd_idct4x4_1_add neon/;
 
   add_proto qw/void vpx_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
+  specialize qw/vpx_highbd_idct8x8_1_add neon/;
 
   add_proto qw/void vpx_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
 
@@ -713,10 +714,10 @@
     specialize qw/vpx_highbd_idct4x4_16_add neon sse2/;
 
     add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_64_add sse2/;
+    specialize qw/vpx_highbd_idct8x8_64_add neon sse2/;
 
     add_proto qw/void vpx_highbd_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
-    specialize qw/vpx_highbd_idct8x8_12_add sse2/;
+    specialize qw/vpx_highbd_idct8x8_12_add neon sse2/;
 
     add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
     specialize qw/vpx_highbd_idct16x16_256_add sse2/;
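
For context: add_proto declares a function's prototype for the rtcd (run-time
CPU detection) generator, and specialize lists the architecture-specific
implementations it may select. Roughly what the generated vpx_dsp_rtcd.h setup
does with the lines above (simplified sketch, not actual generated code;
HAS_NEON and arm_cpu_caps() from vpx_ports/arm.h):

void (*vpx_highbd_idct8x8_64_add)(const tran_low_t *input, uint8_t *dest,
                                  int stride, int bd);

static void setup_rtcd_internal(void) {
  const int flags = arm_cpu_caps();
  vpx_highbd_idct8x8_64_add = vpx_highbd_idct8x8_64_add_c;
  if (flags & HAS_NEON)
    vpx_highbd_idct8x8_64_add = vpx_highbd_idct8x8_64_add_neon;
}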