ref: de9ae32b93d2d4ff67895dde7cda9828e08f3ef3
parent: 4526ec790737b5098e6cea17c7cffc34a50134fc
parent: 5ad4159ebb7d195d93bace18b1292d2f7032487a
author: Linfeng Zhang <linfengz@google.com>
date: Mon Feb 13 20:15:34 EST 2017

Merge "Add vpx_highbd_idct16x16_256_add_neon()"
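
The patch adds a NEON implementation of the high-bitdepth 16x16 inverse
DCT for blocks with up to 256 (i.e. all) nonzero coefficients. High
bitdepth coefficients are 32 bits wide, so the new code works on
int32x4x2_t rows, keeps products in 64-bit accumulators, and narrows each
result with a rounding shift. A minimal scalar sketch of the primitive the
new helpers vectorize, assuming the usual dct_const_round_shift()
definition from vpx_dsp (butterfly_rotation is our name, for illustration
only):

    #include <stdint.h>

    #define DCT_CONST_BITS 14

    static int32_t dct_const_round_shift(int64_t input) {
      /* Round-to-nearest shift: the scalar twin of vrshrn_n_s64(x, 14). */
      return (int32_t)((input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* One stage rotation: d0 = s0*c0 - s1*c1, d1 = s0*c1 + s1*c0. The NEON
     * helpers in the patch compute this on eight lanes at a time with
     * vmull/vmlal/vmlsl_lane_s32 into int64x2_t accumulators. */
    static void butterfly_rotation(int32_t s0, int32_t s1, int32_t c0,
                                   int32_t c1, int32_t *d0, int32_t *d1) {
      *d0 = dct_const_round_shift((int64_t)s0 * c0 - (int64_t)s1 * c1);
      *d1 = dct_const_round_shift((int64_t)s0 * c1 + (int64_t)s1 * c0);
    }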

--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -461,6 +461,15 @@
       &highbd_wrapper<vpx_highbd_idct32x32_1_add_neon>, TX_32X32, 1, 12, 2),
   make_tuple(
       &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
+      &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 8, 2),
+  make_tuple(
+      &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
+      &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 10, 2),
+  make_tuple(
+      &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
+      &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 12, 2),
+  make_tuple(
+      &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
       &highbd_wrapper<vpx_highbd_idct16x16_1_add_neon>, TX_16X16, 1, 8, 2),
   make_tuple(
       &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
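
(Reading the tuple fields as forward transform, reference inverse, inverse
under test, transform size, nonzero coefficient count, bit depth, and bytes
per pixel, per this test's parameter list: the three new entries exercise
the NEON kernel against the C reference at bit depths 8, 10 and 12 with all
256 coefficients populated, alongside the existing DC-only cases.)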
--- a/vpx_dsp/arm/highbd_idct16x16_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct16x16_add_neon.c
@@ -14,17 +14,775 @@
 #include "vpx_dsp/arm/idct_neon.h"
 #include "vpx_dsp/inv_txfm.h"
 
+static INLINE void highbd_idct16x16_add_wrap_low_8x2(const int64x2x2_t *const t,
+                                                     int32x4x2_t *const d0,
+                                                     int32x4x2_t *const d1) {
+  int32x2x2_t t32[4];
+
+  t32[0].val[0] = vrshrn_n_s64(t[0].val[0], 14);
+  t32[0].val[1] = vrshrn_n_s64(t[0].val[1], 14);
+  t32[1].val[0] = vrshrn_n_s64(t[1].val[0], 14);
+  t32[1].val[1] = vrshrn_n_s64(t[1].val[1], 14);
+  t32[2].val[0] = vrshrn_n_s64(t[2].val[0], 14);
+  t32[2].val[1] = vrshrn_n_s64(t[2].val[1], 14);
+  t32[3].val[0] = vrshrn_n_s64(t[3].val[0], 14);
+  t32[3].val[1] = vrshrn_n_s64(t[3].val[1], 14);
+  d0->val[0] = vcombine_s32(t32[0].val[0], t32[0].val[1]);
+  d0->val[1] = vcombine_s32(t32[1].val[0], t32[1].val[1]);
+  d1->val[0] = vcombine_s32(t32[2].val[0], t32[2].val[1]);
+  d1->val[1] = vcombine_s32(t32[3].val[0], t32[3].val[1]);
+}
+
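+// Each highbd_idct_cospi_{a}_{b} helper below performs one stage rotation of
+// the C reference idct16_c:
+//   d0 = dct_const_round_shift(s0 * c0 - s1 * c1)
+//   d1 = dct_const_round_shift(s0 * c1 + s1 * c0)
+// where c0 and c1 are the two named cospi_*_64 constants, selected by lane
+// from the kCospi32 vectors. Products stay in 64-bit accumulators and are
+// narrowed with a rounding shift by highbd_idct16x16_add_wrap_low_8x2().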
+static INLINE void highbd_idct_cospi_2_30(const int32x4x2_t s0,
+                                          const int32x4x2_t s1,
+                                          const int32x4_t cospi_2_30_10_22,
+                                          int32x4x2_t *const d0,
+                                          int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 1);
+  t[0].val[0] = vmlsl_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[0].val[1] = vmlsl_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[1].val[0] = vmlsl_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[1].val[1] = vmlsl_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[2].val[0] = vmlal_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[2].val[1] = vmlal_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[3].val[0] = vmlal_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  t[3].val[1] = vmlal_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_2_30_10_22), 0);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_4_28(const int32x4x2_t s0,
+                                          const int32x4x2_t s1,
+                                          const int32x4_t cospi_4_12_20N_28,
+                                          int32x4x2_t *const d0,
+                                          int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 1);
+  t[0].val[0] = vmlsl_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[0].val[1] = vmlsl_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[1].val[0] = vmlsl_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[1].val[1] = vmlsl_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[2].val[0] = vmlal_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[2].val[1] = vmlal_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[3].val[0] = vmlal_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  t[3].val[1] = vmlal_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 0);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_6_26(const int32x4x2_t s0,
+                                          const int32x4x2_t s1,
+                                          const int32x4_t cospi_6_26_14_18N,
+                                          int32x4x2_t *const d0,
+                                          int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 0);
+  t[0].val[0] = vmlal_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[0].val[1] = vmlal_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[1].val[0] = vmlal_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[1].val[1] = vmlal_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[2].val[0] = vmlsl_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[2].val[1] = vmlsl_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[3].val[0] = vmlsl_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  t[3].val[1] = vmlsl_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_6_26_14_18N), 1);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_10_22(const int32x4x2_t s0,
+                                           const int32x4x2_t s1,
+                                           const int32x4_t cospi_2_30_10_22,
+                                           int32x4x2_t *const d0,
+                                           int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 1);
+  t[0].val[0] = vmlsl_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[0].val[1] = vmlsl_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[1].val[0] = vmlsl_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[1].val[1] = vmlsl_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[2].val[0] = vmlal_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[2].val[1] = vmlal_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[3].val[0] = vmlal_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  t[3].val[1] = vmlal_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_2_30_10_22), 0);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_12_20(const int32x4x2_t s0,
+                                           const int32x4x2_t s1,
+                                           const int32x4_t cospi_4_12_20N_28,
+                                           int32x4x2_t *const d0,
+                                           int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_4_12_20N_28), 1);
+  t[0].val[0] = vmlal_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[0].val[1] = vmlal_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[1].val[0] = vmlal_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[1].val[1] = vmlal_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[2].val[0] = vmlsl_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[2].val[1] = vmlsl_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[3].val[0] = vmlsl_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  t[3].val[1] = vmlsl_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_4_12_20N_28), 0);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_14_18(const int32x4x2_t s0,
+                                           const int32x4x2_t s1,
+                                           const int32x4_t cospi_6_26_14_18N,
+                                           int32x4x2_t *const d0,
+                                           int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 0);
+  t[0].val[0] = vmlal_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[0].val[1] = vmlal_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[1].val[0] = vmlal_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[1].val[1] = vmlal_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[2].val[0] = vmlsl_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[2].val[1] = vmlsl_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[3].val[0] = vmlsl_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  t[3].val[1] = vmlsl_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_6_26_14_18N), 1);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct_cospi_8_24_q_kernel(
+    const int32x4x2_t s0, const int32x4x2_t s1, const int32x4_t cospi_0_8_16_24,
+    int64x2x2_t *const t) {
+  t[0].val[0] = vmull_lane_s32(vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[0].val[1] = vmull_lane_s32(vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[1].val[0] = vmull_lane_s32(vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[1].val[1] = vmull_lane_s32(vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[2].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[2].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[3].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[3].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 1);
+  t[0].val[0] = vmlsl_lane_s32(t[0].val[0], vget_low_s32(s1.val[0]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[0].val[1] = vmlsl_lane_s32(t[0].val[1], vget_high_s32(s1.val[0]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[1].val[0] = vmlsl_lane_s32(t[1].val[0], vget_low_s32(s1.val[1]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[1].val[1] = vmlsl_lane_s32(t[1].val[1], vget_high_s32(s1.val[1]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[2].val[0] = vmlal_lane_s32(t[2].val[0], vget_low_s32(s0.val[0]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[2].val[1] = vmlal_lane_s32(t[2].val[1], vget_high_s32(s0.val[0]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[3].val[0] = vmlal_lane_s32(t[3].val[0], vget_low_s32(s0.val[1]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+  t[3].val[1] = vmlal_lane_s32(t[3].val[1], vget_high_s32(s0.val[1]),
+                               vget_low_s32(cospi_0_8_16_24), 1);
+}
+
+static INLINE void highbd_idct_cospi_8_24_q(const int32x4x2_t s0,
+                                            const int32x4x2_t s1,
+                                            const int32x4_t cospi_0_8_16_24,
+                                            int32x4x2_t *const d0,
+                                            int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  highbd_idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
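+// Same as highbd_idct_cospi_8_24_q(), but the second result pair is negated
+// before the rounding shift, covering the stage 4 rotation whose C form
+// negates both of its second-output products.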
+static INLINE void highbd_idct_cospi_8_24_neg_q(const int32x4x2_t s0,
+                                                const int32x4x2_t s1,
+                                                const int32x4_t cospi_0_8_16_24,
+                                                int32x4x2_t *const d0,
+                                                int32x4x2_t *const d1) {
+  int64x2x2_t t[4];
+
+  highbd_idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t);
+  t[2].val[0] = vsubq_s64(vdupq_n_s64(0), t[2].val[0]);
+  t[2].val[1] = vsubq_s64(vdupq_n_s64(0), t[2].val[1]);
+  t[3].val[0] = vsubq_s64(vdupq_n_s64(0), t[3].val[0]);
+  t[3].val[1] = vsubq_s64(vdupq_n_s64(0), t[3].val[1]);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
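+// d0 = dct_const_round_shift((s1 - s0) * cospi_16_64)
+// d1 = dct_const_round_shift((s1 + s0) * cospi_16_64)
+// The s1 products are computed once into t[4]/t[5] and shared between both
+// outputs, saving four multiplies per call.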
+static INLINE void highbd_idct_cospi_16_16_q(const int32x4x2_t s0,
+                                             const int32x4x2_t s1,
+                                             const int32x4_t cospi_0_8_16_24,
+                                             int32x4x2_t *const d0,
+                                             int32x4x2_t *const d1) {
+  int64x2x2_t t[6];
+
+  t[4].val[0] = vmull_lane_s32(vget_low_s32(s1.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[4].val[1] = vmull_lane_s32(vget_high_s32(s1.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[5].val[0] = vmull_lane_s32(vget_low_s32(s1.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[5].val[1] = vmull_lane_s32(vget_high_s32(s1.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[0].val[0] = vmlsl_lane_s32(t[4].val[0], vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[0].val[1] = vmlsl_lane_s32(t[4].val[1], vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[1].val[0] = vmlsl_lane_s32(t[5].val[0], vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[1].val[1] = vmlsl_lane_s32(t[5].val[1], vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[2].val[0] = vmlal_lane_s32(t[4].val[0], vget_low_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[2].val[1] = vmlal_lane_s32(t[4].val[1], vget_high_s32(s0.val[0]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[3].val[0] = vmlal_lane_s32(t[5].val[0], vget_low_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  t[3].val[1] = vmlal_lane_s32(t[5].val[1], vget_high_s32(s0.val[1]),
+                               vget_high_s32(cospi_0_8_16_24), 0);
+  highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
+}
+
+static INLINE void highbd_idct16x16_add_stage7(const int32x4x2_t *const step2,
+                                               int32x4x2_t *const out) {
+  out[0].val[0] = vaddq_s32(step2[0].val[0], step2[15].val[0]);
+  out[0].val[1] = vaddq_s32(step2[0].val[1], step2[15].val[1]);
+  out[1].val[0] = vaddq_s32(step2[1].val[0], step2[14].val[0]);
+  out[1].val[1] = vaddq_s32(step2[1].val[1], step2[14].val[1]);
+  out[2].val[0] = vaddq_s32(step2[2].val[0], step2[13].val[0]);
+  out[2].val[1] = vaddq_s32(step2[2].val[1], step2[13].val[1]);
+  out[3].val[0] = vaddq_s32(step2[3].val[0], step2[12].val[0]);
+  out[3].val[1] = vaddq_s32(step2[3].val[1], step2[12].val[1]);
+  out[4].val[0] = vaddq_s32(step2[4].val[0], step2[11].val[0]);
+  out[4].val[1] = vaddq_s32(step2[4].val[1], step2[11].val[1]);
+  out[5].val[0] = vaddq_s32(step2[5].val[0], step2[10].val[0]);
+  out[5].val[1] = vaddq_s32(step2[5].val[1], step2[10].val[1]);
+  out[6].val[0] = vaddq_s32(step2[6].val[0], step2[9].val[0]);
+  out[6].val[1] = vaddq_s32(step2[6].val[1], step2[9].val[1]);
+  out[7].val[0] = vaddq_s32(step2[7].val[0], step2[8].val[0]);
+  out[7].val[1] = vaddq_s32(step2[7].val[1], step2[8].val[1]);
+  out[8].val[0] = vsubq_s32(step2[7].val[0], step2[8].val[0]);
+  out[8].val[1] = vsubq_s32(step2[7].val[1], step2[8].val[1]);
+  out[9].val[0] = vsubq_s32(step2[6].val[0], step2[9].val[0]);
+  out[9].val[1] = vsubq_s32(step2[6].val[1], step2[9].val[1]);
+  out[10].val[0] = vsubq_s32(step2[5].val[0], step2[10].val[0]);
+  out[10].val[1] = vsubq_s32(step2[5].val[1], step2[10].val[1]);
+  out[11].val[0] = vsubq_s32(step2[4].val[0], step2[11].val[0]);
+  out[11].val[1] = vsubq_s32(step2[4].val[1], step2[11].val[1]);
+  out[12].val[0] = vsubq_s32(step2[3].val[0], step2[12].val[0]);
+  out[12].val[1] = vsubq_s32(step2[3].val[1], step2[12].val[1]);
+  out[13].val[0] = vsubq_s32(step2[2].val[0], step2[13].val[0]);
+  out[13].val[1] = vsubq_s32(step2[2].val[1], step2[13].val[1]);
+  out[14].val[0] = vsubq_s32(step2[1].val[0], step2[14].val[0]);
+  out[14].val[1] = vsubq_s32(step2[1].val[1], step2[14].val[1]);
+  out[15].val[0] = vsubq_s32(step2[0].val[0], step2[15].val[0]);
+  out[15].val[1] = vsubq_s32(step2[0].val[1], step2[15].val[1]);
+}
+
+static void highbd_idct16x16_256_add_half1d(const int32_t *input,
+                                            int32_t *output, uint16_t *dest,
+                                            const int stride, const int bd) {
+  const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
+  const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
+  const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
+  const int32x4_t cospi_6_26_14_18N = vld1q_s32(kCospi32 + 12);
+  int32x4x2_t in[16], step1[16], step2[16], out[16];
+
+  // Load input (16x8)
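+  // Even rows go to in[0..7], odd rows to in[8..15], so each 8x8 transpose
+  // below works on rows of one parity and stage 1 can index with x / 2.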
+  in[0].val[0] = vld1q_s32(input);
+  in[0].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[8].val[0] = vld1q_s32(input);
+  in[8].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[1].val[0] = vld1q_s32(input);
+  in[1].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[9].val[0] = vld1q_s32(input);
+  in[9].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[2].val[0] = vld1q_s32(input);
+  in[2].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[10].val[0] = vld1q_s32(input);
+  in[10].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[3].val[0] = vld1q_s32(input);
+  in[3].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[11].val[0] = vld1q_s32(input);
+  in[11].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[4].val[0] = vld1q_s32(input);
+  in[4].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[12].val[0] = vld1q_s32(input);
+  in[12].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[5].val[0] = vld1q_s32(input);
+  in[5].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[13].val[0] = vld1q_s32(input);
+  in[13].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[6].val[0] = vld1q_s32(input);
+  in[6].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[14].val[0] = vld1q_s32(input);
+  in[14].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[7].val[0] = vld1q_s32(input);
+  in[7].val[1] = vld1q_s32(input + 4);
+  input += 8;
+  in[15].val[0] = vld1q_s32(input);
+  in[15].val[1] = vld1q_s32(input + 4);
+
+  // Transpose
+  transpose_s32_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
+                    &in[7]);
+  transpose_s32_8x8(&in[8], &in[9], &in[10], &in[11], &in[12], &in[13], &in[14],
+                    &in[15]);
+
+  // stage 1
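+  // in[x / 2] reproduces idct16_c's input reordering: step1[0..7] read
+  // inputs 0, 8, 4, 12, 2, 10, 6, 14; step1[8..15] read inputs
+  // 1, 9, 5, 13, 3, 11, 7, 15.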
+  step1[0] = in[0 / 2];
+  step1[1] = in[16 / 2];
+  step1[2] = in[8 / 2];
+  step1[3] = in[24 / 2];
+  step1[4] = in[4 / 2];
+  step1[5] = in[20 / 2];
+  step1[6] = in[12 / 2];
+  step1[7] = in[28 / 2];
+  step1[8] = in[2 / 2];
+  step1[9] = in[18 / 2];
+  step1[10] = in[10 / 2];
+  step1[11] = in[26 / 2];
+  step1[12] = in[6 / 2];
+  step1[13] = in[22 / 2];
+  step1[14] = in[14 / 2];
+  step1[15] = in[30 / 2];
+
+  // stage 2
+  step2[0] = step1[0];
+  step2[1] = step1[1];
+  step2[2] = step1[2];
+  step2[3] = step1[3];
+  step2[4] = step1[4];
+  step2[5] = step1[5];
+  step2[6] = step1[6];
+  step2[7] = step1[7];
+  highbd_idct_cospi_2_30(step1[8], step1[15], cospi_2_30_10_22, &step2[8],
+                         &step2[15]);
+  highbd_idct_cospi_14_18(step1[9], step1[14], cospi_6_26_14_18N, &step2[9],
+                          &step2[14]);
+  highbd_idct_cospi_10_22(step1[10], step1[13], cospi_2_30_10_22, &step2[10],
+                          &step2[13]);
+  highbd_idct_cospi_6_26(step1[11], step1[12], cospi_6_26_14_18N, &step2[11],
+                         &step2[12]);
+
+  // stage 3
+  step1[0] = step2[0];
+  step1[1] = step2[1];
+  step1[2] = step2[2];
+  step1[3] = step2[3];
+  highbd_idct_cospi_4_28(step2[4], step2[7], cospi_4_12_20N_28, &step1[4],
+                         &step1[7]);
+  highbd_idct_cospi_12_20(step2[5], step2[6], cospi_4_12_20N_28, &step1[5],
+                          &step1[6]);
+  step1[8].val[0] = vaddq_s32(step2[8].val[0], step2[9].val[0]);
+  step1[8].val[1] = vaddq_s32(step2[8].val[1], step2[9].val[1]);
+  step1[9].val[0] = vsubq_s32(step2[8].val[0], step2[9].val[0]);
+  step1[9].val[1] = vsubq_s32(step2[8].val[1], step2[9].val[1]);
+  step1[10].val[0] = vsubq_s32(step2[11].val[0], step2[10].val[0]);
+  step1[10].val[1] = vsubq_s32(step2[11].val[1], step2[10].val[1]);
+  step1[11].val[0] = vaddq_s32(step2[11].val[0], step2[10].val[0]);
+  step1[11].val[1] = vaddq_s32(step2[11].val[1], step2[10].val[1]);
+  step1[12].val[0] = vaddq_s32(step2[12].val[0], step2[13].val[0]);
+  step1[12].val[1] = vaddq_s32(step2[12].val[1], step2[13].val[1]);
+  step1[13].val[0] = vsubq_s32(step2[12].val[0], step2[13].val[0]);
+  step1[13].val[1] = vsubq_s32(step2[12].val[1], step2[13].val[1]);
+  step1[14].val[0] = vsubq_s32(step2[15].val[0], step2[14].val[0]);
+  step1[14].val[1] = vsubq_s32(step2[15].val[1], step2[14].val[1]);
+  step1[15].val[0] = vaddq_s32(step2[15].val[0], step2[14].val[0]);
+  step1[15].val[1] = vaddq_s32(step2[15].val[1], step2[14].val[1]);
+
+  // stage 4
+  highbd_idct_cospi_16_16_q(step1[1], step1[0], cospi_0_8_16_24, &step2[1],
+                            &step2[0]);
+  highbd_idct_cospi_8_24_q(step1[2], step1[3], cospi_0_8_16_24, &step2[2],
+                           &step2[3]);
+  step2[4].val[0] = vaddq_s32(step1[4].val[0], step1[5].val[0]);
+  step2[4].val[1] = vaddq_s32(step1[4].val[1], step1[5].val[1]);
+  step2[5].val[0] = vsubq_s32(step1[4].val[0], step1[5].val[0]);
+  step2[5].val[1] = vsubq_s32(step1[4].val[1], step1[5].val[1]);
+  step2[6].val[0] = vsubq_s32(step1[7].val[0], step1[6].val[0]);
+  step2[6].val[1] = vsubq_s32(step1[7].val[1], step1[6].val[1]);
+  step2[7].val[0] = vaddq_s32(step1[7].val[0], step1[6].val[0]);
+  step2[7].val[1] = vaddq_s32(step1[7].val[1], step1[6].val[1]);
+  step2[8] = step1[8];
+  highbd_idct_cospi_8_24_q(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
+                           &step2[14]);
+  highbd_idct_cospi_8_24_neg_q(step1[13], step1[10], cospi_0_8_16_24,
+                               &step2[13], &step2[10]);
+  step2[11] = step1[11];
+  step2[12] = step1[12];
+  step2[15] = step1[15];
+
+  // stage 5
+  step1[0].val[0] = vaddq_s32(step2[0].val[0], step2[3].val[0]);
+  step1[0].val[1] = vaddq_s32(step2[0].val[1], step2[3].val[1]);
+  step1[1].val[0] = vaddq_s32(step2[1].val[0], step2[2].val[0]);
+  step1[1].val[1] = vaddq_s32(step2[1].val[1], step2[2].val[1]);
+  step1[2].val[0] = vsubq_s32(step2[1].val[0], step2[2].val[0]);
+  step1[2].val[1] = vsubq_s32(step2[1].val[1], step2[2].val[1]);
+  step1[3].val[0] = vsubq_s32(step2[0].val[0], step2[3].val[0]);
+  step1[3].val[1] = vsubq_s32(step2[0].val[1], step2[3].val[1]);
+  step1[4] = step2[4];
+  highbd_idct_cospi_16_16_q(step2[5], step2[6], cospi_0_8_16_24, &step1[5],
+                            &step1[6]);
+  step1[7] = step2[7];
+  step1[8].val[0] = vaddq_s32(step2[8].val[0], step2[11].val[0]);
+  step1[8].val[1] = vaddq_s32(step2[8].val[1], step2[11].val[1]);
+  step1[9].val[0] = vaddq_s32(step2[9].val[0], step2[10].val[0]);
+  step1[9].val[1] = vaddq_s32(step2[9].val[1], step2[10].val[1]);
+  step1[10].val[0] = vsubq_s32(step2[9].val[0], step2[10].val[0]);
+  step1[10].val[1] = vsubq_s32(step2[9].val[1], step2[10].val[1]);
+  step1[11].val[0] = vsubq_s32(step2[8].val[0], step2[11].val[0]);
+  step1[11].val[1] = vsubq_s32(step2[8].val[1], step2[11].val[1]);
+  step1[12].val[0] = vsubq_s32(step2[15].val[0], step2[12].val[0]);
+  step1[12].val[1] = vsubq_s32(step2[15].val[1], step2[12].val[1]);
+  step1[13].val[0] = vsubq_s32(step2[14].val[0], step2[13].val[0]);
+  step1[13].val[1] = vsubq_s32(step2[14].val[1], step2[13].val[1]);
+  step1[14].val[0] = vaddq_s32(step2[14].val[0], step2[13].val[0]);
+  step1[14].val[1] = vaddq_s32(step2[14].val[1], step2[13].val[1]);
+  step1[15].val[0] = vaddq_s32(step2[15].val[0], step2[12].val[0]);
+  step1[15].val[1] = vaddq_s32(step2[15].val[1], step2[12].val[1]);
+
+  // stage 6
+  step2[0].val[0] = vaddq_s32(step1[0].val[0], step1[7].val[0]);
+  step2[0].val[1] = vaddq_s32(step1[0].val[1], step1[7].val[1]);
+  step2[1].val[0] = vaddq_s32(step1[1].val[0], step1[6].val[0]);
+  step2[1].val[1] = vaddq_s32(step1[1].val[1], step1[6].val[1]);
+  step2[2].val[0] = vaddq_s32(step1[2].val[0], step1[5].val[0]);
+  step2[2].val[1] = vaddq_s32(step1[2].val[1], step1[5].val[1]);
+  step2[3].val[0] = vaddq_s32(step1[3].val[0], step1[4].val[0]);
+  step2[3].val[1] = vaddq_s32(step1[3].val[1], step1[4].val[1]);
+  step2[4].val[0] = vsubq_s32(step1[3].val[0], step1[4].val[0]);
+  step2[4].val[1] = vsubq_s32(step1[3].val[1], step1[4].val[1]);
+  step2[5].val[0] = vsubq_s32(step1[2].val[0], step1[5].val[0]);
+  step2[5].val[1] = vsubq_s32(step1[2].val[1], step1[5].val[1]);
+  step2[6].val[0] = vsubq_s32(step1[1].val[0], step1[6].val[0]);
+  step2[6].val[1] = vsubq_s32(step1[1].val[1], step1[6].val[1]);
+  step2[7].val[0] = vsubq_s32(step1[0].val[0], step1[7].val[0]);
+  step2[7].val[1] = vsubq_s32(step1[0].val[1], step1[7].val[1]);
+  highbd_idct_cospi_16_16_q(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
+                            &step2[13]);
+  highbd_idct_cospi_16_16_q(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
+                            &step2[12]);
+  step2[8] = step1[8];
+  step2[9] = step1[9];
+  step2[14] = step1[14];
+  step2[15] = step1[15];
+
+  // stage 7
+  highbd_idct16x16_add_stage7(step2, out);
+
+  if (output) {
+    // pass 1: save the result into output
+    vst1q_s32(output + 0, out[0].val[0]);
+    vst1q_s32(output + 4, out[0].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[1].val[0]);
+    vst1q_s32(output + 4, out[1].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[2].val[0]);
+    vst1q_s32(output + 4, out[2].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[3].val[0]);
+    vst1q_s32(output + 4, out[3].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[4].val[0]);
+    vst1q_s32(output + 4, out[4].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[5].val[0]);
+    vst1q_s32(output + 4, out[5].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[6].val[0]);
+    vst1q_s32(output + 4, out[6].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[7].val[0]);
+    vst1q_s32(output + 4, out[7].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[8].val[0]);
+    vst1q_s32(output + 4, out[8].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[9].val[0]);
+    vst1q_s32(output + 4, out[9].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[10].val[0]);
+    vst1q_s32(output + 4, out[10].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[11].val[0]);
+    vst1q_s32(output + 4, out[11].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[12].val[0]);
+    vst1q_s32(output + 4, out[12].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[13].val[0]);
+    vst1q_s32(output + 4, out[13].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[14].val[0]);
+    vst1q_s32(output + 4, out[14].val[1]);
+    output += 16;
+    vst1q_s32(output + 0, out[15].val[0]);
+    vst1q_s32(output + 4, out[15].val[1]);
+  } else {
+    // pass 2: add the result to dest.
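+    // vrshrn_n_s32(x, 6) applies the final ROUND_POWER_OF_TWO(x, 6) of the
+    // two-dimensional transform while narrowing to 16 bits, and
+    // highbd_idct16x16_add8x1() adds each row to the prediction, clamped
+    // against max = (1 << bd) - 1.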
+    const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
+    int16x8_t o[16];
+    o[0] = vcombine_s16(vrshrn_n_s32(out[0].val[0], 6),
+                        vrshrn_n_s32(out[0].val[1], 6));
+    o[1] = vcombine_s16(vrshrn_n_s32(out[1].val[0], 6),
+                        vrshrn_n_s32(out[1].val[1], 6));
+    o[2] = vcombine_s16(vrshrn_n_s32(out[2].val[0], 6),
+                        vrshrn_n_s32(out[2].val[1], 6));
+    o[3] = vcombine_s16(vrshrn_n_s32(out[3].val[0], 6),
+                        vrshrn_n_s32(out[3].val[1], 6));
+    o[4] = vcombine_s16(vrshrn_n_s32(out[4].val[0], 6),
+                        vrshrn_n_s32(out[4].val[1], 6));
+    o[5] = vcombine_s16(vrshrn_n_s32(out[5].val[0], 6),
+                        vrshrn_n_s32(out[5].val[1], 6));
+    o[6] = vcombine_s16(vrshrn_n_s32(out[6].val[0], 6),
+                        vrshrn_n_s32(out[6].val[1], 6));
+    o[7] = vcombine_s16(vrshrn_n_s32(out[7].val[0], 6),
+                        vrshrn_n_s32(out[7].val[1], 6));
+    o[8] = vcombine_s16(vrshrn_n_s32(out[8].val[0], 6),
+                        vrshrn_n_s32(out[8].val[1], 6));
+    o[9] = vcombine_s16(vrshrn_n_s32(out[9].val[0], 6),
+                        vrshrn_n_s32(out[9].val[1], 6));
+    o[10] = vcombine_s16(vrshrn_n_s32(out[10].val[0], 6),
+                         vrshrn_n_s32(out[10].val[1], 6));
+    o[11] = vcombine_s16(vrshrn_n_s32(out[11].val[0], 6),
+                         vrshrn_n_s32(out[11].val[1], 6));
+    o[12] = vcombine_s16(vrshrn_n_s32(out[12].val[0], 6),
+                         vrshrn_n_s32(out[12].val[1], 6));
+    o[13] = vcombine_s16(vrshrn_n_s32(out[13].val[0], 6),
+                         vrshrn_n_s32(out[13].val[1], 6));
+    o[14] = vcombine_s16(vrshrn_n_s32(out[14].val[0], 6),
+                         vrshrn_n_s32(out[14].val[1], 6));
+    o[15] = vcombine_s16(vrshrn_n_s32(out[15].val[0], 6),
+                         vrshrn_n_s32(out[15].val[1], 6));
+    highbd_idct16x16_add8x1(o[0], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[1], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[2], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[3], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[4], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[5], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[6], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[7], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[8], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[9], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[10], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[11], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[12], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[13], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[14], max, &dest, stride);
+    highbd_idct16x16_add8x1(o[15], max, &dest, stride);
+  }
+}
+
+void vpx_highbd_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest8,
+                                       int stride, int bd) {
+  uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+
+  if (bd == 8) {
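+    // At 8-bit depth all intermediate values fit in int16_t, so reuse the
+    // 16-bit transform; highbd_flag = 1 routes its final stores through the
+    // uint16_t destination with a 255 clamp.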
+    int16_t row_idct_output[16 * 16];
+
+    // pass 1
+    // Parallel idct on the upper 8 rows
+    idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 1);
+
+    // Parallel idct on the lower 8 rows
+    idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest, stride,
+                             1);
+
+    // pass 2
+    // Parallel idct to get the left 8 columns
+    idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 1);
+
+    // Parallel idct to get the right 8 columns
+    idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL, dest + 8, stride,
+                             1);
+  } else {
+    int32_t row_idct_output[16 * 16];
+
+    // pass 1
+    // Parallel idct on the upper 8 rows
+    highbd_idct16x16_256_add_half1d(input, row_idct_output, dest, stride, bd);
+
+    // Parallel idct on the lower 8 rows
+    highbd_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest,
+                                    stride, bd);
+
+    // pass 2
+    // Parallel idct to get the left 8 columns
+    highbd_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, bd);
+
+    // Parallel idct to get the right 8 columns
+    highbd_idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL, dest + 8,
+                                    stride, bd);
+  }
+}
+
 static INLINE void highbd_idct16x16_1_add_pos_kernel(uint16_t **dest,
                                                      const int stride,
                                                      const int16x8_t res,
                                                      const int16x8_t max) {
-  const uint16x8_t a0 = vld1q_u16(*dest);
+  const uint16x8_t a0 = vld1q_u16(*dest + 0);
   const uint16x8_t a1 = vld1q_u16(*dest + 8);
   const int16x8_t b0 = vaddq_s16(res, vreinterpretq_s16_u16(a0));
   const int16x8_t b1 = vaddq_s16(res, vreinterpretq_s16_u16(a1));
   const int16x8_t c0 = vminq_s16(b0, max);
   const int16x8_t c1 = vminq_s16(b1, max);
-  vst1q_u16(*dest, vreinterpretq_u16_s16(c0));
+  vst1q_u16(*dest + 0, vreinterpretq_u16_s16(c0));
   vst1q_u16(*dest + 8, vreinterpretq_u16_s16(c1));
   *dest += stride;
 }
@@ -32,13 +790,13 @@
 static INLINE void highbd_idct16x16_1_add_neg_kernel(uint16_t **dest,
                                                      const int stride,
                                                      const int16x8_t res) {
-  const uint16x8_t a0 = vld1q_u16(*dest);
+  const uint16x8_t a0 = vld1q_u16(*dest + 0);
   const uint16x8_t a1 = vld1q_u16(*dest + 8);
   const int16x8_t b0 = vaddq_s16(res, vreinterpretq_s16_u16(a0));
   const int16x8_t b1 = vaddq_s16(res, vreinterpretq_s16_u16(a1));
   const uint16x8_t c0 = vqshluq_n_s16(b0, 0);
   const uint16x8_t c1 = vqshluq_n_s16(b1, 0);
-  vst1q_u16(*dest, c0);
+  vst1q_u16(*dest + 0, c0);
   vst1q_u16(*dest + 8, c1);
   *dest += stride;
 }
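
In the existing 8-bit file below, the 16x16/256 row-column driver is
generalized rather than duplicated: idct16x16_256_add_half1d() loses its
static linkage and gains a void *dest plus a highbd_flag parameter so the
bd == 8 branch above can reuse it, and the idct_cospi_* rotation helpers
together with idct16x16_add_stage7() move into idct_neon.h where both
translation units can share them. The *dest + 0 edits in the hunk above are
purely cosmetic, aligning the first load/store with its + 8 partner.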
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -20,54 +20,6 @@
   *d1 = vrshrn_n_s32(t32[1], 14);
 }
 
-static INLINE void idct_cospi_2_30(const int16x8_t s0, const int16x8_t s1,
-                                   const int16x4_t cospi_2_30_10_22,
-                                   int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 1);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 1);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 1);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 1);
-  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 0);
-  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 0);
-  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 0);
-  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 0);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
-static INLINE void idct_cospi_4_28(const int16x8_t s0, const int16x8_t s1,
-                                   const int16x4_t cospi_4_12_20N_28,
-                                   int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 3);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 3);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 3);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 3);
-  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 0);
-  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 0);
-  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 0);
-  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 0);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
-static INLINE void idct_cospi_6_26(const int16x8_t s0, const int16x8_t s1,
-                                   const int16x4_t cospi_6_26_14_18N,
-                                   int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26_14_18N, 0);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26_14_18N, 0);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26_14_18N, 0);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26_14_18N, 0);
-  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26_14_18N, 1);
-  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26_14_18N, 1);
-  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26_14_18N, 1);
-  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26_14_18N, 1);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
 static INLINE void idct_cospi_8_24_d_kernel(const int16x4_t s0,
                                             const int16x4_t s1,
                                             const int16x4_t cospi_0_8_16_24,
@@ -98,54 +50,6 @@
   wrap_low_4x2(t32, d0, d1);
 }
 
-static INLINE void idct_cospi_10_22(const int16x8_t s0, const int16x8_t s1,
-                                    const int16x4_t cospi_2_30_10_22,
-                                    int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 3);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 3);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 3);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 3);
-  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 2);
-  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 2);
-  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 2);
-  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 2);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
-static INLINE void idct_cospi_12_20(const int16x8_t s0, const int16x8_t s1,
-                                    const int16x4_t cospi_4_12_20N_28,
-                                    int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 1);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 1);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 1);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 1);
-  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 2);
-  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 2);
-  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 2);
-  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 2);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
-static INLINE void idct_cospi_14_18(const int16x8_t s0, const int16x8_t s1,
-                                    const int16x4_t cospi_6_26_14_18N,
-                                    int16x8_t *const d0, int16x8_t *const d1) {
-  int32x4_t t32[4];
-
-  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26_14_18N, 2);
-  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26_14_18N, 2);
-  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26_14_18N, 2);
-  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26_14_18N, 2);
-  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26_14_18N, 3);
-  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26_14_18N, 3);
-  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26_14_18N, 3);
-  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26_14_18N, 3);
-  idct16x16_add_wrap_low_8x2(t32, d0, d1);
-}
-
 static INLINE void idct_cospi_16_16_d(const int16x4_t s0, const int16x4_t s1,
                                       const int16x4_t cospi_0_8_16_24,
                                       int16x4_t *const d0,
@@ -158,46 +62,6 @@
   wrap_low_4x2(t32, d0, d1);
 }
 
-static INLINE void idct16x16_add_stage7(const int16x8_t *const step2,
-                                        int16x8_t *const out) {
-#if CONFIG_VP9_HIGHBITDEPTH
-  // Use saturating add/sub to avoid overflow in 2nd pass
-  out[0] = vqaddq_s16(step2[0], step2[15]);
-  out[1] = vqaddq_s16(step2[1], step2[14]);
-  out[2] = vqaddq_s16(step2[2], step2[13]);
-  out[3] = vqaddq_s16(step2[3], step2[12]);
-  out[4] = vqaddq_s16(step2[4], step2[11]);
-  out[5] = vqaddq_s16(step2[5], step2[10]);
-  out[6] = vqaddq_s16(step2[6], step2[9]);
-  out[7] = vqaddq_s16(step2[7], step2[8]);
-  out[8] = vqsubq_s16(step2[7], step2[8]);
-  out[9] = vqsubq_s16(step2[6], step2[9]);
-  out[10] = vqsubq_s16(step2[5], step2[10]);
-  out[11] = vqsubq_s16(step2[4], step2[11]);
-  out[12] = vqsubq_s16(step2[3], step2[12]);
-  out[13] = vqsubq_s16(step2[2], step2[13]);
-  out[14] = vqsubq_s16(step2[1], step2[14]);
-  out[15] = vqsubq_s16(step2[0], step2[15]);
-#else
-  out[0] = vaddq_s16(step2[0], step2[15]);
-  out[1] = vaddq_s16(step2[1], step2[14]);
-  out[2] = vaddq_s16(step2[2], step2[13]);
-  out[3] = vaddq_s16(step2[3], step2[12]);
-  out[4] = vaddq_s16(step2[4], step2[11]);
-  out[5] = vaddq_s16(step2[5], step2[10]);
-  out[6] = vaddq_s16(step2[6], step2[9]);
-  out[7] = vaddq_s16(step2[7], step2[8]);
-  out[8] = vsubq_s16(step2[7], step2[8]);
-  out[9] = vsubq_s16(step2[6], step2[9]);
-  out[10] = vsubq_s16(step2[5], step2[10]);
-  out[11] = vsubq_s16(step2[4], step2[11]);
-  out[12] = vsubq_s16(step2[3], step2[12]);
-  out[13] = vsubq_s16(step2[2], step2[13]);
-  out[14] = vsubq_s16(step2[1], step2[14]);
-  out[15] = vsubq_s16(step2[0], step2[15]);
-#endif
-}
-
 static INLINE void idct16x16_store_pass1(const int16x8_t *const out,
                                          int16_t *output) {
   // Save the result into output
@@ -255,8 +119,9 @@
   idct16x16_add8x1(out[15], &dest, stride);
 }
 
-static void idct16x16_256_add_half1d(const void *const input, int16_t *output,
-                                     uint8_t *dest, const int stride) {
+void idct16x16_256_add_half1d(const void *const input, int16_t *output,
+                              void *const dest, const int stride,
+                              const int highbd_flag) {
   const int16x8_t cospis0 = vld1q_s16(kCospi);
   const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
   const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
@@ -448,7 +313,45 @@
   if (output) {
     idct16x16_store_pass1(out, output);
   } else {
-    idct16x16_add_store(out, dest, stride);
+    if (highbd_flag) {
+      // pass 2: add the result to dest.
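+      // The only caller that sets highbd_flag passes bd == 8 data, hence
+      // the fixed (1 << 8) - 1 clamp below.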
+      const int16x8_t max = vdupq_n_s16((1 << 8) - 1);
+      uint16_t *destT = dest;
+      out[0] = vrshrq_n_s16(out[0], 6);
+      out[1] = vrshrq_n_s16(out[1], 6);
+      out[2] = vrshrq_n_s16(out[2], 6);
+      out[3] = vrshrq_n_s16(out[3], 6);
+      out[4] = vrshrq_n_s16(out[4], 6);
+      out[5] = vrshrq_n_s16(out[5], 6);
+      out[6] = vrshrq_n_s16(out[6], 6);
+      out[7] = vrshrq_n_s16(out[7], 6);
+      out[8] = vrshrq_n_s16(out[8], 6);
+      out[9] = vrshrq_n_s16(out[9], 6);
+      out[10] = vrshrq_n_s16(out[10], 6);
+      out[11] = vrshrq_n_s16(out[11], 6);
+      out[12] = vrshrq_n_s16(out[12], 6);
+      out[13] = vrshrq_n_s16(out[13], 6);
+      out[14] = vrshrq_n_s16(out[14], 6);
+      out[15] = vrshrq_n_s16(out[15], 6);
+      highbd_idct16x16_add8x1(out[0], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[1], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[2], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[3], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[4], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[5], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[6], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[7], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[8], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[9], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[10], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[11], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[12], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[13], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[14], max, &destT, stride);
+      highbd_idct16x16_add8x1(out[15], max, &destT, stride);
+    } else {
+      idct16x16_add_store(out, dest, stride);
+    }
   }
 }
 
@@ -905,17 +808,18 @@
 
   // pass 1
   // Parallel idct on the upper 8 rows
-  idct16x16_256_add_half1d(input, row_idct_output, dest, stride);
+  idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 0);
 
   // Parallel idct on the lower 8 rows
-  idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest, stride);
+  idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest, stride,
+                           0);
 
   // pass 2
   // Parallel idct to get the left 8 columns
-  idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride);
+  idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 0);
 
   // Parallel idct to get the right 8 columns
-  idct16x16_256_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride);
+  idct16x16_256_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride, 0);
 }
 
 void vpx_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -924,8 +828,7 @@
 
   // pass 1
   // Parallel idct on the upper 8 rows
-  idct16x16_38_add_half1d((const int16_t *)input, row_idct_output, dest,
-                          stride);
+  idct16x16_38_add_half1d(input, row_idct_output, dest, stride);
 
   // pass 2
   // Parallel idct to get the left 8 columns
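
The header change below doubles kCospi32 from 8 to 16 entries, appending the
32-bit odd cosine constants that the new high-bitdepth helpers select by
lane (cospi_26_64 and cospi_18_64 are stored negated, matching the 16-bit
kCospi table above it). A quick check of the appended values, assuming the
usual txfm_common.h definition cospi_k_64 = round(16384 * cos(k * pi / 64)):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      /* The eight constants appended to kCospi32, in table order. */
      static const int k[8] = { 2, 30, 10, 22, 6, 26, 14, 18 };
      int i;
      for (i = 0; i < 8; i++)
        printf("cospi_%d_64 = %.0f\n", k[i],
               round(16384 * cos(k[i] * M_PI / 64)));
      /* Prints 16305, 1606, 14449, 7723, 15679, 4756, 12665, 10394. */
      return 0;
    }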
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -28,11 +28,15 @@
   12665 /*  cospi_14_64 */, -10394 /* -cospi_18_64 */
 };
 
-DECLARE_ALIGNED(16, static const int32_t, kCospi32[8]) = {
-  16384 /*  cospi_0_64  */, 15137 /* cospi_8_64  */,
-  11585 /*  cospi_16_64 */, 6270 /* cospi_24_64 */,
-  16069 /*  cospi_4_64  */, 13623 /* cospi_12_64 */,
-  -9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */
+DECLARE_ALIGNED(16, static const int32_t, kCospi32[16]) = {
+  16384 /*  cospi_0_64  */, 15137 /*  cospi_8_64  */,
+  11585 /*  cospi_16_64 */, 6270 /*  cospi_24_64 */,
+  16069 /*  cospi_4_64  */, 13623 /*  cospi_12_64 */,
+  -9102 /* -cospi_20_64 */, 3196 /*  cospi_28_64 */,
+  16305 /*  cospi_2_64  */, 1606 /*  cospi_30_64 */,
+  14449 /*  cospi_10_64 */, 7723 /*  cospi_22_64 */,
+  15679 /*  cospi_6_64  */, -4756 /* -cospi_26_64 */,
+  12665 /*  cospi_14_64 */, -10394 /* -cospi_18_64 */
 };
 
 //------------------------------------------------------------------------------
@@ -540,6 +544,142 @@
   idct16x16_add_wrap_low_8x2(t32, d0, d1);
 }
 
+static INLINE void idct_cospi_2_30(const int16x8_t s0, const int16x8_t s1,
+                                   const int16x4_t cospi_2_30_10_22,
+                                   int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 1);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 1);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 1);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 1);
+  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 0);
+  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 0);
+  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 0);
+  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 0);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
+static INLINE void idct_cospi_4_28(const int16x8_t s0, const int16x8_t s1,
+                                   const int16x4_t cospi_4_12_20N_28,
+                                   int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 3);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 3);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 3);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 3);
+  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 0);
+  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 0);
+  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 0);
+  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 0);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
+static INLINE void idct_cospi_6_26(const int16x8_t s0, const int16x8_t s1,
+                                   const int16x4_t cospi_6_26_14_18N,
+                                   int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26_14_18N, 0);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26_14_18N, 0);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26_14_18N, 0);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26_14_18N, 0);
+  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26_14_18N, 1);
+  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26_14_18N, 1);
+  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26_14_18N, 1);
+  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26_14_18N, 1);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
+static INLINE void idct_cospi_10_22(const int16x8_t s0, const int16x8_t s1,
+                                    const int16x4_t cospi_2_30_10_22,
+                                    int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 3);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 3);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 3);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 3);
+  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 2);
+  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 2);
+  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 2);
+  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 2);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
+static INLINE void idct_cospi_12_20(const int16x8_t s0, const int16x8_t s1,
+                                    const int16x4_t cospi_4_12_20N_28,
+                                    int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 1);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 1);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 1);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 1);
+  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 2);
+  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 2);
+  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 2);
+  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 2);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
+static INLINE void idct_cospi_14_18(const int16x8_t s0, const int16x8_t s1,
+                                    const int16x4_t cospi_6_26_14_18N,
+                                    int16x8_t *const d0, int16x8_t *const d1) {
+  int32x4_t t32[4];
+
+  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26_14_18N, 2);
+  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26_14_18N, 2);
+  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26_14_18N, 2);
+  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26_14_18N, 2);
+  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26_14_18N, 3);
+  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26_14_18N, 3);
+  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26_14_18N, 3);
+  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26_14_18N, 3);
+  idct16x16_add_wrap_low_8x2(t32, d0, d1);
+}
+
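Each idct_cospi_* helper above is one rotation butterfly of the 16-point idct,
eight lanes at a time; they differ only in which cosine lanes feed vmull and
whether the accumulate is vmlal or vmlsl, and all funnel through
idct16x16_add_wrap_low_8x2 for the rounding narrow (vrshrn_n_s32 by 14). As a
scalar cross-check, idct_cospi_2_30 computes, per element (a sketch; the
COSPI defines are copied from kCospi32 above):

    #include <stdint.h>

    #define COSPI_2_64 16305
    #define COSPI_30_64 1606

    static void idct_cospi_2_30_ref(const int16_t *s0, const int16_t *s1,
                                    int16_t *d0, int16_t *d1, int n) {
      int i;
      for (i = 0; i < n; ++i) {
        const int32_t t0 = s0[i] * COSPI_30_64 - s1[i] * COSPI_2_64;
        const int32_t t1 = s0[i] * COSPI_2_64 + s1[i] * COSPI_30_64;
        d0[i] = (int16_t)((t0 + (1 << 13)) >> 14);  /* vrshrn_n_s32(t, 14) */
        d1[i] = (int16_t)((t1 + (1 << 13)) >> 14);
      }
    }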
+static INLINE void idct16x16_add_stage7(const int16x8_t *const step2,
+                                        int16x8_t *const out) {
+#if CONFIG_VP9_HIGHBITDEPTH
+  // Use saturating add/sub to avoid overflow in the second pass
+  out[0] = vqaddq_s16(step2[0], step2[15]);
+  out[1] = vqaddq_s16(step2[1], step2[14]);
+  out[2] = vqaddq_s16(step2[2], step2[13]);
+  out[3] = vqaddq_s16(step2[3], step2[12]);
+  out[4] = vqaddq_s16(step2[4], step2[11]);
+  out[5] = vqaddq_s16(step2[5], step2[10]);
+  out[6] = vqaddq_s16(step2[6], step2[9]);
+  out[7] = vqaddq_s16(step2[7], step2[8]);
+  out[8] = vqsubq_s16(step2[7], step2[8]);
+  out[9] = vqsubq_s16(step2[6], step2[9]);
+  out[10] = vqsubq_s16(step2[5], step2[10]);
+  out[11] = vqsubq_s16(step2[4], step2[11]);
+  out[12] = vqsubq_s16(step2[3], step2[12]);
+  out[13] = vqsubq_s16(step2[2], step2[13]);
+  out[14] = vqsubq_s16(step2[1], step2[14]);
+  out[15] = vqsubq_s16(step2[0], step2[15]);
+#else
+  out[0] = vaddq_s16(step2[0], step2[15]);
+  out[1] = vaddq_s16(step2[1], step2[14]);
+  out[2] = vaddq_s16(step2[2], step2[13]);
+  out[3] = vaddq_s16(step2[3], step2[12]);
+  out[4] = vaddq_s16(step2[4], step2[11]);
+  out[5] = vaddq_s16(step2[5], step2[10]);
+  out[6] = vaddq_s16(step2[6], step2[9]);
+  out[7] = vaddq_s16(step2[7], step2[8]);
+  out[8] = vsubq_s16(step2[7], step2[8]);
+  out[9] = vsubq_s16(step2[6], step2[9]);
+  out[10] = vsubq_s16(step2[5], step2[10]);
+  out[11] = vsubq_s16(step2[4], step2[11]);
+  out[12] = vsubq_s16(step2[3], step2[12]);
+  out[13] = vsubq_s16(step2[2], step2[13]);
+  out[14] = vsubq_s16(step2[1], step2[14]);
+  out[15] = vsubq_s16(step2[0], step2[15]);
+#endif
+}
+
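The only difference between the two stage-7 bodies is saturation: when
CONFIG_VP9_HIGHBITDEPTH is on, pass-2 intermediates can be large enough that
the final adds overflow int16_t, so vqaddq_s16/vqsubq_s16 pin the result at
INT16_MAX/INT16_MIN instead of wrapping. A scalar illustration of what the q
variants buy:

    #include <stdint.h>
    #include <stdio.h>

    static int16_t add_wrap(int16_t a, int16_t b) { return (int16_t)(a + b); }

    static int16_t add_sat(int16_t a, int16_t b) {  /* vqaddq_s16, one lane */
      const int32_t s = (int32_t)a + b;
      if (s > INT16_MAX) return INT16_MAX;
      if (s < INT16_MIN) return INT16_MIN;
      return (int16_t)s;
    }

    int main(void) {
      /* 30000 + 10000 exceeds int16_t: wrap gives garbage, sat clamps. */
      printf("wrap %d  sat %d\n", add_wrap(30000, 10000),
             add_sat(30000, 10000));
      return 0;
    }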
 static INLINE void idct16x16_add8x1(int16x8_t res, uint8_t **dest,
                                     const int stride) {
   uint8x8_t d = vld1_u8(*dest);
@@ -551,5 +691,20 @@
   vst1_u8(*dest, d);
   *dest += stride;
 }
+
+static INLINE void highbd_idct16x16_add8x1(int16x8_t res, const int16x8_t max,
+                                           uint16_t **dest, const int stride) {
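+  // 'max' holds the bit-depth ceiling, typically built once by the caller
+  // as vdupq_n_s16((1 << bd) - 1).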
+  uint16x8_t d = vld1q_u16(*dest);
+
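+  // Saturating add: the pixel is at most (1 << bd) - 1, but pixel plus
+  // residual could otherwise wrap past INT16_MAX.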
+  res = vqaddq_s16(res, vreinterpretq_s16_u16(d));
+  res = vminq_s16(res, max);
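+  // vqshluq_n_s16 with a zero shift converts back to unsigned with
+  // saturation, clamping negative results to 0; vminq_s16 above has already
+  // capped the high end at 'max'.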
+  d = vqshluq_n_s16(res, 0);
+  vst1q_u16(*dest, d);
+  *dest += stride;
+}
+
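+// Shared half-1D 16x16 idct, exposed here so the high-bitdepth code can
+// reuse it: in pass 1 'input' is the coefficient block and 'output' receives
+// the intermediate rows; in pass 2 'output' is NULL and the result is added
+// into 'dest'. 'highbd_flag' selects the uint16_t destination path.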
+void idct16x16_256_add_half1d(const void *const input, int16_t *output,
+                              void *const dest, const int stride,
+                              const int highbd_flag);
 
 #endif  // VPX_DSP_ARM_IDCT_NEON_H_
--- a/vpx_dsp/arm/transpose_neon.h
+++ b/vpx_dsp/arm/transpose_neon.h
@@ -710,6 +710,83 @@
   *a7 = d3.val[1];
 }
 
+static INLINE void transpose_s32_8x8(int32x4x2_t *a0, int32x4x2_t *a1,
+                                     int32x4x2_t *a2, int32x4x2_t *a3,
+                                     int32x4x2_t *a4, int32x4x2_t *a5,
+                                     int32x4x2_t *a6, int32x4x2_t *a7) {
+  // Swap 32-bit elements. Goes from:
+  // a0: 00 01 02 03 04 05 06 07
+  // a1: 10 11 12 13 14 15 16 17
+  // a2: 20 21 22 23 24 25 26 27
+  // a3: 30 31 32 33 34 35 36 37
+  // a4: 40 41 42 43 44 45 46 47
+  // a5: 50 51 52 53 54 55 56 57
+  // a6: 60 61 62 63 64 65 66 67
+  // a7: 70 71 72 73 74 75 76 77
+  // to:
+  // b0: 00 10 02 12 01 11 03 13
+  // b1: 20 30 22 32 21 31 23 33
+  // b2: 40 50 42 52 41 51 43 53
+  // b3: 60 70 62 72 61 71 63 73
+  // b4: 04 14 06 16 05 15 07 17
+  // b5: 24 34 26 36 25 35 27 37
+  // b6: 44 54 46 56 45 55 47 57
+  // b7: 64 74 66 76 65 75 67 77
+
+  const int32x4x2_t b0 = vtrnq_s32(a0->val[0], a1->val[0]);
+  const int32x4x2_t b1 = vtrnq_s32(a2->val[0], a3->val[0]);
+  const int32x4x2_t b2 = vtrnq_s32(a4->val[0], a5->val[0]);
+  const int32x4x2_t b3 = vtrnq_s32(a6->val[0], a7->val[0]);
+  const int32x4x2_t b4 = vtrnq_s32(a0->val[1], a1->val[1]);
+  const int32x4x2_t b5 = vtrnq_s32(a2->val[1], a3->val[1]);
+  const int32x4x2_t b6 = vtrnq_s32(a4->val[1], a5->val[1]);
+  const int32x4x2_t b7 = vtrnq_s32(a6->val[1], a7->val[1]);
+
+  // Swap 64-bit elements, resulting in:
+  // c0: 00 10 20 30 02 12 22 32
+  // c1: 01 11 21 31 03 13 23 33
+  // c2: 40 50 60 70 42 52 62 72
+  // c3: 41 51 61 71 43 53 63 73
+  // c4: 04 14 24 34 06 16 26 36
+  // c5: 05 15 25 35 07 17 27 37
+  // c6: 44 54 64 74 46 56 66 76
+  // c7: 45 55 65 75 47 57 67 77
+  const int32x4x2_t c0 = vpx_vtrnq_s64_to_s32(b0.val[0], b1.val[0]);
+  const int32x4x2_t c1 = vpx_vtrnq_s64_to_s32(b0.val[1], b1.val[1]);
+  const int32x4x2_t c2 = vpx_vtrnq_s64_to_s32(b2.val[0], b3.val[0]);
+  const int32x4x2_t c3 = vpx_vtrnq_s64_to_s32(b2.val[1], b3.val[1]);
+  const int32x4x2_t c4 = vpx_vtrnq_s64_to_s32(b4.val[0], b5.val[0]);
+  const int32x4x2_t c5 = vpx_vtrnq_s64_to_s32(b4.val[1], b5.val[1]);
+  const int32x4x2_t c6 = vpx_vtrnq_s64_to_s32(b6.val[0], b7.val[0]);
+  const int32x4x2_t c7 = vpx_vtrnq_s64_to_s32(b6.val[1], b7.val[1]);
+
+  // Swap 128-bit elements, resulting in:
+  // a0: 00 10 20 30 40 50 60 70
+  // a1: 01 11 21 31 41 51 61 71
+  // a2: 02 12 22 32 42 52 62 72
+  // a3: 03 13 23 33 43 53 63 73
+  // a4: 04 14 24 34 44 54 64 74
+  // a5: 05 15 25 35 45 55 65 75
+  // a6: 06 16 26 36 46 56 66 76
+  // a7: 07 17 27 37 47 57 67 77
+  a0->val[0] = c0.val[0];
+  a0->val[1] = c2.val[0];
+  a1->val[0] = c1.val[0];
+  a1->val[1] = c3.val[0];
+  a2->val[0] = c0.val[1];
+  a2->val[1] = c2.val[1];
+  a3->val[0] = c1.val[1];
+  a3->val[1] = c3.val[1];
+  a4->val[0] = c4.val[0];
+  a4->val[1] = c6.val[0];
+  a5->val[0] = c5.val[0];
+  a5->val[1] = c7.val[0];
+  a6->val[0] = c4.val[1];
+  a6->val[1] = c6.val[1];
+  a7->val[0] = c5.val[1];
+  a7->val[1] = c7.val[1];
+}
+
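transpose_s32_8x8 above is the standard three-round NEON transpose: 32-bit
interleaves (vtrnq_s32), 64-bit interleaves (vpx_vtrnq_s64_to_s32), then a
128-bit half swap done implicitly by the final val[] shuffle. A scalar
reference for unit-testing it (a sketch, not part of this patch):

    #include <stdint.h>

    static void transpose_8x8_ref(int32_t m[8][8]) {
      int r, c;
      for (r = 0; r < 8; ++r) {
        for (c = r + 1; c < 8; ++c) {
          const int32_t t = m[r][c];
          m[r][c] = m[c][r];
          m[c][r] = t;
        }
      }
    }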
 static INLINE void transpose_u8_16x8(
     const uint8x16_t i0, const uint8x16_t i1, const uint8x16_t i2,
     const uint8x16_t i3, const uint8x16_t i4, const uint8x16_t i5,
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -728,7 +728,7 @@
     specialize qw/vpx_highbd_idct8x8_12_add neon sse2/;
 
     add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
-    specialize qw/vpx_highbd_idct16x16_256_add sse2/;
+    specialize qw/vpx_highbd_idct16x16_256_add neon sse2/;
 
     add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
     specialize qw/vpx_highbd_idct16x16_10_add sse2/;
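Adding neon to the specialize line is what routes callers to the new kernel:
the RTCD generator turns each specialized prototype into a function pointer
chosen at runtime from the detected CPU flags. Roughly, and only as a
simplified sketch of what the generated vpx_dsp_rtcd.h contains (the real
file is machine-generated and differs in detail):

    /* tran_low_t comes from vpx_dsp/vpx_dsp_common.h. */
    void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input,
                                        uint8_t *dest, int stride, int bd);
    void vpx_highbd_idct16x16_256_add_neon(const tran_low_t *input,
                                           uint8_t *dest, int stride, int bd);

    void (*vpx_highbd_idct16x16_256_add)(const tran_low_t *input,
                                         uint8_t *dest, int stride, int bd);

    static void setup_rtcd_internal(void) {
      const int flags = arm_cpu_caps();  /* vpx_ports/arm.h */
      vpx_highbd_idct16x16_256_add = vpx_highbd_idct16x16_256_add_c;
      if (flags & HAS_NEON)
        vpx_highbd_idct16x16_256_add = vpx_highbd_idct16x16_256_add_neon;
    }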