shithub: libvpx

ref: 228c9940ea87643368c5e79b744a34fe901d15d4
parent: cb339d628f9e7f9c3e404b49541cb495d7423b7d
parent: 8befcd008924a111c08dc58fa740e905bc0b0d5f
author: James Zern <jzern@google.com>
date: Tue Dec 6 20:40:28 EST 2016

Merge changes Ibad079f2,I7858a0a1

* changes:
  enable vpx_idct16x16_10_add_neon in hbd builds
  idct16x16,NEON: rm output_stride from pass1 fns

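Both changes hang on one observation: every caller of the pass1 functions passed output_stride == 8 bytes, exactly the width of one 64-bit store, so the intermediate block of int16_t is dense and the stride argument carried no information. Dropping it lets the sixteen strided d-register stores collapse into a handful of full-width stores. A minimal scalar model of that equivalence (hypothetical names, not code from the patch):

    #include <stdint.h>
    #include <string.h>

    /* Each old NEON store wrote one d register (four int16_t), then
     * advanced the pointer by output_stride bytes; every call site passed
     * 8, the size of one store, so the 64-value block is contiguous. */
    static void store_strided(int16_t *out, const int16_t block[64],
                              int output_stride) {
      int i;
      for (i = 0; i < 16; ++i) {
        memcpy(out, block + 4 * i, 4 * sizeof(int16_t));
        out += output_stride / (int)sizeof(int16_t); /* always +4 */
      }
    }

    /* With output_stride fixed at 8 this is byte-for-byte identical. */
    static void store_contiguous(int16_t *out, const int16_t block[64]) {
      memcpy(out, block, 64 * sizeof(int16_t));
    }

The second change widens the "10" variant's input pointers to tran_low_t, which is int32_t in high-bitdepth (hbd) builds, so the same code can be enabled there; the load helpers introduced below narrow 32-bit coefficients to int16_t on the way in.
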
--- a/vpx_dsp/arm/idct16x16_add_neon.asm
+++ b/vpx_dsp/arm/idct16x16_add_neon.asm
@@ -18,6 +18,8 @@
 
     AREA ||.text||, CODE, READONLY, ALIGN=2
 
+    INCLUDE vpx_dsp/arm/idct_neon.asm.S
+
    ; Transpose an 8x8 16-bit data matrix. Data is loaded in q8-q15.
     MACRO
     TRANSPOSE8X8
@@ -36,12 +38,10 @@
     MEND
 
     AREA    Block, CODE, READONLY ; name this block of code
-;void |vpx_idct16x16_256_add_neon_pass1|(const int16_t *input,
-;                                        int16_t *output, int output_stride)
+;void |vpx_idct16x16_256_add_neon_pass1|(const int16_t *input, int16_t *output)
 ;
 ; r0  const int16_t *input
 ; r1  int16_t *output
-; r2  int output_stride
 
 ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
@@ -247,22 +247,10 @@
     vsub.s16        q15, q0, q15              ; step2[7] = step1[0] - step1[7];
 
     ; store the data
-    vst1.64         {d16}, [r1], r2
-    vst1.64         {d17}, [r1], r2
-    vst1.64         {d18}, [r1], r2
-    vst1.64         {d19}, [r1], r2
-    vst1.64         {d20}, [r1], r2
-    vst1.64         {d21}, [r1], r2
-    vst1.64         {d22}, [r1], r2
-    vst1.64         {d23}, [r1], r2
-    vst1.64         {d24}, [r1], r2
-    vst1.64         {d25}, [r1], r2
-    vst1.64         {d26}, [r1], r2
-    vst1.64         {d27}, [r1], r2
-    vst1.64         {d28}, [r1], r2
-    vst1.64         {d29}, [r1], r2
-    vst1.64         {d30}, [r1], r2
-    vst1.64         {d31}, [r1], r2
+    vst1.64         {q8-q9}, [r1]!
+    vst1.64         {q10-q11}, [r1]!
+    vst1.64         {q12-q13}, [r1]!
+    vst1.64         {q14-q15}, [r1]
 
     bx              lr
     ENDP  ; |vpx_idct16x16_256_add_neon_pass1|
@@ -767,12 +755,11 @@
     bx              lr
     ENDP  ; |vpx_idct16x16_256_add_neon_pass2|
 
-;void |vpx_idct16x16_10_add_neon_pass1|(const int16_t *input,
-;                                       int16_t *output, int output_stride)
+;void |vpx_idct16x16_10_add_neon_pass1|(const tran_low_t *input,
+;                                       int16_t *output)
 ;
-; r0  const int16_t *input
+; r0  const tran_low_t *input
 ; r1  int16_t *output
-; r2  int output_stride
 
 ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
 ; will be stored back into q8-q15 registers. This function will touch q0-q7
@@ -781,14 +768,14 @@
 
     ; TODO(hkuang): Find a better way to load the elements.
     ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15
-    vld2.s16        {q8,q9}, [r0]!
-    vld2.s16        {q9,q10}, [r0]!
-    vld2.s16        {q10,q11}, [r0]!
-    vld2.s16        {q11,q12}, [r0]!
-    vld2.s16        {q12,q13}, [r0]!
-    vld2.s16        {q13,q14}, [r0]!
-    vld2.s16        {q14,q15}, [r0]!
-    vld2.s16        {q1,q2}, [r0]!
+    LOAD_TRAN_LOW_TO_S16X2 d16, d17, d18, d19, r0
+    LOAD_TRAN_LOW_TO_S16X2 d18, d19, d20, d21, r0
+    LOAD_TRAN_LOW_TO_S16X2 d20, d21, d22, d23, r0
+    LOAD_TRAN_LOW_TO_S16X2 d22, d23, d24, d25, r0
+    LOAD_TRAN_LOW_TO_S16X2 d24, d25, d26, d27, r0
+    LOAD_TRAN_LOW_TO_S16X2 d26, d27, d28, d29, r0
+    LOAD_TRAN_LOW_TO_S16X2 d28, d29, d30, d31, r0
+    LOAD_TRAN_LOW_TO_S16X2 d2, d3, d4, d5, r0
     vmov.s16        q15, q1
 
     ; cospi_28_64*2 = 6392
@@ -864,30 +851,19 @@
     vsub.s16        q15, q8, q7               ; step2[7] = step1[0] - step1[7];
 
     ; store the data
-    vst1.64         {d4}, [r1], r2
-    vst1.64         {d5}, [r1], r2
-    vst1.64         {d18}, [r1], r2
-    vst1.64         {d19}, [r1], r2
-    vst1.64         {d20}, [r1], r2
-    vst1.64         {d21}, [r1], r2
-    vst1.64         {d22}, [r1], r2
-    vst1.64         {d23}, [r1], r2
-    vst1.64         {d24}, [r1], r2
-    vst1.64         {d25}, [r1], r2
-    vst1.64         {d26}, [r1], r2
-    vst1.64         {d27}, [r1], r2
-    vst1.64         {d28}, [r1], r2
-    vst1.64         {d29}, [r1], r2
-    vst1.64         {d30}, [r1], r2
-    vst1.64         {d31}, [r1], r2
+    vst1.64         {q2}, [r1]!
+    vst1.64         {q9-q10}, [r1]!
+    vst1.64         {q11-q12}, [r1]!
+    vst1.64         {q13-q14}, [r1]!
+    vst1.64         {q15}, [r1]
 
     bx              lr
     ENDP  ; |vpx_idct16x16_10_add_neon_pass1|
 
-;void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
+;void vpx_idct16x16_10_add_neon_pass2(const tran_low_t *src, int16_t *output,
 ;                                     int16_t *pass1_output)
 ;
-; r0  const int16_t *src
+; r0  const tran_low_t *src
 ; r1  int16_t *output
 ; r2  int16_t *pass1_output
 
@@ -899,14 +875,14 @@
 
     ; TODO(hkuang): Find a better way to load the elements.
     ; load elements of 1, 3, 5, 7, 9, 11, 13, 15 into q8 - q15
-    vld2.s16        {q8,q9}, [r0]!
-    vld2.s16        {q9,q10}, [r0]!
-    vld2.s16        {q10,q11}, [r0]!
-    vld2.s16        {q11,q12}, [r0]!
-    vld2.s16        {q12,q13}, [r0]!
-    vld2.s16        {q13,q14}, [r0]!
-    vld2.s16        {q14,q15}, [r0]!
-    vld2.s16        {q0,q1}, [r0]!
+    LOAD_TRAN_LOW_TO_S16X2 d16, d17, d18, d19, r0
+    LOAD_TRAN_LOW_TO_S16X2 d18, d19, d20, d21, r0
+    LOAD_TRAN_LOW_TO_S16X2 d20, d21, d22, d23, r0
+    LOAD_TRAN_LOW_TO_S16X2 d22, d23, d24, d25, r0
+    LOAD_TRAN_LOW_TO_S16X2 d24, d25, d26, d27, r0
+    LOAD_TRAN_LOW_TO_S16X2 d26, d27, d28, d29, r0
+    LOAD_TRAN_LOW_TO_S16X2 d28, d29, d30, d31, r0
+    LOAD_TRAN_LOW_TO_S16X2 d0, d1, d2, d3, r0
     vmov.s16        q15, q0;
 
     ; 2*cospi_30_64 = 3212
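
Both pass1 load sequences above rely on vld2's de-interleaving: each load writes even-indexed elements to the first half of its register list and odd-indexed ones to the second, and the chained, overlapping lists ({q8,q9}, then {q9,q10}, ...) keep only the even halves in q8-q15. The new LOAD_TRAN_LOW_TO_S16X2 macro preserves exactly that layout while also accepting 32-bit tran_low_t in hbd builds. A scalar sketch of what one invocation leaves behind (not the patch's code; $dst0-$dst1 map to dst_even, $dst2-$dst3 to dst_odd):

    #include <stdint.h>

    #if CONFIG_VP9_HIGHBITDEPTH
    typedef int32_t tran_low_t;
    #else
    typedef int16_t tran_low_t;
    #endif

    /* One macro invocation consumes 16 coefficients, de-interleaves them
     * and, in hbd builds, narrows each to int16_t. */
    static const tran_low_t *load_s16x2(const tran_low_t *src,
                                        int16_t dst_even[8],
                                        int16_t dst_odd[8]) {
      int i;
      for (i = 0; i < 8; ++i) {
        dst_even[i] = (int16_t)src[2 * i];
        dst_odd[i] = (int16_t)src[2 * i + 1];
      }
      return src + 16; /* the macro post-increments $src */
    }
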
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -10,18 +10,14 @@
 
 #include <arm_neon.h>
 
-#include "./vpx_config.h"
-#include "vpx_dsp/arm/transpose_neon.h"
+#include "vpx_dsp/arm/idct_neon.h"
 #include "vpx_dsp/txfm_common.h"
 
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *in, int16_t *out,
-                                      int output_stride) {
+void vpx_idct16x16_256_add_neon_pass1(const int16_t *in, int16_t *out) {
   int16x4_t d0s16, d1s16, d2s16, d3s16;
   int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
   int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
   int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-  uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
-  uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
   int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
   int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
   int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32;
@@ -197,56 +193,22 @@
   q14s16 = vsubq_s16(q1s16, q6s16);
   q15s16 = vsubq_s16(q0s16, q15s16);
 
-  d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
-  d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
-  d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
-  d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
-  d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
-  d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
-  d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
-  d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
-  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-  d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
-  d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
-  d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
-  d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
   // store the data
-  output_stride >>= 1;  // output_stride / 2, out is int16_t
-  vst1_u64((uint64_t *)out, d16u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d17u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d18u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d19u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d20u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d21u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d22u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d23u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d24u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d25u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d26u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d27u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d28u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d29u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d30u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d31u64);
+  vst1q_s16(out, q8s16);
+  out += 8;
+  vst1q_s16(out, q9s16);
+  out += 8;
+  vst1q_s16(out, q10s16);
+  out += 8;
+  vst1q_s16(out, q11s16);
+  out += 8;
+  vst1q_s16(out, q12s16);
+  out += 8;
+  vst1q_s16(out, q13s16);
+  out += 8;
+  vst1q_s16(out, q14s16);
+  out += 8;
+  vst1q_s16(out, q15s16);
 }
 
 void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *out,
@@ -798,12 +760,9 @@
   }
 }
 
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *in, int16_t *out,
-                                     int output_stride) {
+void vpx_idct16x16_10_add_neon_pass1(const tran_low_t *in, int16_t *out) {
   int16x4_t d4s16;
   int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-  uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
-  uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
   int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16;
   int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
   int32x4_t q6s32, q9s32;
@@ -810,28 +769,28 @@
   int32x4_t q10s32, q11s32, q12s32, q15s32;
   int16x8x2_t q0x2s16;
 
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q8s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q9s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q10s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q11s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q12s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q13s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q14s16 = q0x2s16.val[0];
   in += 16;
-  q0x2s16 = vld2q_s16(in);
+  q0x2s16 = load_tran_low_to_s16x2q(in);
   q15s16 = q0x2s16.val[0];
 
   transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
@@ -881,59 +840,25 @@
   q14s16 = vsubq_s16(q8s16, q6s16);
   q15s16 = vsubq_s16(q8s16, q7s16);
 
-  d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16));
-  d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16));
-  d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
-  d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
-  d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
-  d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
-  d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
-  d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
-  d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
-  d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
-  d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
-  d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
-  d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
-  d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
-  d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
-  d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
   // store the data
-  output_stride >>= 1;  // output_stride / 2, out is int16_t
-  vst1_u64((uint64_t *)out, d4u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d5u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d18u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d19u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d20u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d21u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d22u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d23u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d24u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d25u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d26u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d27u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d28u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d29u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d30u64);
-  out += output_stride;
-  vst1_u64((uint64_t *)out, d31u64);
+  vst1q_s16(out, q2s16);
+  out += 8;
+  vst1q_s16(out, q9s16);
+  out += 8;
+  vst1q_s16(out, q10s16);
+  out += 8;
+  vst1q_s16(out, q11s16);
+  out += 8;
+  vst1q_s16(out, q12s16);
+  out += 8;
+  vst1q_s16(out, q13s16);
+  out += 8;
+  vst1q_s16(out, q14s16);
+  out += 8;
+  vst1q_s16(out, q15s16);
 }
 
-void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *out,
+void vpx_idct16x16_10_add_neon_pass2(const tran_low_t *src, int16_t *out,
                                      int16_t *pass1_output) {
   int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
   int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
@@ -948,28 +873,28 @@
   int32x4_t q10s32, q11s32, q12s32, q13s32;
   int16x8x2_t q0x2s16;
 
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q8s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q9s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q10s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q11s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q12s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q13s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q14s16 = q0x2s16.val[0];
   src += 16;
-  q0x2s16 = vld2q_s16(src);
+  q0x2s16 = load_tran_low_to_s16x2q(src);
   q15s16 = q0x2s16.val[0];
 
   transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
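
The intrinsics version mirrors the assembly: with a dense destination the vreinterpret-to-u64 dance and the (uint64_t *) casts disappear, and each row is one vst1q_s16 plus an out += 8 bump. Isolated as a compilable fragment (a sketch, not the file's code):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Dense store of eight int16x8_t rows; replaces sixteen vst1_u64
     * stores and the output_stride >>= 1 bookkeeping of the old code. */
    static void store_8x8_dense(int16_t *out, const int16x8_t rows[8]) {
      int i;
      for (i = 0; i < 8; ++i) {
        vst1q_s16(out, rows[i]); /* one 128-bit store per row */
        out += 8;
      }
    }
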
--- a/vpx_dsp/arm/idct16x16_neon.c
+++ b/vpx_dsp/arm/idct16x16_neon.c
@@ -11,15 +11,13 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/vpx_dsp_common.h"
 
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
-                                      int output_stride);
+void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output);
 void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
                                       int16_t *pass1_output,
                                       int16_t skip_adding, uint8_t *dest,
                                       int dest_stride);
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
-                                     int output_stride);
-void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
+void vpx_idct16x16_10_add_neon_pass1(const tran_low_t *input, int16_t *output);
+void vpx_idct16x16_10_add_neon_pass2(const tran_low_t *src, int16_t *output,
                                      int16_t *pass1_output);
 
 #if HAVE_NEON_ASM
@@ -44,7 +42,7 @@
   /* Parallel idct on the upper 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(input, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
@@ -55,7 +53,7 @@
   /* Parallel idct on the lower 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
@@ -66,7 +64,7 @@
   /* Parallel idct on the left 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -77,7 +75,7 @@
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -92,7 +90,7 @@
 #endif
 }
 
-void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
+void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest,
                                int dest_stride) {
 #if HAVE_NEON_ASM
   int64_t store_reg[8];
@@ -108,7 +106,7 @@
   /* Parallel idct on the upper 8 rows */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
+  vpx_idct16x16_10_add_neon_pass1(input, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7
@@ -120,7 +118,7 @@
   /* Parallel idct on the left 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -131,7 +129,7 @@
   /* Parallel idct on the right 8 columns */
   // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
   // stage 6 result in pass1_output.
-  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+  vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output);
 
   // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
   // with result in pass1(pass1_output) to calculate final result in stage 7.
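
Note the asymmetry these call-site hunks preserve: in vpx_idct16x16_10_add_neon only the row pass reads tran_low_t; the column passes consume row_idct_output, which is already int16_t, so they keep reusing the _256_ pass1. Schematically (toy stand-ins, not libvpx's kernels; only the types matter):

    #include <stdint.h>

    typedef int32_t tran_low_t; /* int32_t in hbd builds, else int16_t */

    /* Hypothetical pass-through passes illustrating the type boundary. */
    static void row_pass(const tran_low_t *in, int16_t *out) {
      int i;
      for (i = 0; i < 16 * 16; ++i) out[i] = (int16_t)in[i];
    }
    static void col_pass(const int16_t *in, int16_t *out) {
      int i;
      for (i = 0; i < 16 * 16; ++i) out[i] = in[i];
    }

    static void idct16x16_model(const tran_low_t *input, int16_t *out) {
      int16_t row_idct_output[16 * 16]; /* intermediate stays int16_t */
      row_pass(input, row_idct_output); /* only this pass sees tran_low_t */
      col_pass(row_idct_output, out);   /* columns run purely in int16_t */
    }
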
--- a/vpx_dsp/arm/idct_neon.asm
+++ b/vpx_dsp/arm/idct_neon.asm
@@ -10,8 +10,9 @@
 
     INCLUDE ./vpx_config.asm
 
-    ; Helper function used to load tran_low_t into int16, narrowing if
+    ; Helper functions used to load tran_low_t into int16, narrowing if
     ; necessary.
+
     ; $dst0..3 are d registers with the pairs assumed to be contiguous in
     ; non-high-bitdepth builds. q0-q3 are used as temporaries in high-bitdepth.
     MACRO
@@ -25,6 +26,21 @@
     vmovn.i32       $dst3, q3
     ELSE
     vld1.s16        {$dst0-$dst1,$dst2-$dst3}, [$src]!
+    ENDIF
+    MEND
+
+    ; $dst0..3 are d registers. q0-q3 are used as temporaries in high-bitdepth.
+    MACRO
+    LOAD_TRAN_LOW_TO_S16X2 $dst0, $dst1, $dst2, $dst3, $src
+    IF CONFIG_VP9_HIGHBITDEPTH
+    vld2.s32        {q0,q1}, [$src]!
+    vld2.s32        {q2,q3}, [$src]!
+    vmovn.i32       $dst0, q0
+    vmovn.i32       $dst1, q2
+    vmovn.i32       $dst2, q1
+    vmovn.i32       $dst3, q3
+    ELSE
+    vld2.s16        {$dst0,$dst1,$dst2,$dst3}, [$src]!
     ENDIF
     MEND
     END
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -27,6 +27,23 @@
 //------------------------------------------------------------------------------
 // Helper functions used to load tran_low_t into int16, narrowing if necessary.
 
+static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int32x4x2_t v0 = vld2q_s32(buf);
+  const int32x4x2_t v1 = vld2q_s32(buf + 8);
+  const int16x4_t s0 = vmovn_s32(v0.val[0]);
+  const int16x4_t s1 = vmovn_s32(v0.val[1]);
+  const int16x4_t s2 = vmovn_s32(v1.val[0]);
+  const int16x4_t s3 = vmovn_s32(v1.val[1]);
+  int16x8x2_t res;
+  res.val[0] = vcombine_s16(s0, s2);
+  res.val[1] = vcombine_s16(s1, s3);
+  return res;
+#else
+  return vld2q_s16(buf);
+#endif
+}
+
 static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
 #if CONFIG_VP9_HIGHBITDEPTH
   const int32x4_t v0 = vld1q_s32(buf);
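
load_tran_low_to_s16x2q is the intrinsics twin of the assembly macro: two de-interleaving vld2q_s32 loads, vmovn_s32 narrowing, and a recombine so that val[0] carries the eight even-indexed coefficients and val[1] the odd-indexed ones, matching what vld2q_s16 returns in non-hbd builds. A throwaway check of that contract (assumes the function above is in scope, CONFIG_VP9_HIGHBITDEPTH is set, and tran_low_t is int32_t):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      tran_low_t buf[16];
      int16_t even[8], odd[8];
      int i;
      for (i = 0; i < 16; ++i) buf[i] = i - 8;
      {
        const int16x8x2_t r = load_tran_low_to_s16x2q(buf);
        vst1q_s16(even, r.val[0]);
        vst1q_s16(odd, r.val[1]);
      }
      for (i = 0; i < 8; ++i) /* expect all 1s */
        printf("%d %d\n", even[i] == buf[2 * i], odd[i] == buf[2 * i + 1]);
      return 0;
    }
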
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -201,14 +201,6 @@
 DSP_SRCS-$(HAVE_NEON_ASM) += arm/save_reg_neon$(ASM)
 
 ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
-ifeq ($(HAVE_NEON_ASM),yes)
-DSP_SRCS-yes  += arm/idct16x16_add_neon$(ASM)
-else
-ifeq ($(HAVE_NEON),yes)
-DSP_SRCS-yes  += arm/idct16x16_add_neon.c
-endif  # HAVE_NEON
-endif  # HAVE_NEON_ASM
-DSP_SRCS-$(HAVE_NEON)  += arm/idct16x16_neon.c
 DSP_SRCS-$(HAVE_NEON)  += arm/idct32x32_add_neon.c
 
 DSP_SRCS-$(HAVE_MSA)   += mips/inv_txfm_msa.h
@@ -234,6 +226,7 @@
 DSP_SRCS-yes += arm/idct8x8_1_add_neon$(ASM)
 DSP_SRCS-yes += arm/idct8x8_add_neon$(ASM)
 DSP_SRCS-yes += arm/idct16x16_1_add_neon$(ASM)
+DSP_SRCS-yes += arm/idct16x16_add_neon$(ASM)
 else
 DSP_SRCS-$(HAVE_NEON) += arm/idct4x4_1_add_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct4x4_add_neon.c
@@ -240,8 +233,10 @@
 DSP_SRCS-$(HAVE_NEON) += arm/idct8x8_1_add_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct8x8_add_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct16x16_1_add_neon.c
+DSP_SRCS-$(HAVE_NEON) += arm/idct16x16_add_neon.c
 endif  # HAVE_NEON_ASM
 DSP_SRCS-$(HAVE_NEON) += arm/idct_neon.h
+DSP_SRCS-$(HAVE_NEON) += arm/idct16x16_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_1_add_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_34_add_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_135_add_neon.c
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -690,7 +690,7 @@
     specialize qw/vpx_idct16x16_256_add sse2/;
 
     add_proto qw/void vpx_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vpx_idct16x16_10_add sse2/;
+    specialize qw/vpx_idct16x16_10_add neon sse2/;
 
     add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
     specialize qw/vpx_idct16x16_1_add neon sse2/;
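
The specialize line is what actually exposes the NEON path: the rtcd generator turns it into a function pointer that setup_rtcd_internal() retargets when NEON is available. Roughly, in the generated vpx_dsp_rtcd.h (a condensed sketch, not the verbatim generated output):

    #include <stdint.h>

    typedef int32_t tran_low_t; /* hbd builds; int16_t otherwise */

    /* Prototypes as declared in vpx_dsp_rtcd_defs.pl. */
    void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
                                int dest_stride);
    void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest,
                                   int dest_stride);

    extern void (*vpx_idct16x16_10_add)(const tran_low_t *input,
                                        uint8_t *dest, int dest_stride);

    /* Inside setup_rtcd_internal(), approximately:
     *   vpx_idct16x16_10_add = vpx_idct16x16_10_add_c;
     *   if (flags & HAS_NEON)
     *     vpx_idct16x16_10_add = vpx_idct16x16_10_add_neon;
     */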