shithub: libvpx

ref: 4f7060a431ac8e9f15d06f9cb44bd6792c3d1fae
parent: 9022ba3cb9b4f0df9ef93ba48d4a6b1275824a5b
parent: aa8dd897c157952e24ad562babf0c825bd0e29fa
author: Johann <johannkoenig@google.com>
date: Tue Dec 16 11:15:48 EST 2014

Merge "VP9 common for ARMv8 by using NEON intrinsics 16"

--- a/vp9/common/arm/neon/vp9_reconintra_neon.asm
+++ /dev/null
@@ -1,634 +1,0 @@
-;
-;  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-    EXPORT  |vp9_v_predictor_4x4_neon|
-    EXPORT  |vp9_v_predictor_8x8_neon|
-    EXPORT  |vp9_v_predictor_16x16_neon|
-    EXPORT  |vp9_v_predictor_32x32_neon|
-    EXPORT  |vp9_h_predictor_4x4_neon|
-    EXPORT  |vp9_h_predictor_8x8_neon|
-    EXPORT  |vp9_h_predictor_16x16_neon|
-    EXPORT  |vp9_h_predictor_32x32_neon|
-    EXPORT  |vp9_tm_predictor_4x4_neon|
-    EXPORT  |vp9_tm_predictor_8x8_neon|
-    EXPORT  |vp9_tm_predictor_16x16_neon|
-    EXPORT  |vp9_tm_predictor_32x32_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-;void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                              const uint8_t *above,
-;                              const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_v_predictor_4x4_neon| PROC
-    vld1.32             {d0[0]}, [r2]
-    vst1.32             {d0[0]}, [r0], r1
-    vst1.32             {d0[0]}, [r0], r1
-    vst1.32             {d0[0]}, [r0], r1
-    vst1.32             {d0[0]}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_v_predictor_4x4_neon|
-
-;void vp9_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                              const uint8_t *above,
-;                              const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_v_predictor_8x8_neon| PROC
-    vld1.8              {d0}, [r2]
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    vst1.8              {d0}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_v_predictor_8x8_neon|
-
-;void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_v_predictor_16x16_neon| PROC
-    vld1.8              {q0}, [r2]
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    vst1.8              {q0}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_v_predictor_16x16_neon|
-
-;void vp9_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_v_predictor_32x32_neon| PROC
-    vld1.8              {q0, q1}, [r2]
-    mov                 r2, #2
-loop_v
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    vst1.8              {q0, q1}, [r0], r1
-    subs                r2, r2, #1
-    bgt                 loop_v
-    bx                  lr
-    ENDP                ; |vp9_v_predictor_32x32_neon|
-
-;void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                              const uint8_t *above,
-;                              const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_h_predictor_4x4_neon| PROC
-    vld1.32             {d1[0]}, [r3]
-    vdup.8              d0, d1[0]
-    vst1.32             {d0[0]}, [r0], r1
-    vdup.8              d0, d1[1]
-    vst1.32             {d0[0]}, [r0], r1
-    vdup.8              d0, d1[2]
-    vst1.32             {d0[0]}, [r0], r1
-    vdup.8              d0, d1[3]
-    vst1.32             {d0[0]}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_h_predictor_4x4_neon|
-
-;void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                              const uint8_t *above,
-;                              const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_h_predictor_8x8_neon| PROC
-    vld1.64             {d1}, [r3]
-    vdup.8              d0, d1[0]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[1]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[2]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[3]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[4]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[5]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[6]
-    vst1.64             {d0}, [r0], r1
-    vdup.8              d0, d1[7]
-    vst1.64             {d0}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_h_predictor_8x8_neon|
-
-;void vp9_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_h_predictor_16x16_neon| PROC
-    vld1.8              {q1}, [r3]
-    vdup.8              q0, d2[0]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[1]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[2]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[3]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[4]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[5]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[6]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[7]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[0]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[1]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[2]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[3]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[4]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[5]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[6]
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[7]
-    vst1.8              {q0}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_h_predictor_16x16_neon|
-
-;void vp9_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_h_predictor_32x32_neon| PROC
-    sub                 r1, r1, #16
-    mov                 r2, #2
-loop_h
-    vld1.8              {q1}, [r3]!
-    vdup.8              q0, d2[0]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[1]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[2]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[3]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[4]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[5]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[6]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d2[7]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[0]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[1]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[2]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[3]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[4]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[5]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[6]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    vdup.8              q0, d3[7]
-    vst1.8              {q0}, [r0]!
-    vst1.8              {q0}, [r0], r1
-    subs                r2, r2, #1
-    bgt                 loop_h
-    bx                  lr
-    ENDP                ; |vp9_h_predictor_32x32_neon|
-
-;void vp9_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_tm_predictor_4x4_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             d0, r12
-
-    ; Load above 4 pixels
-    vld1.32             {d2[0]}, [r2]
-
-    ; Compute above - ytop_left
-    vsubl.u8            q3, d2, d0
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; 1st row and 2nd row
-    ldrb                r12, [r3], #1
-    ldrb                r2, [r3], #1
-    vdup.u16            q1, r12
-    vdup.u16            q2, r2
-    vadd.s16            q1, q1, q3
-    vadd.s16            q2, q2, q3
-    vqmovun.s16         d0, q1
-    vqmovun.s16         d1, q2
-    vst1.32             {d0[0]}, [r0], r1
-    vst1.32             {d1[0]}, [r0], r1
-
-    ; 3rd row and 4th row
-    ldrb                r12, [r3], #1
-    ldrb                r2, [r3], #1
-    vdup.u16            q1, r12
-    vdup.u16            q2, r2
-    vadd.s16            q1, q1, q3
-    vadd.s16            q2, q2, q3
-    vqmovun.s16         d0, q1
-    vqmovun.s16         d1, q2
-    vst1.32             {d0[0]}, [r0], r1
-    vst1.32             {d1[0]}, [r0], r1
-    bx                  lr
-    ENDP                ; |vp9_tm_predictor_4x4_neon|
-
-;void vp9_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_tm_predictor_8x8_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             d0, r12
-
-    ; preload 8 left
-    vld1.8              {d30}, [r3]
-
-    ; Load above 8 pixels
-    vld1.64             {d2}, [r2]
-
-    vmovl.u8            q10, d30
-
-    ; Compute above - ytop_left
-    vsubl.u8            q3, d2, d0
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; 1st row and 2nd row
-    vdup.16             q0, d20[0]
-    vdup.16             q1, d20[1]
-    vadd.s16            q0, q3, q0
-    vadd.s16            q1, q3, q1
-
-    ; 3rd row and 4th row
-    vdup.16             q8, d20[2]
-    vdup.16             q9, d20[3]
-    vadd.s16            q8, q3, q8
-    vadd.s16            q9, q3, q9
-
-    vqmovun.s16         d0, q0
-    vqmovun.s16         d1, q1
-    vqmovun.s16         d2, q8
-    vqmovun.s16         d3, q9
-
-    vst1.64             {d0}, [r0], r1
-    vst1.64             {d1}, [r0], r1
-    vst1.64             {d2}, [r0], r1
-    vst1.64             {d3}, [r0], r1
-
-    ; 5th row and 6th row
-    vdup.16             q0, d21[0]
-    vdup.16             q1, d21[1]
-    vadd.s16            q0, q3, q0
-    vadd.s16            q1, q3, q1
-
-    ; 7th row and 8th row
-    vdup.16             q8, d21[2]
-    vdup.16             q9, d21[3]
-    vadd.s16            q8, q3, q8
-    vadd.s16            q9, q3, q9
-
-    vqmovun.s16         d0, q0
-    vqmovun.s16         d1, q1
-    vqmovun.s16         d2, q8
-    vqmovun.s16         d3, q9
-
-    vst1.64             {d0}, [r0], r1
-    vst1.64             {d1}, [r0], r1
-    vst1.64             {d2}, [r0], r1
-    vst1.64             {d3}, [r0], r1
-
-    bx                  lr
-    ENDP                ; |vp9_tm_predictor_8x8_neon|
-
-;void vp9_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_tm_predictor_16x16_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             q0, r12
-
-    ; Load above 16 pixels
-    vld1.8              {q1}, [r2]
-
-    ; preload 8 left pixels into d18
-    vld1.8              {d18}, [r3]!
-
-    ; Compute above - ytop_left
-    vsubl.u8            q2, d2, d0
-    vsubl.u8            q3, d3, d1
-
-    vmovl.u8            q10, d18
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; Process 8 rows per loop iteration; loop 2 times to cover all 16 rows.
-    mov                 r2, #2
-
-loop_16x16_neon
-    ; Process two rows.
-    vdup.16             q0, d20[0]
-    vdup.16             q8, d20[1]
-    vadd.s16            q1, q0, q2
-    vadd.s16            q0, q0, q3
-    vadd.s16            q11, q8, q2
-    vadd.s16            q8, q8, q3
-    vqmovun.s16         d2, q1
-    vqmovun.s16         d3, q0
-    vqmovun.s16         d22, q11
-    vqmovun.s16         d23, q8
-    vdup.16             q0, d20[2]                  ; preload next 2 rows of data
-    vdup.16             q8, d20[3]
-    vst1.64             {d2,d3}, [r0], r1
-    vst1.64             {d22,d23}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16            q1, q0, q2
-    vadd.s16            q0, q0, q3
-    vadd.s16            q11, q8, q2
-    vadd.s16            q8, q8, q3
-    vqmovun.s16         d2, q1
-    vqmovun.s16         d3, q0
-    vqmovun.s16         d22, q11
-    vqmovun.s16         d23, q8
-    vdup.16             q0, d21[0]                  ; preload next 2 rows of data
-    vdup.16             q8, d21[1]
-    vst1.64             {d2,d3}, [r0], r1
-    vst1.64             {d22,d23}, [r0], r1
-
-    vadd.s16            q1, q0, q2
-    vadd.s16            q0, q0, q3
-    vadd.s16            q11, q8, q2
-    vadd.s16            q8, q8, q3
-    vqmovun.s16         d2, q1
-    vqmovun.s16         d3, q0
-    vqmovun.s16         d22, q11
-    vqmovun.s16         d23, q8
-    vdup.16             q0, d21[2]                  ; preload next 2 rows of data
-    vdup.16             q8, d21[3]
-    vst1.64             {d2,d3}, [r0], r1
-    vst1.64             {d22,d23}, [r0], r1
-
-
-    vadd.s16            q1, q0, q2
-    vadd.s16            q0, q0, q3
-    vadd.s16            q11, q8, q2
-    vadd.s16            q8, q8, q3
-    vqmovun.s16         d2, q1
-    vqmovun.s16         d3, q0
-    vqmovun.s16         d22, q11
-    vqmovun.s16         d23, q8
-    vld1.8              {d18}, [r3]!                  ; preload next 8 left pixels
-    vmovl.u8            q10, d18
-    vst1.64             {d2,d3}, [r0], r1
-    vst1.64             {d22,d23}, [r0], r1
-
-    subs                r2, r2, #1
-    bgt                 loop_16x16_neon
-
-    bx                  lr
-    ENDP                ; |vp9_tm_predictor_16x16_neon|
-
-;void vp9_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                  const uint8_t *above,
-;                                  const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|vp9_tm_predictor_32x32_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             q0, r12
-
-    ; Load above 32 pixels
-    vld1.8              {q1}, [r2]!
-    vld1.8              {q2}, [r2]
-
-    ; preload 8 left pixels
-    vld1.8              {d26}, [r3]!
-
-    ; Compute above - ytop_left
-    vsubl.u8            q8, d2, d0
-    vsubl.u8            q9, d3, d1
-    vsubl.u8            q10, d4, d0
-    vsubl.u8            q11, d5, d1
-
-    vmovl.u8            q3, d26
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; Process 8 rows per loop iteration; loop 4 times to cover all 32 rows.
-    mov                 r2, #4
-
-loop_32x32_neon
-    ; Process two rows.
-    vdup.16             q0, d6[0]
-    vdup.16             q2, d6[1]
-    vadd.s16            q12, q0, q8
-    vadd.s16            q13, q0, q9
-    vadd.s16            q14, q0, q10
-    vadd.s16            q15, q0, q11
-    vqmovun.s16         d0, q12
-    vqmovun.s16         d1, q13
-    vadd.s16            q12, q2, q8
-    vadd.s16            q13, q2, q9
-    vqmovun.s16         d2, q14
-    vqmovun.s16         d3, q15
-    vadd.s16            q14, q2, q10
-    vadd.s16            q15, q2, q11
-    vst1.64             {d0-d3}, [r0], r1
-    vqmovun.s16         d24, q12
-    vqmovun.s16         d25, q13
-    vqmovun.s16         d26, q14
-    vqmovun.s16         d27, q15
-    vdup.16             q1, d6[2]
-    vdup.16             q2, d6[3]
-    vst1.64             {d24-d27}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16            q12, q1, q8
-    vadd.s16            q13, q1, q9
-    vadd.s16            q14, q1, q10
-    vadd.s16            q15, q1, q11
-    vqmovun.s16         d0, q12
-    vqmovun.s16         d1, q13
-    vadd.s16            q12, q2, q8
-    vadd.s16            q13, q2, q9
-    vqmovun.s16         d2, q14
-    vqmovun.s16         d3, q15
-    vadd.s16            q14, q2, q10
-    vadd.s16            q15, q2, q11
-    vst1.64             {d0-d3}, [r0], r1
-    vqmovun.s16         d24, q12
-    vqmovun.s16         d25, q13
-    vqmovun.s16         d26, q14
-    vqmovun.s16         d27, q15
-    vdup.16             q0, d7[0]
-    vdup.16             q2, d7[1]
-    vst1.64             {d24-d27}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16            q12, q0, q8
-    vadd.s16            q13, q0, q9
-    vadd.s16            q14, q0, q10
-    vadd.s16            q15, q0, q11
-    vqmovun.s16         d0, q12
-    vqmovun.s16         d1, q13
-    vadd.s16            q12, q2, q8
-    vadd.s16            q13, q2, q9
-    vqmovun.s16         d2, q14
-    vqmovun.s16         d3, q15
-    vadd.s16            q14, q2, q10
-    vadd.s16            q15, q2, q11
-    vst1.64             {d0-d3}, [r0], r1
-    vqmovun.s16         d24, q12
-    vqmovun.s16         d25, q13
-    vqmovun.s16         d26, q14
-    vqmovun.s16         d27, q15
-    vdup.16             q0, d7[2]
-    vdup.16             q2, d7[3]
-    vst1.64             {d24-d27}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16            q12, q0, q8
-    vadd.s16            q13, q0, q9
-    vadd.s16            q14, q0, q10
-    vadd.s16            q15, q0, q11
-    vqmovun.s16         d0, q12
-    vqmovun.s16         d1, q13
-    vadd.s16            q12, q2, q8
-    vadd.s16            q13, q2, q9
-    vqmovun.s16         d2, q14
-    vqmovun.s16         d3, q15
-    vadd.s16            q14, q2, q10
-    vadd.s16            q15, q2, q11
-    vst1.64             {d0-d3}, [r0], r1
-    vqmovun.s16         d24, q12
-    vqmovun.s16         d25, q13
-    vld1.8              {d0}, [r3]!                   ; preload 8 left pixels
-    vqmovun.s16         d26, q14
-    vqmovun.s16         d27, q15
-    vmovl.u8            q3, d0
-    vst1.64             {d24-d27}, [r0], r1
-
-    subs                r2, r2, #1
-    bgt                 loop_32x32_neon
-
-    bx                  lr
-    ENDP                ; |vp9_tm_predictor_32x32_neon|
-
-    END
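
The assembly above is replaced by the NEON intrinsics file that follows (the assembly itself is re-added later under a new name). For comparison, the v predictor copies the above row into every output row and the h predictor replicates each left pixel across its row; a minimal scalar sketch (illustrative only, not part of this patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical scalar references for bs x bs v and h prediction. */
    static void v_predictor_ref(uint8_t *dst, ptrdiff_t y_stride, int bs,
                                const uint8_t *above) {
      int r;
      for (r = 0; r < bs; ++r, dst += y_stride)
        memcpy(dst, above, bs);    /* each row is a copy of the row above */
    }

    static void h_predictor_ref(uint8_t *dst, ptrdiff_t y_stride, int bs,
                                const uint8_t *left) {
      int r;
      for (r = 0; r < bs; ++r, dst += y_stride)
        memset(dst, left[r], bs);  /* each row replicates its left pixel */
    }
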
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_reconintra_neon.c
@@ -1,0 +1,473 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <arm_neon.h>
+
+void vp9_v_predictor_4x4_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int i;
+    uint32x2_t d0u32 = vdup_n_u32(0);
+    (void)left;
+
+    d0u32 = vld1_lane_u32((const uint32_t *)above, d0u32, 0);
+    for (i = 0; i < 4; i++, dst += y_stride)
+        vst1_lane_u32((uint32_t *)dst, d0u32, 0);
+    return;
+}
+
+void vp9_v_predictor_8x8_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int i;
+    uint8x8_t d0u8 = vdup_n_u8(0);
+    (void)left;
+
+    d0u8 = vld1_u8(above);
+    for (i = 0; i < 8; i++, dst += y_stride)
+        vst1_u8(dst, d0u8);
+    return;
+}
+
+void vp9_v_predictor_16x16_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int i;
+    uint8x16_t q0u8 = vdupq_n_u8(0);
+    (void)left;
+
+    q0u8 = vld1q_u8(above);
+    for (i = 0; i < 16; i++, dst += y_stride)
+        vst1q_u8(dst, q0u8);
+    return;
+}
+
+void vp9_v_predictor_32x32_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int i;
+    uint8x16_t q0u8 = vdupq_n_u8(0);
+    uint8x16_t q1u8 = vdupq_n_u8(0);
+    (void)left;
+
+    q0u8 = vld1q_u8(above);
+    q1u8 = vld1q_u8(above + 16);
+    for (i = 0; i < 32; i++, dst += y_stride) {
+        vst1q_u8(dst, q0u8);
+        vst1q_u8(dst + 16, q1u8);
+    }
+    return;
+}
+
+void vp9_h_predictor_4x4_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    uint8x8_t d0u8 = vdup_n_u8(0);
+    uint32x2_t d1u32 = vdup_n_u32(0);
+    (void)above;
+
+    d1u32 = vld1_lane_u32((const uint32_t *)left, d1u32, 0);
+
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0);
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1);
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2);
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3);
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+    return;
+}
+
+void vp9_h_predictor_8x8_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    uint8x8_t d0u8 = vdup_n_u8(0);
+    uint64x1_t d1u64 = vdup_n_u64(0);
+    (void)above;
+
+    d1u64 = vld1_u64((const uint64_t *)left);
+
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 0);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 1);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 2);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 3);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 4);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 5);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 6);
+    vst1_u8(dst, d0u8);
+    dst += y_stride;
+    d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 7);
+    vst1_u8(dst, d0u8);
+    return;
+}
+
+void vp9_h_predictor_16x16_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int j;
+    uint8x8_t d2u8 = vdup_n_u8(0);
+    uint8x16_t q0u8 = vdupq_n_u8(0);
+    uint8x16_t q1u8 = vdupq_n_u8(0);
+    (void)above;
+
+    q1u8 = vld1q_u8(left);
+    d2u8 = vget_low_u8(q1u8);
+    for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
+        q0u8 = vdupq_lane_u8(d2u8, 0);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 1);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 2);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 3);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 4);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 5);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 6);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+        q0u8 = vdupq_lane_u8(d2u8, 7);
+        vst1q_u8(dst, q0u8);
+        dst += y_stride;
+    }
+    return;
+}
+
+void vp9_h_predictor_32x32_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int j, k;
+    uint8x8_t d2u8 = vdup_n_u8(0);
+    uint8x16_t q0u8 = vdupq_n_u8(0);
+    uint8x16_t q1u8 = vdupq_n_u8(0);
+    (void)above;
+
+    for (k = 0; k < 2; k++, left += 16) {
+        q1u8 = vld1q_u8(left);
+        d2u8 = vget_low_u8(q1u8);
+        for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
+            q0u8 = vdupq_lane_u8(d2u8, 0);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 1);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 2);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 3);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 4);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 5);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 6);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+            q0u8 = vdupq_lane_u8(d2u8, 7);
+            vst1q_u8(dst, q0u8);
+            vst1q_u8(dst + 16, q0u8);
+            dst += y_stride;
+        }
+    }
+    return;
+}
+
+void vp9_tm_predictor_4x4_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int i;
+    uint16x8_t q1u16, q3u16;
+    int16x8_t q1s16;
+    uint8x8_t d0u8 = vdup_n_u8(0);
+    uint32x2_t d2u32 = vdup_n_u32(0);
+
+    d0u8 = vdup_n_u8(above[-1]);
+    d2u32 = vld1_lane_u32((const uint32_t *)above, d2u32, 0);
+    q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8);
+    for (i = 0; i < 4; i++, dst += y_stride) {
+        q1u16 = vdupq_n_u16((uint16_t)left[i]);
+        q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16),
+                          vreinterpretq_s16_u16(q3u16));
+        d0u8 = vqmovun_s16(q1s16);
+        vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+    }
+    return;
+}
+
+void vp9_tm_predictor_8x8_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int j;
+    uint16x8_t q0u16, q3u16, q10u16;
+    int16x8_t q0s16;
+    uint16x4_t d20u16;
+    uint8x8_t d0u8, d2u8, d30u8;
+
+    d0u8 = vdup_n_u8(above[-1]);
+    d30u8 = vld1_u8(left);
+    d2u8 = vld1_u8(above);
+    q10u16 = vmovl_u8(d30u8);
+    q3u16 = vsubl_u8(d2u8, d0u8);
+    d20u16 = vget_low_u16(q10u16);
+    for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
+        q0u16 = vdupq_lane_u16(d20u16, 0);
+        q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
+                          vreinterpretq_s16_u16(q0u16));
+        d0u8 = vqmovun_s16(q0s16);
+        vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
+        dst += y_stride;
+        q0u16 = vdupq_lane_u16(d20u16, 1);
+        q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
+                          vreinterpretq_s16_u16(q0u16));
+        d0u8 = vqmovun_s16(q0s16);
+        vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
+        dst += y_stride;
+        q0u16 = vdupq_lane_u16(d20u16, 2);
+        q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
+                          vreinterpretq_s16_u16(q0u16));
+        d0u8 = vqmovun_s16(q0s16);
+        vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
+        dst += y_stride;
+        q0u16 = vdupq_lane_u16(d20u16, 3);
+        q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
+                          vreinterpretq_s16_u16(q0u16));
+        d0u8 = vqmovun_s16(q0s16);
+        vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
+        dst += y_stride;
+    }
+    return;
+}
+
+void vp9_tm_predictor_16x16_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int j, k;
+    uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
+    uint8x16_t q0u8, q1u8;
+    int16x8_t q0s16, q1s16, q8s16, q11s16;
+    uint16x4_t d20u16;
+    uint8x8_t d2u8, d3u8, d18u8, d22u8, d23u8;
+
+    q0u8 = vdupq_n_u8(above[-1]);
+    q1u8 = vld1q_u8(above);
+    q2u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
+    q3u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
+    for (k = 0; k < 2; k++, left += 8) {
+        d18u8 = vld1_u8(left);
+        q10u16 = vmovl_u8(d18u8);
+        d20u16 = vget_low_u16(q10u16);
+        for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
+            q0u16 = vdupq_lane_u16(d20u16, 0);
+            q8u16 = vdupq_lane_u16(d20u16, 1);
+            q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                              vreinterpretq_s16_u16(q2u16));
+            q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                              vreinterpretq_s16_u16(q3u16));
+            q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
+                              vreinterpretq_s16_u16(q2u16));
+            q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
+                              vreinterpretq_s16_u16(q3u16));
+            d2u8 = vqmovun_s16(q1s16);
+            d3u8 = vqmovun_s16(q0s16);
+            d22u8 = vqmovun_s16(q11s16);
+            d23u8 = vqmovun_s16(q8s16);
+            vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
+            vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
+            dst += y_stride;
+            vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
+            vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
+            dst += y_stride;
+
+            q0u16 = vdupq_lane_u16(d20u16, 2);
+            q8u16 = vdupq_lane_u16(d20u16, 3);
+            q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                              vreinterpretq_s16_u16(q2u16));
+            q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                              vreinterpretq_s16_u16(q3u16));
+            q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
+                              vreinterpretq_s16_u16(q2u16));
+            q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
+                              vreinterpretq_s16_u16(q3u16));
+            d2u8 = vqmovun_s16(q1s16);
+            d3u8 = vqmovun_s16(q0s16);
+            d22u8 = vqmovun_s16(q11s16);
+            d23u8 = vqmovun_s16(q8s16);
+            vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
+            vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
+            dst += y_stride;
+            vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
+            vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
+            dst += y_stride;
+        }
+    }
+    return;
+}
+
+void vp9_tm_predictor_32x32_neon(
+        uint8_t *dst,
+        ptrdiff_t y_stride,
+        const uint8_t *above,
+        const uint8_t *left) {
+    int j, k;
+    uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
+    uint8x16_t q0u8, q1u8, q2u8;
+    int16x8_t q12s16, q13s16, q14s16, q15s16;
+    uint16x4_t d6u16;
+    uint8x8_t d0u8, d1u8, d2u8, d3u8, d26u8;
+
+    q0u8 = vdupq_n_u8(above[-1]);
+    q1u8 = vld1q_u8(above);
+    q2u8 = vld1q_u8(above + 16);
+    q8u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
+    q9u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
+    q10u16 = vsubl_u8(vget_low_u8(q2u8), vget_low_u8(q0u8));
+    q11u16 = vsubl_u8(vget_high_u8(q2u8), vget_high_u8(q0u8));
+    for (k = 0; k < 4; k++, left += 8) {
+        d26u8 = vld1_u8(left);
+        q3u16 = vmovl_u8(d26u8);
+        d6u16 = vget_low_u16(q3u16);
+        for (j = 0; j < 2; j++, d6u16 = vget_high_u16(q3u16)) {
+            q0u16 = vdupq_lane_u16(d6u16, 0);
+            q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q8u16));
+            q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q9u16));
+            q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q10u16));
+            q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q11u16));
+            d0u8 = vqmovun_s16(q12s16);
+            d1u8 = vqmovun_s16(q13s16);
+            d2u8 = vqmovun_s16(q14s16);
+            d3u8 = vqmovun_s16(q15s16);
+            q0u8 = vcombine_u8(d0u8, d1u8);
+            q1u8 = vcombine_u8(d2u8, d3u8);
+            vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
+            vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
+            dst += y_stride;
+
+            q0u16 = vdupq_lane_u16(d6u16, 1);
+            q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q8u16));
+            q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q9u16));
+            q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q10u16));
+            q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q11u16));
+            d0u8 = vqmovun_s16(q12s16);
+            d1u8 = vqmovun_s16(q13s16);
+            d2u8 = vqmovun_s16(q14s16);
+            d3u8 = vqmovun_s16(q15s16);
+            q0u8 = vcombine_u8(d0u8, d1u8);
+            q1u8 = vcombine_u8(d2u8, d3u8);
+            vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
+            vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
+            dst += y_stride;
+
+            q0u16 = vdupq_lane_u16(d6u16, 2);
+            q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q8u16));
+            q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q9u16));
+            q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q10u16));
+            q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q11u16));
+            d0u8 = vqmovun_s16(q12s16);
+            d1u8 = vqmovun_s16(q13s16);
+            d2u8 = vqmovun_s16(q14s16);
+            d3u8 = vqmovun_s16(q15s16);
+            q0u8 = vcombine_u8(d0u8, d1u8);
+            q1u8 = vcombine_u8(d2u8, d3u8);
+            vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
+            vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
+            dst += y_stride;
+
+            q0u16 = vdupq_lane_u16(d6u16, 3);
+            q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q8u16));
+            q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q9u16));
+            q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q10u16));
+            q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
+                               vreinterpretq_s16_u16(q11u16));
+            d0u8 = vqmovun_s16(q12s16);
+            d1u8 = vqmovun_s16(q13s16);
+            d2u8 = vqmovun_s16(q14s16);
+            d3u8 = vqmovun_s16(q15s16);
+            q0u8 = vcombine_u8(d0u8, d1u8);
+            q1u8 = vcombine_u8(d2u8, d3u8);
+            vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
+            vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
+            dst += y_stride;
+        }
+    }
+    return;
+}
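
The original assembly is re-added below, unchanged, as vp9_reconintra_neon_asm.asm, so builds that go through the ARM assembler keep the hand-written version while other builds use the intrinsics above. A hypothetical spot check of one intrinsics routine against the scalar behaviour described earlier might look like this (check_v_16x16 and its test pattern are assumptions, not part of this patch; the prototype is copied from the file above):

    #include <stddef.h>
    #include <stdint.h>

    void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
                                    const uint8_t *above, const uint8_t *left);

    /* Returns 1 if every output row equals the 'above' row, 0 otherwise. */
    static int check_v_16x16(void) {
      uint8_t above[16], dst[16 * 16];
      int r, c, ok = 1;
      for (c = 0; c < 16; ++c) above[c] = (uint8_t)(c * 7 + 3);
      vp9_v_predictor_16x16_neon(dst, 16, above, above);  /* left is unused */
      for (r = 0; r < 16; ++r)
        for (c = 0; c < 16; ++c)
          ok &= (dst[r * 16 + c] == above[c]);
      return ok;
    }
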
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
@@ -1,0 +1,634 @@
+;
+;  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+    EXPORT  |vp9_v_predictor_4x4_neon|
+    EXPORT  |vp9_v_predictor_8x8_neon|
+    EXPORT  |vp9_v_predictor_16x16_neon|
+    EXPORT  |vp9_v_predictor_32x32_neon|
+    EXPORT  |vp9_h_predictor_4x4_neon|
+    EXPORT  |vp9_h_predictor_8x8_neon|
+    EXPORT  |vp9_h_predictor_16x16_neon|
+    EXPORT  |vp9_h_predictor_32x32_neon|
+    EXPORT  |vp9_tm_predictor_4x4_neon|
+    EXPORT  |vp9_tm_predictor_8x8_neon|
+    EXPORT  |vp9_tm_predictor_16x16_neon|
+    EXPORT  |vp9_tm_predictor_32x32_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                              const uint8_t *above,
+;                              const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_v_predictor_4x4_neon| PROC
+    vld1.32             {d0[0]}, [r2]
+    vst1.32             {d0[0]}, [r0], r1
+    vst1.32             {d0[0]}, [r0], r1
+    vst1.32             {d0[0]}, [r0], r1
+    vst1.32             {d0[0]}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_v_predictor_4x4_neon|
+
+;void vp9_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                              const uint8_t *above,
+;                              const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_v_predictor_8x8_neon| PROC
+    vld1.8              {d0}, [r2]
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    vst1.8              {d0}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_v_predictor_8x8_neon|
+
+;void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_v_predictor_16x16_neon| PROC
+    vld1.8              {q0}, [r2]
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    vst1.8              {q0}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_v_predictor_16x16_neon|
+
+;void vp9_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_v_predictor_32x32_neon| PROC
+    vld1.8              {q0, q1}, [r2]
+    mov                 r2, #2
+loop_v
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    vst1.8              {q0, q1}, [r0], r1
+    subs                r2, r2, #1
+    bgt                 loop_v
+    bx                  lr
+    ENDP                ; |vp9_v_predictor_32x32_neon|
+
+;void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                              const uint8_t *above,
+;                              const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_h_predictor_4x4_neon| PROC
+    vld1.32             {d1[0]}, [r3]
+    vdup.8              d0, d1[0]
+    vst1.32             {d0[0]}, [r0], r1
+    vdup.8              d0, d1[1]
+    vst1.32             {d0[0]}, [r0], r1
+    vdup.8              d0, d1[2]
+    vst1.32             {d0[0]}, [r0], r1
+    vdup.8              d0, d1[3]
+    vst1.32             {d0[0]}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_h_predictor_4x4_neon|
+
+;void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                              const uint8_t *above,
+;                              const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_h_predictor_8x8_neon| PROC
+    vld1.64             {d1}, [r3]
+    vdup.8              d0, d1[0]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[1]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[2]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[3]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[4]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[5]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[6]
+    vst1.64             {d0}, [r0], r1
+    vdup.8              d0, d1[7]
+    vst1.64             {d0}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_h_predictor_8x8_neon|
+
+;void vp9_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_h_predictor_16x16_neon| PROC
+    vld1.8              {q1}, [r3]
+    vdup.8              q0, d2[0]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[1]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[2]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[3]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[4]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[5]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[6]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[7]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[0]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[1]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[2]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[3]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[4]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[5]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[6]
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[7]
+    vst1.8              {q0}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_h_predictor_16x16_neon|
+
+;void vp9_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_h_predictor_32x32_neon| PROC
+    sub                 r1, r1, #16
+    mov                 r2, #2
+loop_h
+    vld1.8              {q1}, [r3]!
+    vdup.8              q0, d2[0]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[1]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[2]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[3]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[4]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[5]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[6]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d2[7]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[0]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[1]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[2]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[3]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[4]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[5]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[6]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    vdup.8              q0, d3[7]
+    vst1.8              {q0}, [r0]!
+    vst1.8              {q0}, [r0], r1
+    subs                r2, r2, #1
+    bgt                 loop_h
+    bx                  lr
+    ENDP                ; |vp9_h_predictor_32x32_neon|
+
+;void vp9_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_tm_predictor_4x4_neon| PROC
+    ; Load ytop_left = above[-1];
+    sub                 r12, r2, #1
+    ldrb                r12, [r12]
+    vdup.u8             d0, r12
+
+    ; Load above 4 pixels
+    vld1.32             {d2[0]}, [r2]
+
+    ; Compute above - ytop_left
+    vsubl.u8            q3, d2, d0
+
+    ; Load left row by row and compute left + (above - ytop_left)
+    ; 1st row and 2nd row
+    ldrb                r12, [r3], #1
+    ldrb                r2, [r3], #1
+    vdup.u16            q1, r12
+    vdup.u16            q2, r2
+    vadd.s16            q1, q1, q3
+    vadd.s16            q2, q2, q3
+    vqmovun.s16         d0, q1
+    vqmovun.s16         d1, q2
+    vst1.32             {d0[0]}, [r0], r1
+    vst1.32             {d1[0]}, [r0], r1
+
+    ; 3rd row and 4th row
+    ldrb                r12, [r3], #1
+    ldrb                r2, [r3], #1
+    vdup.u16            q1, r12
+    vdup.u16            q2, r2
+    vadd.s16            q1, q1, q3
+    vadd.s16            q2, q2, q3
+    vqmovun.s16         d0, q1
+    vqmovun.s16         d1, q2
+    vst1.32             {d0[0]}, [r0], r1
+    vst1.32             {d1[0]}, [r0], r1
+    bx                  lr
+    ENDP                ; |vp9_tm_predictor_4x4_neon|
+
+;void vp9_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_tm_predictor_8x8_neon| PROC
+    ; Load ytop_left = above[-1];
+    sub                 r12, r2, #1
+    ldrb                r12, [r12]
+    vdup.u8             d0, r12
+
+    ; preload 8 left
+    vld1.8              {d30}, [r3]
+
+    ; Load above 8 pixels
+    vld1.64             {d2}, [r2]
+
+    vmovl.u8            q10, d30
+
+    ; Compute above - ytop_left
+    vsubl.u8            q3, d2, d0
+
+    ; Load left row by row and compute left + (above - ytop_left)
+    ; 1st row and 2nd row
+    vdup.16             q0, d20[0]
+    vdup.16             q1, d20[1]
+    vadd.s16            q0, q3, q0
+    vadd.s16            q1, q3, q1
+
+    ; 3rd row and 4th row
+    vdup.16             q8, d20[2]
+    vdup.16             q9, d20[3]
+    vadd.s16            q8, q3, q8
+    vadd.s16            q9, q3, q9
+
+    vqmovun.s16         d0, q0
+    vqmovun.s16         d1, q1
+    vqmovun.s16         d2, q8
+    vqmovun.s16         d3, q9
+
+    vst1.64             {d0}, [r0], r1
+    vst1.64             {d1}, [r0], r1
+    vst1.64             {d2}, [r0], r1
+    vst1.64             {d3}, [r0], r1
+
+    ; 5th row and 6th row
+    vdup.16             q0, d21[0]
+    vdup.16             q1, d21[1]
+    vadd.s16            q0, q3, q0
+    vadd.s16            q1, q3, q1
+
+    ; 7th row and 8th row
+    vdup.16             q8, d21[2]
+    vdup.16             q9, d21[3]
+    vadd.s16            q8, q3, q8
+    vadd.s16            q9, q3, q9
+
+    vqmovun.s16         d0, q0
+    vqmovun.s16         d1, q1
+    vqmovun.s16         d2, q8
+    vqmovun.s16         d3, q9
+
+    vst1.64             {d0}, [r0], r1
+    vst1.64             {d1}, [r0], r1
+    vst1.64             {d2}, [r0], r1
+    vst1.64             {d3}, [r0], r1
+
+    bx                  lr
+    ENDP                ; |vp9_tm_predictor_8x8_neon|
+
+;void vp9_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
+;                                const uint8_t *above,
+;                                const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_tm_predictor_16x16_neon| PROC
+    ; Load ytop_left = above[-1];
+    sub                 r12, r2, #1
+    ldrb                r12, [r12]
+    vdup.u8             q0, r12
+
+    ; Load above 16 pixels
+    vld1.8              {q1}, [r2]
+
+    ; preload 8 left pixels into d18
+    vld1.8              {d18}, [r3]!
+
+    ; Compute above - ytop_left
+    vsubl.u8            q2, d2, d0
+    vsubl.u8            q3, d3, d1
+
+    vmovl.u8            q10, d18
+
+    ; Load left row by row and compute left + (above - ytop_left)
+    ; Process 8 rows per loop iteration; loop 2 times to cover all 16 rows.
+    mov                 r2, #2
+
+loop_16x16_neon
+    ; Process two rows.
+    vdup.16             q0, d20[0]
+    vdup.16             q8, d20[1]
+    vadd.s16            q1, q0, q2
+    vadd.s16            q0, q0, q3
+    vadd.s16            q11, q8, q2
+    vadd.s16            q8, q8, q3
+    vqmovun.s16         d2, q1
+    vqmovun.s16         d3, q0
+    vqmovun.s16         d22, q11
+    vqmovun.s16         d23, q8
+    vdup.16             q0, d20[2]                  ; preload next 2 rows of data
+    vdup.16             q8, d20[3]
+    vst1.64             {d2,d3}, [r0], r1
+    vst1.64             {d22,d23}, [r0], r1
+
+    ; Process two rows.
+    vadd.s16            q1, q0, q2
+    vadd.s16            q0, q0, q3
+    vadd.s16            q11, q8, q2
+    vadd.s16            q8, q8, q3
+    vqmovun.s16         d2, q1
+    vqmovun.s16         d3, q0
+    vqmovun.s16         d22, q11
+    vqmovun.s16         d23, q8
+    vdup.16             q0, d21[0]                  ; preload next 2 rows of data
+    vdup.16             q8, d21[1]
+    vst1.64             {d2,d3}, [r0], r1
+    vst1.64             {d22,d23}, [r0], r1
+
+    vadd.s16            q1, q0, q2
+    vadd.s16            q0, q0, q3
+    vadd.s16            q11, q8, q2
+    vadd.s16            q8, q8, q3
+    vqmovun.s16         d2, q1
+    vqmovun.s16         d3, q0
+    vqmovun.s16         d22, q11
+    vqmovun.s16         d23, q8
+    vdup.16             q0, d21[2]                  ; preload next 2 rows of data
+    vdup.16             q8, d21[3]
+    vst1.64             {d2,d3}, [r0], r1
+    vst1.64             {d22,d23}, [r0], r1
+
+
+    vadd.s16            q1, q0, q2
+    vadd.s16            q0, q0, q3
+    vadd.s16            q11, q8, q2
+    vadd.s16            q8, q8, q3
+    vqmovun.s16         d2, q1
+    vqmovun.s16         d3, q0
+    vqmovun.s16         d22, q11
+    vqmovun.s16         d23, q8
+    vld1.8              {d18}, [r3]!                  ; preload next 8 left pixels
+    vmovl.u8            q10, d18
+    vst1.64             {d2,d3}, [r0], r1
+    vst1.64             {d22,d23}, [r0], r1
+
+    subs                r2, r2, #1
+    bgt                 loop_16x16_neon
+
+    bx                  lr
+    ENDP                ; |vp9_tm_predictor_16x16_neon|
+
+;void vp9_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
+;                                  const uint8_t *above,
+;                                  const uint8_t *left)
+; r0  uint8_t *dst
+; r1  ptrdiff_t y_stride
+; r2  const uint8_t *above
+; r3  const uint8_t *left
+
+|vp9_tm_predictor_32x32_neon| PROC
+    ; Load ytop_left = above[-1];
+    sub                 r12, r2, #1
+    ldrb                r12, [r12]
+    vdup.u8             q0, r12
+
+    ; Load above 32 pixels
+    vld1.8              {q1}, [r2]!
+    vld1.8              {q2}, [r2]
+
+    ; preload 8 left pixels
+    vld1.8              {d26}, [r3]!
+
+    ; Compute above - ytop_left
+    vsubl.u8            q8, d2, d0
+    vsubl.u8            q9, d3, d1
+    vsubl.u8            q10, d4, d0
+    vsubl.u8            q11, d5, d1
+
+    vmovl.u8            q3, d26
+
+    ; Load left row by row and compute left + (above - ytop_left)
+    ; Process 8 rows per loop iteration; loop 4 times to cover all 32 rows.
+    mov                 r2, #4
+
+loop_32x32_neon
+    ; Process two rows.
+    vdup.16             q0, d6[0]
+    vdup.16             q2, d6[1]
+    vadd.s16            q12, q0, q8
+    vadd.s16            q13, q0, q9
+    vadd.s16            q14, q0, q10
+    vadd.s16            q15, q0, q11
+    vqmovun.s16         d0, q12
+    vqmovun.s16         d1, q13
+    vadd.s16            q12, q2, q8
+    vadd.s16            q13, q2, q9
+    vqmovun.s16         d2, q14
+    vqmovun.s16         d3, q15
+    vadd.s16            q14, q2, q10
+    vadd.s16            q15, q2, q11
+    vst1.64             {d0-d3}, [r0], r1
+    vqmovun.s16         d24, q12
+    vqmovun.s16         d25, q13
+    vqmovun.s16         d26, q14
+    vqmovun.s16         d27, q15
+    vdup.16             q1, d6[2]
+    vdup.16             q2, d6[3]
+    vst1.64             {d24-d27}, [r0], r1
+
+    ; Process two rows.
+    vadd.s16            q12, q1, q8
+    vadd.s16            q13, q1, q9
+    vadd.s16            q14, q1, q10
+    vadd.s16            q15, q1, q11
+    vqmovun.s16         d0, q12
+    vqmovun.s16         d1, q13
+    vadd.s16            q12, q2, q8
+    vadd.s16            q13, q2, q9
+    vqmovun.s16         d2, q14
+    vqmovun.s16         d3, q15
+    vadd.s16            q14, q2, q10
+    vadd.s16            q15, q2, q11
+    vst1.64             {d0-d3}, [r0], r1
+    vqmovun.s16         d24, q12
+    vqmovun.s16         d25, q13
+    vqmovun.s16         d26, q14
+    vqmovun.s16         d27, q15
+    vdup.16             q0, d7[0]
+    vdup.16             q2, d7[1]
+    vst1.64             {d24-d27}, [r0], r1
+
+    ; Process two rows.
+    vadd.s16            q12, q0, q8
+    vadd.s16            q13, q0, q9
+    vadd.s16            q14, q0, q10
+    vadd.s16            q15, q0, q11
+    vqmovun.s16         d0, q12
+    vqmovun.s16         d1, q13
+    vadd.s16            q12, q2, q8
+    vadd.s16            q13, q2, q9
+    vqmovun.s16         d2, q14
+    vqmovun.s16         d3, q15
+    vadd.s16            q14, q2, q10
+    vadd.s16            q15, q2, q11
+    vst1.64             {d0-d3}, [r0], r1
+    vqmovun.s16         d24, q12
+    vqmovun.s16         d25, q13
+    vqmovun.s16         d26, q14
+    vqmovun.s16         d27, q15
+    vdup.16             q0, d7[2]
+    vdup.16             q2, d7[3]
+    vst1.64             {d24-d27}, [r0], r1
+
+    ; Process two rows.
+    vadd.s16            q12, q0, q8
+    vadd.s16            q13, q0, q9
+    vadd.s16            q14, q0, q10
+    vadd.s16            q15, q0, q11
+    vqmovun.s16         d0, q12
+    vqmovun.s16         d1, q13
+    vadd.s16            q12, q2, q8
+    vadd.s16            q13, q2, q9
+    vqmovun.s16         d2, q14
+    vqmovun.s16         d3, q15
+    vadd.s16            q14, q2, q10
+    vadd.s16            q15, q2, q11
+    vst1.64             {d0-d3}, [r0], r1
+    vqmovun.s16         d24, q12
+    vqmovun.s16         d25, q13
+    vld1.8              {d0}, [r3]!                   ; preload 8 left pixels
+    vqmovun.s16         d26, q14
+    vqmovun.s16         d27, q15
+    vmovl.u8            q3, d0
+    vst1.64             {d24-d27}, [r0], r1
+
+    subs                r2, r2, #1
+    bgt                 loop_32x32_neon
+
+    bx                  lr
+    ENDP                ; |vp9_tm_predictor_32x32_neon|
+
+    END
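
For reference, the TM ("true motion") predictors above all compute dst[r][c] = clip(left[r] + (above[c] - above[-1])), where the clip to [0, 255] is supplied by the saturating narrow (vqmovun.s16). A rough C/NEON-intrinsics equivalent of the 16x16 loop is sketched below; it is illustrative only, the function name is invented for the sketch, and the body is not claimed to match the intrinsics in the new vp9_reconintra_neon.c.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: mirrors the assembly above, not the exact libvpx source. */
static void tm_predictor_16x16_sketch(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  const uint8x8_t top_left = vdup_n_u8(above[-1]);
  const uint8x16_t a = vld1q_u8(above);
  /* above - top_left, widened to 16 bits. Reinterpreting the unsigned
   * widening difference as s16 is exact because the true difference always
   * lies in [-255, 255]; the asm uses the same trick with vsubl.u8 followed
   * by vadd.s16. */
  const int16x8_t a_lo =
      vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(a), top_left));
  const int16x8_t a_hi =
      vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(a), top_left));
  int r;
  for (r = 0; r < 16; ++r) {
    const int16x8_t l = vdupq_n_s16(left[r]);
    /* left + (above - top_left), then saturate back to 8 bits. */
    const uint8x16_t row = vcombine_u8(vqmovun_s16(vaddq_s16(l, a_lo)),
                                       vqmovun_s16(vaddq_s16(l, a_hi)));
    vst1q_u8(dst, row);
    dst += stride;
  }
}

The assembly above unrolls this loop and interleaves loads and stores to hide latency; the arithmetic is unchanged.
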
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -66,8 +66,7 @@
 specialize qw/vp9_d63_predictor_4x4/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_h_predictor_4x4 neon_asm dspr2/, "$ssse3_x86inc";
-$vp9_h_predictor_4x4_neon_asm=vp9_h_predictor_4x4_neon;
+specialize qw/vp9_h_predictor_4x4 neon dspr2/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_d117_predictor_4x4/;
@@ -79,12 +78,10 @@
 specialize qw/vp9_d153_predictor_4x4/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_v_predictor_4x4 neon_asm/, "$sse_x86inc";
-$vp9_v_predictor_4x4_neon_asm=vp9_v_predictor_4x4_neon;
+specialize qw/vp9_v_predictor_4x4 neon/, "$sse_x86inc";
 
 add_proto qw/void vp9_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_tm_predictor_4x4 neon_asm dspr2/, "$sse_x86inc";
-$vp9_tm_predictor_4x4_neon_asm=vp9_tm_predictor_4x4_neon;
+specialize qw/vp9_tm_predictor_4x4 neon dspr2/, "$sse_x86inc";
 
 add_proto qw/void vp9_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_dc_predictor_4x4 dspr2/, "$sse_x86inc";
@@ -108,8 +105,7 @@
 specialize qw/vp9_d63_predictor_8x8/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_h_predictor_8x8 neon_asm dspr2/, "$ssse3_x86inc";
-$vp9_h_predictor_8x8_neon_asm=vp9_h_predictor_8x8_neon;
+specialize qw/vp9_h_predictor_8x8 neon dspr2/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_d117_predictor_8x8/;
@@ -121,12 +117,10 @@
 specialize qw/vp9_d153_predictor_8x8/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_v_predictor_8x8 neon_asm/, "$sse_x86inc";
-$vp9_v_predictor_8x8_neon_asm=vp9_v_predictor_8x8_neon;
+specialize qw/vp9_v_predictor_8x8 neon/, "$sse_x86inc";
 
 add_proto qw/void vp9_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_tm_predictor_8x8 neon_asm dspr2/, "$sse2_x86inc";
-$vp9_tm_predictor_8x8_neon_asm=vp9_tm_predictor_8x8_neon;
+specialize qw/vp9_tm_predictor_8x8 neon dspr2/, "$sse2_x86inc";
 
 add_proto qw/void vp9_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_dc_predictor_8x8 dspr2/, "$sse_x86inc";
@@ -150,8 +144,7 @@
 specialize qw/vp9_d63_predictor_16x16/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_h_predictor_16x16 neon_asm dspr2/, "$ssse3_x86inc";
-$vp9_h_predictor_16x16_neon_asm=vp9_h_predictor_16x16_neon;
+specialize qw/vp9_h_predictor_16x16 neon dspr2/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_d117_predictor_16x16/;
@@ -163,12 +156,10 @@
 specialize qw/vp9_d153_predictor_16x16/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_v_predictor_16x16 neon_asm/, "$sse2_x86inc";
-$vp9_v_predictor_16x16_neon_asm=vp9_v_predictor_16x16_neon;
+specialize qw/vp9_v_predictor_16x16 neon/, "$sse2_x86inc";
 
 add_proto qw/void vp9_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_tm_predictor_16x16 neon_asm/, "$sse2_x86inc";
-$vp9_tm_predictor_16x16_neon_asm=vp9_tm_predictor_16x16_neon;
+specialize qw/vp9_tm_predictor_16x16 neon/, "$sse2_x86inc";
 
 add_proto qw/void vp9_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_dc_predictor_16x16 dspr2/, "$sse2_x86inc";
@@ -192,8 +183,7 @@
 specialize qw/vp9_d63_predictor_32x32/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_h_predictor_32x32 neon_asm/, "$ssse3_x86inc";
-$vp9_h_predictor_32x32_neon_asm=vp9_h_predictor_32x32_neon;
+specialize qw/vp9_h_predictor_32x32 neon/, "$ssse3_x86inc";
 
 add_proto qw/void vp9_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_d117_predictor_32x32/;
@@ -205,12 +195,10 @@
 specialize qw/vp9_d153_predictor_32x32/;
 
 add_proto qw/void vp9_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_v_predictor_32x32 neon_asm/, "$sse2_x86inc";
-$vp9_v_predictor_32x32_neon_asm=vp9_v_predictor_32x32_neon;
+specialize qw/vp9_v_predictor_32x32 neon/, "$sse2_x86inc";
 
 add_proto qw/void vp9_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vp9_tm_predictor_32x32 neon_asm/, "$sse2_x86_64";
-$vp9_tm_predictor_32x32_neon_asm=vp9_tm_predictor_32x32_neon;
+specialize qw/vp9_tm_predictor_32x32 neon/, "$sse2_x86_64";
 
 add_proto qw/void vp9_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
 specialize qw/vp9_dc_predictor_32x32/, "$sse2_x86inc";
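
The rtcd changes above drop the neon_asm specializations together with the explicit $..._neon_asm=..._neon assignments (those assignments existed to map the generated _neon_asm names onto the _neon symbols the assembly actually exports) and specialize on plain neon, so the same _neon names now resolve to either the assembly or the new intrinsics file, per the vp9_common.mk change below. Every predictor hooked up here keeps the same generic behaviour; as a hedged reference, the TM variant amounts to the C sketch below. The names are illustrative and the real generic code in vp9_reconintra.c is organized differently.

#include <stddef.h>
#include <stdint.h>

/* Clamp an intermediate value to the valid 8-bit pixel range. */
static uint8_t clip_pixel(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* Reference sketch of TM prediction for a bs x bs block. */
static void tm_predictor_sketch(uint8_t *dst, ptrdiff_t y_stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  const int top_left = above[-1];
  int r, c;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = clip_pixel(left[r] + above[c] - top_left);
    dst += y_stride;
  }
}

The NEON versions only change how many pixels are produced per instruction, not this arithmetic.
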
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -136,7 +136,6 @@
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon_asm$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_mb_lpf_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_save_reg_neon$(ASM)
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_reconintra_neon$(ASM)
 
 VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht4x4_add_neon.c
 VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht8x8_add_neon.c
@@ -160,6 +159,7 @@
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_add_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_reconintra_neon_asm$(ASM)
 else
 ifeq ($(HAVE_NEON), yes)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_avg_neon.c
@@ -178,6 +178,7 @@
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_add_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_reconintra_neon.c
 endif  # HAVE_NEON
 endif  # HAVE_NEON_ASM