shithub: libvpx

ref: 2d8f581330114f7fa73ce714b827724c19ff0465
parent: 913d0adbaff91d0b20502a5f4c99f271a9e0f02d
parent: 01fc6f51e0edfd052878df7d7c5d0b436e5be960
author: Johann <johannkoenig@google.com>
date: Wed Dec 10 06:40:46 EST 2014

Merge "VP9 common for ARMv8 by using NEON intrinsics 07"

--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -1768,9 +1768,9 @@
 #else  // HAVE_NEON
 const ConvolveFunctions convolve8_neon(
     vp9_convolve_copy_neon, vp9_convolve_avg_neon,
-    vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_neon,
-    vp9_convolve8_vert_c, vp9_convolve8_avg_vert_neon,
-    vp9_convolve8_c, vp9_convolve8_avg_c, 0);
+    vp9_convolve8_horiz_neon, vp9_convolve8_avg_horiz_neon,
+    vp9_convolve8_vert_neon, vp9_convolve8_avg_vert_neon,
+    vp9_convolve8_neon, vp9_convolve8_avg_neon, 0);
 #endif  // HAVE_NEON_ASM
 
 INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values(
--- a/vp9/common/arm/neon/vp9_convolve8_neon.asm
+++ /dev/null
@@ -1,280 +1,0 @@
-;
-;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    ; These functions are only valid when:
-    ; x_step_q4 == 16
-    ; w%4 == 0
-    ; h%4 == 0
-    ; taps == 8
-    ; VP9_FILTER_WEIGHT == 128
-    ; VP9_FILTER_SHIFT == 7
-
-    EXPORT  |vp9_convolve8_horiz_neon|
-    EXPORT  |vp9_convolve8_vert_neon|
-    IMPORT  |vp9_convolve8_horiz_c|
-    IMPORT  |vp9_convolve8_vert_c|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-    ; Multiply and accumulate by q0
-    MACRO
-    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
-    vmull.s16 $dst, $src0, d0[0]
-    vmlal.s16 $dst, $src1, d0[1]
-    vmlal.s16 $dst, $src2, d0[2]
-    vmlal.s16 $dst, $src3, d0[3]
-    vmlal.s16 $dst, $src4, d1[0]
-    vmlal.s16 $dst, $src5, d1[1]
-    vmlal.s16 $dst, $src6, d1[2]
-    vmlal.s16 $dst, $src7, d1[3]
-    MEND
-
-; r0    const uint8_t *src
-; r1    int src_stride
-; r2    uint8_t *dst
-; r3    int dst_stride
-; sp[]const int16_t *filter_x
-; sp[]int x_step_q4
-; sp[]const int16_t *filter_y ; unused
-; sp[]int y_step_q4           ; unused
-; sp[]int w
-; sp[]int h
-
-|vp9_convolve8_horiz_neon| PROC
-    ldr             r12, [sp, #4]           ; x_step_q4
-    cmp             r12, #16
-    bne             vp9_convolve8_horiz_c
-
-    push            {r4-r10, lr}
-
-    sub             r0, r0, #3              ; adjust for taps
-
-    ldr             r5, [sp, #32]           ; filter_x
-    ldr             r6, [sp, #48]           ; w
-    ldr             r7, [sp, #52]           ; h
-
-    vld1.s16        {q0}, [r5]              ; filter_x
-
-    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
-    add             r8, r8, #4              ; -src_stride * 3 + 4
-
-    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
-    add             r4, r4, #4              ; -dst_stride * 3 + 4
-
-    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
-    sub             r9, r9, #7
-    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop
-
-    mov             r10, r6                 ; w loop counter
-
-vp9_convolve8_loop_horiz_v
-    vld1.8          {d24}, [r0], r1
-    vld1.8          {d25}, [r0], r1
-    vld1.8          {d26}, [r0], r1
-    vld1.8          {d27}, [r0], r8
-
-    vtrn.16         q12, q13
-    vtrn.8          d24, d25
-    vtrn.8          d26, d27
-
-    pld             [r0, r1, lsl #2]
-
-    vmovl.u8        q8, d24
-    vmovl.u8        q9, d25
-    vmovl.u8        q10, d26
-    vmovl.u8        q11, d27
-
-    ; save a few instructions in the inner loop
-    vswp            d17, d18
-    vmov            d23, d21
-
-    add             r0, r0, #3
-
-vp9_convolve8_loop_horiz
-    add             r5, r0, #64
-
-    vld1.32         {d28[]}, [r0], r1
-    vld1.32         {d29[]}, [r0], r1
-    vld1.32         {d31[]}, [r0], r1
-    vld1.32         {d30[]}, [r0], r8
-
-    pld             [r5]
-
-    vtrn.16         d28, d31
-    vtrn.16         d29, d30
-    vtrn.8          d28, d29
-    vtrn.8          d31, d30
-
-    pld             [r5, r1]
-
-    ; extract to s16
-    vtrn.32         q14, q15
-    vmovl.u8        q12, d28
-    vmovl.u8        q13, d29
-
-    pld             [r5, r1, lsl #1]
-
-    ; src[] * filter_x
-    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
-    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
-    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
-    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25
-
-    pld             [r5, -r8]
-
-    ; += 64 >> 7
-    vqrshrun.s32    d2, q1, #7
-    vqrshrun.s32    d3, q2, #7
-    vqrshrun.s32    d4, q14, #7
-    vqrshrun.s32    d5, q15, #7
-
-    ; saturate
-    vqmovn.u16      d2, q1
-    vqmovn.u16      d3, q2
-
-    ; transpose
-    vtrn.16         d2, d3
-    vtrn.32         d2, d3
-    vtrn.8          d2, d3
-
-    vst1.u32        {d2[0]}, [r2@32], r3
-    vst1.u32        {d3[0]}, [r2@32], r3
-    vst1.u32        {d2[1]}, [r2@32], r3
-    vst1.u32        {d3[1]}, [r2@32], r4
-
-    vmov            q8,  q9
-    vmov            d20, d23
-    vmov            q11, q12
-    vmov            q9,  q13
-
-    subs            r6, r6, #4              ; w -= 4
-    bgt             vp9_convolve8_loop_horiz
-
-    ; outer loop
-    mov             r6, r10                 ; restore w counter
-    add             r0, r0, r9              ; src += src_stride * 4 - w
-    add             r2, r2, r12             ; dst += dst_stride * 4 - w
-    subs            r7, r7, #4              ; h -= 4
-    bgt vp9_convolve8_loop_horiz_v
-
-    pop             {r4-r10, pc}
-
-    ENDP
-
-|vp9_convolve8_vert_neon| PROC
-    ldr             r12, [sp, #12]
-    cmp             r12, #16
-    bne             vp9_convolve8_vert_c
-
-    push            {r4-r8, lr}
-
-    ; adjust for taps
-    sub             r0, r0, r1
-    sub             r0, r0, r1, lsl #1
-
-    ldr             r4, [sp, #32]           ; filter_y
-    ldr             r6, [sp, #40]           ; w
-    ldr             lr, [sp, #44]           ; h
-
-    vld1.s16        {q0}, [r4]              ; filter_y
-
-    lsl             r1, r1, #1
-    lsl             r3, r3, #1
-
-vp9_convolve8_loop_vert_h
-    mov             r4, r0
-    add             r7, r0, r1, asr #1
-    mov             r5, r2
-    add             r8, r2, r3, asr #1
-    mov             r12, lr                 ; h loop counter
-
-    vld1.u32        {d16[0]}, [r4], r1
-    vld1.u32        {d16[1]}, [r7], r1
-    vld1.u32        {d18[0]}, [r4], r1
-    vld1.u32        {d18[1]}, [r7], r1
-    vld1.u32        {d20[0]}, [r4], r1
-    vld1.u32        {d20[1]}, [r7], r1
-    vld1.u32        {d22[0]}, [r4], r1
-
-    vmovl.u8        q8, d16
-    vmovl.u8        q9, d18
-    vmovl.u8        q10, d20
-    vmovl.u8        q11, d22
-
-vp9_convolve8_loop_vert
-    ; always process a 4x4 block at a time
-    vld1.u32        {d24[0]}, [r7], r1
-    vld1.u32        {d26[0]}, [r4], r1
-    vld1.u32        {d26[1]}, [r7], r1
-    vld1.u32        {d24[1]}, [r4], r1
-
-    ; extract to s16
-    vmovl.u8        q12, d24
-    vmovl.u8        q13, d26
-
-    pld             [r5]
-    pld             [r8]
-
-    ; src[] * filter_y
-    MULTIPLY_BY_Q0  q1,  d16, d17, d18, d19, d20, d21, d22, d24
-
-    pld             [r5, r3]
-    pld             [r8, r3]
-
-    MULTIPLY_BY_Q0  q2,  d17, d18, d19, d20, d21, d22, d24, d26
-
-    pld             [r7]
-    pld             [r4]
-
-    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27
-
-    pld             [r7, r1]
-    pld             [r4, r1]
-
-    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25
-
-    ; += 64 >> 7
-    vqrshrun.s32    d2, q1, #7
-    vqrshrun.s32    d3, q2, #7
-    vqrshrun.s32    d4, q14, #7
-    vqrshrun.s32    d5, q15, #7
-
-    ; saturate
-    vqmovn.u16      d2, q1
-    vqmovn.u16      d3, q2
-
-    vst1.u32        {d2[0]}, [r5@32], r3
-    vst1.u32        {d2[1]}, [r8@32], r3
-    vst1.u32        {d3[0]}, [r5@32], r3
-    vst1.u32        {d3[1]}, [r8@32], r3
-
-    vmov            q8, q10
-    vmov            d18, d22
-    vmov            d19, d24
-    vmov            q10, q13
-    vmov            d22, d25
-
-    subs            r12, r12, #4            ; h -= 4
-    bgt             vp9_convolve8_loop_vert
-
-    ; outer loop
-    add             r0, r0, #4
-    add             r2, r2, #4
-    subs            r6, r6, #4              ; w -= 4
-    bgt             vp9_convolve8_loop_vert_h
-
-    pop             {r4-r8, pc}
-
-    ENDP
-    END
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_convolve8_neon.c
@@ -1,0 +1,354 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+#include <arm_neon.h>
+
+void vp9_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
+                           uint8_t *dst, ptrdiff_t dst_stride,
+                           const int16_t *filter_x, int x_step_q4,
+                           const int16_t *filter_y, int y_step_q4,
+                           int w, int h);
+void vp9_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
+                           uint8_t *dst, ptrdiff_t dst_stride,
+                           const int16_t *filter_x, int x_step_q4,
+                           const int16_t *filter_y, int y_step_q4,
+                           int w, int h);
+
+static inline int32x4_t MULTIPLY_BY_Q0(
+        int16x4_t dsrc0,
+        int16x4_t dsrc1,
+        int16x4_t dsrc2,
+        int16x4_t dsrc3,
+        int16x4_t dsrc4,
+        int16x4_t dsrc5,
+        int16x4_t dsrc6,
+        int16x4_t dsrc7,
+        int16x8_t q0s16) {
+    int32x4_t qdst;
+    int16x4_t d0s16, d1s16;
+
+    d0s16 = vget_low_s16(q0s16);
+    d1s16 = vget_high_s16(q0s16);
+
+    qdst = vmull_lane_s16(dsrc0, d0s16, 0);
+    qdst = vmlal_lane_s16(qdst, dsrc1, d0s16, 1);
+    qdst = vmlal_lane_s16(qdst, dsrc2, d0s16, 2);
+    qdst = vmlal_lane_s16(qdst, dsrc3, d0s16, 3);
+    qdst = vmlal_lane_s16(qdst, dsrc4, d1s16, 0);
+    qdst = vmlal_lane_s16(qdst, dsrc5, d1s16, 1);
+    qdst = vmlal_lane_s16(qdst, dsrc6, d1s16, 2);
+    qdst = vmlal_lane_s16(qdst, dsrc7, d1s16, 3);
+    return qdst;
+}
+
+void vp9_convolve8_horiz_neon(
+        uint8_t *src,
+        ptrdiff_t src_stride,
+        uint8_t *dst,
+        ptrdiff_t dst_stride,
+        const int16_t *filter_x,
+        int x_step_q4,
+        const int16_t *filter_y,  // unused
+        int y_step_q4,            // unused
+        int w,
+        int h) {
+    int width;
+    uint8_t *s, *d, *psrc, *pdst;
+    uint8x8_t d2u8, d3u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8;
+    uint32x2_t d2u32, d3u32, d28u32, d29u32, d30u32, d31u32;
+    uint8x16_t q12u8, q13u8, q14u8, q15u8;
+    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d22s16, d23s16;
+    int16x4_t d24s16, d25s16, d26s16, d27s16;
+    uint16x4_t d2u16, d3u16, d4u16, d5u16, d16u16, d17u16, d18u16, d19u16;
+    int16x8_t q0s16;
+    uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
+    int32x4_t q1s32, q2s32, q14s32, q15s32;
+    uint16x8x2_t q0x2u16;
+    uint8x8x2_t d0x2u8, d1x2u8;
+    uint32x2x2_t d0x2u32;
+    uint16x4x2_t d0x2u16, d1x2u16;
+    uint32x4x2_t q0x2u32;
+
+    if (x_step_q4 != 16) {
+        vp9_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4, w, h);
+        return;
+    }
+
+    q0s16 = vld1q_s16(filter_x);
+
+    src -= 3;  // adjust for taps
+    for (; h > 0; h -= 4,
+        src += src_stride * 4,
+        dst += dst_stride * 4) {  // loop_horiz_v
+        s = src;
+        d24u8 = vld1_u8(s);
+        s += src_stride;
+        d25u8 = vld1_u8(s);
+        s += src_stride;
+        d26u8 = vld1_u8(s);
+        s += src_stride;
+        d27u8 = vld1_u8(s);
+
+        q12u8 = vcombine_u8(d24u8, d25u8);
+        q13u8 = vcombine_u8(d26u8, d27u8);
+
+        q0x2u16 = vtrnq_u16(vreinterpretq_u16_u8(q12u8),
+                            vreinterpretq_u16_u8(q13u8));
+        d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
+        d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
+        d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
+        d27u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[1]));
+        d0x2u8 = vtrn_u8(d24u8, d25u8);
+        d1x2u8 = vtrn_u8(d26u8, d27u8);
+
+        __builtin_prefetch(src + src_stride * 4);
+        __builtin_prefetch(src + src_stride * 5);
+        __builtin_prefetch(src + src_stride * 6);
+
+        q8u16  = vmovl_u8(d0x2u8.val[0]);
+        q9u16  = vmovl_u8(d0x2u8.val[1]);
+        q10u16 = vmovl_u8(d1x2u8.val[0]);
+        q11u16 = vmovl_u8(d1x2u8.val[1]);
+
+        d16u16 = vget_low_u16(q8u16);
+        d17u16 = vget_high_u16(q8u16);
+        d18u16 = vget_low_u16(q9u16);
+        d19u16 = vget_high_u16(q9u16);
+        q8u16 = vcombine_u16(d16u16, d18u16);  // vswp 17 18
+        q9u16 = vcombine_u16(d17u16, d19u16);
+
+        d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
+        d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));  // vmov 23 21
+        for (width = w, psrc = src + 7, pdst = dst;
+             width > 0;
+             width -= 4, psrc += 4, pdst += 4) {  // loop_horiz
+            s = psrc;
+            d28u32 = vld1_dup_u32((const uint32_t *)s);
+            s += src_stride;
+            d29u32 = vld1_dup_u32((const uint32_t *)s);
+            s += src_stride;
+            d31u32 = vld1_dup_u32((const uint32_t *)s);
+            s += src_stride;
+            d30u32 = vld1_dup_u32((const uint32_t *)s);
+
+            __builtin_prefetch(psrc + 64);
+
+            d0x2u16 = vtrn_u16(vreinterpret_u16_u32(d28u32),
+                               vreinterpret_u16_u32(d31u32));
+            d1x2u16 = vtrn_u16(vreinterpret_u16_u32(d29u32),
+                               vreinterpret_u16_u32(d30u32));
+            d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]),   // d28
+                             vreinterpret_u8_u16(d1x2u16.val[0]));  // d29
+            d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]),   // d31
+                             vreinterpret_u8_u16(d1x2u16.val[1]));  // d30
+
+            __builtin_prefetch(psrc + 64 + src_stride);
+
+            q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
+            q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
+            q0x2u32 = vtrnq_u32(vreinterpretq_u32_u8(q14u8),
+                                vreinterpretq_u32_u8(q15u8));
+
+            d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
+            d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
+            q12u16 = vmovl_u8(d28u8);
+            q13u16 = vmovl_u8(d29u8);
+
+            __builtin_prefetch(psrc + 64 + src_stride * 2);
+
+            d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
+            d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
+            d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
+            d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
+            d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+            d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+            d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+            d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+            d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+
+            q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16,
+                                    d18s16, d19s16, d23s16, d24s16, q0s16);
+            q2s32  = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16,
+                                    d19s16, d23s16, d24s16, d26s16, q0s16);
+            q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16,
+                                    d23s16, d24s16, d26s16, d27s16, q0s16);
+            q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16,
+                                    d24s16, d26s16, d27s16, d25s16, q0s16);
+
+            __builtin_prefetch(psrc + 60 + src_stride * 3);
+
+            d2u16 = vqrshrun_n_s32(q1s32, 7);
+            d3u16 = vqrshrun_n_s32(q2s32, 7);
+            d4u16 = vqrshrun_n_s32(q14s32, 7);
+            d5u16 = vqrshrun_n_s32(q15s32, 7);
+
+            q1u16 = vcombine_u16(d2u16, d3u16);
+            q2u16 = vcombine_u16(d4u16, d5u16);
+
+            d2u8 = vqmovn_u16(q1u16);
+            d3u8 = vqmovn_u16(q2u16);
+
+            d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8),
+                               vreinterpret_u16_u8(d3u8));
+            d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
+                               vreinterpret_u32_u16(d0x2u16.val[1]));
+            d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
+                             vreinterpret_u8_u32(d0x2u32.val[1]));
+
+            d2u32 = vreinterpret_u32_u8(d0x2u8.val[0]);
+            d3u32 = vreinterpret_u32_u8(d0x2u8.val[1]);
+
+            d = pdst;
+            vst1_lane_u32((uint32_t *)d, d2u32, 0);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d3u32, 0);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d2u32, 1);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d3u32, 1);
+
+            q8u16 = q9u16;
+            d20s16 = d23s16;
+            q11u16 = q12u16;
+            q9u16 = q13u16;
+            d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+        }
+    }
+    return;
+}
+
+void vp9_convolve8_vert_neon(
+        uint8_t *src,
+        ptrdiff_t src_stride,
+        uint8_t *dst,
+        ptrdiff_t dst_stride,
+        const int16_t *filter_x,  // unused
+        int x_step_q4,            // unused
+        const int16_t *filter_y,
+        int y_step_q4,
+        int w,
+        int h) {
+    int height;
+    uint8_t *s, *d;
+    uint32x2_t d2u32, d3u32;
+    uint32x2_t d16u32, d18u32, d20u32, d22u32, d24u32, d26u32;
+    int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16;
+    int16x4_t d24s16, d25s16, d26s16, d27s16;
+    uint16x4_t d2u16, d3u16, d4u16, d5u16;
+    int16x8_t q0s16;
+    uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
+    int32x4_t q1s32, q2s32, q14s32, q15s32;
+
+    if (y_step_q4 != 16) {
+        vp9_convolve8_vert_c(src, src_stride, dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4, w, h);
+        return;
+    }
+
+    src -= src_stride * 3;
+    q0s16 = vld1q_s16(filter_y);
+    for (; w > 0; w -= 4, src += 4, dst += 4) {  // loop_vert_h
+        s = src;
+        d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 0);
+        s += src_stride;
+        d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 1);
+        s += src_stride;
+        d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 0);
+        s += src_stride;
+        d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 1);
+        s += src_stride;
+        d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 0);
+        s += src_stride;
+        d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 1);
+        s += src_stride;
+        d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
+        s += src_stride;
+
+        q8u16  = vmovl_u8(vreinterpret_u8_u32(d16u32));
+        q9u16  = vmovl_u8(vreinterpret_u8_u32(d18u32));
+        q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
+        q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
+
+        d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
+        d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
+        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+        d = dst;
+        for (height = h; height > 0; height -= 4) {  // loop_vert
+            d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 0);
+            s += src_stride;
+            d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 0);
+            s += src_stride;
+            d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 1);
+            s += src_stride;
+            d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 1);
+            s += src_stride;
+
+            q12u16 = vmovl_u8(vreinterpret_u8_u32(d24u32));
+            q13u16 = vmovl_u8(vreinterpret_u8_u32(d26u32));
+
+            d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
+            d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
+            d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
+            d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
+            d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+            d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+            d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+            d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+
+            __builtin_prefetch(d);
+            __builtin_prefetch(d + dst_stride);
+            q1s32  = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16,
+                                    d20s16, d21s16, d22s16, d24s16, q0s16);
+            __builtin_prefetch(d + dst_stride * 2);
+            __builtin_prefetch(d + dst_stride * 3);
+            q2s32  = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16,
+                                    d21s16, d22s16, d24s16, d26s16, q0s16);
+            __builtin_prefetch(s);
+            __builtin_prefetch(s + src_stride);
+            q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16,
+                                    d22s16, d24s16, d26s16, d27s16, q0s16);
+            __builtin_prefetch(s + src_stride * 2);
+            __builtin_prefetch(s + src_stride * 3);
+            q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16,
+                                    d24s16, d26s16, d27s16, d25s16, q0s16);
+
+            d2u16 = vqrshrun_n_s32(q1s32, 7);
+            d3u16 = vqrshrun_n_s32(q2s32, 7);
+            d4u16 = vqrshrun_n_s32(q14s32, 7);
+            d5u16 = vqrshrun_n_s32(q15s32, 7);
+
+            q1u16 = vcombine_u16(d2u16, d3u16);
+            q2u16 = vcombine_u16(d4u16, d5u16);
+
+            d2u32 = vreinterpret_u32_u8(vqmovn_u16(q1u16));
+            d3u32 = vreinterpret_u32_u8(vqmovn_u16(q2u16));
+
+            vst1_lane_u32((uint32_t *)d, d2u32, 0);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d2u32, 1);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d3u32, 0);
+            d += dst_stride;
+            vst1_lane_u32((uint32_t *)d, d3u32, 1);
+            d += dst_stride;
+
+            q8u16 = q10u16;
+            d18s16 = d22s16;
+            d19s16 = d24s16;
+            q10u16 = q13u16;
+            d22s16 = d25s16;
+        }
+    }
+    return;
+}
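
The intrinsics file keeps the asm's runtime guard: a call with x_step_q4 (or y_step_q4 on the vertical path) other than 16 falls through to the C reference, and w and h must be multiples of 4. Below is a hypothetical call site exercising the fast path; the buffer layout and the identity filter are illustrative assumptions, not code from the patch:

    #include <stddef.h>
    #include <stdint.h>

    void vp9_convolve8_horiz_neon(uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4,
                                  int w, int h);

    void demo(void) {
      /* Sub-pel position 0 under VP9_FILTER_WEIGHT == 128 is an identity
       * tap set, so out should reproduce the input block. */
      static const int16_t filter[8] = { 0, 0, 0, 128, 0, 0, 0, 0 };
      static uint8_t in[32 * 16], out[32 * 16];  /* 16 rows, stride 32 */
      /* The filter reads from src - 3, so keep a 3-byte left border. */
      vp9_convolve8_horiz_neon(in + 3, 32, out, 32,
                               filter, 16,   /* x_step_q4 == 16: fast path */
                               filter, 16,   /* y args unused here */
                               16, 16);      /* w, h: multiples of 4 */
    }

The file that follows is not new work: it re-adds the assembly deleted above, unchanged, as vp9_convolve8_neon_asm.asm, so HAVE_NEON_ASM builds keep the hand-scheduled version while plain HAVE_NEON builds use the intrinsics.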
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_convolve8_neon_asm.asm
@@ -1,0 +1,280 @@
+;
+;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    ; These functions are only valid when:
+    ; x_step_q4 == 16
+    ; w%4 == 0
+    ; h%4 == 0
+    ; taps == 8
+    ; VP9_FILTER_WEIGHT == 128
+    ; VP9_FILTER_SHIFT == 7
+
+    EXPORT  |vp9_convolve8_horiz_neon|
+    EXPORT  |vp9_convolve8_vert_neon|
+    IMPORT  |vp9_convolve8_horiz_c|
+    IMPORT  |vp9_convolve8_vert_c|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+    ; Multiply and accumulate by q0
+    MACRO
+    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
+    vmull.s16 $dst, $src0, d0[0]
+    vmlal.s16 $dst, $src1, d0[1]
+    vmlal.s16 $dst, $src2, d0[2]
+    vmlal.s16 $dst, $src3, d0[3]
+    vmlal.s16 $dst, $src4, d1[0]
+    vmlal.s16 $dst, $src5, d1[1]
+    vmlal.s16 $dst, $src6, d1[2]
+    vmlal.s16 $dst, $src7, d1[3]
+    MEND
+
+; r0    const uint8_t *src
+; r1    int src_stride
+; r2    uint8_t *dst
+; r3    int dst_stride
+; sp[]const int16_t *filter_x
+; sp[]int x_step_q4
+; sp[]const int16_t *filter_y ; unused
+; sp[]int y_step_q4           ; unused
+; sp[]int w
+; sp[]int h
+
+|vp9_convolve8_horiz_neon| PROC
+    ldr             r12, [sp, #4]           ; x_step_q4
+    cmp             r12, #16
+    bne             vp9_convolve8_horiz_c
+
+    push            {r4-r10, lr}
+
+    sub             r0, r0, #3              ; adjust for taps
+
+    ldr             r5, [sp, #32]           ; filter_x
+    ldr             r6, [sp, #48]           ; w
+    ldr             r7, [sp, #52]           ; h
+
+    vld1.s16        {q0}, [r5]              ; filter_x
+
+    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
+    add             r8, r8, #4              ; -src_stride * 3 + 4
+
+    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
+    add             r4, r4, #4              ; -dst_stride * 3 + 4
+
+    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
+    sub             r9, r9, #7
+    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop
+
+    mov             r10, r6                 ; w loop counter
+
+vp9_convolve8_loop_horiz_v
+    vld1.8          {d24}, [r0], r1
+    vld1.8          {d25}, [r0], r1
+    vld1.8          {d26}, [r0], r1
+    vld1.8          {d27}, [r0], r8
+
+    vtrn.16         q12, q13
+    vtrn.8          d24, d25
+    vtrn.8          d26, d27
+
+    pld             [r0, r1, lsl #2]
+
+    vmovl.u8        q8, d24
+    vmovl.u8        q9, d25
+    vmovl.u8        q10, d26
+    vmovl.u8        q11, d27
+
+    ; save a few instructions in the inner loop
+    vswp            d17, d18
+    vmov            d23, d21
+
+    add             r0, r0, #3
+
+vp9_convolve8_loop_horiz
+    add             r5, r0, #64
+
+    vld1.32         {d28[]}, [r0], r1
+    vld1.32         {d29[]}, [r0], r1
+    vld1.32         {d31[]}, [r0], r1
+    vld1.32         {d30[]}, [r0], r8
+
+    pld             [r5]
+
+    vtrn.16         d28, d31
+    vtrn.16         d29, d30
+    vtrn.8          d28, d29
+    vtrn.8          d31, d30
+
+    pld             [r5, r1]
+
+    ; extract to s16
+    vtrn.32         q14, q15
+    vmovl.u8        q12, d28
+    vmovl.u8        q13, d29
+
+    pld             [r5, r1, lsl #1]
+
+    ; src[] * filter_x
+    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
+    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
+    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
+    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25
+
+    pld             [r5, -r8]
+
+    ; += 64 >> 7
+    vqrshrun.s32    d2, q1, #7
+    vqrshrun.s32    d3, q2, #7
+    vqrshrun.s32    d4, q14, #7
+    vqrshrun.s32    d5, q15, #7
+
+    ; saturate
+    vqmovn.u16      d2, q1
+    vqmovn.u16      d3, q2
+
+    ; transpose
+    vtrn.16         d2, d3
+    vtrn.32         d2, d3
+    vtrn.8          d2, d3
+
+    vst1.u32        {d2[0]}, [r2@32], r3
+    vst1.u32        {d3[0]}, [r2@32], r3
+    vst1.u32        {d2[1]}, [r2@32], r3
+    vst1.u32        {d3[1]}, [r2@32], r4
+
+    vmov            q8,  q9
+    vmov            d20, d23
+    vmov            q11, q12
+    vmov            q9,  q13
+
+    subs            r6, r6, #4              ; w -= 4
+    bgt             vp9_convolve8_loop_horiz
+
+    ; outer loop
+    mov             r6, r10                 ; restore w counter
+    add             r0, r0, r9              ; src += src_stride * 4 - w
+    add             r2, r2, r12             ; dst += dst_stride * 4 - w
+    subs            r7, r7, #4              ; h -= 4
+    bgt vp9_convolve8_loop_horiz_v
+
+    pop             {r4-r10, pc}
+
+    ENDP
+
+|vp9_convolve8_vert_neon| PROC
+    ldr             r12, [sp, #12]
+    cmp             r12, #16
+    bne             vp9_convolve8_vert_c
+
+    push            {r4-r8, lr}
+
+    ; adjust for taps
+    sub             r0, r0, r1
+    sub             r0, r0, r1, lsl #1
+
+    ldr             r4, [sp, #32]           ; filter_y
+    ldr             r6, [sp, #40]           ; w
+    ldr             lr, [sp, #44]           ; h
+
+    vld1.s16        {q0}, [r4]              ; filter_y
+
+    lsl             r1, r1, #1
+    lsl             r3, r3, #1
+
+vp9_convolve8_loop_vert_h
+    mov             r4, r0
+    add             r7, r0, r1, asr #1
+    mov             r5, r2
+    add             r8, r2, r3, asr #1
+    mov             r12, lr                 ; h loop counter
+
+    vld1.u32        {d16[0]}, [r4], r1
+    vld1.u32        {d16[1]}, [r7], r1
+    vld1.u32        {d18[0]}, [r4], r1
+    vld1.u32        {d18[1]}, [r7], r1
+    vld1.u32        {d20[0]}, [r4], r1
+    vld1.u32        {d20[1]}, [r7], r1
+    vld1.u32        {d22[0]}, [r4], r1
+
+    vmovl.u8        q8, d16
+    vmovl.u8        q9, d18
+    vmovl.u8        q10, d20
+    vmovl.u8        q11, d22
+
+vp9_convolve8_loop_vert
+    ; always process a 4x4 block at a time
+    vld1.u32        {d24[0]}, [r7], r1
+    vld1.u32        {d26[0]}, [r4], r1
+    vld1.u32        {d26[1]}, [r7], r1
+    vld1.u32        {d24[1]}, [r4], r1
+
+    ; extract to s16
+    vmovl.u8        q12, d24
+    vmovl.u8        q13, d26
+
+    pld             [r5]
+    pld             [r8]
+
+    ; src[] * filter_y
+    MULTIPLY_BY_Q0  q1,  d16, d17, d18, d19, d20, d21, d22, d24
+
+    pld             [r5, r3]
+    pld             [r8, r3]
+
+    MULTIPLY_BY_Q0  q2,  d17, d18, d19, d20, d21, d22, d24, d26
+
+    pld             [r7]
+    pld             [r4]
+
+    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27
+
+    pld             [r7, r1]
+    pld             [r4, r1]
+
+    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25
+
+    ; += 64 >> 7
+    vqrshrun.s32    d2, q1, #7
+    vqrshrun.s32    d3, q2, #7
+    vqrshrun.s32    d4, q14, #7
+    vqrshrun.s32    d5, q15, #7
+
+    ; saturate
+    vqmovn.u16      d2, q1
+    vqmovn.u16      d3, q2
+
+    vst1.u32        {d2[0]}, [r5@32], r3
+    vst1.u32        {d2[1]}, [r8@32], r3
+    vst1.u32        {d3[0]}, [r5@32], r3
+    vst1.u32        {d3[1]}, [r8@32], r3
+
+    vmov            q8, q10
+    vmov            d18, d22
+    vmov            d19, d24
+    vmov            q10, q13
+    vmov            d22, d25
+
+    subs            r12, r12, #4            ; h -= 4
+    bgt             vp9_convolve8_loop_vert
+
+    ; outer loop
+    add             r0, r0, #4
+    add             r2, r2, #4
+    subs            r6, r6, #4              ; w -= 4
+    bgt             vp9_convolve8_loop_vert_h
+
+    pop             {r4-r8, pc}
+
+    ENDP
+    END
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -295,20 +295,16 @@
 specialize qw/vp9_convolve_avg neon dspr2/, "$sse2_x86inc";
 
 add_proto qw/void vp9_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve8 sse2 ssse3 neon_asm dspr2/, "$avx2_ssse3";
-$vp9_convolve8_neon_asm=vp9_convolve8_neon;
+specialize qw/vp9_convolve8 sse2 ssse3 neon dspr2/, "$avx2_ssse3";
 
 add_proto qw/void vp9_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve8_horiz sse2 ssse3 neon_asm dspr2/, "$avx2_ssse3";
-$vp9_convolve8_horiz_neon_asm=vp9_convolve8_horiz_neon;
+specialize qw/vp9_convolve8_horiz sse2 ssse3 neon dspr2/, "$avx2_ssse3";
 
 add_proto qw/void vp9_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve8_vert sse2 ssse3 neon_asm dspr2/, "$avx2_ssse3";
-$vp9_convolve8_vert_neon_asm=vp9_convolve8_vert_neon;
+specialize qw/vp9_convolve8_vert sse2 ssse3 neon dspr2/, "$avx2_ssse3";
 
 add_proto qw/void vp9_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve8_avg sse2 ssse3 neon_asm dspr2/;
-$vp9_convolve8_avg_neon_asm=vp9_convolve8_avg_neon;
+specialize qw/vp9_convolve8_avg sse2 ssse3 neon dspr2/;
 
 add_proto qw/void vp9_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 specialize qw/vp9_convolve8_avg_horiz sse2 ssse3 neon dspr2/;
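
Renaming the specializations from neon_asm to neon also drops the $vp9_..._neon_asm=vp9_..._neon aliasing lines: the intrinsics functions already carry the canonical _neon suffix the RTCD generator expects. For orientation, the runtime dispatch that vp9_rtcd_defs.pl emits for such a specialize line looks roughly like the sketch below; the real header is generated at build time, and the exact spellings here are an assumption:

    #include <stddef.h>
    #include <stdint.h>

    #define HAS_NEON 0x04  /* CPU capability bit, as in vpx_ports/arm.h */

    /* Both implementations share one prototype in the generated header. */
    extern void vp9_convolve8_horiz_c(const uint8_t *, ptrdiff_t, uint8_t *,
                                      ptrdiff_t, const int16_t *, int,
                                      const int16_t *, int, int, int);
    extern void vp9_convolve8_horiz_neon(const uint8_t *, ptrdiff_t,
                                         uint8_t *, ptrdiff_t,
                                         const int16_t *, int,
                                         const int16_t *, int, int, int);

    void (*vp9_convolve8_horiz)(const uint8_t *, ptrdiff_t, uint8_t *,
                                ptrdiff_t, const int16_t *, int,
                                const int16_t *, int, int, int);

    void setup_rtcd_sketch(int flags) {  /* flags from arm_cpu_caps() */
      vp9_convolve8_horiz = vp9_convolve8_horiz_c;
      if (flags & HAS_NEON)
        vp9_convolve8_horiz = vp9_convolve8_horiz_neon;
    }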
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -131,9 +131,7 @@
 VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_idct_ssse3_x86_64.asm
 endif
 
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_convolve_neon.c
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_neon.c
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_convolve8_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon_asm$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_dc_only_idct_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct4x4_1_add_neon$(ASM)
@@ -155,6 +153,8 @@
 ifeq ($(HAVE_NEON_ASM), yes)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_avg_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon_asm$(ASM)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon_asm$(ASM)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
@@ -162,6 +162,8 @@
 ifeq ($(HAVE_NEON), yes)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_avg_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c