ref: 5b098b18254169ad3aaec45f3f6e0a6851ded487
parent: 547cb14e1542c7770f58a82189fc3a5b962296ac
author: James Yu <james.yu@linaro.org>
date: Tue Jan 21 04:43:29 EST 2014
VP9 common for ARMv8 by using NEON intrinsics 01

Add vp9_loopfilter_neon.c
- vp9_lpf_horizontal_4_neon
- vp9_lpf_vertical_4_neon
- vp9_lpf_horizontal_8_neon
- vp9_lpf_vertical_8_neon

Change-Id: I97a0d7b399a431c21ee77396be3d5f5a1f7ebccb
Signed-off-by: James Yu <james.yu@linaro.org>
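
The conversion is largely mechanical: each ARM instruction in the removed
.asm files maps to one arm_neon.h intrinsic, and the d/q register names are
kept as variable names (d19u8, q12s16, ...). A minimal sketch of the pattern,
using an instruction/intrinsic pair that both appear verbatim in the hunks
below:

    #include <arm_neon.h>

    /* asm:       vabd.u8  d19, d3, d4    ; m1 = abs(p3 - p2) */
    /* intrinsic: d19u8 = vabd_u8(d3u8, d4u8);                */
    uint8x8_t abs_diff_u8(uint8x8_t p3, uint8x8_t p2) {
      return vabd_u8(p3, p2);  /* per-lane |p3 - p2| over 8 bytes */
    }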
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -594,4 +594,35 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
+#if HAVE_NEON && (!CONFIG_VP9_HIGHBITDEPTH)
+INSTANTIATE_TEST_CASE_P(
+ NEON, Loop8Test6Param,
+ ::testing::Values(
+#if HAVE_NEON_ASM
+ make_tuple(&vp9_lpf_horizontal_16_neon,
+ &vp9_lpf_horizontal_16_c, 8),
+#endif // HAVE_NEON_ASM
+ make_tuple(&vp9_lpf_horizontal_4_neon,
+ &vp9_lpf_horizontal_4_c, 8),
+ make_tuple(&vp9_lpf_horizontal_8_neon,
+ &vp9_lpf_horizontal_8_c, 8),
+ make_tuple(&vp9_lpf_vertical_4_neon,
+ &vp9_lpf_vertical_4_c, 8),
+ make_tuple(&vp9_lpf_vertical_8_neon,
+ &vp9_lpf_vertical_8_c, 8)));
+INSTANTIATE_TEST_CASE_P(
+ NEON, Loop8Test9Param,
+ ::testing::Values(
+#if HAVE_NEON_ASM
+ make_tuple(&vp9_lpf_horizontal_4_dual_neon,
+ &vp9_lpf_horizontal_4_dual_c, 8),
+#endif // HAVE_NEON_ASM
+ make_tuple(&vp9_lpf_horizontal_8_dual_neon,
+ &vp9_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vp9_lpf_vertical_4_dual_neon,
+ &vp9_lpf_vertical_4_dual_c, 8),
+ make_tuple(&vp9_lpf_vertical_8_dual_neon,
+ &vp9_lpf_vertical_8_dual_c, 8)));
+#endif // HAVE_NEON && (!CONFIG_VP9_HIGHBITDEPTH)
+
} // namespace
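
Each tuple above pairs a NEON function with its C reference at 8-bit depth;
the fixture runs both on the same data and requires bit-exact output. A
standalone sketch of that check (an illustration, not the gtest harness:
signatures follow the header comments in this patch, the duplicated-byte
threshold arrays mimic how vp9 builds its loop-filter limits, and all values
are arbitrary):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    void vp9_lpf_horizontal_4_c(uint8_t *s, int p, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh,
                                int count);
    void vp9_lpf_horizontal_4_neon(uint8_t *s, int p, const uint8_t *blimit,
                                   const uint8_t *limit, const uint8_t *thresh,
                                   int count);

    int main(void) {
      uint8_t a[8 * 8], b[8 * 8];
      uint8_t blimit[16], limit[16], thresh[16];
      int i;
      memset(blimit, 128, sizeof(blimit));
      memset(limit, 16, sizeof(limit));
      memset(thresh, 8, sizeof(thresh));
      for (i = 0; i < 64; i++)  /* gentle vertical gradient */
        a[i] = b[i] = (uint8_t)(128 + 2 * (i / 8));
      /* filter the horizontal edge between rows 3 and 4 of the 8x8 block */
      vp9_lpf_horizontal_4_c(a + 4 * 8, 8, blimit, limit, thresh, 1);
      vp9_lpf_horizontal_4_neon(b + 4 * 8, 8, blimit, limit, thresh, 1);
      printf("%s\n", memcmp(a, b, sizeof(a)) ? "MISMATCH" : "match");
      return 0;
    }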
--- a/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm
+++ /dev/null
@@ -1,199 +1,0 @@
-;
-; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license
-; that can be found in the LICENSE file in the root of the source
-; tree. An additional intellectual property rights grant can be found
-; in the file PATENTS. All contributing project authors may
-; be found in the AUTHORS file in the root of the source tree.
-;
-
- EXPORT |vp9_lpf_horizontal_4_dual_neon|
- ARM
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-;void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
-; const uint8_t *blimit0,
-; const uint8_t *limit0,
-; const uint8_t *thresh0,
-; const uint8_t *blimit1,
-; const uint8_t *limit1,
-; const uint8_t *thresh1)
-; r0 uint8_t *s,
-; r1 int p,
-; r2 const uint8_t *blimit0,
-; r3 const uint8_t *limit0,
-; sp const uint8_t *thresh0,
-; sp+4 const uint8_t *blimit1,
-; sp+8 const uint8_t *limit1,
-; sp+12 const uint8_t *thresh1,
-
-|vp9_lpf_horizontal_4_dual_neon| PROC
- push {lr}
-
- ldr r12, [sp, #4] ; load thresh0
- vld1.8 {d0}, [r2] ; load blimit0 to first half q
- vld1.8 {d2}, [r3] ; load limit0 to first half q
-
- add r1, r1, r1 ; double pitch
- ldr r2, [sp, #8] ; load blimit1
-
- vld1.8 {d4}, [r12] ; load thresh0 to first half q
-
- ldr r3, [sp, #12] ; load limit1
- ldr r12, [sp, #16] ; load thresh1
- vld1.8 {d1}, [r2] ; load blimit1 to 2nd half q
-
- sub r2, r0, r1, lsl #1 ; s[-4 * p]
-
- vld1.8 {d3}, [r3] ; load limit1 to 2nd half q
- vld1.8 {d5}, [r12] ; load thresh1 to 2nd half q
-
- vpush {d8-d15} ; save neon registers
-
- add r3, r2, r1, lsr #1 ; s[-3 * p]
-
- vld1.u8 {q3}, [r2@64], r1 ; p3
- vld1.u8 {q4}, [r3@64], r1 ; p2
- vld1.u8 {q5}, [r2@64], r1 ; p1
- vld1.u8 {q6}, [r3@64], r1 ; p0
- vld1.u8 {q7}, [r2@64], r1 ; q0
- vld1.u8 {q8}, [r3@64], r1 ; q1
- vld1.u8 {q9}, [r2@64] ; q2
- vld1.u8 {q10}, [r3@64] ; q3
-
- sub r2, r2, r1, lsl #1
- sub r3, r3, r1, lsl #1
-
- bl vp9_loop_filter_neon_16
-
- vst1.u8 {q5}, [r2@64], r1 ; store op1
- vst1.u8 {q6}, [r3@64], r1 ; store op0
- vst1.u8 {q7}, [r2@64], r1 ; store oq0
- vst1.u8 {q8}, [r3@64], r1 ; store oq1
-
- vpop {d8-d15} ; restore neon registers
-
- pop {pc}
- ENDP ; |vp9_lpf_horizontal_4_dual_neon|
-
-; void vp9_loop_filter_neon_16();
-; This is a helper function for the loopfilters. The individual functions do the
-; necessary load, transpose (if necessary) and store. This function uses
-; registers d8-d15, so the calling function must save those registers.
-;
-; r0-r3, r12 PRESERVE
-; q0 blimit
-; q1 limit
-; q2 thresh
-; q3 p3
-; q4 p2
-; q5 p1
-; q6 p0
-; q7 q0
-; q8 q1
-; q9 q2
-; q10 q3
-;
-; Outputs:
-; q5 op1
-; q6 op0
-; q7 oq0
-; q8 oq1
-|vp9_loop_filter_neon_16| PROC
-
- ; filter_mask
- vabd.u8 q11, q3, q4 ; m1 = abs(p3 - p2)
- vabd.u8 q12, q4, q5 ; m2 = abs(p2 - p1)
- vabd.u8 q13, q5, q6 ; m3 = abs(p1 - p0)
- vabd.u8 q14, q8, q7 ; m4 = abs(q1 - q0)
- vabd.u8 q3, q9, q8 ; m5 = abs(q2 - q1)
- vabd.u8 q4, q10, q9 ; m6 = abs(q3 - q2)
-
- ; only compare the largest value to limit
- vmax.u8 q11, q11, q12 ; m7 = max(m1, m2)
- vmax.u8 q12, q13, q14 ; m8 = max(m3, m4)
-
- vabd.u8 q9, q6, q7 ; abs(p0 - q0)
-
- vmax.u8 q3, q3, q4 ; m9 = max(m5, m6)
-
- vmov.u8 q10, #0x80
-
- vmax.u8 q15, q11, q12 ; m10 = max(m7, m8)
-
- vcgt.u8 q13, q13, q2 ; (abs(p1 - p0) > thresh)*-1
- vcgt.u8 q14, q14, q2 ; (abs(q1 - q0) > thresh)*-1
- vmax.u8 q15, q15, q3 ; m11 = max(m10, m9)
-
- vabd.u8 q2, q5, q8 ; a = abs(p1 - q1)
- vqadd.u8 q9, q9, q9 ; b = abs(p0 - q0) * 2
-
- veor q7, q7, q10 ; qs0
-
- vcge.u8 q15, q1, q15 ; abs(m11) > limit
-
- vshr.u8 q2, q2, #1 ; a = a / 2
- veor q6, q6, q10 ; ps0
-
- veor q5, q5, q10 ; ps1
- vqadd.u8 q9, q9, q2 ; a = b + a
-
- veor q8, q8, q10 ; qs1
-
- vmov.u16 q4, #3
-
- vsubl.s8 q2, d14, d12 ; ( qs0 - ps0)
- vsubl.s8 q11, d15, d13
-
- vcge.u8 q9, q0, q9 ; a > blimit
-
- vqsub.s8 q1, q5, q8 ; filter = clamp(ps1-qs1)
- vorr q14, q13, q14 ; hev
-
- vmul.i16 q2, q2, q4 ; 3 * ( qs0 - ps0)
- vmul.i16 q11, q11, q4
-
- vand q1, q1, q14 ; filter &= hev
- vand q15, q15, q9 ; mask
-
- vmov.u8 q4, #3
-
- vaddw.s8 q2, q2, d2 ; filter + 3 * (qs0 - ps0)
- vaddw.s8 q11, q11, d3
-
- vmov.u8 q9, #4
-
- ; filter = clamp(filter + 3 * ( qs0 - ps0))
- vqmovn.s16 d2, q2
- vqmovn.s16 d3, q11
- vand q1, q1, q15 ; filter &= mask
-
- vqadd.s8 q2, q1, q4 ; filter2 = clamp(filter+3)
- vqadd.s8 q1, q1, q9 ; filter1 = clamp(filter+4)
- vshr.s8 q2, q2, #3 ; filter2 >>= 3
- vshr.s8 q1, q1, #3 ; filter1 >>= 3
-
-
- vqadd.s8 q11, q6, q2 ; u = clamp(ps0 + filter2)
- vqsub.s8 q0, q7, q1 ; u = clamp(qs0 - filter1)
-
- ; outer tap adjustments
- vrshr.s8 q1, q1, #1 ; filter = ++filter1 >> 1
-
- veor q7, q0, q10 ; *oq0 = u^0x80
-
- vbic q1, q1, q14 ; filter &= ~hev
-
- vqadd.s8 q13, q5, q1 ; u = clamp(ps1 + filter)
- vqsub.s8 q12, q8, q1 ; u = clamp(qs1 - filter)
-
- veor q6, q11, q10 ; *op0 = u^0x80
- veor q5, q13, q10 ; *op1 = u^0x80
- veor q8, q12, q10 ; *oq1 = u^0x80
-
- bx lr
- ENDP ; |vp9_loop_filter_neon_16|
-
- END
--- a/vp9/common/arm/neon/vp9_loopfilter_16_neon.c
+++ b/vp9/common/arm/neon/vp9_loopfilter_16_neon.c
@@ -18,8 +18,8 @@
const uint8_t *blimit1,
const uint8_t *limit1,
const uint8_t *thresh1) {
- vp9_lpf_horizontal_8(s, p, blimit0, limit0, thresh0, 1);
- vp9_lpf_horizontal_8(s + 8, p, blimit1, limit1, thresh1, 1);
+ vp9_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1);
}
void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int p,
@@ -44,6 +44,7 @@
vp9_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
}
+#if HAVE_NEON_ASM
void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int p,
const uint8_t *blimit,
const uint8_t *limit,
@@ -51,3 +52,4 @@
vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
vp9_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
}
+#endif // HAVE_NEON_ASM
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_loopfilter_16_neon_asm.asm
@@ -1,0 +1,199 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_lpf_horizontal_4_dual_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int p,
+; const uint8_t *blimit0,
+; const uint8_t *limit0,
+; const uint8_t *thresh0,
+; const uint8_t *blimit1,
+; const uint8_t *limit1,
+; const uint8_t *thresh1)
+; r0 uint8_t *s,
+; r1 int p,
+; r2 const uint8_t *blimit0,
+; r3 const uint8_t *limit0,
+; sp const uint8_t *thresh0,
+; sp+4 const uint8_t *blimit1,
+; sp+8 const uint8_t *limit1,
+; sp+12 const uint8_t *thresh1,
+
+|vp9_lpf_horizontal_4_dual_neon| PROC
+ push {lr}
+
+ ldr r12, [sp, #4] ; load thresh0
+ vld1.8 {d0}, [r2] ; load blimit0 to first half q
+ vld1.8 {d2}, [r3] ; load limit0 to first half q
+
+ add r1, r1, r1 ; double pitch
+ ldr r2, [sp, #8] ; load blimit1
+
+ vld1.8 {d4}, [r12] ; load thresh0 to first half q
+
+ ldr r3, [sp, #12] ; load limit1
+ ldr r12, [sp, #16] ; load thresh1
+ vld1.8 {d1}, [r2] ; load blimit1 to 2nd half q
+
+ sub r2, r0, r1, lsl #1 ; s[-4 * p]
+
+ vld1.8 {d3}, [r3] ; load limit1 to 2nd half q
+ vld1.8 {d5}, [r12] ; load thresh1 to 2nd half q
+
+ vpush {d8-d15} ; save neon registers
+
+ add r3, r2, r1, lsr #1 ; s[-3 * p]
+
+ vld1.u8 {q3}, [r2@64], r1 ; p3
+ vld1.u8 {q4}, [r3@64], r1 ; p2
+ vld1.u8 {q5}, [r2@64], r1 ; p1
+ vld1.u8 {q6}, [r3@64], r1 ; p0
+ vld1.u8 {q7}, [r2@64], r1 ; q0
+ vld1.u8 {q8}, [r3@64], r1 ; q1
+ vld1.u8 {q9}, [r2@64] ; q2
+ vld1.u8 {q10}, [r3@64] ; q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon_16
+
+ vst1.u8 {q5}, [r2@64], r1 ; store op1
+ vst1.u8 {q6}, [r3@64], r1 ; store op0
+ vst1.u8 {q7}, [r2@64], r1 ; store oq0
+ vst1.u8 {q8}, [r3@64], r1 ; store oq1
+
+ vpop {d8-d15} ; restore neon registers
+
+ pop {pc}
+ ENDP ; |vp9_lpf_horizontal_4_dual_neon|
+
+; void vp9_loop_filter_neon_16();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store. This function uses
+; registers d8-d15, so the calling function must save those registers.
+;
+; r0-r3, r12 PRESERVE
+; q0 blimit
+; q1 limit
+; q2 thresh
+; q3 p3
+; q4 p2
+; q5 p1
+; q6 p0
+; q7 q0
+; q8 q1
+; q9 q2
+; q10 q3
+;
+; Outputs:
+; q5 op1
+; q6 op0
+; q7 oq0
+; q8 oq1
+|vp9_loop_filter_neon_16| PROC
+
+ ; filter_mask
+ vabd.u8 q11, q3, q4 ; m1 = abs(p3 - p2)
+ vabd.u8 q12, q4, q5 ; m2 = abs(p2 - p1)
+ vabd.u8 q13, q5, q6 ; m3 = abs(p1 - p0)
+ vabd.u8 q14, q8, q7 ; m4 = abs(q1 - q0)
+ vabd.u8 q3, q9, q8 ; m5 = abs(q2 - q1)
+ vabd.u8 q4, q10, q9 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 q11, q11, q12 ; m7 = max(m1, m2)
+ vmax.u8 q12, q13, q14 ; m8 = max(m3, m4)
+
+ vabd.u8 q9, q6, q7 ; abs(p0 - q0)
+
+ vmax.u8 q3, q3, q4 ; m9 = max(m5, m6)
+
+ vmov.u8 q10, #0x80
+
+ vmax.u8 q15, q11, q12 ; m10 = max(m7, m8)
+
+ vcgt.u8 q13, q13, q2 ; (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 q14, q14, q2 ; (abs(q1 - q0) > thresh)*-1
+ vmax.u8 q15, q15, q3 ; m11 = max(m10, m9)
+
+ vabd.u8 q2, q5, q8 ; a = abs(p1 - q1)
+ vqadd.u8 q9, q9, q9 ; b = abs(p0 - q0) * 2
+
+ veor q7, q7, q10 ; qs0
+
+ vcge.u8 q15, q1, q15 ; abs(m11) > limit
+
+ vshr.u8 q2, q2, #1 ; a = a / 2
+ veor q6, q6, q10 ; ps0
+
+ veor q5, q5, q10 ; ps1
+ vqadd.u8 q9, q9, q2 ; a = b + a
+
+ veor q8, q8, q10 ; qs1
+
+ vmov.u16 q4, #3
+
+ vsubl.s8 q2, d14, d12 ; ( qs0 - ps0)
+ vsubl.s8 q11, d15, d13
+
+ vcge.u8 q9, q0, q9 ; a > blimit
+
+ vqsub.s8 q1, q5, q8 ; filter = clamp(ps1-qs1)
+ vorr q14, q13, q14 ; hev
+
+ vmul.i16 q2, q2, q4 ; 3 * ( qs0 - ps0)
+ vmul.i16 q11, q11, q4
+
+ vand q1, q1, q14 ; filter &= hev
+ vand q15, q15, q9 ; mask
+
+ vmov.u8 q4, #3
+
+ vaddw.s8 q2, q2, d2 ; filter + 3 * (qs0 - ps0)
+ vaddw.s8 q11, q11, d3
+
+ vmov.u8 q9, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d2, q2
+ vqmovn.s16 d3, q11
+ vand q1, q1, q15 ; filter &= mask
+
+ vqadd.s8 q2, q1, q4 ; filter2 = clamp(filter+3)
+ vqadd.s8 q1, q1, q9 ; filter1 = clamp(filter+4)
+ vshr.s8 q2, q2, #3 ; filter2 >>= 3
+ vshr.s8 q1, q1, #3 ; filter1 >>= 3
+
+
+ vqadd.s8 q11, q6, q2 ; u = clamp(ps0 + filter2)
+ vqsub.s8 q0, q7, q1 ; u = clamp(qs0 - filter1)
+
+ ; outer tap adjustments
+ vrshr.s8 q1, q1, #1 ; filter = ++filter1 >> 1
+
+ veor q7, q0, q10 ; *oq0 = u^0x80
+
+ vbic q1, q1, q14 ; filter &= ~hev
+
+ vqadd.s8 q13, q5, q1 ; u = clamp(ps1 + filter)
+ vqsub.s8 q12, q8, q1 ; u = clamp(qs1 - filter)
+
+ veor q6, q11, q10 ; *op0 = u^0x80
+ veor q5, q13, q10 ; *op1 = u^0x80
+ veor q8, q12, q10 ; *oq1 = u^0x80
+
+ bx lr
+ ENDP ; |vp9_loop_filter_neon_16|
+
+ END
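
The arithmetic annotated in vp9_loop_filter_neon_16 above reads off into a
few lines of scalar C. A sketch (not part of the patch) with the saturation
of vqadd.s8/vqsub.s8 written out; ps1/ps0/qs0/qs1 are the pixels after the
x ^ 0x80 conversion to signed, and hev/mask are 0 or -1 per pixel:

    #include <stdint.h>

    static int8_t sclamp(int v) {  /* saturate like vqadd.s8/vqsub.s8 */
      return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v);
    }

    static void filter4(int8_t *ps1, int8_t *ps0, int8_t *qs0, int8_t *qs1,
                        int8_t hev, int8_t mask) {
      int8_t filter, filter1, filter2;
      filter = sclamp(*ps1 - *qs1) & hev;              /* filter = clamp(ps1-qs1) */
      filter = sclamp(filter + 3 * (*qs0 - *ps0)) & mask;
      filter1 = (int8_t)(sclamp(filter + 4) >> 3);     /* filter1 = clamp(filter+4) >> 3 */
      filter2 = (int8_t)(sclamp(filter + 3) >> 3);     /* filter2 = clamp(filter+3) >> 3 */
      *qs0 = sclamp(*qs0 - filter1);                   /* oq0 */
      *ps0 = sclamp(*ps0 + filter2);                   /* op0 */
      filter = (int8_t)(((filter1 + 1) >> 1) & ~hev);  /* outer taps: vrshr, vbic */
      *ps1 = sclamp(*ps1 + filter);                    /* op1 */
      *qs1 = sclamp(*qs1 - filter);                    /* oq1 */
    }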
--- a/vp9/common/arm/neon/vp9_loopfilter_neon.asm
+++ /dev/null
@@ -1,708 +1,0 @@
-;
-; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license
-; that can be found in the LICENSE file in the root of the source
-; tree. An additional intellectual property rights grant can be found
-; in the file PATENTS. All contributing project authors may
-; be found in the AUTHORS file in the root of the source tree.
-;
-
- EXPORT |vp9_lpf_horizontal_4_neon|
- EXPORT |vp9_lpf_vertical_4_neon|
- EXPORT |vp9_lpf_horizontal_8_neon|
- EXPORT |vp9_lpf_vertical_8_neon|
- ARM
-
- AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; Currently vp9 only works on 8 pixels at a time. The vp8 loop filter
-; works on 16 pixels at a time.
-; TODO(fgalligan): See about removing the count code as this function is only
-; called with a count of 1.
-;
-; void vp9_lpf_horizontal_4_neon(uint8_t *s,
-; int p /* pitch */,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-;
-; r0 uint8_t *s,
-; r1 int p, /* pitch */
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_horizontal_4_neon| PROC
- push {lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #8] ; load count
- ldr r2, [sp, #4] ; load thresh
- add r1, r1, r1 ; double pitch
-
- cmp r12, #0
- beq end_vp9_lf_h_edge
-
- vld1.8 {d1[]}, [r3] ; duplicate *limit
- vld1.8 {d2[]}, [r2] ; duplicate *thresh
-
-count_lf_h_loop
- sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines
- add r3, r2, r1, lsr #1 ; set to 3 lines down
-
- vld1.u8 {d3}, [r2@64], r1 ; p3
- vld1.u8 {d4}, [r3@64], r1 ; p2
- vld1.u8 {d5}, [r2@64], r1 ; p1
- vld1.u8 {d6}, [r3@64], r1 ; p0
- vld1.u8 {d7}, [r2@64], r1 ; q0
- vld1.u8 {d16}, [r3@64], r1 ; q1
- vld1.u8 {d17}, [r2@64] ; q2
- vld1.u8 {d18}, [r3@64] ; q3
-
- sub r2, r2, r1, lsl #1
- sub r3, r3, r1, lsl #1
-
- bl vp9_loop_filter_neon
-
- vst1.u8 {d4}, [r2@64], r1 ; store op1
- vst1.u8 {d5}, [r3@64], r1 ; store op0
- vst1.u8 {d6}, [r2@64], r1 ; store oq0
- vst1.u8 {d7}, [r3@64], r1 ; store oq1
-
- add r0, r0, #8
- subs r12, r12, #1
- bne count_lf_h_loop
-
-end_vp9_lf_h_edge
- pop {pc}
- ENDP ; |vp9_lpf_horizontal_4_neon|
-
-; Currently vp9 only works on 8 pixels at a time. The vp8 loop filter
-; works on 16 pixels at a time.
-; TODO(fgalligan): See about removing the count code as this function is only
-; called with a count of 1.
-;
-; void vp9_lpf_vertical_4_neon(uint8_t *s,
-; int p /* pitch */,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-;
-; r0 uint8_t *s,
-; r1 int p, /* pitch */
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_vertical_4_neon| PROC
- push {lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #8] ; load count
- vld1.8 {d1[]}, [r3] ; duplicate *limit
-
- ldr r3, [sp, #4] ; load thresh
- sub r2, r0, #4 ; move s pointer down by 4 columns
- cmp r12, #0
- beq end_vp9_lf_v_edge
-
- vld1.8 {d2[]}, [r3] ; duplicate *thresh
-
-count_lf_v_loop
- vld1.u8 {d3}, [r2], r1 ; load s data
- vld1.u8 {d4}, [r2], r1
- vld1.u8 {d5}, [r2], r1
- vld1.u8 {d6}, [r2], r1
- vld1.u8 {d7}, [r2], r1
- vld1.u8 {d16}, [r2], r1
- vld1.u8 {d17}, [r2], r1
- vld1.u8 {d18}, [r2]
-
- ;transpose to 8x16 matrix
- vtrn.32 d3, d7
- vtrn.32 d4, d16
- vtrn.32 d5, d17
- vtrn.32 d6, d18
-
- vtrn.16 d3, d5
- vtrn.16 d4, d6
- vtrn.16 d7, d17
- vtrn.16 d16, d18
-
- vtrn.8 d3, d4
- vtrn.8 d5, d6
- vtrn.8 d7, d16
- vtrn.8 d17, d18
-
- bl vp9_loop_filter_neon
-
- sub r0, r0, #2
-
- ;store op1, op0, oq0, oq1
- vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
- vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
- vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
- vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
- vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
- vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
- vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
- vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
-
- add r0, r0, r1, lsl #3 ; s += pitch * 8
- subs r12, r12, #1
- subne r2, r0, #4 ; move s pointer down by 4 columns
- bne count_lf_v_loop
-
-end_vp9_lf_v_edge
- pop {pc}
- ENDP ; |vp9_lpf_vertical_4_neon|
-
-; void vp9_loop_filter_neon();
-; This is a helper function for the loopfilters. The individual functions do the
-; necessary load, transpose (if necessary) and store. The function does not use
-; registers d8-d15.
-;
-; Inputs:
-; r0-r3, r12 PRESERVE
-; d0 blimit
-; d1 limit
-; d2 thresh
-; d3 p3
-; d4 p2
-; d5 p1
-; d6 p0
-; d7 q0
-; d16 q1
-; d17 q2
-; d18 q3
-;
-; Outputs:
-; d4 op1
-; d5 op0
-; d6 oq0
-; d7 oq1
-|vp9_loop_filter_neon| PROC
- ; filter_mask
- vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
- vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
- vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
- vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
- vabd.u8 d3, d17, d16 ; m5 = abs(q2 - q1)
- vabd.u8 d4, d18, d17 ; m6 = abs(q3 - q2)
-
- ; only compare the largest value to limit
- vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
- vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
-
- vabd.u8 d17, d6, d7 ; abs(p0 - q0)
-
- vmax.u8 d3, d3, d4 ; m3 = max(m5, m6)
-
- vmov.u8 d18, #0x80
-
- vmax.u8 d23, d19, d20 ; m1 = max(m1, m2)
-
- ; hevmask
- vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
- vcgt.u8 d22, d22, d2 ; (abs(q1 - q0) > thresh)*-1
- vmax.u8 d23, d23, d3 ; m1 = max(m1, m3)
-
- vabd.u8 d28, d5, d16 ; a = abs(p1 - q1)
- vqadd.u8 d17, d17, d17 ; b = abs(p0 - q0) * 2
-
- veor d7, d7, d18 ; qs0
-
- vcge.u8 d23, d1, d23 ; abs(m1) > limit
-
- ; filter() function
- ; convert to signed
-
- vshr.u8 d28, d28, #1 ; a = a / 2
- veor d6, d6, d18 ; ps0
-
- veor d5, d5, d18 ; ps1
- vqadd.u8 d17, d17, d28 ; a = b + a
-
- veor d16, d16, d18 ; qs1
-
- vmov.u8 d19, #3
-
- vsub.s8 d28, d7, d6 ; ( qs0 - ps0)
-
- vcge.u8 d17, d0, d17 ; a > blimit
-
- vqsub.s8 d27, d5, d16 ; filter = clamp(ps1-qs1)
- vorr d22, d21, d22 ; hevmask
-
- vmull.s8 q12, d28, d19 ; 3 * ( qs0 - ps0)
-
- vand d27, d27, d22 ; filter &= hev
- vand d23, d23, d17 ; filter_mask
-
- vaddw.s8 q12, q12, d27 ; filter + 3 * (qs0 - ps0)
-
- vmov.u8 d17, #4
-
- ; filter = clamp(filter + 3 * ( qs0 - ps0))
- vqmovn.s16 d27, q12
-
- vand d27, d27, d23 ; filter &= mask
-
- vqadd.s8 d28, d27, d19 ; filter2 = clamp(filter+3)
- vqadd.s8 d27, d27, d17 ; filter1 = clamp(filter+4)
- vshr.s8 d28, d28, #3 ; filter2 >>= 3
- vshr.s8 d27, d27, #3 ; filter1 >>= 3
-
- vqadd.s8 d19, d6, d28 ; u = clamp(ps0 + filter2)
- vqsub.s8 d26, d7, d27 ; u = clamp(qs0 - filter1)
-
- ; outer tap adjustments
- vrshr.s8 d27, d27, #1 ; filter = ++filter1 >> 1
-
- veor d6, d26, d18 ; *oq0 = u^0x80
-
- vbic d27, d27, d22 ; filter &= ~hev
-
- vqadd.s8 d21, d5, d27 ; u = clamp(ps1 + filter)
- vqsub.s8 d20, d16, d27 ; u = clamp(qs1 - filter)
-
- veor d5, d19, d18 ; *op0 = u^0x80
- veor d4, d21, d18 ; *op1 = u^0x80
- veor d7, d20, d18 ; *oq1 = u^0x80
-
- bx lr
- ENDP ; |vp9_loop_filter_neon|
-
-; void vp9_lpf_horizontal_8_neon(uint8_t *s, int p,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-; r0 uint8_t *s,
-; r1 int p, /* pitch */
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_horizontal_8_neon| PROC
- push {r4-r5, lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #16] ; load count
- ldr r2, [sp, #12] ; load thresh
- add r1, r1, r1 ; double pitch
-
- cmp r12, #0
- beq end_vp9_mblf_h_edge
-
- vld1.8 {d1[]}, [r3] ; duplicate *limit
- vld1.8 {d2[]}, [r2] ; duplicate *thresh
-
-count_mblf_h_loop
- sub r3, r0, r1, lsl #1 ; move src pointer down by 4 lines
- add r2, r3, r1, lsr #1 ; set to 3 lines down
-
- vld1.u8 {d3}, [r3@64], r1 ; p3
- vld1.u8 {d4}, [r2@64], r1 ; p2
- vld1.u8 {d5}, [r3@64], r1 ; p1
- vld1.u8 {d6}, [r2@64], r1 ; p0
- vld1.u8 {d7}, [r3@64], r1 ; q0
- vld1.u8 {d16}, [r2@64], r1 ; q1
- vld1.u8 {d17}, [r3@64] ; q2
- vld1.u8 {d18}, [r2@64], r1 ; q3
-
- sub r3, r3, r1, lsl #1
- sub r2, r2, r1, lsl #2
-
- bl vp9_mbloop_filter_neon
-
- vst1.u8 {d0}, [r2@64], r1 ; store op2
- vst1.u8 {d1}, [r3@64], r1 ; store op1
- vst1.u8 {d2}, [r2@64], r1 ; store op0
- vst1.u8 {d3}, [r3@64], r1 ; store oq0
- vst1.u8 {d4}, [r2@64], r1 ; store oq1
- vst1.u8 {d5}, [r3@64], r1 ; store oq2
-
- add r0, r0, #8
- subs r12, r12, #1
- bne count_mblf_h_loop
-
-end_vp9_mblf_h_edge
- pop {r4-r5, pc}
-
- ENDP ; |vp9_lpf_horizontal_8_neon|
-
-; void vp9_lpf_vertical_8_neon(uint8_t *s,
-; int pitch,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-;
-; r0 uint8_t *s,
-; r1 int pitch,
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_vertical_8_neon| PROC
- push {r4-r5, lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #16] ; load count
- vld1.8 {d1[]}, [r3] ; duplicate *limit
-
- ldr r3, [sp, #12] ; load thresh
- sub r2, r0, #4 ; move s pointer down by 4 columns
- cmp r12, #0
- beq end_vp9_mblf_v_edge
-
- vld1.8 {d2[]}, [r3] ; duplicate *thresh
-
-count_mblf_v_loop
- vld1.u8 {d3}, [r2], r1 ; load s data
- vld1.u8 {d4}, [r2], r1
- vld1.u8 {d5}, [r2], r1
- vld1.u8 {d6}, [r2], r1
- vld1.u8 {d7}, [r2], r1
- vld1.u8 {d16}, [r2], r1
- vld1.u8 {d17}, [r2], r1
- vld1.u8 {d18}, [r2]
-
- ;transpose to 8x16 matrix
- vtrn.32 d3, d7
- vtrn.32 d4, d16
- vtrn.32 d5, d17
- vtrn.32 d6, d18
-
- vtrn.16 d3, d5
- vtrn.16 d4, d6
- vtrn.16 d7, d17
- vtrn.16 d16, d18
-
- vtrn.8 d3, d4
- vtrn.8 d5, d6
- vtrn.8 d7, d16
- vtrn.8 d17, d18
-
- sub r2, r0, #3
- add r3, r0, #1
-
- bl vp9_mbloop_filter_neon
-
- ;store op2, op1, op0, oq0
- vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
- vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r2], r1
- vst4.8 {d0[2], d1[2], d2[2], d3[2]}, [r2], r1
- vst4.8 {d0[3], d1[3], d2[3], d3[3]}, [r2], r1
- vst4.8 {d0[4], d1[4], d2[4], d3[4]}, [r2], r1
- vst4.8 {d0[5], d1[5], d2[5], d3[5]}, [r2], r1
- vst4.8 {d0[6], d1[6], d2[6], d3[6]}, [r2], r1
- vst4.8 {d0[7], d1[7], d2[7], d3[7]}, [r2]
-
- ;store oq1, oq2
- vst2.8 {d4[0], d5[0]}, [r3], r1
- vst2.8 {d4[1], d5[1]}, [r3], r1
- vst2.8 {d4[2], d5[2]}, [r3], r1
- vst2.8 {d4[3], d5[3]}, [r3], r1
- vst2.8 {d4[4], d5[4]}, [r3], r1
- vst2.8 {d4[5], d5[5]}, [r3], r1
- vst2.8 {d4[6], d5[6]}, [r3], r1
- vst2.8 {d4[7], d5[7]}, [r3]
-
- add r0, r0, r1, lsl #3 ; s += pitch * 8
- subs r12, r12, #1
- subne r2, r0, #4 ; move s pointer down by 4 columns
- bne count_mblf_v_loop
-
-end_vp9_mblf_v_edge
- pop {r4-r5, pc}
- ENDP ; |vp9_lpf_vertical_8_neon|
-
-; void vp9_mbloop_filter_neon();
-; This is a helper function for the loopfilters. The individual functions do the
-; necessary load, transpose (if necessary) and store. The function does not use
-; registers d8-d15.
-;
-; Inputs:
-; r0-r3, r12 PRESERVE
-; d0 blimit
-; d1 limit
-; d2 thresh
-; d3 p3
-; d4 p2
-; d5 p1
-; d6 p0
-; d7 q0
-; d16 q1
-; d17 q2
-; d18 q3
-;
-; Outputs:
-; d0 op2
-; d1 op1
-; d2 op0
-; d3 oq0
-; d4 oq1
-; d5 oq2
-|vp9_mbloop_filter_neon| PROC
- ; filter_mask
- vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
- vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
- vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
- vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
- vabd.u8 d23, d17, d16 ; m5 = abs(q2 - q1)
- vabd.u8 d24, d18, d17 ; m6 = abs(q3 - q2)
-
- ; only compare the largest value to limit
- vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
- vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
-
- vabd.u8 d25, d6, d4 ; m7 = abs(p0 - p2)
-
- vmax.u8 d23, d23, d24 ; m3 = max(m5, m6)
-
- vabd.u8 d26, d7, d17 ; m8 = abs(q0 - q2)
-
- vmax.u8 d19, d19, d20
-
- vabd.u8 d24, d6, d7 ; m9 = abs(p0 - q0)
- vabd.u8 d27, d3, d6 ; m10 = abs(p3 - p0)
- vabd.u8 d28, d18, d7 ; m11 = abs(q3 - q0)
-
- vmax.u8 d19, d19, d23
-
- vabd.u8 d23, d5, d16 ; a = abs(p1 - q1)
- vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
-
- ; abs () > limit
- vcge.u8 d19, d1, d19
-
- ; only compare the largest value to thresh
- vmax.u8 d25, d25, d26 ; m4 = max(m7, m8)
- vmax.u8 d26, d27, d28 ; m5 = max(m10, m11)
-
- vshr.u8 d23, d23, #1 ; a = a / 2
-
- vmax.u8 d25, d25, d26 ; m4 = max(m4, m5)
-
- vqadd.u8 d24, d24, d23 ; a = b + a
-
- vmax.u8 d20, d20, d25 ; m2 = max(m2, m4)
-
- vmov.u8 d23, #1
- vcge.u8 d24, d0, d24 ; a > blimit
-
- vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
-
- vcge.u8 d20, d23, d20 ; flat
-
- vand d19, d19, d24 ; mask
-
- vcgt.u8 d23, d22, d2 ; (abs(q1 - q0) > thresh)*-1
-
- vand d20, d20, d19 ; flat & mask
-
- vmov.u8 d22, #0x80
-
- vorr d23, d21, d23 ; hev
-
- ; This instruction will truncate the "flat & mask" masks down to 4 bits
- ; each to fit into one 32 bit arm register. The values are stored in
- ; q10.64[0].
- vshrn.u16 d30, q10, #4
- vmov.u32 r4, d30[0] ; flat & mask 4bits
-
- adds r5, r4, #1 ; Check for all 1's
-
- ; If mask and flat are 1's for all vectors, then we only need to execute
- ; the power branch for all vectors.
- beq power_branch_only
-
- cmp r4, #0 ; Check for 0, set flag for later
-
- ; mbfilter() function
- ; filter() function
- ; convert to signed
- veor d21, d7, d22 ; qs0
- veor d24, d6, d22 ; ps0
- veor d25, d5, d22 ; ps1
- veor d26, d16, d22 ; qs1
-
- vmov.u8 d27, #3
-
- vsub.s8 d28, d21, d24 ; ( qs0 - ps0)
-
- vqsub.s8 d29, d25, d26 ; filter = clamp(ps1-qs1)
-
- vmull.s8 q15, d28, d27 ; 3 * ( qs0 - ps0)
-
- vand d29, d29, d23 ; filter &= hev
-
- vaddw.s8 q15, q15, d29 ; filter + 3 * (qs0 - ps0)
-
- vmov.u8 d29, #4
-
- ; filter = clamp(filter + 3 * ( qs0 - ps0))
- vqmovn.s16 d28, q15
-
- vand d28, d28, d19 ; filter &= mask
-
- vqadd.s8 d30, d28, d27 ; filter2 = clamp(filter+3)
- vqadd.s8 d29, d28, d29 ; filter1 = clamp(filter+4)
- vshr.s8 d30, d30, #3 ; filter2 >>= 3
- vshr.s8 d29, d29, #3 ; filter1 >>= 3
-
- vqadd.s8 d24, d24, d30 ; op0 = clamp(ps0 + filter2)
- vqsub.s8 d21, d21, d29 ; oq0 = clamp(qs0 - filter1)
-
- ; outer tap adjustments: ++filter1 >> 1
- vrshr.s8 d29, d29, #1
- vbic d29, d29, d23 ; filter &= ~hev
-
- vqadd.s8 d25, d25, d29 ; op1 = clamp(ps1 + filter)
- vqsub.s8 d26, d26, d29 ; oq1 = clamp(qs1 - filter)
-
- ; If mask and flat are 0's for all vectors, then we only need to execute
- ; the filter branch for all vectors.
- beq filter_branch_only
-
- ; If mask and flat are mixed then we must perform both branches and
- ; combine the data.
- veor d24, d24, d22 ; *f_op0 = u^0x80
- veor d21, d21, d22 ; *f_oq0 = u^0x80
- veor d25, d25, d22 ; *f_op1 = u^0x80
- veor d26, d26, d22 ; *f_oq1 = u^0x80
-
- ; At this point we have already executed the filter branch. The filter
- ; branch does not set op2 or oq2, so use p2 and q2. Execute the power
- ; branch and combine the data.
- vmov.u8 d23, #2
- vaddl.u8 q14, d6, d7 ; r_op2 = p0 + q0
- vmlal.u8 q14, d3, d27 ; r_op2 += p3 * 3
- vmlal.u8 q14, d4, d23 ; r_op2 += p2 * 2
-
- vbif d0, d4, d20 ; op2 |= p2 & ~(flat & mask)
-
- vaddw.u8 q14, d5 ; r_op2 += p1
-
- vbif d1, d25, d20 ; op1 |= f_op1 & ~(flat & mask)
-
- vqrshrn.u16 d30, q14, #3 ; r_op2
-
- vsubw.u8 q14, d3 ; r_op1 = r_op2 - p3
- vsubw.u8 q14, d4 ; r_op1 -= p2
- vaddw.u8 q14, d5 ; r_op1 += p1
- vaddw.u8 q14, d16 ; r_op1 += q1
-
- vbif d2, d24, d20 ; op0 |= f_op0 & ~(flat & mask)
-
- vqrshrn.u16 d31, q14, #3 ; r_op1
-
- vsubw.u8 q14, d3 ; r_op0 = r_op1 - p3
- vsubw.u8 q14, d5 ; r_op0 -= p1
- vaddw.u8 q14, d6 ; r_op0 += p0
- vaddw.u8 q14, d17 ; r_op0 += q2
-
- vbit d0, d30, d20 ; op2 |= r_op2 & (flat & mask)
-
- vqrshrn.u16 d23, q14, #3 ; r_op0
-
- vsubw.u8 q14, d3 ; r_oq0 = r_op0 - p3
- vsubw.u8 q14, d6 ; r_oq0 -= p0
- vaddw.u8 q14, d7 ; r_oq0 += q0
-
- vbit d1, d31, d20 ; op1 |= r_op1 & (flat & mask)
-
- vaddw.u8 q14, d18 ; oq0 += q3
-
- vbit d2, d23, d20 ; op0 |= r_op0 & (flat & mask)
-
- vqrshrn.u16 d22, q14, #3 ; r_oq0
-
- vsubw.u8 q14, d4 ; r_oq1 = r_oq0 - p2
- vsubw.u8 q14, d7 ; r_oq1 -= q0
- vaddw.u8 q14, d16 ; r_oq1 += q1
-
- vbif d3, d21, d20 ; oq0 |= f_oq0 & ~(flat & mask)
-
- vaddw.u8 q14, d18 ; r_oq1 += q3
-
- vbif d4, d26, d20 ; oq1 |= f_oq1 & ~(flat & mask)
-
- vqrshrn.u16 d6, q14, #3 ; r_oq1
-
- vsubw.u8 q14, d5 ; r_oq2 = r_oq1 - p1
- vsubw.u8 q14, d16 ; r_oq2 -= q1
- vaddw.u8 q14, d17 ; r_oq2 += q2
- vaddw.u8 q14, d18 ; r_oq2 += q3
-
- vbif d5, d17, d20 ; oq2 |= q2 & ~(flat & mask)
-
- vqrshrn.u16 d7, q14, #3 ; r_oq2
-
- vbit d3, d22, d20 ; oq0 |= r_oq0 & (flat & mask)
- vbit d4, d6, d20 ; oq1 |= r_oq1 & (flat & mask)
- vbit d5, d7, d20 ; oq2 |= r_oq2 & (flat & mask)
-
- bx lr
-
-power_branch_only
- vmov.u8 d27, #3
- vmov.u8 d21, #2
- vaddl.u8 q14, d6, d7 ; op2 = p0 + q0
- vmlal.u8 q14, d3, d27 ; op2 += p3 * 3
- vmlal.u8 q14, d4, d21 ; op2 += p2 * 2
- vaddw.u8 q14, d5 ; op2 += p1
- vqrshrn.u16 d0, q14, #3 ; op2
-
- vsubw.u8 q14, d3 ; op1 = op2 - p3
- vsubw.u8 q14, d4 ; op1 -= p2
- vaddw.u8 q14, d5 ; op1 += p1
- vaddw.u8 q14, d16 ; op1 += q1
- vqrshrn.u16 d1, q14, #3 ; op1
-
- vsubw.u8 q14, d3 ; op0 = op1 - p3
- vsubw.u8 q14, d5 ; op0 -= p1
- vaddw.u8 q14, d6 ; op0 += p0
- vaddw.u8 q14, d17 ; op0 += q2
- vqrshrn.u16 d2, q14, #3 ; op0
-
- vsubw.u8 q14, d3 ; oq0 = op0 - p3
- vsubw.u8 q14, d6 ; oq0 -= p0
- vaddw.u8 q14, d7 ; oq0 += q0
- vaddw.u8 q14, d18 ; oq0 += q3
- vqrshrn.u16 d3, q14, #3 ; oq0
-
- vsubw.u8 q14, d4 ; oq1 = oq0 - p2
- vsubw.u8 q14, d7 ; oq1 -= q0
- vaddw.u8 q14, d16 ; oq1 += q1
- vaddw.u8 q14, d18 ; oq1 += q3
- vqrshrn.u16 d4, q14, #3 ; oq1
-
- vsubw.u8 q14, d5 ; oq2 = oq1 - p1
- vsubw.u8 q14, d16 ; oq2 -= q1
- vaddw.u8 q14, d17 ; oq2 += q2
- vaddw.u8 q14, d18 ; oq2 += q3
- vqrshrn.u16 d5, q14, #3 ; oq2
-
- bx lr
-
-filter_branch_only
- ; TODO(fgalligan): See if we can rearrange registers so we do not need to
- ; do the 2 vswp.
- vswp d0, d4 ; op2
- vswp d5, d17 ; oq2
- veor d2, d24, d22 ; *op0 = u^0x80
- veor d3, d21, d22 ; *oq0 = u^0x80
- veor d1, d25, d22 ; *op1 = u^0x80
- veor d4, d26, d22 ; *oq1 = u^0x80
-
- bx lr
-
- ENDP ; |vp9_mbloop_filter_neon|
-
- END
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_loopfilter_neon.c
@@ -1,0 +1,712 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+static inline void vp9_loop_filter_neon(
+ uint8x8_t dblimit, // flimit
+ uint8x8_t dlimit, // limit
+ uint8x8_t dthresh, // thresh
+ uint8x8_t d3u8, // p3
+ uint8x8_t d4u8, // p2
+ uint8x8_t d5u8, // p1
+ uint8x8_t d6u8, // p0
+ uint8x8_t d7u8, // q0
+ uint8x8_t d16u8, // q1
+ uint8x8_t d17u8, // q2
+ uint8x8_t d18u8, // q3
+ uint8x8_t *d4ru8, // p1
+ uint8x8_t *d5ru8, // p0
+ uint8x8_t *d6ru8, // q0
+ uint8x8_t *d7ru8) { // q1
+ uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8;
+ int16x8_t q12s16;
+ int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8;
+
+ d19u8 = vabd_u8(d3u8, d4u8);
+ d20u8 = vabd_u8(d4u8, d5u8);
+ d21u8 = vabd_u8(d5u8, d6u8);
+ d22u8 = vabd_u8(d16u8, d7u8);
+ d3u8 = vabd_u8(d17u8, d16u8);
+ d4u8 = vabd_u8(d18u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+ d20u8 = vmax_u8(d21u8, d22u8);
+ d3u8 = vmax_u8(d3u8, d4u8);
+ d23u8 = vmax_u8(d19u8, d20u8);
+
+ d17u8 = vabd_u8(d6u8, d7u8);
+
+ d21u8 = vcgt_u8(d21u8, dthresh);
+ d22u8 = vcgt_u8(d22u8, dthresh);
+ d23u8 = vmax_u8(d23u8, d3u8);
+
+ d28u8 = vabd_u8(d5u8, d16u8);
+ d17u8 = vqadd_u8(d17u8, d17u8);
+
+ d23u8 = vcge_u8(dlimit, d23u8);
+
+ d18u8 = vdup_n_u8(0x80);
+ d5u8 = veor_u8(d5u8, d18u8);
+ d6u8 = veor_u8(d6u8, d18u8);
+ d7u8 = veor_u8(d7u8, d18u8);
+ d16u8 = veor_u8(d16u8, d18u8);
+
+ d28u8 = vshr_n_u8(d28u8, 1);
+ d17u8 = vqadd_u8(d17u8, d28u8);
+
+ d19u8 = vdup_n_u8(3);
+
+ d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8),
+ vreinterpret_s8_u8(d6u8));
+
+ d17u8 = vcge_u8(dblimit, d17u8);
+
+ d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8),
+ vreinterpret_s8_u8(d16u8));
+
+ d22u8 = vorr_u8(d21u8, d22u8);
+
+ q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8));
+
+ d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8);
+ d23u8 = vand_u8(d23u8, d17u8);
+
+ q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8));
+
+ d17u8 = vdup_n_u8(4);
+
+ d27s8 = vqmovn_s16(q12s16);
+ d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8);
+ d27s8 = vreinterpret_s8_u8(d27u8);
+
+ d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8));
+ d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8));
+ d28s8 = vshr_n_s8(d28s8, 3);
+ d27s8 = vshr_n_s8(d27s8, 3);
+
+ d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8);
+ d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8);
+
+ d27s8 = vrshr_n_s8(d27s8, 1);
+ d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8));
+
+ d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8);
+ d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8);
+
+ *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8);
+ *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8);
+ *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8);
+ *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8);
+ return;
+}
+
+void vp9_lpf_horizontal_4_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s, *psrc;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+
+ if (count == 0) // end_vp9_lf_h_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ psrc = src - (pitch << 2);
+ for (i = 0; i < count; i++) {
+ s = psrc + i * 8;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ vp9_loop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d4u8, &d5u8, &d6u8, &d7u8);
+
+ s -= (pitch * 5);
+ vst1_u8(s, d4u8);
+ s += pitch;
+ vst1_u8(s, d5u8);
+ s += pitch;
+ vst1_u8(s, d6u8);
+ s += pitch;
+ vst1_u8(s, d7u8);
+ }
+ return;
+}
+
+void vp9_lpf_vertical_4_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i, pitch8;
+ uint8_t *s;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+ uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+ uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+ uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+ uint8x8x4_t d4Result;
+
+ if (count == 0) // end_vp9_lf_v_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ pitch8 = pitch * 8;
+ for (i = 0; i < count; i++, src += pitch8) {
+ s = src - (i + 1) * 4;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
+ vreinterpret_u32_u8(d7u8));
+ d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
+ vreinterpret_u32_u8(d16u8));
+ d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
+ vreinterpret_u32_u8(d17u8));
+ d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
+ vreinterpret_u32_u8(d18u8));
+
+ d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+ vreinterpret_u16_u32(d2tmp2.val[0]));
+ d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+ vreinterpret_u16_u32(d2tmp3.val[0]));
+ d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+ vreinterpret_u16_u32(d2tmp2.val[1]));
+ d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+ vreinterpret_u16_u32(d2tmp3.val[1]));
+
+ d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+ vreinterpret_u8_u16(d2tmp5.val[0]));
+ d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+ vreinterpret_u8_u16(d2tmp5.val[1]));
+ d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+ vreinterpret_u8_u16(d2tmp7.val[0]));
+ d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+ vreinterpret_u8_u16(d2tmp7.val[1]));
+
+ d3u8 = d2tmp8.val[0];
+ d4u8 = d2tmp8.val[1];
+ d5u8 = d2tmp9.val[0];
+ d6u8 = d2tmp9.val[1];
+ d7u8 = d2tmp10.val[0];
+ d16u8 = d2tmp10.val[1];
+ d17u8 = d2tmp11.val[0];
+ d18u8 = d2tmp11.val[1];
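+ /* The three vtrn stages above (32-, 16-, then 8-bit) amount to an 8x8
+ byte transpose: the eight row loads become the eight columns p3..q3
+ straddling the vertical edge. */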
+
+ vp9_loop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d4u8, &d5u8, &d6u8, &d7u8);
+
+ d4Result.val[0] = d4u8;
+ d4Result.val[1] = d5u8;
+ d4Result.val[2] = d6u8;
+ d4Result.val[3] = d7u8;
+
+ src -= 2;
+ vst4_lane_u8(src, d4Result, 0);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 1);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 2);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 3);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 4);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 5);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 6);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 7);
+ }
+ return;
+}
+
+static inline void vp9_mbloop_filter_neon(
+ uint8x8_t dblimit, // mblimit
+ uint8x8_t dlimit, // limit
+ uint8x8_t dthresh, // thresh
+ uint8x8_t d3u8, // p3
+ uint8x8_t d4u8, // p2
+ uint8x8_t d5u8, // p1
+ uint8x8_t d6u8, // p0
+ uint8x8_t d7u8, // q0
+ uint8x8_t d16u8, // q1
+ uint8x8_t d17u8, // q2
+ uint8x8_t d18u8, // q3
+ uint8x8_t *d0ru8, // p2
+ uint8x8_t *d1ru8, // p1
+ uint8x8_t *d2ru8, // p0
+ uint8x8_t *d3ru8, // q0
+ uint8x8_t *d4ru8, // q1
+ uint8x8_t *d5ru8) { // q2
+ uint32_t flat;
+ uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
+ uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
+ int16x8_t q15s16;
+ uint16x8_t q10u16, q14u16;
+ int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;
+
+ d19u8 = vabd_u8(d3u8, d4u8);
+ d20u8 = vabd_u8(d4u8, d5u8);
+ d21u8 = vabd_u8(d5u8, d6u8);
+ d22u8 = vabd_u8(d16u8, d7u8);
+ d23u8 = vabd_u8(d17u8, d16u8);
+ d24u8 = vabd_u8(d18u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+ d20u8 = vmax_u8(d21u8, d22u8);
+
+ d25u8 = vabd_u8(d6u8, d4u8);
+
+ d23u8 = vmax_u8(d23u8, d24u8);
+
+ d26u8 = vabd_u8(d7u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+
+ d24u8 = vabd_u8(d6u8, d7u8);
+ d27u8 = vabd_u8(d3u8, d6u8);
+ d28u8 = vabd_u8(d18u8, d7u8);
+
+ d19u8 = vmax_u8(d19u8, d23u8);
+
+ d23u8 = vabd_u8(d5u8, d16u8);
+ d24u8 = vqadd_u8(d24u8, d24u8);
+
+
+ d19u8 = vcge_u8(dlimit, d19u8);
+
+
+ d25u8 = vmax_u8(d25u8, d26u8);
+ d26u8 = vmax_u8(d27u8, d28u8);
+
+ d23u8 = vshr_n_u8(d23u8, 1);
+
+ d25u8 = vmax_u8(d25u8, d26u8);
+
+ d24u8 = vqadd_u8(d24u8, d23u8);
+
+ d20u8 = vmax_u8(d20u8, d25u8);
+
+ d23u8 = vdup_n_u8(1);
+ d24u8 = vcge_u8(dblimit, d24u8);
+
+ d21u8 = vcgt_u8(d21u8, dthresh);
+
+ d20u8 = vcge_u8(d23u8, d20u8);
+
+ d19u8 = vand_u8(d19u8, d24u8);
+
+ d23u8 = vcgt_u8(d22u8, dthresh);
+
+ d20u8 = vand_u8(d20u8, d19u8);
+
+ d22u8 = vdup_n_u8(0x80);
+
+ d23u8 = vorr_u8(d21u8, d23u8);
+
+ q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8),
+ vreinterpret_u16_u8(d21u8));
+
+ d30u8 = vshrn_n_u16(q10u16, 4);
+ flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);
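+ /* Each flat & mask byte is 0x00 or 0xff, so vshrn_n_u16(, 4) packs the
+ eight mask bytes into eight nibbles; the low 32 bits extracted here hold
+ every lane's mask (0xffffffff = all lanes flat, 0 = none). */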
+
+ if (flat == 0xffffffff) { // Check for all 1's, power_branch_only
+ d27u8 = vdup_n_u8(3);
+ d21u8 = vdup_n_u8(2);
+ q14u16 = vaddl_u8(d6u8, d7u8);
+ q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+ q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ *d0ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+ *d1ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ *d2ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d3ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vsubw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d4ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vsubw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d5ru8 = vqrshrn_n_u16(q14u16, 3);
+ } else {
+ d21u8 = veor_u8(d7u8, d22u8);
+ d24u8 = veor_u8(d6u8, d22u8);
+ d25u8 = veor_u8(d5u8, d22u8);
+ d26u8 = veor_u8(d16u8, d22u8);
+
+ d27u8 = vdup_n_u8(3);
+
+ d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
+ d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
+
+ q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
+
+ d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
+
+ q15s16 = vaddw_s8(q15s16, d29s8);
+
+ d29u8 = vdup_n_u8(4);
+
+ d28s8 = vqmovn_s16(q15s16);
+
+ d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));
+
+ d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
+ d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
+ d30s8 = vshr_n_s8(d30s8, 3);
+ d29s8 = vshr_n_s8(d29s8, 3);
+
+ d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
+ d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);
+
+ d29s8 = vrshr_n_s8(d29s8, 1);
+ d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));
+
+ d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
+ d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);
+
+ if (flat == 0) { // filter_branch_only
+ *d0ru8 = d4u8;
+ *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+ *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+ *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+ *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+ *d5ru8 = d17u8;
+ return;
+ }
+
+ d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+ d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+ d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+ d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+
+ d23u8 = vdup_n_u8(2);
+ q14u16 = vaddl_u8(d6u8, d7u8);
+ q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+ q14u16 = vmlal_u8(q14u16, d4u8, d23u8);
+
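+ /* dblimit/dlimit/dthresh serve as don't-care placeholders in the next
+ three vbsl selects (mirroring the asm, where d0-d2 still held them):
+ the lanes selected by d20u8 are overwritten with the flat-branch
+ results below. */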
+ d0u8 = vbsl_u8(d20u8, dblimit, d4u8);
+
+ q14u16 = vaddw_u8(q14u16, d5u8);
+
+ d1u8 = vbsl_u8(d20u8, dlimit, d25u8);
+
+ d30u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+
+ d2u8 = vbsl_u8(d20u8, dthresh, d24u8);
+
+ d31u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+
+ *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);
+
+ d23u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d7u8);
+
+ *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);
+
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);
+
+ d22u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vsubw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+
+ d3u8 = vbsl_u8(d20u8, d3u8, d21u8);
+
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ d4u8 = vbsl_u8(d20u8, d4u8, d26u8);
+
+ d6u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vsubw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ d5u8 = vbsl_u8(d20u8, d5u8, d17u8);
+
+ d7u8 = vqrshrn_n_u16(q14u16, 3);
+
+ *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
+ *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
+ *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
+ }
+ return;
+}
+
+void vp9_lpf_horizontal_8_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s, *psrc;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+ uint8x8_t d16u8, d17u8, d18u8;
+
+ if (count == 0) // end_vp9_mblf_h_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ psrc = src - (pitch << 2);
+ for (i = 0; i < count; i++) {
+ s = psrc + i * 8;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
+
+ s -= (pitch * 6);
+ vst1_u8(s, d0u8);
+ s += pitch;
+ vst1_u8(s, d1u8);
+ s += pitch;
+ vst1_u8(s, d2u8);
+ s += pitch;
+ vst1_u8(s, d3u8);
+ s += pitch;
+ vst1_u8(s, d4u8);
+ s += pitch;
+ vst1_u8(s, d5u8);
+ }
+ return;
+}
+
+void vp9_lpf_vertical_8_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+ uint8x8_t d16u8, d17u8, d18u8;
+ uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+ uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+ uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+ uint8x8x4_t d4Result;
+ uint8x8x2_t d2Result;
+
+ if (count == 0)
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ for (i = 0; i < count; i++) {
+ s = src + (i * (pitch << 3)) - 4;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
+ vreinterpret_u32_u8(d7u8));
+ d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
+ vreinterpret_u32_u8(d16u8));
+ d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
+ vreinterpret_u32_u8(d17u8));
+ d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
+ vreinterpret_u32_u8(d18u8));
+
+ d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+ vreinterpret_u16_u32(d2tmp2.val[0]));
+ d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+ vreinterpret_u16_u32(d2tmp3.val[0]));
+ d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+ vreinterpret_u16_u32(d2tmp2.val[1]));
+ d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+ vreinterpret_u16_u32(d2tmp3.val[1]));
+
+ d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+ vreinterpret_u8_u16(d2tmp5.val[0]));
+ d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+ vreinterpret_u8_u16(d2tmp5.val[1]));
+ d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+ vreinterpret_u8_u16(d2tmp7.val[0]));
+ d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+ vreinterpret_u8_u16(d2tmp7.val[1]));
+
+ d3u8 = d2tmp8.val[0];
+ d4u8 = d2tmp8.val[1];
+ d5u8 = d2tmp9.val[0];
+ d6u8 = d2tmp9.val[1];
+ d7u8 = d2tmp10.val[0];
+ d16u8 = d2tmp10.val[1];
+ d17u8 = d2tmp11.val[0];
+ d18u8 = d2tmp11.val[1];
+
+ vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
+
+ d4Result.val[0] = d0u8;
+ d4Result.val[1] = d1u8;
+ d4Result.val[2] = d2u8;
+ d4Result.val[3] = d3u8;
+
+ d2Result.val[0] = d4u8;
+ d2Result.val[1] = d5u8;
+
+ s = src - 3;
+ vst4_lane_u8(s, d4Result, 0);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 1);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 2);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 3);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 4);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 5);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 6);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 7);
+
+ s = src + 1;
+ vst2_lane_u8(s, d2Result, 0);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 1);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 2);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 3);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 4);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 5);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 6);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 7);
+ }
+ return;
+}
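
For reference, the flat ("power") branch above is the running-sum filter
spelled out in the op2/op1/... comments of the assembly. A scalar sketch
(not part of the patch; vqrshrn_n_u16(, 3) is the rounding narrow by 3):

    #include <stdint.h>

    #define ROUND3(x) (((x) + 4) >> 3)  /* rounding shift, as vqrshrn #3 */

    static void flat_filter8(uint8_t p3, uint8_t p2, uint8_t p1, uint8_t p0,
                             uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3,
                             uint8_t *op2, uint8_t *op1, uint8_t *op0,
                             uint8_t *oq0, uint8_t *oq1, uint8_t *oq2) {
      int sum = 3 * p3 + 2 * p2 + p1 + p0 + q0;           /* op2 accumulator */
      *op2 = (uint8_t)ROUND3(sum);
      sum += p1 + q1 - p3 - p2;  *op1 = (uint8_t)ROUND3(sum);
      sum += p0 + q2 - p3 - p1;  *op0 = (uint8_t)ROUND3(sum);
      sum += q0 + q3 - p3 - p0;  *oq0 = (uint8_t)ROUND3(sum);
      sum += q1 + q3 - p2 - q0;  *oq1 = (uint8_t)ROUND3(sum);
      sum += q2 + q3 - p1 - q1;  *oq2 = (uint8_t)ROUND3(sum);
    }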
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_loopfilter_neon_asm.asm
@@ -1,0 +1,708 @@
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_lpf_horizontal_4_neon|
+ EXPORT |vp9_lpf_vertical_4_neon|
+ EXPORT |vp9_lpf_horizontal_8_neon|
+ EXPORT |vp9_lpf_vertical_8_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; Currently vp9 only works on 8 pixels at a time. The vp8 loop filter
+; works on 16 pixels at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_lpf_horizontal_4_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_horizontal_4_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ ldr r2, [sp, #4] ; load thresh
+ add r1, r1, r1 ; double pitch
+
+ cmp r12, #0
+ beq end_vp9_lf_h_edge
+
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+ vld1.8 {d2[]}, [r2] ; duplicate *thresh
+
+count_lf_h_loop
+ sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines
+ add r3, r2, r1, lsr #1 ; set to 3 lines down
+
+ vld1.u8 {d3}, [r2@64], r1 ; p3
+ vld1.u8 {d4}, [r3@64], r1 ; p2
+ vld1.u8 {d5}, [r2@64], r1 ; p1
+ vld1.u8 {d6}, [r3@64], r1 ; p0
+ vld1.u8 {d7}, [r2@64], r1 ; q0
+ vld1.u8 {d16}, [r3@64], r1 ; q1
+ vld1.u8 {d17}, [r2@64] ; q2
+ vld1.u8 {d18}, [r3@64] ; q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon
+
+ vst1.u8 {d4}, [r2@64], r1 ; store op1
+ vst1.u8 {d5}, [r3@64], r1 ; store op0
+ vst1.u8 {d6}, [r2@64], r1 ; store oq0
+ vst1.u8 {d7}, [r3@64], r1 ; store oq1
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_lf_h_loop
+
+end_vp9_lf_h_edge
+ pop {pc}
+ ENDP ; |vp9_lpf_horizontal_4_neon|
+
+; Currently vp9 only works on 8 pixels at a time. The vp8 loop filter
+; works on 16 pixels at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_lpf_vertical_4_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_vertical_4_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+
+ ldr r3, [sp, #4] ; load thresh
+ sub r2, r0, #4 ; move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_lf_v_edge
+
+ vld1.8 {d2[]}, [r3] ; duplicate *thresh
+
+count_lf_v_loop
+ vld1.u8 {d3}, [r2], r1 ; load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ ;transpose to 8x16 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ bl vp9_loop_filter_neon
+
+ sub r0, r0, #2
+
+ ;store op1, op0, oq0, oq1
+ vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
+ vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
+ vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
+ vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
+ vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
+ vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
+ vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
+ vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
+
+ add r0, r0, r1, lsl #3 ; s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 ; move s pointer down by 4 columns
+ bne count_lf_v_loop
+
+end_vp9_lf_v_edge
+ pop {pc}
+ ENDP ; |vp9_lpf_vertical_4_neon|
+
+; void vp9_loop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store. The function does not use
+; registers d8-d15.
+;
+; Inputs:
+; r0-r3, r12 PRESERVE
+; d0 blimit
+; d1 limit
+; d2 thresh
+; d3 p3
+; d4 p2
+; d5 p1
+; d6 p0
+; d7 q0
+; d16 q1
+; d17 q2
+; d18 q3
+;
+; Outputs:
+; d4 op1
+; d5 op0
+; d6 oq0
+; d7 oq1
+|vp9_loop_filter_neon| PROC
+ ; filter_mask
+ vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
+ vabd.u8 d3, d17, d16 ; m5 = abs(q2 - q1)
+ vabd.u8 d4, d18, d17 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
+
+ vabd.u8 d17, d6, d7 ; abs(p0 - q0)
+
+ vmax.u8 d3, d3, d4 ; m3 = max(m5, m6)
+
+ vmov.u8 d18, #0x80
+
+ vmax.u8 d23, d19, d20 ; m1 = max(m1, m2)
+
+ ; hevmask
+ vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d2 ; (abs(q1 - q0) > thresh)*-1
+ vmax.u8 d23, d23, d3 ; m1 = max(m1, m3)
+
+ vabd.u8 d28, d5, d16 ; a = abs(p1 - q1)
+ vqadd.u8 d17, d17, d17 ; b = abs(p0 - q0) * 2
+
+ veor d7, d7, d18 ; qs0
+
+ vcge.u8 d23, d1, d23 ; abs(m1) > limit
+
+ ; filter() function
+ ; convert to signed
+
+ vshr.u8 d28, d28, #1 ; a = a / 2
+ veor d6, d6, d18 ; ps0
+
+ veor d5, d5, d18 ; ps1
+ vqadd.u8 d17, d17, d28 ; a = b + a
+
+ veor d16, d16, d18 ; qs1
+
+ vmov.u8 d19, #3
+
+ vsub.s8 d28, d7, d6 ; ( qs0 - ps0)
+
+ vcge.u8 d17, d0, d17 ; a > blimit
+
+ vqsub.s8 d27, d5, d16 ; filter = clamp(ps1-qs1)
+ vorr d22, d21, d22 ; hevmask
+
+ vmull.s8 q12, d28, d19 ; 3 * ( qs0 - ps0)
+
+ vand d27, d27, d22 ; filter &= hev
+ vand d23, d23, d17 ; filter_mask
+
+ vaddw.s8 q12, q12, d27 ; filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d17, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d27, q12
+
+ vand d27, d27, d23 ; filter &= mask
+
+ vqadd.s8 d28, d27, d19 ; filter2 = clamp(filter+3)
+ vqadd.s8 d27, d27, d17 ; filter1 = clamp(filter+4)
+ vshr.s8 d28, d28, #3 ; filter2 >>= 3
+ vshr.s8 d27, d27, #3 ; filter1 >>= 3
+
+ vqadd.s8 d19, d6, d28 ; u = clamp(ps0 + filter2)
+ vqsub.s8 d26, d7, d27 ; u = clamp(qs0 - filter1)
+
+ ; outer tap adjustments
+ vrshr.s8 d27, d27, #1 ; filter = ++filter1 >> 1
+
+ veor d6, d26, d18 ; *oq0 = u^0x80
+
+ vbic d27, d27, d22 ; filter &= ~hev
+
+ vqadd.s8 d21, d5, d27 ; u = clamp(ps1 + filter)
+ vqsub.s8 d20, d16, d27 ; u = clamp(qs1 - filter)
+
+ veor d5, d19, d18 ; *op0 = u^0x80
+ veor d4, d21, d18 ; *op1 = u^0x80
+ veor d7, d20, d18 ; *oq1 = u^0x80
+
+ bx lr
+ ENDP ; |vp9_loop_filter_neon|
+
+; void vp9_lpf_horizontal_8_neon(uint8_t *s, int p,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_horizontal_8_neon| PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #16] ; load count
+ ldr r2, [sp, #12] ; load thresh
+ add r1, r1, r1 ; double pitch
+
+ cmp r12, #0
+ beq end_vp9_mblf_h_edge
+
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+ vld1.8 {d2[]}, [r2] ; duplicate *thresh
+
+count_mblf_h_loop
+ sub r3, r0, r1, lsl #1 ; move src pointer down by 4 lines
+ add r2, r3, r1, lsr #1 ; set to 3 lines down
+
+ vld1.u8 {d3}, [r3@64], r1 ; p3
+ vld1.u8 {d4}, [r2@64], r1 ; p2
+ vld1.u8 {d5}, [r3@64], r1 ; p1
+ vld1.u8 {d6}, [r2@64], r1 ; p0
+ vld1.u8 {d7}, [r3@64], r1 ; q0
+ vld1.u8 {d16}, [r2@64], r1 ; q1
+ vld1.u8 {d17}, [r3@64] ; q2
+ vld1.u8 {d18}, [r2@64], r1 ; q3
+
+ sub r3, r3, r1, lsl #1
+ sub r2, r2, r1, lsl #2
+
+ bl vp9_mbloop_filter_neon
+
+ vst1.u8 {d0}, [r2@64], r1 ; store op2
+ vst1.u8 {d1}, [r3@64], r1 ; store op1
+ vst1.u8 {d2}, [r2@64], r1 ; store op0
+ vst1.u8 {d3}, [r3@64], r1 ; store oq0
+ vst1.u8 {d4}, [r2@64], r1 ; store oq1
+ vst1.u8 {d5}, [r3@64], r1 ; store oq2
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_mblf_h_loop
+
+end_vp9_mblf_h_edge
+ pop {r4-r5, pc}
+
+ ENDP ; |vp9_lpf_horizontal_8_neon|
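+
+; Note on the loads in the loop above: the pitch is doubled once and two
+; pointers starting one row apart then interleave, so rows p3..q3 are
+; fetched without per-row address arithmetic. A rough C sketch, with a
+; hypothetical load8() 8-byte load:
+;
+; uint8_t *a = s - 4 * p, *b = s - 3 * p; /* p = original pitch */
+; p3 = load8(a); a += 2 * p; p2 = load8(b); b += 2 * p;
+; p1 = load8(a); a += 2 * p; p0 = load8(b); b += 2 * p;
+; q0 = load8(a); a += 2 * p; q1 = load8(b); b += 2 * p;
+; q2 = load8(a);             q3 = load8(b);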
+
+; void vp9_lpf_vertical_8_neon(uint8_t *s,
+; int pitch,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int pitch,
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_vertical_8_neon| PROC
+ push {r4-r5, lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #16] ; load count
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+
+ ldr r3, [sp, #12] ; load thresh
+ sub r2, r0, #4 ; move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_mblf_v_edge
+
+ vld1.8 {d2[]}, [r3] ; duplicate *thresh
+
+count_mblf_v_loop
+ vld1.u8 {d3}, [r2], r1 ; load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ ; transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ sub r2, r0, #3
+ add r3, r0, #1
+
+ bl vp9_mbloop_filter_neon
+
+ ;store op2, op1, op0, oq0
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r2], r1
+ vst4.8 {d0[2], d1[2], d2[2], d3[2]}, [r2], r1
+ vst4.8 {d0[3], d1[3], d2[3], d3[3]}, [r2], r1
+ vst4.8 {d0[4], d1[4], d2[4], d3[4]}, [r2], r1
+ vst4.8 {d0[5], d1[5], d2[5], d3[5]}, [r2], r1
+ vst4.8 {d0[6], d1[6], d2[6], d3[6]}, [r2], r1
+ vst4.8 {d0[7], d1[7], d2[7], d3[7]}, [r2]
+
+ ;store oq1, oq2
+ vst2.8 {d4[0], d5[0]}, [r3], r1
+ vst2.8 {d4[1], d5[1]}, [r3], r1
+ vst2.8 {d4[2], d5[2]}, [r3], r1
+ vst2.8 {d4[3], d5[3]}, [r3], r1
+ vst2.8 {d4[4], d5[4]}, [r3], r1
+ vst2.8 {d4[5], d5[5]}, [r3], r1
+ vst2.8 {d4[6], d5[6]}, [r3], r1
+ vst2.8 {d4[7], d5[7]}, [r3]
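+
+ ; (the lane stores above write results back column-wise: vst4 covers
+ ; columns s-3..s with op2..oq0 and vst2 covers s+1..s+2 with oq1,oq2,
+ ; so no second full transpose is needed)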
+
+ add r0, r0, r1, lsl #3 ; s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 ; move s pointer down by 4 columns
+ bne count_mblf_v_loop
+
+end_vp9_mblf_v_edge
+ pop {r4-r5, pc}
+ ENDP ; |vp9_lpf_vertical_8_neon|
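+
+; Note on the vtrn sequence above: the three passes (vtrn.32, vtrn.16,
+; vtrn.8) form the usual 8x8 byte transpose, swapping 4x4 blocks, then
+; 2x2 blocks, then single bytes. One 32-bit-stage pair in NEON
+; intrinsics terms (a sketch; the row names are hypothetical):
+;
+; uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(row0),
+;                           vreinterpret_u32_u8(row4));
+; row0 = vreinterpret_u8_u32(t.val[0]);
+; row4 = vreinterpret_u8_u32(t.val[1]);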
+
+; void vp9_mbloop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do
+; the necessary loads, transposes (if needed) and stores. This function does
+; not use registers d8-d15.
+;
+; Inputs:
+; r0-r3, r12 PRESERVE
+; d0 blimit
+; d1 limit
+; d2 thresh
+; d3 p3
+; d4 p2
+; d5 p1
+; d6 p0
+; d7 q0
+; d16 q1
+; d17 q2
+; d18 q3
+;
+; Outputs:
+; d0 op2
+; d1 op1
+; d2 op0
+; d3 oq0
+; d4 oq1
+; d5 oq2
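+;
+; When "flat & mask" is set the outputs are the eight-tap smoothed
+; values; as a scalar C reference (mirroring the scalar VP9 loopfilter,
+; where ROUND_POWER_OF_TWO(v, n) is ((v + (1 << (n - 1))) >> n)):
+;
+; *op2 = ROUND_POWER_OF_TWO(3 * p3 + 2 * p2 + p1 + p0 + q0, 3);
+; *op1 = ROUND_POWER_OF_TWO(2 * p3 + p2 + 2 * p1 + p0 + q0 + q1, 3);
+; *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2, 3);
+; *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3);
+; *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + 2 * q3, 3);
+; *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + 3 * q3, 3);
+;
+; Otherwise the 4-tap filter() result is used and op2/oq2 keep p2/q2.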
+|vp9_mbloop_filter_neon| PROC
+ ; filter_mask
+ vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
+ vabd.u8 d23, d17, d16 ; m5 = abs(q2 - q1)
+ vabd.u8 d24, d18, d17 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
+
+ vabd.u8 d25, d6, d4 ; m7 = abs(p0 - p2)
+
+ vmax.u8 d23, d23, d24 ; m3 = max(m5, m6)
+
+ vabd.u8 d26, d7, d17 ; m8 = abs(q0 - q2)
+
+ vmax.u8 d19, d19, d20
+
+ vabd.u8 d24, d6, d7 ; m9 = abs(p0 - q0)
+ vabd.u8 d27, d3, d6 ; m10 = abs(p3 - p0)
+ vabd.u8 d28, d18, d7 ; m11 = abs(q3 - q0)
+
+ vmax.u8 d19, d19, d23
+
+ vabd.u8 d23, d5, d16 ; a = abs(p1 - q1)
+ vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
+
+ ; mask: largest abs diff <= limit
+ vcge.u8 d19, d1, d19
+
+ ; only compare the largest value for the flat mask
+ vmax.u8 d25, d25, d26 ; m4 = max(m7, m8)
+ vmax.u8 d26, d27, d28 ; m5 = max(m10, m11)
+
+ vshr.u8 d23, d23, #1 ; a = a / 2
+
+ vmax.u8 d25, d25, d26 ; m4 = max(m4, m5)
+
+ vqadd.u8 d24, d24, d23 ; a = b + a
+
+ vmax.u8 d20, d20, d25 ; m2 = max(m2, m4)
+
+ vmov.u8 d23, #1
+ vcge.u8 d24, d0, d24 ; (a <= blimit) * -1
+
+ vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
+
+ vcge.u8 d20, d23, d20 ; flat
+
+ vand d19, d19, d24 ; mask
+
+ vcgt.u8 d23, d22, d2 ; (abs(q1 - q0) > thresh)*-1
+
+ vand d20, d20, d19 ; flat & mask
+
+ vmov.u8 d22, #0x80
+
+ vorr d23, d21, d23 ; hev
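+
+ ; (In C terms, mask/flat/hev are per-byte 0x00/0xff masks matching the
+ ; scalar loopfilter's filter_mask(), flat_mask4() and hev_mask()
+ ; helpers; those helper names are this note's shorthand.)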
+
+ ; This instruction truncates the "flat & mask" masks down to 4 bits
+ ; each so they fit into one 32-bit ARM register; the packed values end
+ ; up in d30 and the low word is moved to r4 below.
+ vshrn.u16 d30, q10, #4
+ vmov.u32 r4, d30[0] ; flat & mask 4bits
+
+ adds r5, r4, #1 ; Check for all 1's
+
+ ; If mask and flat are 1's for all vectors, then we only need to execute
+ ; the power branch for all vectors.
+ beq power_branch_only
+
+ cmp r4, #0 ; Check for 0, set flag for later
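+
+ ; (A scalar sketch of this dispatch, not part of the patch, with a
+ ; hypothetical pack_nibbles() helper:
+ ; uint32_t packed = pack_nibbles(flat_and_mask); /* 8 masks -> 32 bits */
+ ; if (packed == 0xffffffff) goto power_branch_only;
+ ; int none_set = (packed == 0); /* checked after filter() below */)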
+
+ ; mbfilter() function
+ ; filter() function
+ ; convert to signed
+ veor d21, d7, d22 ; qs0
+ veor d24, d6, d22 ; ps0
+ veor d25, d5, d22 ; ps1
+ veor d26, d16, d22 ; qs1
+
+ vmov.u8 d27, #3
+
+ vsub.s8 d28, d21, d24 ; ( qs0 - ps0)
+
+ vqsub.s8 d29, d25, d26 ; filter = clamp(ps1-qs1)
+
+ vmull.s8 q15, d28, d27 ; 3 * ( qs0 - ps0)
+
+ vand d29, d29, d23 ; filter &= hev
+
+ vaddw.s8 q15, q15, d29 ; filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d29, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d28, q15
+
+ vand d28, d28, d19 ; filter &= mask
+
+ vqadd.s8 d30, d28, d27 ; filter2 = clamp(filter+3)
+ vqadd.s8 d29, d28, d29 ; filter1 = clamp(filter+4)
+ vshr.s8 d30, d30, #3 ; filter2 >>= 3
+ vshr.s8 d29, d29, #3 ; filter1 >>= 3
+
+ vqadd.s8 d24, d24, d30 ; op0 = clamp(ps0 + filter2)
+ vqsub.s8 d21, d21, d29 ; oq0 = clamp(qs0 - filter1)
+
+ ; outer tap adjustments: filter = (filter1 + 1) >> 1
+ vrshr.s8 d29, d29, #1
+ vbic d29, d29, d23 ; filter &= ~hev
+
+ vqadd.s8 d25, d25, d29 ; op1 = clamp(ps1 + filter)
+ vqsub.s8 d26, d26, d29 ; oq1 = clamp(qs1 - filter)
+
+ ; If mask and flat are 0's for all vectors, then we only need to execute
+ ; the filter branch for all vectors.
+ beq filter_branch_only
+
+ ; If mask and flat are mixed then we must perform both branches and
+ ; combine the data.
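+ ; For the combines: vbit d, s, m sets d = (s & m) | (d & ~m), and
+ ; vbif d, s, m sets d = (s & ~m) | (d & m); in C terms the result is
+ ;   out = (power_val & flatmask) | (filter_val & ~flatmask);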
+ veor d24, d24, d22 ; *f_op0 = u^0x80
+ veor d21, d21, d22 ; *f_oq0 = u^0x80
+ veor d25, d25, d22 ; *f_op1 = u^0x80
+ veor d26, d26, d22 ; *f_oq1 = u^0x80
+
+ ; At this point we have already executed the filter branch. The filter
+ ; branch does not set op2 or oq2, so use p2 and q2. Execute the power
+ ; branch and combine the data.
+ vmov.u8 d23, #2
+ vaddl.u8 q14, d6, d7 ; r_op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 ; r_op2 += p3 * 3
+ vmlal.u8 q14, d4, d23 ; r_op2 += p2 * 2
+
+ vbif d0, d4, d20 ; op2 |= p2 & ~(flat & mask)
+
+ vaddw.u8 q14, d5 ; r_op2 += p1
+
+ vbif d1, d25, d20 ; op1 |= f_op1 & ~(flat & mask)
+
+ vqrshrn.u16 d30, q14, #3 ; r_op2
+
+ vsubw.u8 q14, d3 ; r_op1 = r_op2 - p3
+ vsubw.u8 q14, d4 ; r_op1 -= p2
+ vaddw.u8 q14, d5 ; r_op1 += p1
+ vaddw.u8 q14, d16 ; r_op1 += q1
+
+ vbif d2, d24, d20 ; op0 |= f_op0 & ~(flat & mask)
+
+ vqrshrn.u16 d31, q14, #3 ; r_op1
+
+ vsubw.u8 q14, d3 ; r_op0 = r_op1 - p3
+ vsubw.u8 q14, d5 ; r_op0 -= p1
+ vaddw.u8 q14, d6 ; r_op0 += p0
+ vaddw.u8 q14, d17 ; r_op0 += q2
+
+ vbit d0, d30, d20 ; op2 |= r_op2 & (flat & mask)
+
+ vqrshrn.u16 d23, q14, #3 ; r_op0
+
+ vsubw.u8 q14, d3 ; r_oq0 = r_op0 - p3
+ vsubw.u8 q14, d6 ; r_oq0 -= p0
+ vaddw.u8 q14, d7 ; r_oq0 += q0
+
+ vbit d1, d31, d20 ; op1 |= r_op1 & (flat & mask)
+
+ vaddw.u8 q14, d18 ; r_oq0 += q3
+
+ vbit d2, d23, d20 ; op0 |= r_op0 & (flat & mask)
+
+ vqrshrn.u16 d22, q14, #3 ; r_oq0
+
+ vsubw.u8 q14, d4 ; r_oq1 = r_oq0 - p2
+ vsubw.u8 q14, d7 ; r_oq1 -= q0
+ vaddw.u8 q14, d16 ; r_oq1 += q1
+
+ vbif d3, d21, d20 ; oq0 |= f_oq0 & ~(flat & mask)
+
+ vaddw.u8 q14, d18 ; r_oq1 += q3
+
+ vbif d4, d26, d20 ; oq1 |= f_oq1 & ~(flat & mask)
+
+ vqrshrn.u16 d6, q14, #3 ; r_oq1
+
+ vsubw.u8 q14, d5 ; r_oq2 = r_oq1 - p1
+ vsubw.u8 q14, d16 ; r_oq2 -= q1
+ vaddw.u8 q14, d17 ; r_oq2 += q2
+ vaddw.u8 q14, d18 ; r_oq2 += q3
+
+ vbif d5, d17, d20 ; oq2 |= q2 & ~(flat & mask)
+
+ vqrshrn.u16 d7, q14, #3 ; r_oq2
+
+ vbit d3, d22, d20 ; oq0 |= r_oq0 & (flat & mask)
+ vbit d4, d6, d20 ; oq1 |= r_oq1 & (flat & mask)
+ vbit d5, d7, d20 ; oq2 |= r_oq2 & (flat & mask)
+
+ bx lr
+
+power_branch_only
+ vmov.u8 d27, #3
+ vmov.u8 d21, #2
+ vaddl.u8 q14, d6, d7 ; op2 = p0 + q0
+ vmlal.u8 q14, d3, d27 ; op2 += p3 * 3
+ vmlal.u8 q14, d4, d21 ; op2 += p2 * 2
+ vaddw.u8 q14, d5 ; op2 += p1
+ vqrshrn.u16 d0, q14, #3 ; op2
+
+ vsubw.u8 q14, d3 ; op1 = op2 - p3
+ vsubw.u8 q14, d4 ; op1 -= p2
+ vaddw.u8 q14, d5 ; op1 += p1
+ vaddw.u8 q14, d16 ; op1 += q1
+ vqrshrn.u16 d1, q14, #3 ; op1
+
+ vsubw.u8 q14, d3 ; op0 = op1 - p3
+ vsubw.u8 q14, d5 ; op0 -= p1
+ vaddw.u8 q14, d6 ; op0 += p0
+ vaddw.u8 q14, d17 ; op0 += q2
+ vqrshrn.u16 d2, q14, #3 ; op0
+
+ vsubw.u8 q14, d3 ; oq0 = op0 - p3
+ vsubw.u8 q14, d6 ; oq0 -= p0
+ vaddw.u8 q14, d7 ; oq0 += q0
+ vaddw.u8 q14, d18 ; oq0 += q3
+ vqrshrn.u16 d3, q14, #3 ; oq0
+
+ vsubw.u8 q14, d4 ; oq1 = oq0 - p2
+ vsubw.u8 q14, d7 ; oq1 -= q0
+ vaddw.u8 q14, d16 ; oq1 += q1
+ vaddw.u8 q14, d18 ; oq1 += q3
+ vqrshrn.u16 d4, q14, #3 ; oq1
+
+ vsubw.u8 q14, d5 ; oq2 = oq1 - p1
+ vsubw.u8 q14, d16 ; oq2 -= q1
+ vaddw.u8 q14, d17 ; oq2 += q2
+ vaddw.u8 q14, d18 ; oq2 += q3
+ vqrshrn.u16 d5, q14, #3 ; oq2
+
+ bx lr
+
+filter_branch_only
+ ; TODO(fgalligan): See if we can rearrange registers so we do not need to
+ ; do the 2 vswp.
+ vswp d0, d4 ; op2
+ vswp d5, d17 ; oq2
+ veor d2, d24, d22 ; *op0 = u^0x80
+ veor d3, d21, d22 ; *oq0 = u^0x80
+ veor d1, d25, d22 ; *op1 = u^0x80
+ veor d4, d26, d22 ; *oq1 = u^0x80
+
+ bx lr
+
+ ENDP ; |vp9_mbloop_filter_neon|
+
+ END
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -236,20 +236,16 @@
$vp9_lpf_vertical_16_dual_neon_asm=vp9_lpf_vertical_16_dual_neon;
add_proto qw/void vp9_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_vertical_8 sse2 neon_asm dspr2/;
-$vp9_lpf_vertical_8_neon_asm=vp9_lpf_vertical_8_neon;
+specialize qw/vp9_lpf_vertical_8 sse2 neon dspr2/;
add_proto qw/void vp9_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vp9_lpf_vertical_8_dual sse2 neon_asm dspr2/;
-$vp9_lpf_vertical_8_dual_neon_asm=vp9_lpf_vertical_8_dual_neon;
+specialize qw/vp9_lpf_vertical_8_dual sse2 neon dspr2/;
add_proto qw/void vp9_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_vertical_4 mmx neon_asm dspr2/;
-$vp9_lpf_vertical_4_neon_asm=vp9_lpf_vertical_4_neon;
+specialize qw/vp9_lpf_vertical_4 mmx neon dspr2/;
add_proto qw/void vp9_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vp9_lpf_vertical_4_dual sse2 neon_asm dspr2/;
-$vp9_lpf_vertical_4_dual_neon_asm=vp9_lpf_vertical_4_dual_neon;
+specialize qw/vp9_lpf_vertical_4_dual sse2 neon dspr2/;
add_proto qw/void vp9_lpf_horizontal_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
specialize qw/vp9_lpf_horizontal_16 sse2 avx2 neon_asm dspr2/;
@@ -256,16 +252,13 @@
$vp9_lpf_horizontal_16_neon_asm=vp9_lpf_horizontal_16_neon;
add_proto qw/void vp9_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_horizontal_8 sse2 neon_asm dspr2/;
-$vp9_lpf_horizontal_8_neon_asm=vp9_lpf_horizontal_8_neon;
+specialize qw/vp9_lpf_horizontal_8 sse2 neon dspr2/;
add_proto qw/void vp9_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vp9_lpf_horizontal_8_dual sse2 neon_asm dspr2/;
-$vp9_lpf_horizontal_8_dual_neon_asm=vp9_lpf_horizontal_8_dual_neon;
+specialize qw/vp9_lpf_horizontal_8_dual sse2 neon dspr2/;
add_proto qw/void vp9_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_horizontal_4 mmx neon_asm dspr2/;
-$vp9_lpf_horizontal_4_neon_asm=vp9_lpf_horizontal_4_neon;
+specialize qw/vp9_lpf_horizontal_4 mmx neon dspr2/;
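+# (plain "neon" specializations resolve directly to the vp9_lpf_*_neon
+# symbols, so the explicit $..._neon_asm= aliases are no longer needed)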
add_proto qw/void vp9_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
specialize qw/vp9_lpf_horizontal_4_dual sse2 neon_asm dspr2/;
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -133,11 +133,9 @@
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_convolve_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_neon.c
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_convolve8_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_convolve8_avg_neon$(ASM)
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_neon$(ASM)
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon_asm$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_dc_only_idct_add_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct4x4_1_add_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct4x4_add_neon$(ASM)
@@ -154,5 +152,17 @@
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_avg_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_save_reg_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_reconintra_neon$(ASM)
+
+# NEON has both assembly and intrinsics implementations. If both are
+# available, prefer the assembly.
+ifeq ($(HAVE_NEON_ASM), yes)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+else
+ifeq ($(HAVE_NEON), yes)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+endif # HAVE_NEON
+endif # HAVE_NEON_ASM
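+
+# Note: vp9_loopfilter_neon_asm$(ASM) and vp9_loopfilter_neon.c define the
+# same vp9_lpf_*_neon symbols, so only one of the two may be built.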
$(eval $(call rtcd_h_template,vp9_rtcd,vp9/common/vp9_rtcd_defs.pl))