shithub: libvpx

Download patch

ref: 030ca4d0e59c68a54e6eb02cea4ab3fdc498d048
parent: 2772b45ac0523a5d15e686a7c896e05b1eaef26e
author: James Yu <james.yu@linaro.org>
date: Thu Jan 30 10:26:31 EST 2014

VP9 common for ARMv8 by using NEON intrinsics 10

Add vp9_idct32x32_1_add_neon.c
- vp9_idct32x32_1_add_neon

Change-Id: If9ffe9a857228f5c67f61dc2b428b40965816eda
Signed-off-by: James Yu <james.yu@linaro.org>

--- a/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm
+++ /dev/null
@@ -1,144 +1,0 @@
-;
-;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license and patent
-;  grant that can be found in the LICENSE file in the root of the source
-;  tree. All contributing project authors may be found in the AUTHORS
-;  file in the root of the source tree.
-;
-
-    EXPORT  |vp9_idct32x32_1_add_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-    ;TODO(hkuang): put the following macros in a seperate
-    ;file so other idct function could also use them.
-    MACRO
-    LD_16x8          $src, $stride
-    vld1.8           {q8}, [$src], $stride
-    vld1.8           {q9}, [$src], $stride
-    vld1.8           {q10}, [$src], $stride
-    vld1.8           {q11}, [$src], $stride
-    vld1.8           {q12}, [$src], $stride
-    vld1.8           {q13}, [$src], $stride
-    vld1.8           {q14}, [$src], $stride
-    vld1.8           {q15}, [$src], $stride
-    MEND
-
-    MACRO
-    ADD_DIFF_16x8    $diff
-    vqadd.u8         q8, q8, $diff
-    vqadd.u8         q9, q9, $diff
-    vqadd.u8         q10, q10, $diff
-    vqadd.u8         q11, q11, $diff
-    vqadd.u8         q12, q12, $diff
-    vqadd.u8         q13, q13, $diff
-    vqadd.u8         q14, q14, $diff
-    vqadd.u8         q15, q15, $diff
-    MEND
-
-    MACRO
-    SUB_DIFF_16x8    $diff
-    vqsub.u8         q8, q8, $diff
-    vqsub.u8         q9, q9, $diff
-    vqsub.u8         q10, q10, $diff
-    vqsub.u8         q11, q11, $diff
-    vqsub.u8         q12, q12, $diff
-    vqsub.u8         q13, q13, $diff
-    vqsub.u8         q14, q14, $diff
-    vqsub.u8         q15, q15, $diff
-    MEND
-
-    MACRO
-    ST_16x8          $dst, $stride
-    vst1.8           {q8}, [$dst], $stride
-    vst1.8           {q9}, [$dst], $stride
-    vst1.8           {q10},[$dst], $stride
-    vst1.8           {q11},[$dst], $stride
-    vst1.8           {q12},[$dst], $stride
-    vst1.8           {q13},[$dst], $stride
-    vst1.8           {q14},[$dst], $stride
-    vst1.8           {q15},[$dst], $stride
-    MEND
-
-;void vp9_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
-;                              int dest_stride)
-;
-; r0  int16_t input
-; r1  uint8_t *dest
-; r2  int dest_stride
-
-|vp9_idct32x32_1_add_neon| PROC
-    push             {lr}
-    pld              [r1]
-    add              r3, r1, #16               ; r3 dest + 16 for second loop
-    ldrsh            r0, [r0]
-
-    ; generate cospi_16_64 = 11585
-    mov              r12, #0x2d00
-    add              r12, #0x41
-
-    ; out = dct_const_round_shift(input[0] * cospi_16_64)
-    mul              r0, r0, r12               ; input[0] * cospi_16_64
-    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
-    asr              r0, r0, #14               ; >> DCT_CONST_BITS
-
-    ; out = dct_const_round_shift(out * cospi_16_64)
-    mul              r0, r0, r12               ; out * cospi_16_64
-    mov              r12, r1                   ; save dest
-    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
-    asr              r0, r0, #14               ; >> DCT_CONST_BITS
-
-    ; a1 = ROUND_POWER_OF_TWO(out, 6)
-    add              r0, r0, #32               ; + (1 <<((6) - 1))
-    asrs             r0, r0, #6                ; >> 6
-    bge              diff_positive_32_32
-
-diff_negative_32_32
-    neg              r0, r0
-    usat             r0, #8, r0
-    vdup.u8          q0, r0
-    mov              r0, #4
-
-diff_negative_32_32_loop
-    sub              r0, #1
-    LD_16x8          r1, r2
-    SUB_DIFF_16x8    q0
-    ST_16x8          r12, r2
-
-    LD_16x8          r1, r2
-    SUB_DIFF_16x8    q0
-    ST_16x8          r12, r2
-    cmp              r0, #2
-    moveq            r1, r3
-    moveq            r12, r3
-    cmp              r0, #0
-    bne              diff_negative_32_32_loop
-    pop              {pc}
-
-diff_positive_32_32
-    usat             r0, #8, r0
-    vdup.u8          q0, r0
-    mov              r0, #4
-
-diff_positive_32_32_loop
-    sub              r0, #1
-    LD_16x8          r1, r2
-    ADD_DIFF_16x8    q0
-    ST_16x8          r12, r2
-
-    LD_16x8          r1, r2
-    ADD_DIFF_16x8    q0
-    ST_16x8          r12, r2
-    cmp              r0, #2
-    moveq            r1, r3
-    moveq            r12, r3
-    cmp              r0, #0
-    bne              diff_positive_32_32_loop
-    pop              {pc}
-
-    ENDP             ; |vp9_idct32x32_1_add_neon|
-    END
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.c
@@ -1,0 +1,161 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "vp9/common/vp9_idct.h"
+
+static inline void LD_16x8(
+        uint8_t *d,
+        int d_stride,
+        uint8x16_t *q8u8,
+        uint8x16_t *q9u8,
+        uint8x16_t *q10u8,
+        uint8x16_t *q11u8,
+        uint8x16_t *q12u8,
+        uint8x16_t *q13u8,
+        uint8x16_t *q14u8,
+        uint8x16_t *q15u8) {
+    *q8u8 = vld1q_u8(d);
+    d += d_stride;
+    *q9u8 = vld1q_u8(d);
+    d += d_stride;
+    *q10u8 = vld1q_u8(d);
+    d += d_stride;
+    *q11u8 = vld1q_u8(d);
+    d += d_stride;
+    *q12u8 = vld1q_u8(d);
+    d += d_stride;
+    *q13u8 = vld1q_u8(d);
+    d += d_stride;
+    *q14u8 = vld1q_u8(d);
+    d += d_stride;
+    *q15u8 = vld1q_u8(d);
+    return;
+}
+
+static inline void ADD_DIFF_16x8(
+        uint8x16_t qdiffu8,
+        uint8x16_t *q8u8,
+        uint8x16_t *q9u8,
+        uint8x16_t *q10u8,
+        uint8x16_t *q11u8,
+        uint8x16_t *q12u8,
+        uint8x16_t *q13u8,
+        uint8x16_t *q14u8,
+        uint8x16_t *q15u8) {
+    *q8u8 = vqaddq_u8(*q8u8, qdiffu8);
+    *q9u8 = vqaddq_u8(*q9u8, qdiffu8);
+    *q10u8 = vqaddq_u8(*q10u8, qdiffu8);
+    *q11u8 = vqaddq_u8(*q11u8, qdiffu8);
+    *q12u8 = vqaddq_u8(*q12u8, qdiffu8);
+    *q13u8 = vqaddq_u8(*q13u8, qdiffu8);
+    *q14u8 = vqaddq_u8(*q14u8, qdiffu8);
+    *q15u8 = vqaddq_u8(*q15u8, qdiffu8);
+    return;
+}
+
+static inline void SUB_DIFF_16x8(
+        uint8x16_t qdiffu8,
+        uint8x16_t *q8u8,
+        uint8x16_t *q9u8,
+        uint8x16_t *q10u8,
+        uint8x16_t *q11u8,
+        uint8x16_t *q12u8,
+        uint8x16_t *q13u8,
+        uint8x16_t *q14u8,
+        uint8x16_t *q15u8) {
+    *q8u8 = vqsubq_u8(*q8u8, qdiffu8);
+    *q9u8 = vqsubq_u8(*q9u8, qdiffu8);
+    *q10u8 = vqsubq_u8(*q10u8, qdiffu8);
+    *q11u8 = vqsubq_u8(*q11u8, qdiffu8);
+    *q12u8 = vqsubq_u8(*q12u8, qdiffu8);
+    *q13u8 = vqsubq_u8(*q13u8, qdiffu8);
+    *q14u8 = vqsubq_u8(*q14u8, qdiffu8);
+    *q15u8 = vqsubq_u8(*q15u8, qdiffu8);
+    return;
+}
+
+static inline void ST_16x8(
+        uint8_t *d,
+        int d_stride,
+        uint8x16_t *q8u8,
+        uint8x16_t *q9u8,
+        uint8x16_t *q10u8,
+        uint8x16_t *q11u8,
+        uint8x16_t *q12u8,
+        uint8x16_t *q13u8,
+        uint8x16_t *q14u8,
+        uint8x16_t *q15u8) {
+    vst1q_u8(d, *q8u8);
+    d += d_stride;
+    vst1q_u8(d, *q9u8);
+    d += d_stride;
+    vst1q_u8(d, *q10u8);
+    d += d_stride;
+    vst1q_u8(d, *q11u8);
+    d += d_stride;
+    vst1q_u8(d, *q12u8);
+    d += d_stride;
+    vst1q_u8(d, *q13u8);
+    d += d_stride;
+    vst1q_u8(d, *q14u8);
+    d += d_stride;
+    vst1q_u8(d, *q15u8);
+    return;
+}
+
+void vp9_idct32x32_1_add_neon(
+        int16_t *input,
+        uint8_t *dest,
+        int dest_stride) {
+    uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+    int i, j, dest_stride8;
+    uint8_t *d;
+    int16_t a1, cospi_16_64 = 11585;
+    int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+
+    out = dct_const_round_shift(out * cospi_16_64);
+    a1 = ROUND_POWER_OF_TWO(out, 6);
+
+    dest_stride8 = dest_stride * 8;
+    if (a1 >= 0) {  // diff_positive_32_32
+        a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
+        q0u8 = vdupq_n_u8(a1);
+        for (i = 0; i < 2; i++, dest += 16) {  // diff_positive_32_32_loop
+            d = dest;
+            for (j = 0; j < 4; j++) {
+                LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
+                                        &q12u8, &q13u8, &q14u8, &q15u8);
+                ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
+                                    &q12u8, &q13u8, &q14u8, &q15u8);
+                ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
+                                        &q12u8, &q13u8, &q14u8, &q15u8);
+                d += dest_stride8;
+            }
+        }
+    } else {  // diff_negative_32_32
+        a1 = -a1;
+        a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1;
+        q0u8 = vdupq_n_u8(a1);
+        for (i = 0; i < 2; i++, dest += 16) {  // diff_negative_32_32_loop
+            d = dest;
+            for (j = 0; j < 4; j++) {
+                LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
+                                        &q12u8, &q13u8, &q14u8, &q15u8);
+                SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
+                                    &q12u8, &q13u8, &q14u8, &q15u8);
+                ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
+                                        &q12u8, &q13u8, &q14u8, &q15u8);
+                d += dest_stride8;
+            }
+        }
+    }
+    return;
+}
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_idct32x32_1_add_neon_asm.asm
@@ -1,0 +1,144 @@
+;
+;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license and patent
+;  grant that can be found in the LICENSE file in the root of the source
+;  tree. All contributing project authors may be found in the AUTHORS
+;  file in the root of the source tree.
+;
+
+    EXPORT  |vp9_idct32x32_1_add_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+    ;TODO(hkuang): put the following macros in a separate
+    ;file so other idct functions could also use them.
+    MACRO
+    LD_16x8          $src, $stride
+    vld1.8           {q8}, [$src], $stride
+    vld1.8           {q9}, [$src], $stride
+    vld1.8           {q10}, [$src], $stride
+    vld1.8           {q11}, [$src], $stride
+    vld1.8           {q12}, [$src], $stride
+    vld1.8           {q13}, [$src], $stride
+    vld1.8           {q14}, [$src], $stride
+    vld1.8           {q15}, [$src], $stride
+    MEND
+
+    MACRO
+    ADD_DIFF_16x8    $diff
+    vqadd.u8         q8, q8, $diff
+    vqadd.u8         q9, q9, $diff
+    vqadd.u8         q10, q10, $diff
+    vqadd.u8         q11, q11, $diff
+    vqadd.u8         q12, q12, $diff
+    vqadd.u8         q13, q13, $diff
+    vqadd.u8         q14, q14, $diff
+    vqadd.u8         q15, q15, $diff
+    MEND
+
+    MACRO
+    SUB_DIFF_16x8    $diff
+    vqsub.u8         q8, q8, $diff
+    vqsub.u8         q9, q9, $diff
+    vqsub.u8         q10, q10, $diff
+    vqsub.u8         q11, q11, $diff
+    vqsub.u8         q12, q12, $diff
+    vqsub.u8         q13, q13, $diff
+    vqsub.u8         q14, q14, $diff
+    vqsub.u8         q15, q15, $diff
+    MEND
+
+    MACRO
+    ST_16x8          $dst, $stride
+    vst1.8           {q8}, [$dst], $stride
+    vst1.8           {q9}, [$dst], $stride
+    vst1.8           {q10},[$dst], $stride
+    vst1.8           {q11},[$dst], $stride
+    vst1.8           {q12},[$dst], $stride
+    vst1.8           {q13},[$dst], $stride
+    vst1.8           {q14},[$dst], $stride
+    vst1.8           {q15},[$dst], $stride
+    MEND
+
+;void vp9_idct32x32_1_add_neon(int16_t *input, uint8_t *dest,
+;                              int dest_stride)
+;
+; r0  int16_t input
+; r1  uint8_t *dest
+; r2  int dest_stride
+
+|vp9_idct32x32_1_add_neon| PROC
+    push             {lr}
+    pld              [r1]
+    add              r3, r1, #16               ; r3 dest + 16 for second loop
+    ldrsh            r0, [r0]
+
+    ; generate cospi_16_64 = 11585
+    mov              r12, #0x2d00
+    add              r12, #0x41
+
+    ; out = dct_const_round_shift(input[0] * cospi_16_64)
+    mul              r0, r0, r12               ; input[0] * cospi_16_64
+    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
+    asr              r0, r0, #14               ; >> DCT_CONST_BITS
+
+    ; out = dct_const_round_shift(out * cospi_16_64)
+    mul              r0, r0, r12               ; out * cospi_16_64
+    mov              r12, r1                   ; save dest
+    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
+    asr              r0, r0, #14               ; >> DCT_CONST_BITS
+
+    ; a1 = ROUND_POWER_OF_TWO(out, 6)
+    add              r0, r0, #32               ; + (1 <<((6) - 1))
+    asrs             r0, r0, #6                ; >> 6
+    bge              diff_positive_32_32
+
+diff_negative_32_32
+    neg              r0, r0
+    usat             r0, #8, r0
+    vdup.u8          q0, r0
+    mov              r0, #4
+
+diff_negative_32_32_loop
+    sub              r0, #1
+    LD_16x8          r1, r2
+    SUB_DIFF_16x8    q0
+    ST_16x8          r12, r2
+
+    LD_16x8          r1, r2
+    SUB_DIFF_16x8    q0
+    ST_16x8          r12, r2
+    cmp              r0, #2
+    moveq            r1, r3
+    moveq            r12, r3
+    cmp              r0, #0
+    bne              diff_negative_32_32_loop
+    pop              {pc}
+
+diff_positive_32_32
+    usat             r0, #8, r0
+    vdup.u8          q0, r0
+    mov              r0, #4
+
+diff_positive_32_32_loop
+    sub              r0, #1
+    LD_16x8          r1, r2
+    ADD_DIFF_16x8    q0
+    ST_16x8          r12, r2
+
+    LD_16x8          r1, r2
+    ADD_DIFF_16x8    q0
+    ST_16x8          r12, r2
+    cmp              r0, #2
+    moveq            r1, r3
+    moveq            r12, r3
+    cmp              r0, #0
+    bne              diff_positive_32_32_loop
+    pop              {pc}
+
+    ENDP             ; |vp9_idct32x32_1_add_neon|
+    END
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -460,8 +460,7 @@
     $vp9_idct32x32_34_add_neon_asm=vp9_idct32x32_1024_add_neon;
 
     add_proto qw/void vp9_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct32x32_1_add sse2 neon_asm dspr2/;
-    $vp9_idct32x32_1_add_neon_asm=vp9_idct32x32_1_add_neon;
+    specialize qw/vp9_idct32x32_1_add sse2 neon dspr2/;
 
     add_proto qw/void vp9_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
     specialize qw/vp9_iht4x4_16_add sse2 neon_asm dspr2/;
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -138,7 +138,6 @@
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct8x8_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_1_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_add_neon$(ASM)
-VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct32x32_1_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct32x32_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_iht4x4_add_neon$(ASM)
 VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_iht8x8_add_neon$(ASM)
@@ -154,6 +153,7 @@
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon_asm$(ASM)
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct32x32_1_add_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_1_add_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon_asm$(ASM)
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
@@ -165,6 +165,7 @@
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct32x32_1_add_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_1_add_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon.c
 VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c