ref: 12566c3d0f9bedcfe938c51479464c099d9ae066
parent: 33ddc645ceb0d50290365de4d9f0decf56c49291
parent: 21a1abd8e369412e9504b9ca5a0f0eafe31813be
author: James Zern <jzern@google.com>
date: Mon Nov 28 19:12:45 EST 2016
Merge changes Ide6d3994,I164cfcbe

* changes:
  enable vpx_idct32x32_135_add_neon in hbd builds
  idct_neon: rename load_tran_low_to_s16 -> ...s16q
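
For context (not part of the commit message): tran_low_t is libvpx's transform
coefficient type, widened to 32 bits in CONFIG_VP9_HIGHBITDEPTH builds and plain
int16_t otherwise. The "q"/"d" suffixes follow NEON register naming: the q
helper fills a 128-bit quad register (int16x8_t, 8 coefficients), the new d
helper a 64-bit double register (int16x4_t, 4 coefficients). A minimal sketch
of how a caller might use both, assuming the usual libvpx include paths
(illustrative only, not code from this patch):

    #include <arm_neon.h>

    #include "vpx_dsp/arm/idct_neon.h"  /* load_tran_low_to_s16q / _s16d */

    /* Hypothetical helper: load the first 12 coefficients of one row,
     * narrowing from int32_t when tran_low_t is 32 bits wide. */
    static void load_twelve(const tran_low_t *row, int16x8_t *lo8,
                            int16x4_t *hi4) {
      *lo8 = load_tran_low_to_s16q(row);     /* coefficients 0-7  */
      *hi4 = load_tran_low_to_s16d(row + 8); /* coefficients 8-11 */
    }
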
--- a/vpx_dsp/arm/idct32x32_135_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_135_add_neon.c
@@ -16,6 +16,50 @@
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
+static INLINE void load_8x8_s16(const tran_low_t *input, int16x8_t *const in0,
+ int16x8_t *const in1, int16x8_t *const in2,
+ int16x8_t *const in3, int16x8_t *const in4,
+ int16x8_t *const in5, int16x8_t *const in6,
+ int16x8_t *const in7) {
+ *in0 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in1 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in2 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in3 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in4 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in5 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in6 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in7 = load_tran_low_to_s16q(input);
+}
+
+static INLINE void load_4x8_s16(const tran_low_t *input, int16x4_t *const in0,
+ int16x4_t *const in1, int16x4_t *const in2,
+ int16x4_t *const in3, int16x4_t *const in4,
+ int16x4_t *const in5, int16x4_t *const in6,
+ int16x4_t *const in7) {
+ *in0 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in1 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in2 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in3 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in4 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in5 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in6 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in7 = load_tran_low_to_s16d(input);
+}
+
// Only for the first pass of the _135_ variant. Since it only uses values from
// the top left 16x16 it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 12 columns make
@@ -43,7 +87,7 @@
// 13 84 93 103 110 125
// 14 98 106 115 127
// 15 117 128
-static void idct32_12_neon(const int16_t *input, int16_t *output) {
+static void idct32_12_neon(const tran_low_t *input, int16_t *output) {
int16x8_t in0, in1, in2, in3, in4, in5, in6, in7;
int16x4_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int16x8_t in8, in9, in10, in11;
@@ -66,27 +110,11 @@
s7_11, s7_12, s7_13, s7_14, s7_15, s7_20, s7_21, s7_22, s7_23, s7_24,
s7_25, s7_26, s7_27;
- load_and_transpose_s16_8x8(input, 32, &in0, &in1, &in2, &in3, &in4, &in5,
- &in6, &in7);
+ load_8x8_s16(input, &in0, &in1, &in2, &in3, &in4, &in5, &in6, &in7);
+ transpose_s16_8x8(&in0, &in1, &in2, &in3, &in4, &in5, &in6, &in7);
- input += 8;
-
- tmp0 = vld1_s16(input);
- input += 32;
- tmp1 = vld1_s16(input);
- input += 32;
- tmp2 = vld1_s16(input);
- input += 32;
- tmp3 = vld1_s16(input);
- input += 32;
- tmp4 = vld1_s16(input);
- input += 32;
- tmp5 = vld1_s16(input);
- input += 32;
- tmp6 = vld1_s16(input);
- input += 32;
- tmp7 = vld1_s16(input);
-
+ load_4x8_s16(input + 8, &tmp0, &tmp1, &tmp2, &tmp3, &tmp4, &tmp5, &tmp6,
+ &tmp7);
transpose_s16_4x8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, &in8, &in9,
&in10, &in11);
@@ -669,7 +697,7 @@
output + (24 * stride), stride);
}
-void vpx_idct32x32_135_add_neon(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest,
int stride) {
int i;
int16_t temp[32 * 16];
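
A layout note on the two new load helpers above (a sketch, not part of the
patch): the 32x32 coefficient block is stored row-major with a stride of 32
tran_low_t, which is why each helper advances the pointer by 32 between loads.

    /* Illustrative only: address of row r, column c of the 32x32 block. */
    static const tran_low_t *coeff_at(const tran_low_t *coeffs, int r, int c) {
      return coeffs + r * 32 + c;
    }

With that layout, load_8x8_s16(input, ...) covers rows 0-7 of columns 0-7 and
load_4x8_s16(input + 8, ...) covers rows 0-7 of columns 8-11, matching the
12-column assumption documented above idct32_12_neon.
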
--- a/vpx_dsp/arm/idct32x32_34_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_34_add_neon.c
@@ -46,21 +46,21 @@
s2_31;
int16x8_t s3_24, s3_25, s3_26, s3_27;
- in0 = load_tran_low_to_s16(input);
+ in0 = load_tran_low_to_s16q(input);
input += 32;
- in1 = load_tran_low_to_s16(input);
+ in1 = load_tran_low_to_s16q(input);
input += 32;
- in2 = load_tran_low_to_s16(input);
+ in2 = load_tran_low_to_s16q(input);
input += 32;
- in3 = load_tran_low_to_s16(input);
+ in3 = load_tran_low_to_s16q(input);
input += 32;
- in4 = load_tran_low_to_s16(input);
+ in4 = load_tran_low_to_s16q(input);
input += 32;
- in5 = load_tran_low_to_s16(input);
+ in5 = load_tran_low_to_s16q(input);
input += 32;
- in6 = load_tran_low_to_s16(input);
+ in6 = load_tran_low_to_s16q(input);
input += 32;
- in7 = load_tran_low_to_s16(input);
+ in7 = load_tran_low_to_s16q(input);
transpose_s16_8x8(&in0, &in1, &in2, &in3, &in4, &in5, &in6, &in7);
// stage 1
--- a/vpx_dsp/arm/idct4x4_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_add_neon.c
@@ -62,8 +62,8 @@
assert(!(dest_stride % sizeof(uint32_t)));
// Rows
- a0 = load_tran_low_to_s16(input);
- a1 = load_tran_low_to_s16(input + 8);
+ a0 = load_tran_low_to_s16q(input);
+ a1 = load_tran_low_to_s16q(input + 8);
idct4x4_16_kernel(cospis, &a0, &a1);
// Columns
--- a/vpx_dsp/arm/idct8x8_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_add_neon.c
@@ -174,14 +174,14 @@
int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
uint16x8_t q8u16, q9u16, q10u16, q11u16;
- q8s16 = load_tran_low_to_s16(input);
- q9s16 = load_tran_low_to_s16(input + 8);
- q10s16 = load_tran_low_to_s16(input + 16);
- q11s16 = load_tran_low_to_s16(input + 24);
- q12s16 = load_tran_low_to_s16(input + 32);
- q13s16 = load_tran_low_to_s16(input + 40);
- q14s16 = load_tran_low_to_s16(input + 48);
- q15s16 = load_tran_low_to_s16(input + 56);
+ q8s16 = load_tran_low_to_s16q(input);
+ q9s16 = load_tran_low_to_s16q(input + 8);
+ q10s16 = load_tran_low_to_s16q(input + 16);
+ q11s16 = load_tran_low_to_s16q(input + 24);
+ q12s16 = load_tran_low_to_s16q(input + 32);
+ q13s16 = load_tran_low_to_s16q(input + 40);
+ q14s16 = load_tran_low_to_s16q(input + 48);
+ q15s16 = load_tran_low_to_s16q(input + 56);
transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
&q15s16);
@@ -280,14 +280,14 @@
uint16x8_t q8u16, q9u16, q10u16, q11u16;
int32x4_t q9s32, q10s32, q11s32, q12s32;
- q8s16 = load_tran_low_to_s16(input);
- q9s16 = load_tran_low_to_s16(input + 8);
- q10s16 = load_tran_low_to_s16(input + 16);
- q11s16 = load_tran_low_to_s16(input + 24);
- q12s16 = load_tran_low_to_s16(input + 32);
- q13s16 = load_tran_low_to_s16(input + 40);
- q14s16 = load_tran_low_to_s16(input + 48);
- q15s16 = load_tran_low_to_s16(input + 56);
+ q8s16 = load_tran_low_to_s16q(input);
+ q9s16 = load_tran_low_to_s16q(input + 8);
+ q10s16 = load_tran_low_to_s16q(input + 16);
+ q11s16 = load_tran_low_to_s16q(input + 24);
+ q12s16 = load_tran_low_to_s16q(input + 32);
+ q13s16 = load_tran_low_to_s16q(input + 40);
+ q14s16 = load_tran_low_to_s16q(input + 48);
+ q15s16 = load_tran_low_to_s16q(input + 56);
transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
&q15s16);
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -18,9 +18,9 @@
#include "vpx_dsp/vpx_dsp_common.h"
//------------------------------------------------------------------------------
-// Helper function used to load tran_low_t into int16, narrowing if necessary.
+// Helper functions used to load tran_low_t into int16, narrowing if necessary.
-static INLINE int16x8_t load_tran_low_to_s16(const tran_low_t *buf) {
+static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
const int32x4_t v0 = vld1q_s32(buf);
const int32x4_t v1 = vld1q_s32(buf + 4);
@@ -31,6 +31,17 @@
return vld1q_s16(buf);
#endif
}
+
+static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int32x4_t v0 = vld1q_s32(buf);
+ return vmovn_s32(v0);
+#else
+ return vld1_s16(buf);
+#endif
+}
+
+//------------------------------------------------------------------------------
// Multiply a by a_const. Saturate, shift and narrow by 14.
static INLINE int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -213,7 +213,6 @@
endif # HAVE_NEON_ASM
DSP_SRCS-$(HAVE_NEON) += arm/idct16x16_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_add_neon.c
-DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_135_add_neon.c
DSP_SRCS-$(HAVE_MSA) += mips/inv_txfm_msa.h
DSP_SRCS-$(HAVE_MSA) += mips/idct4x4_msa.c
@@ -246,6 +245,7 @@
DSP_SRCS-$(HAVE_NEON) += arm/idct_neon.h
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_1_add_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_34_add_neon.c
+DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_135_add_neon.c
endif # CONFIG_VP9
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -698,7 +698,7 @@
specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64";
add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64";
+ specialize qw/vpx_idct32x32_135_add neon sse2/, "$ssse3_x86_64";
# Need to add 135 eob idct32x32 implementations.
$vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
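
With "neon" added to the specialize list, the generated RTCD table can resolve
vpx_idct32x32_135_add to vpx_idct32x32_135_add_neon on NEON-capable targets,
while the sse2 entry still falls back to the full 1024-coefficient version, as
the comment above notes. A caller sketch against the prototype declared above
(illustrative only, hypothetical wrapper):

    #include "./vpx_dsp_rtcd.h"

    /* RTCD picks the NEON implementation when the target supports it. */
    static void inverse_32x32_135(const tran_low_t *coeffs, uint8_t *dest,
                                  int stride) {
      vpx_idct32x32_135_add(coeffs, dest, stride);
    }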