ref: 21a1abd8e369412e9504b9ca5a0f0eafe31813be
parent: 568d4b1d63b83e5b4137c7ae182d05e9cfe9c771
author: James Zern <jzern@google.com>
date: Tue Nov 22 12:47:38 EST 2016
enable vpx_idct32x32_135_add_neon in hbd builds

BUG=webm:1294

Change-Id: Ide6d3994fe01c4320c9d143e6d059b49568048e4
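Context for the change, paraphrased from the existing definition in vpx_dsp/vpx_dsp_common.h (shown for reference only, not part of this patch): the width of tran_low_t depends on the build configuration, which is why high-bitdepth builds need the new load helpers.

#if CONFIG_VP9_HIGHBITDEPTH
typedef int32_t tran_low_t; /* coefficients stored as 32-bit values */
#else
typedef int16_t tran_low_t; /* coefficients fit in 16 bits */
#endif

With 32-bit coefficients, the NEON code can no longer vld1*_s16() the input directly; it loads int32 lanes and narrows them with vmovn_s32(), which is what the helpers added below do.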
--- a/vpx_dsp/arm/idct32x32_135_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_135_add_neon.c
@@ -16,6 +16,50 @@
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
+static INLINE void load_8x8_s16(const tran_low_t *input, int16x8_t *const in0,
+ int16x8_t *const in1, int16x8_t *const in2,
+ int16x8_t *const in3, int16x8_t *const in4,
+ int16x8_t *const in5, int16x8_t *const in6,
+ int16x8_t *const in7) {
+ *in0 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in1 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in2 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in3 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in4 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in5 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in6 = load_tran_low_to_s16q(input);
+ input += 32;
+ *in7 = load_tran_low_to_s16q(input);
+}
+
+static INLINE void load_4x8_s16(const tran_low_t *input, int16x4_t *const in0,
+ int16x4_t *const in1, int16x4_t *const in2,
+ int16x4_t *const in3, int16x4_t *const in4,
+ int16x4_t *const in5, int16x4_t *const in6,
+ int16x4_t *const in7) {
+ *in0 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in1 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in2 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in3 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in4 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in5 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in6 = load_tran_low_to_s16d(input);
+ input += 32;
+ *in7 = load_tran_low_to_s16d(input);
+}
+
// Only for the first pass of the _135_ variant. Since it only uses values from
// the top left 16x16 it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 12 columns make
@@ -43,7 +87,7 @@
// 13 84 93 103 110 125
// 14 98 106 115 127
// 15 117 128
-static void idct32_12_neon(const int16_t *input, int16_t *output) {
+static void idct32_12_neon(const tran_low_t *input, int16_t *output) {
int16x8_t in0, in1, in2, in3, in4, in5, in6, in7;
int16x4_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int16x8_t in8, in9, in10, in11;
@@ -66,27 +110,11 @@
s7_11, s7_12, s7_13, s7_14, s7_15, s7_20, s7_21, s7_22, s7_23, s7_24,
s7_25, s7_26, s7_27;
- load_and_transpose_s16_8x8(input, 32, &in0, &in1, &in2, &in3, &in4, &in5,
- &in6, &in7);
+ load_8x8_s16(input, &in0, &in1, &in2, &in3, &in4, &in5, &in6, &in7);
+ transpose_s16_8x8(&in0, &in1, &in2, &in3, &in4, &in5, &in6, &in7);
- input += 8;
-
- tmp0 = vld1_s16(input);
- input += 32;
- tmp1 = vld1_s16(input);
- input += 32;
- tmp2 = vld1_s16(input);
- input += 32;
- tmp3 = vld1_s16(input);
- input += 32;
- tmp4 = vld1_s16(input);
- input += 32;
- tmp5 = vld1_s16(input);
- input += 32;
- tmp6 = vld1_s16(input);
- input += 32;
- tmp7 = vld1_s16(input);
-
+ load_4x8_s16(input + 8, &tmp0, &tmp1, &tmp2, &tmp3, &tmp4, &tmp5, &tmp6,
+ &tmp7);
transpose_s16_4x8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, &in8, &in9,
&in10, &in11);
@@ -669,7 +697,7 @@
output + (24 * stride), stride);
}
-void vpx_idct32x32_135_add_neon(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest,
int stride) {
int i;
int16_t temp[32 * 16];
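A scalar sketch of the access pattern the new load_8x8_s16() implements, for readers not fluent in NEON (the name load_8x8_s16_ref and the int16_t out[8][8] destination are illustrative, not part of the patch): eight contiguous coefficients are read per row, the pointer advances by the 32-coefficient row stride of the 32x32 block, and each tran_low_t is narrowed to int16_t. load_4x8_s16() follows the same pattern with four coefficients per row.

#include <stdint.h>
#include "vpx_dsp/vpx_dsp_common.h" /* tran_low_t */

/* Illustrative scalar equivalent of load_8x8_s16(); assumes the row-major
 * 32x32 coefficient layout consumed by the first pass. */
static void load_8x8_s16_ref(const tran_low_t *input, int16_t out[8][8]) {
  int r, c;
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c) {
      /* Narrowing matches vmovn_s32(); a no-op when tran_low_t is int16_t. */
      out[r][c] = (int16_t)input[c];
    }
    input += 32; /* advance one full row of the 32x32 block */
  }
}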
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -18,8 +18,8 @@
#include "vpx_dsp/vpx_dsp_common.h"
//------------------------------------------------------------------------------
-// Helper function used to load tran_low_t into int16, narrowing if necessary.
+// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
const int32x4_t v0 = vld1q_s32(buf);
@@ -31,6 +31,17 @@
return vld1q_s16(buf);
#endif
}
+
+static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int32x4_t v0 = vld1q_s32(buf);
+ return vmovn_s32(v0);
+#else
+ return vld1_s16(buf);
+#endif
+}
+
+//------------------------------------------------------------------------------
// Multiply a by a_const. Saturate, shift and narrow by 14.
static INLINE int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
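A hedged usage sketch of the new d-register helper (load_8_coeffs is an illustrative name, not part of the patch): two narrowing 4-lane loads combined with vcombine_s16() produce the same lanes as one load_tran_low_to_s16q() call, in both regular and high-bitdepth builds.

#include <arm_neon.h>
#include "vpx_dsp/arm/idct_neon.h"

/* Illustration only: the d-register variant is the 4-lane companion of the
 * existing 8-lane helper. */
static INLINE int16x8_t load_8_coeffs(const tran_low_t *buf) {
  const int16x4_t lo = load_tran_low_to_s16d(buf);     /* buf[0..3] */
  const int16x4_t hi = load_tran_low_to_s16d(buf + 4); /* buf[4..7] */
  return vcombine_s16(lo, hi); /* == load_tran_low_to_s16q(buf) */
}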
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -213,7 +213,6 @@
endif # HAVE_NEON_ASM
DSP_SRCS-$(HAVE_NEON) += arm/idct16x16_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_add_neon.c
-DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_135_add_neon.c
DSP_SRCS-$(HAVE_MSA) += mips/inv_txfm_msa.h
DSP_SRCS-$(HAVE_MSA) += mips/idct4x4_msa.c
@@ -246,6 +245,7 @@
DSP_SRCS-$(HAVE_NEON) += arm/idct_neon.h
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_1_add_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_34_add_neon.c
+DSP_SRCS-$(HAVE_NEON) += arm/idct32x32_135_add_neon.c
endif # CONFIG_VP9
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -698,7 +698,7 @@
specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64";
add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
- specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64";
+ specialize qw/vpx_idct32x32_135_add neon sse2/, "$ssse3_x86_64";
# Need to add 135 eob idct32x32 implementations.
$vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2;
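With neon added to the specialize line, rtcd.pl routes vpx_idct32x32_135_add() to the NEON version on ARM targets. Roughly the shape of the generated dispatch in vpx_dsp_rtcd.h for a build with runtime CPU detection (illustrative only; the exact generated output depends on the configure options):

void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
                             int dest_stride);
void vpx_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest,
                                int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_135_add)(const tran_low_t *input,
                                          uint8_t *dest, int dest_stride);

/* In setup_rtcd_internal(): */
vpx_idct32x32_135_add = vpx_idct32x32_135_add_c;
if (flags & HAS_NEON) vpx_idct32x32_135_add = vpx_idct32x32_135_add_neon;

Builds that target NEON unconditionally skip the function pointer and map the symbol to the _neon function directly.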