ref: 5b10674b5cfc86f78742da82cd1af026ba634b13
parent: 726556dde993cb5ae6be196e8beaeddf9417ca86
parent: 750e753134466fd2b12db5087878b41120235da1
author: Kaustubh Raste <kaustubh.raste@imgtec.com>
date: Thu Feb 2 03:09:21 EST 2017
Merge "Add mips msa sum_squares_2d_i16 function"
--- a/test/sum_squares_test.cc
+++ b/test/sum_squares_test.cc
@@ -110,4 +110,10 @@
::testing::Values(make_tuple(&vpx_sum_squares_2d_i16_c,
&vpx_sum_squares_2d_i16_sse2)));
#endif // HAVE_SSE2
+
+#if HAVE_MSA
+// Cross-check the new MSA implementation against the C reference:
+// SumSquaresTest runs both functions on identical inputs and compares.
+INSTANTIATE_TEST_CASE_P(MSA, SumSquaresTest, ::testing::Values(make_tuple(
+ &vpx_sum_squares_2d_i16_c,
+ &vpx_sum_squares_2d_i16_msa)));
+#endif // HAVE_MSA
} // namespace
--- /dev/null
+++ b/vpx_dsp/mips/sum_squares_msa.c
@@ -1,0 +1,129 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_dsp_rtcd.h"
+#include "./macros_msa.h"
+
+/* MSA (MIPS SIMD Architecture) version of vpx_sum_squares_2d_i16().
+ *
+ * Returns the sum of squares of all samples in a size x size block of
+ * int16 values starting at 'src', with consecutive rows 'src_stride'
+ * elements apart.  Unrolled vector paths handle the common block sizes
+ * 4, 8 and 16; other multiples of 16 use a tiled vector loop, and any
+ * remaining size falls back to a scalar double loop.
+ *
+ * NOTE(review): squares are accumulated in signed 32-bit vector lanes
+ * (mul0/mul1) before being widened to 64 bits; presumably inputs are
+ * small enough in practice that these lanes cannot wrap -- confirm
+ * against the C reference for extreme int16 magnitudes.
+ */
+uint64_t vpx_sum_squares_2d_i16_msa(const int16_t *src, int src_stride,
+ int size) {
+ int row, col;
+ uint64_t ss_res = 0;
+ v4i32 mul0, mul1; /* two parallel 32-bit square-accumulator chains */
+ v2i64 res0 = { 0 }; /* 64-bit (2-lane) running total */
+
+ if (4 == size) {
+ uint64_t src0, src1, src2, src3;
+ v8i16 diff0 = { 0 };
+ v8i16 diff1 = { 0 };
+
+ /* Load four 64-bit (4-sample) rows and pack two rows per vector. */
+ LD4(src, src_stride, src0, src1, src2, src3);
+ INSERT_D2_SH(src0, src1, diff0);
+ INSERT_D2_SH(src2, src3, diff1);
+ /* Dot product of each vector with itself: lanewise sums of squares. */
+ DOTP_SH2_SW(diff0, diff1, diff0, diff1, mul0, mul1);
+ mul0 += mul1;
+ /* Widen to 64-bit lanes, then fold both lanes into lane 0. */
+ res0 = __msa_hadd_s_d(mul0, mul0);
+ res0 += __msa_splati_d(res0, 1);
+ ss_res = (uint64_t)__msa_copy_s_d(res0, 0);
+ } else if (8 == size) {
+ v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ /* All eight 8-sample rows fit in eight vectors; square-accumulate
+ them into the mul0/mul1 chains and merge at the end. */
+ LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ mul0 += mul1;
+ res0 = __msa_hadd_s_d(mul0, mul0);
+ res0 += __msa_splati_d(res0, 1);
+ ss_res = (uint64_t)__msa_copy_s_d(res0, 0);
+ } else if (16 == size) {
+ v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ /* A 16x16 block is processed as four 8x8-sample batches: left then
+ right halves of rows 0-7, then left and right halves of rows
+ 8-15 (src is advanced by 8 rows mid-way). */
+ LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ src += 8 * src_stride;
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ mul0 += mul1;
+ res0 += __msa_hadd_s_d(mul0, mul0);
+
+ res0 += __msa_splati_d(res0, 1);
+ ss_res = (uint64_t)__msa_copy_s_d(res0, 0);
+ } else if (0 == (size % 16)) {
+ v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
+
+ /* Generic path for any multiple of 16: tile the block into 16x16
+ sub-blocks; each inner iteration mirrors the 16x16 case above. */
+ for (row = 0; row < (size >> 4); row++) {
+ for (col = 0; col < size; col += 16) {
+ const int16_t *src_ptr = src + col;
+ LD_SH8(src_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6,
+ src7);
+ DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src_ptr + 8, src_stride, src0, src1, src2, src3, src4, src5,
+ src6, src7);
+ src_ptr += 8 * src_stride;
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6,
+ src7);
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ LD_SH8(src_ptr + 8, src_stride, src0, src1, src2, src3, src4, src5,
+ src6, src7);
+ DPADD_SH2_SW(src0, src1, src0, src1, mul0, mul1);
+ DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);
+ DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);
+ DPADD_SH2_SW(src6, src7, src6, src7, mul0, mul1);
+ /* Fold this tile's 32-bit lanes into the 64-bit running total. */
+ mul0 += mul1;
+ res0 += __msa_hadd_s_d(mul0, mul0);
+ }
+
+ src += 16 * src_stride;
+ }
+
+ res0 += __msa_splati_d(res0, 1);
+ ss_res = (uint64_t)__msa_copy_s_d(res0, 0);
+ } else {
+ int16_t val;
+
+ /* Scalar fallback for sizes not covered by the vector paths.
+ val * val is computed in (at least 32-bit) int after integer
+ promotion, so 32767^2 / (-32768)^2 cannot overflow here. */
+ for (row = 0; row < size; row++) {
+ for (col = 0; col < size; col++) {
+ val = src[col];
+ ss_res += val * val;
+ }
+
+ src += src_stride;
+ }
+ }
+
+ return ss_res;
+}
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -271,6 +271,7 @@
DSP_SRCS-yes += subtract.c
DSP_SRCS-yes += sum_squares.c
DSP_SRCS-$(HAVE_SSE2) += x86/sum_squares_sse2.c
+DSP_SRCS-$(HAVE_MSA) += mips/sum_squares_msa.c
DSP_SRCS-$(HAVE_NEON) += arm/sad4d_neon.c
DSP_SRCS-$(HAVE_NEON) += arm/sad_neon.c
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -1051,7 +1051,7 @@
specialize qw/vpx_sad4x4x4d msa sse2/;
add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
-specialize qw/vpx_sum_squares_2d_i16 sse2/;
+specialize qw/vpx_sum_squares_2d_i16 sse2 msa/;
#
# Structured Similarity (SSIM)