shithub: libvpx

ref: 95cb130f327e233443a750816bc2af72fa972baf
parent: d45870be8d39fe8545008af4e432ba9a79f0b0e8
parent: 2301d10f739a13f9f04eb016036e044484cf9d5d
author: Parag Salasakar <img.mips1@gmail.com>
date: Thu Apr 30 00:39:11 EDT 2015

Merge "mips msa vp9 copy and avg convolve optimization"

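Context for the diff below: the two new MSA source files vectorize VP9's plain block-copy and rounding-average convolve paths. As a point of reference, here is a minimal scalar sketch of what the two entry points compute; it mirrors the fallback/default cases inside the new files, and the helper names are illustrative, not part of the patch:

    #include <stdint.h>
    #include <string.h>

    /* Scalar reference for the block copy the MSA kernels accelerate:
     * copy a w x h block one row at a time. */
    static void convolve_copy_ref(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int dst_stride, int w, int h) {
      int r;
      for (r = 0; r < h; ++r) {
        memcpy(dst, src, w);
        src += src_stride;
        dst += dst_stride;
      }
    }

    /* Scalar reference for the rounding average:
     * dst = (dst + src + 1) >> 1 per pixel, as in the patch's default case. */
    static void convolve_avg_ref(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride, int w, int h) {
      int r, c;
      for (r = 0; r < h; ++r) {
        for (c = 0; c < w; ++c)
          dst[c] = (uint8_t)((dst[c] + src[c] + 1) >> 1);
        src += src_stride;
        dst += dst_stride;
      }
    }

The MSA versions do the same work 16 bytes (or more) per instruction via __msa_aver_u_b and vector loads/stores, with per-width specializations for 4, 8, 16, 32 and 64.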
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -1817,7 +1817,7 @@
 
 #if HAVE_MSA
 const ConvolveFunctions convolve8_msa(
-    vp9_convolve_copy_c, vp9_convolve_avg_c,
+    vp9_convolve_copy_msa, vp9_convolve_avg_msa,
     vp9_convolve8_horiz_msa, vp9_convolve8_avg_horiz_c,
     vp9_convolve8_vert_msa, vp9_convolve8_avg_vert_c,
     vp9_convolve8_msa, vp9_convolve8_avg_c, 0);
--- /dev/null
+++ b/vp9/common/mips/msa/vp9_convolve_avg_msa.c
@@ -1,0 +1,335 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/mips/msa/vp9_macros_msa.h"
+
+static void avg_width4_msa(const uint8_t *src, int32_t src_stride,
+                           uint8_t *dst, int32_t dst_stride, int32_t height) {
+  int32_t cnt;
+  uint32_t out0, out1, out2, out3;
+  v16u8 src0, src1, src2, src3;
+  v16u8 dst0, dst1, dst2, dst3;
+
+  if (0 == (height % 4)) {
+    for (cnt = (height / 4); cnt--;) {
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      src += (4 * src_stride);
+
+      LOAD_4VECS_UB(dst, dst_stride, dst0, dst1, dst2, dst3);
+
+      dst0 = __msa_aver_u_b(src0, dst0);
+      dst1 = __msa_aver_u_b(src1, dst1);
+      dst2 = __msa_aver_u_b(src2, dst2);
+      dst3 = __msa_aver_u_b(src3, dst3);
+
+      out0 = __msa_copy_u_w((v4i32)dst0, 0);
+      out1 = __msa_copy_u_w((v4i32)dst1, 0);
+      out2 = __msa_copy_u_w((v4i32)dst2, 0);
+      out3 = __msa_copy_u_w((v4i32)dst3, 0);
+
+      STORE_WORD(dst, out0);
+      dst += dst_stride;
+      STORE_WORD(dst, out1);
+      dst += dst_stride;
+      STORE_WORD(dst, out2);
+      dst += dst_stride;
+      STORE_WORD(dst, out3);
+      dst += dst_stride;
+    }
+  } else if (0 == (height % 2)) {
+    for (cnt = (height / 2); cnt--;) {
+      LOAD_2VECS_UB(src, src_stride, src0, src1);
+      src += (2 * src_stride);
+
+      LOAD_2VECS_UB(dst, dst_stride, dst0, dst1);
+
+      dst0 = __msa_aver_u_b(src0, dst0);
+      dst1 = __msa_aver_u_b(src1, dst1);
+
+      out0 = __msa_copy_u_w((v4i32)dst0, 0);
+      out1 = __msa_copy_u_w((v4i32)dst1, 0);
+
+      STORE_WORD(dst, out0);
+      dst += dst_stride;
+      STORE_WORD(dst, out1);
+      dst += dst_stride;
+    }
+  }
+}
+
+static void avg_width8_msa(const uint8_t *src, int32_t src_stride,
+                           uint8_t *dst, int32_t dst_stride, int32_t height) {
+  int32_t cnt;
+  uint64_t out0, out1, out2, out3;
+  v16u8 src0, src1, src2, src3;
+  v16u8 dst0, dst1, dst2, dst3;
+
+  for (cnt = (height / 4); cnt--;) {
+    LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    LOAD_4VECS_UB(dst, dst_stride, dst0, dst1, dst2, dst3);
+
+    dst0 = __msa_aver_u_b(src0, dst0);
+    dst1 = __msa_aver_u_b(src1, dst1);
+    dst2 = __msa_aver_u_b(src2, dst2);
+    dst3 = __msa_aver_u_b(src3, dst3);
+
+    out0 = __msa_copy_u_d((v2i64)dst0, 0);
+    out1 = __msa_copy_u_d((v2i64)dst1, 0);
+    out2 = __msa_copy_u_d((v2i64)dst2, 0);
+    out3 = __msa_copy_u_d((v2i64)dst3, 0);
+
+    STORE_DWORD(dst, out0);
+    dst += dst_stride;
+    STORE_DWORD(dst, out1);
+    dst += dst_stride;
+    STORE_DWORD(dst, out2);
+    dst += dst_stride;
+    STORE_DWORD(dst, out3);
+    dst += dst_stride;
+  }
+}
+
+static void avg_width16_msa(const uint8_t *src, int32_t src_stride,
+                            uint8_t *dst, int32_t dst_stride, int32_t height) {
+  int32_t cnt;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+
+  for (cnt = (height / 8); cnt--;) {
+    LOAD_8VECS_UB(src, src_stride,
+                  src0, src1, src2, src3, src4, src5, src6, src7);
+    src += (8 * src_stride);
+
+    LOAD_8VECS_UB(dst, dst_stride,
+                  dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
+
+    dst0 = __msa_aver_u_b(src0, dst0);
+    dst1 = __msa_aver_u_b(src1, dst1);
+    dst2 = __msa_aver_u_b(src2, dst2);
+    dst3 = __msa_aver_u_b(src3, dst3);
+    dst4 = __msa_aver_u_b(src4, dst4);
+    dst5 = __msa_aver_u_b(src5, dst5);
+    dst6 = __msa_aver_u_b(src6, dst6);
+    dst7 = __msa_aver_u_b(src7, dst7);
+
+    STORE_8VECS_UB(dst, dst_stride,
+                   dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
+    dst += (8 * dst_stride);
+  }
+}
+
+static void avg_width32_msa(const uint8_t *src, int32_t src_stride,
+                            uint8_t *dst, int32_t dst_stride, int32_t height) {
+  int32_t cnt;
+  uint8_t *dst_dup = dst;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+  v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
+
+  for (cnt = (height / 8); cnt--;) {
+    src0 = LOAD_UB(src);
+    src1 = LOAD_UB(src + 16);
+    src += src_stride;
+    src2 = LOAD_UB(src);
+    src3 = LOAD_UB(src + 16);
+    src += src_stride;
+    src4 = LOAD_UB(src);
+    src5 = LOAD_UB(src + 16);
+    src += src_stride;
+    src6 = LOAD_UB(src);
+    src7 = LOAD_UB(src + 16);
+    src += src_stride;
+
+    dst0 = LOAD_UB(dst_dup);
+    dst1 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst2 = LOAD_UB(dst_dup);
+    dst3 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst4 = LOAD_UB(dst_dup);
+    dst5 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst6 = LOAD_UB(dst_dup);
+    dst7 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+
+    src8 = LOAD_UB(src);
+    src9 = LOAD_UB(src + 16);
+    src += src_stride;
+    src10 = LOAD_UB(src);
+    src11 = LOAD_UB(src + 16);
+    src += src_stride;
+    src12 = LOAD_UB(src);
+    src13 = LOAD_UB(src + 16);
+    src += src_stride;
+    src14 = LOAD_UB(src);
+    src15 = LOAD_UB(src + 16);
+    src += src_stride;
+
+    dst8 = LOAD_UB(dst_dup);
+    dst9 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst10 = LOAD_UB(dst_dup);
+    dst11 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst12 = LOAD_UB(dst_dup);
+    dst13 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+    dst14 = LOAD_UB(dst_dup);
+    dst15 = LOAD_UB(dst_dup + 16);
+    dst_dup += dst_stride;
+
+    dst0 = __msa_aver_u_b(src0, dst0);
+    dst1 = __msa_aver_u_b(src1, dst1);
+    dst2 = __msa_aver_u_b(src2, dst2);
+    dst3 = __msa_aver_u_b(src3, dst3);
+    dst4 = __msa_aver_u_b(src4, dst4);
+    dst5 = __msa_aver_u_b(src5, dst5);
+    dst6 = __msa_aver_u_b(src6, dst6);
+    dst7 = __msa_aver_u_b(src7, dst7);
+    dst8 = __msa_aver_u_b(src8, dst8);
+    dst9 = __msa_aver_u_b(src9, dst9);
+    dst10 = __msa_aver_u_b(src10, dst10);
+    dst11 = __msa_aver_u_b(src11, dst11);
+    dst12 = __msa_aver_u_b(src12, dst12);
+    dst13 = __msa_aver_u_b(src13, dst13);
+    dst14 = __msa_aver_u_b(src14, dst14);
+    dst15 = __msa_aver_u_b(src15, dst15);
+
+    STORE_UB(dst0, dst);
+    STORE_UB(dst1, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst2, dst);
+    STORE_UB(dst3, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst4, dst);
+    STORE_UB(dst5, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst6, dst);
+    STORE_UB(dst7, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst8, dst);
+    STORE_UB(dst9, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst10, dst);
+    STORE_UB(dst11, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst12, dst);
+    STORE_UB(dst13, dst + 16);
+    dst += dst_stride;
+    STORE_UB(dst14, dst);
+    STORE_UB(dst15, dst + 16);
+    dst += dst_stride;
+  }
+}
+
+static void avg_width64_msa(const uint8_t *src, int32_t src_stride,
+                            uint8_t *dst, int32_t dst_stride, int32_t height) {
+  int32_t cnt;
+  uint8_t *dst_dup = dst;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+  v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
+
+  for (cnt = (height / 4); cnt--;) {
+    LOAD_4VECS_UB(src, 16, src0, src1, src2, src3);
+    src += src_stride;
+    LOAD_4VECS_UB(src, 16, src4, src5, src6, src7);
+    src += src_stride;
+    LOAD_4VECS_UB(src, 16, src8, src9, src10, src11);
+    src += src_stride;
+    LOAD_4VECS_UB(src, 16, src12, src13, src14, src15);
+    src += src_stride;
+
+    LOAD_4VECS_UB(dst_dup, 16, dst0, dst1, dst2, dst3);
+    dst_dup += dst_stride;
+    LOAD_4VECS_UB(dst_dup, 16, dst4, dst5, dst6, dst7);
+    dst_dup += dst_stride;
+    LOAD_4VECS_UB(dst_dup, 16, dst8, dst9, dst10, dst11);
+    dst_dup += dst_stride;
+    LOAD_4VECS_UB(dst_dup, 16, dst12, dst13, dst14, dst15);
+    dst_dup += dst_stride;
+
+    dst0 = __msa_aver_u_b(src0, dst0);
+    dst1 = __msa_aver_u_b(src1, dst1);
+    dst2 = __msa_aver_u_b(src2, dst2);
+    dst3 = __msa_aver_u_b(src3, dst3);
+    dst4 = __msa_aver_u_b(src4, dst4);
+    dst5 = __msa_aver_u_b(src5, dst5);
+    dst6 = __msa_aver_u_b(src6, dst6);
+    dst7 = __msa_aver_u_b(src7, dst7);
+    dst8 = __msa_aver_u_b(src8, dst8);
+    dst9 = __msa_aver_u_b(src9, dst9);
+    dst10 = __msa_aver_u_b(src10, dst10);
+    dst11 = __msa_aver_u_b(src11, dst11);
+    dst12 = __msa_aver_u_b(src12, dst12);
+    dst13 = __msa_aver_u_b(src13, dst13);
+    dst14 = __msa_aver_u_b(src14, dst14);
+    dst15 = __msa_aver_u_b(src15, dst15);
+
+    STORE_4VECS_UB(dst, 16, dst0, dst1, dst2, dst3);
+    dst += dst_stride;
+    STORE_4VECS_UB(dst, 16, dst4, dst5, dst6, dst7);
+    dst += dst_stride;
+    STORE_4VECS_UB(dst, 16, dst8, dst9, dst10, dst11);
+    dst += dst_stride;
+    STORE_4VECS_UB(dst, 16, dst12, dst13, dst14, dst15);
+    dst += dst_stride;
+  }
+}
+
+void vp9_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
+                          uint8_t *dst, ptrdiff_t dst_stride,
+                          const int16_t *filter_x, int32_t filter_x_stride,
+                          const int16_t *filter_y, int32_t filter_y_stride,
+                          int32_t w, int32_t h) {
+  (void)filter_x;
+  (void)filter_y;
+  (void)filter_x_stride;
+  (void)filter_y_stride;
+
+  switch (w) {
+    case 4: {
+      avg_width4_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 8: {
+      avg_width8_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 16: {
+      avg_width16_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 32: {
+      avg_width32_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 64: {
+      avg_width64_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    default: {
+      int32_t lp, cnt;
+      for (cnt = h; cnt--;) {
+        for (lp = 0; lp < w; ++lp) {
+          dst[lp] = (((dst[lp] + src[lp]) + 1) >> 1);
+        }
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    }
+  }
+}
--- /dev/null
+++ b/vp9/common/mips/msa/vp9_convolve_copy_msa.c
@@ -1,0 +1,300 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include "vp9/common/mips/msa/vp9_macros_msa.h"
+
+static void copy_width8_msa(const uint8_t *src, int32_t src_stride,
+                            uint8_t *dst, int32_t dst_stride,
+                            int32_t height) {
+  int32_t cnt;
+  uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+  if (0 == height % 12) {
+    for (cnt = (height / 12); cnt--;) {
+      LOAD_8VECS_UB(src, src_stride,
+                    src0, src1, src2, src3, src4, src5, src6, src7);
+      src += (8 * src_stride);
+
+      out0 = __msa_copy_u_d((v2i64)src0, 0);
+      out1 = __msa_copy_u_d((v2i64)src1, 0);
+      out2 = __msa_copy_u_d((v2i64)src2, 0);
+      out3 = __msa_copy_u_d((v2i64)src3, 0);
+      out4 = __msa_copy_u_d((v2i64)src4, 0);
+      out5 = __msa_copy_u_d((v2i64)src5, 0);
+      out6 = __msa_copy_u_d((v2i64)src6, 0);
+      out7 = __msa_copy_u_d((v2i64)src7, 0);
+
+      STORE_DWORD(dst, out0);
+      dst += dst_stride;
+      STORE_DWORD(dst, out1);
+      dst += dst_stride;
+      STORE_DWORD(dst, out2);
+      dst += dst_stride;
+      STORE_DWORD(dst, out3);
+      dst += dst_stride;
+      STORE_DWORD(dst, out4);
+      dst += dst_stride;
+      STORE_DWORD(dst, out5);
+      dst += dst_stride;
+      STORE_DWORD(dst, out6);
+      dst += dst_stride;
+      STORE_DWORD(dst, out7);
+      dst += dst_stride;
+
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      src += (4 * src_stride);
+
+      out0 = __msa_copy_u_d((v2i64)src0, 0);
+      out1 = __msa_copy_u_d((v2i64)src1, 0);
+      out2 = __msa_copy_u_d((v2i64)src2, 0);
+      out3 = __msa_copy_u_d((v2i64)src3, 0);
+
+      STORE_DWORD(dst, out0);
+      dst += dst_stride;
+      STORE_DWORD(dst, out1);
+      dst += dst_stride;
+      STORE_DWORD(dst, out2);
+      dst += dst_stride;
+      STORE_DWORD(dst, out3);
+      dst += dst_stride;
+    }
+  } else if (0 == height % 8) {
+    for (cnt = height >> 3; cnt--;) {
+      LOAD_8VECS_UB(src, src_stride,
+                    src0, src1, src2, src3, src4, src5, src6, src7);
+      src += (8 * src_stride);
+
+      out0 = __msa_copy_u_d((v2i64)src0, 0);
+      out1 = __msa_copy_u_d((v2i64)src1, 0);
+      out2 = __msa_copy_u_d((v2i64)src2, 0);
+      out3 = __msa_copy_u_d((v2i64)src3, 0);
+      out4 = __msa_copy_u_d((v2i64)src4, 0);
+      out5 = __msa_copy_u_d((v2i64)src5, 0);
+      out6 = __msa_copy_u_d((v2i64)src6, 0);
+      out7 = __msa_copy_u_d((v2i64)src7, 0);
+
+      STORE_DWORD(dst, out0);
+      dst += dst_stride;
+      STORE_DWORD(dst, out1);
+      dst += dst_stride;
+      STORE_DWORD(dst, out2);
+      dst += dst_stride;
+      STORE_DWORD(dst, out3);
+      dst += dst_stride;
+      STORE_DWORD(dst, out4);
+      dst += dst_stride;
+      STORE_DWORD(dst, out5);
+      dst += dst_stride;
+      STORE_DWORD(dst, out6);
+      dst += dst_stride;
+      STORE_DWORD(dst, out7);
+      dst += dst_stride;
+    }
+  } else if (0 == height % 4) {
+    for (cnt = (height / 4); cnt--;) {
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      src += (4 * src_stride);
+
+      out0 = __msa_copy_u_d((v2i64)src0, 0);
+      out1 = __msa_copy_u_d((v2i64)src1, 0);
+      out2 = __msa_copy_u_d((v2i64)src2, 0);
+      out3 = __msa_copy_u_d((v2i64)src3, 0);
+
+      STORE_DWORD(dst, out0);
+      dst += dst_stride;
+      STORE_DWORD(dst, out1);
+      dst += dst_stride;
+      STORE_DWORD(dst, out2);
+      dst += dst_stride;
+      STORE_DWORD(dst, out3);
+      dst += dst_stride;
+    }
+  } else if (0 == height % 2) {
+    for (cnt = (height / 2); cnt--;) {
+      LOAD_2VECS_UB(src, src_stride, src0, src1);
+      src += (2 * src_stride);
+
+      out0 = __msa_copy_u_d((v2i64)src0, 0);
+      out1 = __msa_copy_u_d((v2i64)src1, 0);
+
+      STORE_DWORD(dst, out0);
+      dst += dst_stride;
+      STORE_DWORD(dst, out1);
+      dst += dst_stride;
+    }
+  }
+}
+
+static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
+                                  uint8_t *dst, int32_t dst_stride,
+                                  int32_t height, int32_t width) {
+  int32_t cnt, loop_cnt;
+  const uint8_t *src_tmp;
+  uint8_t *dst_tmp;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+  for (cnt = (width >> 4); cnt--;) {
+    src_tmp = src;
+    dst_tmp = dst;
+
+    for (loop_cnt = (height >> 3); loop_cnt--;) {
+      LOAD_8VECS_UB(src_tmp, src_stride,
+                    src0, src1, src2, src3, src4, src5, src6, src7);
+      src_tmp += (8 * src_stride);
+
+      STORE_8VECS_UB(dst_tmp, dst_stride,
+                     src0, src1, src2, src3, src4, src5, src6, src7);
+      dst_tmp += (8 * dst_stride);
+    }
+
+    src += 16;
+    dst += 16;
+  }
+}
+
+static void copy_width16_msa(const uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height) {
+  int32_t cnt;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+  if (0 == height % 12) {
+    for (cnt = (height / 12); cnt--;) {
+      LOAD_8VECS_UB(src, src_stride,
+                    src0, src1, src2, src3, src4, src5, src6, src7);
+      src += (8 * src_stride);
+
+      STORE_8VECS_UB(dst, dst_stride,
+                     src0, src1, src2, src3, src4, src5, src6, src7);
+      dst += (8 * dst_stride);
+
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      dst += (4 * dst_stride);
+    }
+  } else if (0 == height % 8) {
+    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
+  } else if (0 == height % 4) {
+    for (cnt = (height >> 2); cnt--;) {
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      dst += (4 * dst_stride);
+    }
+  }
+}
+
+static void copy_width32_msa(const uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height) {
+  int32_t cnt;
+  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+  if (0 == height % 12) {
+    for (cnt = (height / 12); cnt--;) {
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+      dst += (4 * dst_stride);
+
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+      dst += (4 * dst_stride);
+
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+      dst += (4 * dst_stride);
+    }
+  } else if (0 == height % 8) {
+    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
+  } else if (0 == height % 4) {
+    for (cnt = (height >> 2); cnt--;) {
+      LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+      LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+      src += (4 * src_stride);
+
+      STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+      STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+      dst += (4 * dst_stride);
+    }
+  }
+}
+
+static void copy_width64_msa(const uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height) {
+  copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
+}
+
+void vp9_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
+                           uint8_t *dst, ptrdiff_t dst_stride,
+                           const int16_t *filter_x, int32_t filter_x_stride,
+                           const int16_t *filter_y, int32_t filter_y_stride,
+                           int32_t w, int32_t h) {
+  (void)filter_x;
+  (void)filter_y;
+  (void)filter_x_stride;
+  (void)filter_y_stride;
+
+  switch (w) {
+    case 4: {
+      uint32_t cnt, tmp;
+      /* 1 word storage */
+      for (cnt = h; cnt--;) {
+        tmp = LOAD_WORD(src);
+        STORE_WORD(dst, tmp);
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    }
+    case 8: {
+      copy_width8_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 16: {
+      copy_width16_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 32: {
+      copy_width32_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    case 64: {
+      copy_width64_msa(src, src_stride, dst, dst_stride, h);
+      break;
+    }
+    default: {
+      uint32_t cnt;
+      for (cnt = h; cnt--;) {
+        memcpy(dst, src, w);
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    }
+  }
+}
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -286,10 +286,10 @@
 # Sub Pixel Filters
 #
 add_proto qw/void vp9_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve_copy neon dspr2/, "$sse2_x86inc";
+specialize qw/vp9_convolve_copy neon dspr2 msa/, "$sse2_x86inc";
 
 add_proto qw/void vp9_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vp9_convolve_avg neon dspr2/, "$sse2_x86inc";
+specialize qw/vp9_convolve_avg neon dspr2 msa/, "$sse2_x86inc";
 
 add_proto qw/void vp9_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 specialize qw/vp9_convolve8 sse2 ssse3 neon dspr2 msa/, "$avx2_ssse3";
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -134,6 +134,8 @@
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve8_horiz_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve8_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve8_vert_msa.c
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_avg_msa.c
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_copy_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_msa.h
 
 VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c