shithub: libvpx

ref: cf1c0ebc3a56ecb8f0b9c7ee5a0de8da00d70b93
parent: cbebbff025cee750b3744809dfbfc829ffe29098
parent: 6af9d7f2e2cbed0ccdd6bc65ade3083484859750
author: Parag Salasakar <img.mips1@gmail.com>
date: Tue Jun 2 00:48:01 EDT 2015

Merge "mips msa vp9 updated idct 8x8, 16x16 and 32x32 module"
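
This merge flips the four test instantiations below from "#if 0" stubs (with their TODO notes) to live "#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE" guards, and reworks the MSA idct sources to pull their duplicated macros into a shared header, vp9/common/mips/msa/vp9_idct_msa.h, under VP9_-prefixed names (DOTP_CONST_PAIR becomes VP9_DOTP_CONST_PAIR, SET_COSPI_PAIR becomes VP9_SET_COSPI_PAIR, and the hand-rolled load/store/transpose sequences become LD_SH8, ST_SH8, TRANSPOSE8x8_SH_SH and friends).

For orientation, here is a scalar model of what VP9_DOTP_CONST_PAIR computes per lane, reconstructed from the removed macro bodies below; it is a reader's annotation, not code from the patch, and dotp_const_pair_scalar is a made-up name:

#include <stdint.h>

#define DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

/* The rotate-and-round butterfly the MSA macro applies to 8 lanes at once:
   out0 = round(reg0*const0 - reg1*const1),
   out1 = round(reg1*const0 + reg0*const1). */
static void dotp_const_pair_scalar(const int16_t reg0[8], const int16_t reg1[8],
                                   int16_t const0, int16_t const1,
                                   int16_t out0[8], int16_t out1[8]) {
  int i;
  for (i = 0; i < 8; ++i) {
    const int32_t t0 = reg0[i] * const0 - reg1[i] * const1;
    const int32_t t1 = reg1[i] * const0 + reg0[i] * const1;
    out0[i] = (int16_t)ROUND_POWER_OF_TWO(t0, DCT_CONST_BITS);
    out1[i] = (int16_t)ROUND_POWER_OF_TWO(t1, DCT_CONST_BITS);
  }
}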

--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -929,8 +929,7 @@
                    &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
 #endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if 0  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-// TODO(parag): enable when function hooks are added
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans16x16DCT,
     ::testing::Values(
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -382,8 +382,7 @@
                    &vp9_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
 #endif  // HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if 0  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-// TODO(parag): enable when function hooks are added
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans32x32Test,
     ::testing::Values(
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -782,8 +782,7 @@
                    VPX_BITS_8)));
 #endif
 
-#if 0  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-// TODO(parag): enable when function hooks are added
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8DCT,
     ::testing::Values(
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -305,8 +305,7 @@
                    TX_8X8, 12)));
 #endif
 
-#if 0  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-// TODO(parag): enable when function hooks are added
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
     ::testing::Values(
--- a/vp9/common/mips/msa/vp9_idct16x16_msa.c
+++ b/vp9/common/mips/msa/vp9_idct16x16_msa.c
@@ -8,267 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include <assert.h>
+#include "vp9/common/mips/msa/vp9_idct_msa.h"
 
-#include "vpx_ports/mem.h"
-#include "vp9/common/vp9_idct.h"
-#include "vp9/common/mips/msa/vp9_macros_msa.h"
-
-#define SET_COSPI_PAIR(c0_h, c1_h) ({  \
-  v8i16 out0, r0_m, r1_m;              \
-                                       \
-  r0_m = __msa_fill_h(c0_h);           \
-  r1_m = __msa_fill_h(c1_h);           \
-  out0 = __msa_ilvev_h(r1_m, r0_m);    \
-                                       \
-  out0;                                \
-})
-
-#define DOTP_CONST_PAIR(reg0, reg1, const0, const1, out0, out1) {  \
-  v8i16 k0_m = __msa_fill_h(const0);                               \
-  v8i16 s0_m, s1_m, s2_m, s3_m;                                    \
-                                                                   \
-  s0_m = __msa_fill_h(const1);                                     \
-  k0_m = __msa_ilvev_h(s0_m, k0_m);                                \
-                                                                   \
-  s0_m = __msa_ilvl_h(-reg1, reg0);                                \
-  s1_m = __msa_ilvr_h(-reg1, reg0);                                \
-  s2_m = __msa_ilvl_h(reg0, reg1);                                 \
-  s3_m = __msa_ilvr_h(reg0, reg1);                                 \
-  s1_m = (v8i16)__msa_dotp_s_w(s1_m, k0_m);                        \
-  s0_m = (v8i16)__msa_dotp_s_w(s0_m, k0_m);                        \
-  s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS);        \
-  s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS);        \
-  out0 = __msa_pckev_h(s0_m, s1_m);                                \
-                                                                   \
-  s1_m = (v8i16)__msa_dotp_s_w(s3_m, k0_m);                        \
-  s0_m = (v8i16)__msa_dotp_s_w(s2_m, k0_m);                        \
-  s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS);        \
-  s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS);        \
-  out1 = __msa_pckev_h(s0_m, s1_m);                                \
-}
-
-#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) {      \
-  v4i32 madd0_m, madd1_m, madd2_m, madd3_m;               \
-  v8i16 madd_s0_m, madd_s1_m;                             \
-                                                          \
-  ILV_H_LR_SH(m0, m1, madd_s1_m, madd_s0_m);              \
-                                                          \
-  DOTP_S_W_4VECS_SW(madd_s0_m, c0, madd_s1_m, c0,         \
-                    madd_s0_m, c1, madd_s1_m, c1,         \
-                    madd0_m, madd1_m, madd2_m, madd3_m);  \
-                                                          \
-  SRARI_W_4VECS_SW(madd0_m, madd1_m, madd2_m, madd3_m,    \
-                   madd0_m, madd1_m, madd2_m, madd3_m,    \
-                   DCT_CONST_BITS);                       \
-                                                          \
-  PCKEV_H_2VECS_SH(madd1_m, madd0_m, madd3_m, madd2_m,    \
-                   res0, res1);                           \
-}
-
-#define VP9_MADD_BF(inp0, inp1, inp2, inp3,                   \
-                    cst0, cst1, cst2, cst3,                   \
-                    out0, out1, out2, out3) {                 \
-  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;           \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                       \
-  v4i32 m4_m, m5_m;                                           \
-                                                              \
-  ILV_H_LRLR_SH(inp0, inp1, inp2, inp3,                       \
-                madd_s1_m, madd_s0_m, madd_s3_m, madd_s2_m);  \
-                                                              \
-  DOTP_S_W_4VECS_SW(madd_s0_m, cst0, madd_s1_m, cst0,         \
-                    madd_s2_m, cst2, madd_s3_m, cst2,         \
-                    tmp0_m, tmp1_m, tmp2_m, tmp3_m);          \
-                                                              \
-  m4_m = tmp0_m + tmp2_m;                                     \
-  m5_m = tmp1_m + tmp3_m;                                     \
-  tmp3_m = tmp1_m - tmp3_m;                                   \
-  tmp2_m = tmp0_m - tmp2_m;                                   \
-                                                              \
-  SRARI_W_4VECS_SW(m4_m, m5_m, tmp2_m, tmp3_m,                \
-                   m4_m, m5_m, tmp2_m, tmp3_m,                \
-                   DCT_CONST_BITS);                           \
-                                                              \
-  PCKEV_H_2VECS_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);   \
-                                                              \
-  DOTP_S_W_4VECS_SW(madd_s0_m, cst1, madd_s1_m, cst1,         \
-                    madd_s2_m, cst3, madd_s3_m, cst3,         \
-                    tmp0_m, tmp1_m, tmp2_m, tmp3_m);          \
-                                                              \
-  m4_m = tmp0_m + tmp2_m;                                     \
-  m5_m = tmp1_m + tmp3_m;                                     \
-  tmp3_m = tmp1_m - tmp3_m;                                   \
-  tmp2_m = tmp0_m - tmp2_m;                                   \
-                                                              \
-  SRARI_W_4VECS_SW(m4_m, m5_m, tmp2_m, tmp3_m,                \
-                   m4_m, m5_m, tmp2_m, tmp3_m,                \
-                   DCT_CONST_BITS);                           \
-                                                              \
-  PCKEV_H_2VECS_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);   \
-}
-
-#define TRANSPOSE8x8_H1(in0, in1, in2, in3,                   \
-                        in4, in5, in6, in7,                   \
-                        out0, out1, out2, out3,               \
-                        out4, out5, out6, out7) {             \
-  v8i16 loc0_m, loc1_m;                                       \
-  v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                       \
-  v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                       \
-                                                              \
-  loc0_m = __msa_ilvr_h((in6), (in4));                        \
-  loc1_m = __msa_ilvr_h((in7), (in5));                        \
-  tmp0_m = __msa_ilvr_h(loc1_m, loc0_m);                      \
-  tmp1_m = __msa_ilvl_h(loc1_m, loc0_m);                      \
-                                                              \
-  loc0_m = __msa_ilvl_h((in6), (in4));                        \
-  loc1_m = __msa_ilvl_h((in7), (in5));                        \
-  tmp2_m = __msa_ilvr_h(loc1_m, loc0_m);                      \
-  tmp3_m = __msa_ilvl_h(loc1_m, loc0_m);                      \
-                                                              \
-  loc0_m = __msa_ilvr_h((in2), (in0));                        \
-  loc1_m = __msa_ilvr_h((in3), (in1));                        \
-  tmp4_m = __msa_ilvr_h(loc1_m, loc0_m);                      \
-  tmp5_m = __msa_ilvl_h(loc1_m, loc0_m);                      \
-                                                              \
-  loc0_m = __msa_ilvl_h((in2), (in0));                        \
-  loc1_m = __msa_ilvl_h((in3), (in1));                        \
-  tmp6_m = __msa_ilvr_h(loc1_m, loc0_m);                      \
-  tmp7_m = __msa_ilvl_h(loc1_m, loc0_m);                      \
-                                                              \
-  out0 = (v8i16)__msa_pckev_d((v2i64)tmp0_m, (v2i64)tmp4_m);  \
-  out1 = (v8i16)__msa_pckod_d((v2i64)tmp0_m, (v2i64)tmp4_m);  \
-  out2 = (v8i16)__msa_pckev_d((v2i64)tmp1_m, (v2i64)tmp5_m);  \
-  out3 = (v8i16)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m);  \
-  out4 = (v8i16)__msa_pckev_d((v2i64)tmp2_m, (v2i64)tmp6_m);  \
-  out5 = (v8i16)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m);  \
-  out6 = (v8i16)__msa_pckev_d((v2i64)tmp3_m, (v2i64)tmp7_m);  \
-  out7 = (v8i16)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m);  \
-}
-
-#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7,                  \
-                         r8, r9, r10, r11, r12, r13, r14, r15,            \
-                         out0, out1, out2, out3, out4, out5, out6, out7,  \
-                         out8, out9, out10, out11,                        \
-                         out12, out13, out14, out15) {                    \
-  v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;                   \
-  v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;             \
-  v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;                   \
-  v8i16 h8_m, h9_m, h10_m, h11_m;                                         \
-  v8i16 k0_m, k1_m, k2_m, k3_m;                                           \
-                                                                          \
-  /* stage 1 */                                                           \
-  k0_m = SET_COSPI_PAIR(cospi_1_64, cospi_31_64);                         \
-  k1_m = SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);                        \
-  k2_m = SET_COSPI_PAIR(cospi_17_64, cospi_15_64);                        \
-  k3_m = SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);                       \
-  VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,                    \
-              g0_m, g1_m, g2_m, g3_m);                                    \
-                                                                          \
-  k0_m = SET_COSPI_PAIR(cospi_5_64, cospi_27_64);                         \
-  k1_m = SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);                        \
-  k2_m = SET_COSPI_PAIR(cospi_21_64, cospi_11_64);                        \
-  k3_m = SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);                       \
-  VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,                   \
-              g4_m, g5_m, g6_m, g7_m);                                    \
-                                                                          \
-  k0_m = SET_COSPI_PAIR(cospi_9_64, cospi_23_64);                         \
-  k1_m = SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);                        \
-  k2_m = SET_COSPI_PAIR(cospi_25_64, cospi_7_64);                         \
-  k3_m = SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);                        \
-  VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,                   \
-              g8_m, g9_m, g10_m, g11_m);                                  \
-                                                                          \
-  k0_m = SET_COSPI_PAIR(cospi_13_64, cospi_19_64);                        \
-  k1_m = SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);                       \
-  k2_m = SET_COSPI_PAIR(cospi_29_64, cospi_3_64);                         \
-  k3_m = SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);                        \
-  VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,                    \
-              g12_m, g13_m, g14_m, g15_m);                                \
-                                                                          \
-  /* stage 2 */                                                           \
-  k0_m = SET_COSPI_PAIR(cospi_4_64, cospi_28_64);                         \
-  k1_m = SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);                        \
-  k2_m = SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);                        \
-  VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,            \
-              h0_m, h1_m, h2_m, h3_m);                                    \
-                                                                          \
-  k0_m = SET_COSPI_PAIR(cospi_12_64, cospi_20_64);                        \
-  k1_m = SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);                       \
-  k2_m = SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);                       \
-  VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,           \
-              h4_m, h5_m, h6_m, h7_m);                                    \
-                                                                          \
-  BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);          \
-                                                                          \
-  BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m,          \
-              h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);          \
-                                                                          \
-  /* stage 3 */                                                           \
-  BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);        \
-                                                                          \
-  k0_m = SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                         \
-  k1_m = SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                        \
-  k2_m = SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);                        \
-  VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,             \
-              out4, out6, out5, out7);                                    \
-  VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,             \
-              out12, out14, out13, out15);                                \
-                                                                          \
-  /* stage 4 */                                                           \
-  k0_m = SET_COSPI_PAIR(cospi_16_64, cospi_16_64);                        \
-  k1_m = SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);                      \
-  k2_m = SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);                       \
-  k3_m = SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);                       \
-  VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);                   \
-  VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);                     \
-  VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);                 \
-  VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);                 \
-}
-
-#define VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride,     \
-                                            in0, in1, in2, in3) {  \
-  uint64_t out0_m, out1_m, out2_m, out3_m;                         \
-  v8i16 res0_m, res1_m, res2_m, res3_m;                            \
-  v16u8 dest0_m, dest1_m, dest2_m, dest3_m;                        \
-  v16i8 tmp0_m, tmp1_m;                                            \
-  v16i8 zero_m = { 0 };                                            \
-  uint8_t *dst_m = (uint8_t *)(dest);                              \
-                                                                   \
-  LOAD_4VECS_UB(dst_m, (dest_stride),                              \
-                dest0_m, dest1_m, dest2_m, dest3_m);               \
-                                                                   \
-  res0_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest0_m);            \
-  res1_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest1_m);            \
-  res2_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest2_m);            \
-  res3_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest3_m);            \
-                                                                   \
-  res0_m += (v8i16)(in0);                                          \
-  res1_m += (v8i16)(in1);                                          \
-  res2_m += (v8i16)(in2);                                          \
-  res3_m += (v8i16)(in3);                                          \
-                                                                   \
-  res0_m = CLIP_UNSIGNED_CHAR_H(res0_m);                           \
-  res1_m = CLIP_UNSIGNED_CHAR_H(res1_m);                           \
-  res2_m = CLIP_UNSIGNED_CHAR_H(res2_m);                           \
-  res3_m = CLIP_UNSIGNED_CHAR_H(res3_m);                           \
-                                                                   \
-  tmp0_m = __msa_pckev_b((v16i8)res1_m, (v16i8)res0_m);            \
-  tmp1_m = __msa_pckev_b((v16i8)res3_m, (v16i8)res2_m);            \
-                                                                   \
-  out0_m = __msa_copy_u_d((v2i64)tmp0_m, 0);                       \
-  out1_m = __msa_copy_u_d((v2i64)tmp0_m, 1);                       \
-  out2_m = __msa_copy_u_d((v2i64)tmp1_m, 0);                       \
-  out3_m = __msa_copy_u_d((v2i64)tmp1_m, 1);                       \
-                                                                   \
-  STORE_DWORD(dst_m, out0_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out1_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out2_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out3_m);                                      \
-}
-
 void vp9_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
@@ -275,50 +16,29 @@
   v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
   v8i16 tmp5, tmp6, tmp7;
 
-  /* load left top 8x8 */
-  LOAD_8VECS_SH(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  input += 8;
+  LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
 
-  /* load right top 8x8 */
-  LOAD_8VECS_SH((input + 8), 16,
-                reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+  TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
+                     reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  TRANSPOSE8x8_SH_SH(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15,
+                     reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+  VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
+  VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
+  BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
+  VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
+  VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
+  VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
+  BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);
+  SUB4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg0, reg12, reg4,
+       reg8);
+  ADD4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg2, reg14, reg6,
+       reg10);
 
-  /* transpose block */
-  TRANSPOSE8x8_H1(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
-                  reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
-
-  /* transpose block */
-  TRANSPOSE8x8_H1(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15,
-                  reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
-
-  DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
-  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
-
-  loc0 = reg2 + reg10;
-  reg2 = reg2 - reg10;
-  loc1 = reg14 + reg6;
-  reg14 = reg14 - reg6;
-
-  DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
-  DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
-  DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
-
-  reg14 = reg8 - reg12;
-  reg2 = reg8 + reg12;
-  reg10 = reg0 - reg4;
-  reg6 = reg0 + reg4;
-
-  reg0 = reg2 - loc1;
-  reg2 = reg2 + loc1;
-  reg12 = reg14 - loc0;
-  reg14 = reg14 + loc0;
-  reg4 = reg6 - loc3;
-  reg6 = reg6 + loc3;
-  reg8 = reg10 - loc2;
-  reg10 = reg10 + loc2;
-
   /* stage 2 */
-  DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
-  DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
+  VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
+  VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
 
   reg9 = reg1 - loc2;
   reg1 = reg1 + loc2;
@@ -325,14 +45,10 @@
   reg7 = reg15 - loc3;
   reg15 = reg15 + loc3;
 
-  DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
-  DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+  VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+  BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
 
-  reg13 = loc0 + reg5;
-  reg5 = loc0 - reg5;
-  reg3 = loc1 + reg11;
-  reg11 = loc1 - reg11;
-
   loc1 = reg15 + reg3;
   reg3 = reg15 - reg3;
   loc2 = reg2 + loc1;
@@ -346,8 +62,8 @@
   tmp7 = loc1;
   reg0 = loc2;
 
-  DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
-  DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
+  VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
 
   loc0 = reg9 + reg5;
   reg5 = reg9 - reg5;
@@ -360,21 +76,15 @@
   loc2 = reg4 - loc0;
   tmp5 = loc1;
 
-  DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+  BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
 
-  loc0 = reg8 + reg5;
-  loc1 = reg8 - reg5;
-  reg4 = reg10 + reg11;
-  reg9 = reg10 - reg11;
   reg10 = loc0;
   reg11 = loc1;
 
-  DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
+  VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
+  BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
 
-  reg8 = reg12 + reg3;
-  reg5 = reg12 - reg3;
-  reg6 = reg14 + reg13;
-  reg7 = reg14 - reg13;
   reg13 = loc2;
 
   /* Transpose and store the output */
@@ -383,21 +93,18 @@
   reg3 = tmp7;
 
   /* transpose block */
-  TRANSPOSE8x8_H1(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
-                  reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
+  TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
+                     reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
+  ST_SH8(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, output, 16);
 
-  STORE_8VECS_SH(output, 16, reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
-
   /* transpose block */
-  TRANSPOSE8x8_H1(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
-                  reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
-
-  STORE_8VECS_SH((output + 8), 16,
-                 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
+  TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
+                     reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
+  ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);
 }
 
-void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dest,
-                                      int32_t dest_stride) {
+void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+                                      int32_t dst_stride) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
   v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
@@ -404,29 +111,19 @@
   v8i16 tmp5, tmp6, tmp7;
 
   /* load up 8x8 */
-  LOAD_8VECS_SH(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
-
+  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  input += 8 * 16;
   /* load bottom 8x8 */
-  LOAD_8VECS_SH((input + 8 * 16), 16,
-                reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+  LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
 
-  DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
-  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
+  VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
+  VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
+  BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
+  VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
+  VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
+  VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
+  BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);
 
-  loc0 = reg2 + reg10;
-  reg2 = reg2 - reg10;
-  loc1 = reg14 + reg6;
-  reg14 = reg14 - reg6;
-
-  DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
-  DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
-  DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
-
-  reg14 = reg8 - reg12;
-  reg2 = reg8 + reg12;
-  reg10 = reg0 - reg4;
-  reg6 = reg0 + reg4;
-
   reg0 = reg2 - loc1;
   reg2 = reg2 + loc1;
   reg12 = reg14 - loc0;
@@ -437,8 +134,8 @@
   reg10 = reg10 + loc2;
 
   /* stage 2 */
-  DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
-  DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
+  VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
+  VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
 
   reg9 = reg1 - loc2;
   reg1 = reg1 + loc2;
@@ -445,14 +142,10 @@
   reg7 = reg15 - loc3;
   reg15 = reg15 + loc3;
 
-  DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
-  DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+  VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+  BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
 
-  reg13 = loc0 + reg5;
-  reg5 = loc0 - reg5;
-  reg3 = loc1 + reg11;
-  reg11 = loc1 - reg11;
-
   loc1 = reg15 + reg3;
   reg3 = reg15 - reg3;
   loc2 = reg2 + loc1;
@@ -466,8 +159,8 @@
   tmp7 = loc1;
   reg0 = loc2;
 
-  DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
-  DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
+  VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
 
   loc0 = reg9 + reg5;
   reg5 = reg9 - reg5;
@@ -480,21 +173,14 @@
   loc2 = reg4 - loc0;
   tmp5 = loc1;
 
-  DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+  VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+  BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
 
-  loc0 = reg8 + reg5;
-  loc1 = reg8 - reg5;
-  reg4 = reg10 + reg11;
-  reg9 = reg10 - reg11;
   reg10 = loc0;
   reg11 = loc1;
 
-  DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
-
-  reg8 = reg12 + reg3;
-  reg5 = reg12 - reg3;
-  reg6 = reg14 + reg13;
-  reg7 = reg14 - reg13;
+  VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
+  BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
   reg13 = loc2;
 
   /* Transpose and store the output */
@@ -502,22 +188,21 @@
   reg14 = tmp6;
   reg3 = tmp7;
 
-  SRARI_H_4VECS_SH(reg0, reg2, reg4, reg6, reg0, reg2, reg4, reg6, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride,
-                                      reg0, reg2, reg4, reg6);
-  SRARI_H_4VECS_SH(reg8, reg10, reg12, reg14, reg8, reg10, reg12, reg14, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4((dest + (4 * dest_stride)),
-                                      dest_stride, reg8, reg10, reg12, reg14);
-  SRARI_H_4VECS_SH(reg3, reg13, reg11, reg5, reg3, reg13, reg11, reg5, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4((dest + (8 * dest_stride)),
-                                      dest_stride, reg3, reg13, reg11, reg5);
-  SRARI_H_4VECS_SH(reg7, reg9, reg1, reg15, reg7, reg9, reg1, reg15, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4((dest + (12 * dest_stride)),
-                                      dest_stride, reg7, reg9, reg1, reg15);
+  SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
+  dst += (4 * dst_stride);
+  SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
+  dst += (4 * dst_stride);
+  SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
+  dst += (4 * dst_stride);
+  SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
 }
 
-void vp9_idct16x16_256_add_msa(const int16_t *input, uint8_t *dest,
-                               int32_t dest_stride) {
+void vp9_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+                               int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
   int16_t *out = out_arr;
@@ -531,13 +216,13 @@
   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dest + (i << 3)),
-                                     dest_stride);
+    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+                                     dst_stride);
   }
 }
 
-void vp9_idct16x16_10_add_msa(const int16_t *input, uint8_t *dest,
-                              int32_t dest_stride) {
+void vp9_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
+                              int32_t dst_stride) {
   uint8_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
   int16_t *out = out_arr;
@@ -570,64 +255,38 @@
   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dest + (i << 3)),
-                                     dest_stride);
+    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+                                     dst_stride);
   }
 }
 
-void vp9_idct16x16_1_add_msa(const int16_t *input, uint8_t *dest,
-                             int32_t dest_stride) {
+void vp9_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
+                             int32_t dst_stride) {
   uint8_t i;
-  int32_t const1;
   int16_t out;
-  v8i16 const2, res0, res1, res2, res3, res4, res5, res6, res7;
-  v16u8 dest0, dest1, dest2, dest3;
-  v16u8 tmp0, tmp1, tmp2, tmp3;
-  v16i8 zero = { 0 };
+  v8i16 vec, res0, res1, res2, res3, res4, res5, res6, res7;
+  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
 
-  out = dct_const_round_shift(input[0] * cospi_16_64);
-  out = dct_const_round_shift(out * cospi_16_64);
-  const1 = ROUND_POWER_OF_TWO(out, 6);
+  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO(out, 6);
 
-  const2 = __msa_fill_h(const1);
+  vec = __msa_fill_h(out);
 
-  for (i = 0; i < 4; ++i) {
-    LOAD_4VECS_UB(dest, dest_stride, dest0, dest1, dest2, dest3);
-
-    res0 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest0);
-    res1 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest1);
-    res2 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest2);
-    res3 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest3);
-    res4 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest0);
-    res5 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest1);
-    res6 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest2);
-    res7 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest3);
-
-    res0 += const2;
-    res1 += const2;
-    res2 += const2;
-    res3 += const2;
-    res4 += const2;
-    res5 += const2;
-    res6 += const2;
-    res7 += const2;
-
-    res0 = CLIP_UNSIGNED_CHAR_H(res0);
-    res1 = CLIP_UNSIGNED_CHAR_H(res1);
-    res2 = CLIP_UNSIGNED_CHAR_H(res2);
-    res3 = CLIP_UNSIGNED_CHAR_H(res3);
-    res4 = CLIP_UNSIGNED_CHAR_H(res4);
-    res5 = CLIP_UNSIGNED_CHAR_H(res5);
-    res6 = CLIP_UNSIGNED_CHAR_H(res6);
-    res7 = CLIP_UNSIGNED_CHAR_H(res7);
-
-    tmp0 = (v16u8)__msa_pckev_b((v16i8)res4, (v16i8)res0);
-    tmp1 = (v16u8)__msa_pckev_b((v16i8)res5, (v16i8)res1);
-    tmp2 = (v16u8)__msa_pckev_b((v16i8)res6, (v16i8)res2);
-    tmp3 = (v16u8)__msa_pckev_b((v16i8)res7, (v16i8)res3);
-
-    STORE_4VECS_UB(dest, dest_stride, tmp0, tmp1, tmp2, tmp3);
-    dest += (4 * dest_stride);
+  for (i = 4; i--;) {
+    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
+    UNPCK_UB_SH(dst0, res0, res4);
+    UNPCK_UB_SH(dst1, res1, res5);
+    UNPCK_UB_SH(dst2, res2, res6);
+    UNPCK_UB_SH(dst3, res3, res7);
+    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
+    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
+    CLIP_SH4_0_255(res0, res1, res2, res3);
+    CLIP_SH4_0_255(res4, res5, res6, res7);
+    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
+                tmp0, tmp1, tmp2, tmp3);
+    ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+    dst += (4 * dst_stride);
   }
 }
 
@@ -636,16 +295,13 @@
   v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
 
   /* load input data */
-  LOAD_16VECS_SH(input, 8,
-                 l0, l8, l1, l9, l2, l10, l3, l11,
-                 l4, l12, l5, l13, l6, l14, l7, l15);
+  LD_SH16(input, 8,
+          l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
+                     l0, l1, l2, l3, l4, l5, l6, l7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
+                     l8, l9, l10, l11, l12, l13, l14, l15);
 
-  TRANSPOSE8x8_H_SH(l0, l1, l2, l3, l4, l5, l6, l7,
-                    l0, l1, l2, l3, l4, l5, l6, l7);
-
-  TRANSPOSE8x8_H_SH(l8, l9, l10, l11, l12, l13, l14, l15,
-                    l8, l9, l10, l11, l12, l13, l14, l15);
-
   /* ADST in horizontal */
   VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                    l8, l9, l10, l11, l12, l13, l14, l15,
@@ -657,19 +313,16 @@
   l13 = -r13;
   l15 = -r1;
 
-  TRANSPOSE8x8_H_SH(r0, l1, r12, l3, r6, r14, r10, r2,
-                    l0, l1, l2, l3, l4, l5, l6, l7);
-
-  STORE_8VECS_SH(output, 16, l0, l1, l2, l3, l4, l5, l6, l7);
-
-  TRANSPOSE8x8_H_SH(r3, r11, r15, r7, r5, l13, r9, l15,
-                    l8, l9, l10, l11, l12, l13, l14, l15);
-
-  STORE_8VECS_SH((output + 8), 16, l8, l9, l10, l11, l12, l13, l14, l15);
+  TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2,
+                     l0, l1, l2, l3, l4, l5, l6, l7);
+  ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);
+  TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15,
+                     l8, l9, l10, l11, l12, l13, l14, l15);
+  ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
 }
 
-static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dest,
-                                              int32_t dest_stride) {
+static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+                                              int32_t dst_stride) {
   v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
   v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
   v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
@@ -678,210 +331,163 @@
   v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
   v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
   v8i16 res8, res9, res10, res11, res12, res13, res14, res15;
-  v16u8 dest0, dest1, dest2, dest3, dest4, dest5, dest6, dest7;
-  v16u8 dest8, dest9, dest10, dest11, dest12, dest13, dest14, dest15;
+  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+  v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
   v16i8 zero = { 0 };
 
-  r0 = LOAD_SH(input + 0 * 16);
-  r3 = LOAD_SH(input + 3 * 16);
-  r4 = LOAD_SH(input + 4 * 16);
-  r7 = LOAD_SH(input + 7 * 16);
-  r8 = LOAD_SH(input + 8 * 16);
-  r11 = LOAD_SH(input + 11 * 16);
-  r12 = LOAD_SH(input + 12 * 16);
-  r15 = LOAD_SH(input + 15 * 16);
+  r0 = LD_SH(input + 0 * 16);
+  r3 = LD_SH(input + 3 * 16);
+  r4 = LD_SH(input + 4 * 16);
+  r7 = LD_SH(input + 7 * 16);
+  r8 = LD_SH(input + 8 * 16);
+  r11 = LD_SH(input + 11 * 16);
+  r12 = LD_SH(input + 12 * 16);
+  r15 = LD_SH(input + 15 * 16);
 
   /* stage 1 */
-  k0 = SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
-  k1 = SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
-  k2 = SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
-  k3 = SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
   VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
-
-  k0 = SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
-  k1 = SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
-  k2 = SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
-  k3 = SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
   VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
-
   BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
-
-  k0 = SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k1 = SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k2 = SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
   VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
 
-  r1 = LOAD_SH(input + 1 * 16);
-  r2 = LOAD_SH(input + 2 * 16);
-  r5 = LOAD_SH(input + 5 * 16);
-  r6 = LOAD_SH(input + 6 * 16);
-  r9 = LOAD_SH(input + 9 * 16);
-  r10 = LOAD_SH(input + 10 * 16);
-  r13 = LOAD_SH(input + 13 * 16);
-  r14 = LOAD_SH(input + 14 * 16);
+  r1 = LD_SH(input + 1 * 16);
+  r2 = LD_SH(input + 2 * 16);
+  r5 = LD_SH(input + 5 * 16);
+  r6 = LD_SH(input + 6 * 16);
+  r9 = LD_SH(input + 9 * 16);
+  r10 = LD_SH(input + 10 * 16);
+  r13 = LD_SH(input + 13 * 16);
+  r14 = LD_SH(input + 14 * 16);
 
-  k0 = SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
-  k1 = SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
-  k2 = SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
-  k3 = SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
   VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
-
-  k0 = SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
-  k1 = SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
-  k2 = SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
-  k3 = SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
   VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
-
   BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
-
   BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
   out1 = -out1;
-  out0 = __msa_srari_h(out0, 6);
-  out1 = __msa_srari_h(out1, 6);
-  dest0 = LOAD_UB(dest + 0 * dest_stride);
-  dest1 = LOAD_UB(dest + 15 * dest_stride);
-  res0 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest0);
-  res1 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest1);
-  res0 += out0;
-  res1 += out1;
-  res0 = CLIP_UNSIGNED_CHAR_H(res0);
-  res1 = CLIP_UNSIGNED_CHAR_H(res1);
-  res0 = (v8i16)__msa_pckev_b((v16i8)res0, (v16i8)res0);
-  res1 = (v8i16)__msa_pckev_b((v16i8)res1, (v16i8)res1);
-  STORE_DWORD(dest, __msa_copy_u_d((v2i64)res0, 0));
-  STORE_DWORD(dest + 15 * dest_stride, __msa_copy_u_d((v2i64)res1, 0));
+  SRARI_H2_SH(out0, out1, 6);
+  dst0 = LD_UB(dst + 0 * dst_stride);
+  dst1 = LD_UB(dst + 15 * dst_stride);
+  ILVR_B2_SH(zero, dst0, zero, dst1, res0, res1);
+  ADD2(res0, out0, res1, out1, res0, res1);
+  CLIP_SH2_0_255(res0, res1);
+  PCKEV_B2_SH(res0, res0, res1, res1, res0, res1);
+  ST8x1_UB(res0, dst);
+  ST8x1_UB(res1, dst + 15 * dst_stride);
 
-  k0 = SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
-  k1 = SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k2 = SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
   VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
-
   BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
   out8 = -out8;
 
-  out8 = __msa_srari_h(out8, 6);
-  out9 = __msa_srari_h(out9, 6);
-  dest8 = LOAD_UB(dest + 1 * dest_stride);
-  dest9 = LOAD_UB(dest + 14 * dest_stride);
-  res8 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest8);
-  res9 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest9);
-  res8 += out8;
-  res9 += out9;
-  res8 = CLIP_UNSIGNED_CHAR_H(res8);
-  res9 = CLIP_UNSIGNED_CHAR_H(res9);
-  res8 = (v8i16)__msa_pckev_b((v16i8)res8, (v16i8)res8);
-  res9 = (v8i16)__msa_pckev_b((v16i8)res9, (v16i8)res9);
-  STORE_DWORD(dest + dest_stride, __msa_copy_u_d((v2i64)res8, 0));
-  STORE_DWORD(dest + 14 * dest_stride, __msa_copy_u_d((v2i64)res9, 0));
+  SRARI_H2_SH(out8, out9, 6);
+  dst8 = LD_UB(dst + 1 * dst_stride);
+  dst9 = LD_UB(dst + 14 * dst_stride);
+  ILVR_B2_SH(zero, dst8, zero, dst9, res8, res9);
+  ADD2(res8, out8, res9, out9, res8, res9);
+  CLIP_SH2_0_255(res8, res9);
+  PCKEV_B2_SH(res8, res8, res9, res9, res8, res9);
+  ST8x1_UB(res8, dst + dst_stride);
+  ST8x1_UB(res9, dst + 14 * dst_stride);
 
-  k0 = SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
-  k1 = SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k2 = SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
   VP9_MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
   out4 = -out4;
-  out4 = __msa_srari_h(out4, 6);
-  out5 = __msa_srari_h(out5, 6);
-  dest4 = LOAD_UB(dest + 3 * dest_stride);
-  dest5 = LOAD_UB(dest + 12 * dest_stride);
-  res4 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest4);
-  res5 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest5);
-  res4 += out4;
-  res5 += out5;
-  res4 = CLIP_UNSIGNED_CHAR_H(res4);
-  res5 = CLIP_UNSIGNED_CHAR_H(res5);
-  res4 = (v8i16)__msa_pckev_b((v16i8)res4, (v16i8)res4);
-  res5 = (v8i16)__msa_pckev_b((v16i8)res5, (v16i8)res5);
-  STORE_DWORD(dest + 3 * dest_stride, __msa_copy_u_d((v2i64)res4, 0));
-  STORE_DWORD(dest + 12 * dest_stride, __msa_copy_u_d((v2i64)res5, 0));
+  SRARI_H2_SH(out4, out5, 6);
+  dst4 = LD_UB(dst + 3 * dst_stride);
+  dst5 = LD_UB(dst + 12 * dst_stride);
+  ILVR_B2_SH(zero, dst4, zero, dst5, res4, res5);
+  ADD2(res4, out4, res5, out5, res4, res5);
+  CLIP_SH2_0_255(res4, res5);
+  PCKEV_B2_SH(res4, res4, res5, res5, res4, res5);
+  ST8x1_UB(res4, dst + 3 * dst_stride);
+  ST8x1_UB(res5, dst + 12 * dst_stride);
 
   VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
   out13 = -out13;
-  out12 = __msa_srari_h(out12, 6);
-  out13 = __msa_srari_h(out13, 6);
-  dest12 = LOAD_UB(dest + 2 * dest_stride);
-  dest13 = LOAD_UB(dest + 13 * dest_stride);
-  res12 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest12);
-  res13 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest13);
-  res12 += out12;
-  res13 += out13;
-  res12 = CLIP_UNSIGNED_CHAR_H(res12);
-  res13 = CLIP_UNSIGNED_CHAR_H(res13);
-  res12 = (v8i16)__msa_pckev_b((v16i8)res12, (v16i8)res12);
-  res13 = (v8i16)__msa_pckev_b((v16i8)res13, (v16i8)res13);
-  STORE_DWORD(dest + 2 * dest_stride, __msa_copy_u_d((v2i64)res12, 0));
-  STORE_DWORD(dest + 13 * dest_stride, __msa_copy_u_d((v2i64)res13, 0));
+  SRARI_H2_SH(out12, out13, 6);
+  dst12 = LD_UB(dst + 2 * dst_stride);
+  dst13 = LD_UB(dst + 13 * dst_stride);
+  ILVR_B2_SH(zero, dst12, zero, dst13, res12, res13);
+  ADD2(res12, out12, res13, out13, res12, res13);
+  CLIP_SH2_0_255(res12, res13);
+  PCKEV_B2_SH(res12, res12, res13, res13, res12, res13);
+  ST8x1_UB(res12, dst + 2 * dst_stride);
+  ST8x1_UB(res13, dst + 13 * dst_stride);
 
-  k0 = SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k3 = SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
   VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
-  out6 = __msa_srari_h(out6, 6);
-  out7 = __msa_srari_h(out7, 6);
-  dest6 = LOAD_UB(dest + 4 * dest_stride);
-  dest7 = LOAD_UB(dest + 11 * dest_stride);
-  res6 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest6);
-  res7 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest7);
-  res6 += out6;
-  res7 += out7;
-  res6 = CLIP_UNSIGNED_CHAR_H(res6);
-  res7 = CLIP_UNSIGNED_CHAR_H(res7);
-  res6 = (v8i16)__msa_pckev_b((v16i8)res6, (v16i8)res6);
-  res7 = (v8i16)__msa_pckev_b((v16i8)res7, (v16i8)res7);
-  STORE_DWORD(dest + 4 * dest_stride, __msa_copy_u_d((v2i64)res6, 0));
-  STORE_DWORD(dest + 11 * dest_stride, __msa_copy_u_d((v2i64)res7, 0));
+  SRARI_H2_SH(out6, out7, 6);
+  dst6 = LD_UB(dst + 4 * dst_stride);
+  dst7 = LD_UB(dst + 11 * dst_stride);
+  ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7);
+  ADD2(res6, out6, res7, out7, res6, res7);
+  CLIP_SH2_0_255(res6, res7);
+  PCKEV_B2_SH(res6, res6, res7, res7, res6, res7);
+  ST8x1_UB(res6, dst + 4 * dst_stride);
+  ST8x1_UB(res7, dst + 11 * dst_stride);
 
   VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
-  out10 = __msa_srari_h(out10, 6);
-  out11 = __msa_srari_h(out11, 6);
-  dest10 = LOAD_UB(dest + 6 * dest_stride);
-  dest11 = LOAD_UB(dest + 9 * dest_stride);
-  res10 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest10);
-  res11 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest11);
-  res10 += out10;
-  res11 += out11;
-  res10 = CLIP_UNSIGNED_CHAR_H(res10);
-  res11 = CLIP_UNSIGNED_CHAR_H(res11);
-  res10 = (v8i16)__msa_pckev_b((v16i8)res10, (v16i8)res10);
-  res11 = (v8i16)__msa_pckev_b((v16i8)res11, (v16i8)res11);
-  STORE_DWORD(dest + 6 * dest_stride, __msa_copy_u_d((v2i64)res10, 0));
-  STORE_DWORD(dest + 9 * dest_stride, __msa_copy_u_d((v2i64)res11, 0));
+  SRARI_H2_SH(out10, out11, 6);
+  dst10 = LD_UB(dst + 6 * dst_stride);
+  dst11 = LD_UB(dst + 9 * dst_stride);
+  ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11);
+  ADD2(res10, out10, res11, out11, res10, res11);
+  CLIP_SH2_0_255(res10, res11);
+  PCKEV_B2_SH(res10, res10, res11, res11, res10, res11);
+  ST8x1_UB(res10, dst + 6 * dst_stride);
+  ST8x1_UB(res11, dst + 9 * dst_stride);
 
-  k1 = SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
-  k2 = SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
   VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
-  out2 = __msa_srari_h(out2, 6);
-  out3 = __msa_srari_h(out3, 6);
-  dest2 = LOAD_UB(dest + 7 * dest_stride);
-  dest3 = LOAD_UB(dest + 8 * dest_stride);
-  res2 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest2);
-  res3 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest3);
-  res2 += out2;
-  res3 += out3;
-  res2 = CLIP_UNSIGNED_CHAR_H(res2);
-  res3 = CLIP_UNSIGNED_CHAR_H(res3);
-  res2 = (v8i16)__msa_pckev_b((v16i8)res2, (v16i8)res2);
-  res3 = (v8i16)__msa_pckev_b((v16i8)res3, (v16i8)res3);
-  STORE_DWORD(dest + 7 * dest_stride, __msa_copy_u_d((v2i64)res2, 0));
-  STORE_DWORD(dest + 8 * dest_stride, __msa_copy_u_d((v2i64)res3, 0));
+  SRARI_H2_SH(out2, out3, 6);
+  dst2 = LD_UB(dst + 7 * dst_stride);
+  dst3 = LD_UB(dst + 8 * dst_stride);
+  ILVR_B2_SH(zero, dst2, zero, dst3, res2, res3);
+  ADD2(res2, out2, res3, out3, res2, res3);
+  CLIP_SH2_0_255(res2, res3);
+  PCKEV_B2_SH(res2, res2, res3, res3, res2, res3);
+  ST8x1_UB(res2, dst + 7 * dst_stride);
+  ST8x1_UB(res3, dst + 8 * dst_stride);
 
   VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
-  out14 = __msa_srari_h(out14, 6);
-  out15 = __msa_srari_h(out15, 6);
-  dest14 = LOAD_UB(dest + 5 * dest_stride);
-  dest15 = LOAD_UB(dest + 10 * dest_stride);
-  res14 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest14);
-  res15 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest15);
-  res14 += out14;
-  res15 += out15;
-  res14 = CLIP_UNSIGNED_CHAR_H(res14);
-  res15 = CLIP_UNSIGNED_CHAR_H(res15);
-  res14 = (v8i16)__msa_pckev_b((v16i8)res14, (v16i8)res14);
-  res15 = (v8i16)__msa_pckev_b((v16i8)res15, (v16i8)res15);
-  STORE_DWORD(dest + 5 * dest_stride, __msa_copy_u_d((v2i64)res14, 0));
-  STORE_DWORD(dest + 10 * dest_stride, __msa_copy_u_d((v2i64)res15, 0));
+  SRARI_H2_SH(out14, out15, 6);
+  dst14 = LD_UB(dst + 5 * dst_stride);
+  dst15 = LD_UB(dst + 10 * dst_stride);
+  ILVR_B2_SH(zero, dst14, zero, dst15, res14, res15);
+  ADD2(res14, out14, res15, out15, res14, res15);
+  CLIP_SH2_0_255(res14, res15);
+  PCKEV_B2_SH(res14, res14, res15, res15, res14, res15);
+  ST8x1_UB(res14, dst + 5 * dst_stride);
+  ST8x1_UB(res15, dst + 10 * dst_stride);
 }
 
-void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dest,
-                              int32_t dest_stride, int32_t tx_type) {
+void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+                              int32_t dst_stride, int32_t tx_type) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
   int16_t *out_ptr = &out[0];
@@ -897,8 +503,8 @@
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)),
-                                         (dest + (i << 3)), dest_stride);
+        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+                                         dst_stride);
       }
       break;
     case ADST_DCT:
@@ -911,7 +517,7 @@
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
-                                          (dest + (i << 3)), dest_stride);
+                                          (dst + (i << 3)), dst_stride);
       }
       break;
     case DCT_ADST:
@@ -924,8 +530,8 @@
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         /* process 8 * 16 block */
-        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)),
-                                         (dest + (i << 3)), dest_stride);
+        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+                                         dst_stride);
       }
       break;
     case ADST_ADST:
@@ -938,7 +544,7 @@
       /* transform columns */
       for (i = 0; i < 2; ++i) {
         vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
-                                          (dest + (i << 3)), dest_stride);
+                                          (dst + (i << 3)), dst_stride);
       }
       break;
     default:
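
The rewritten vp9_idct16x16_1_add_msa above also makes the DC-only path explicit: the lone DC coefficient goes twice through the cospi_16_64 rotation (once per 1-D pass), takes the final rounding shift of 6, and is then broadcast and added to all 256 destination pixels with clipping. As a scalar check, reusing ROUND_POWER_OF_TWO from the sketch above and assuming the usual vp9 constant cospi_16_64 = 11585:

static int16_t idct16x16_dc_value(int16_t dc) {
  /* Mirrors the three ROUND_POWER_OF_TWO steps in the new code. */
  int32_t out = ROUND_POWER_OF_TWO(dc * 11585, DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out * 11585, DCT_CONST_BITS);
  return (int16_t)ROUND_POWER_OF_TWO(out, 6);
}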
--- a/vp9/common/mips/msa/vp9_idct32x32_msa.c
+++ b/vp9/common/mips/msa/vp9_idct32x32_msa.c
@@ -8,108 +8,34 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "vpx_ports/mem.h"
-#include "vp9/common/vp9_idct.h"
-#include "vp9/common/mips/msa/vp9_macros_msa.h"
+#include "vp9/common/mips/msa/vp9_idct_msa.h"
 
-#define DOTP_CONST_PAIR(reg0, reg1, const0, const1, out0, out1) {  \
-  v8i16 k0_m = __msa_fill_h(const0);                               \
-  v8i16 s0_m, s1_m, s2_m, s3_m;                                    \
-                                                                   \
-  s0_m = __msa_fill_h(const1);                                     \
-  k0_m = __msa_ilvev_h(s0_m, k0_m);                                \
-                                                                   \
-  s0_m = __msa_ilvl_h(-reg1, reg0);                                \
-  s1_m = __msa_ilvr_h(-reg1, reg0);                                \
-  s2_m = __msa_ilvl_h(reg0, reg1);                                 \
-  s3_m = __msa_ilvr_h(reg0, reg1);                                 \
-  s1_m = (v8i16)__msa_dotp_s_w(s1_m, k0_m);                        \
-  s0_m = (v8i16)__msa_dotp_s_w(s0_m, k0_m);                        \
-  s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS);        \
-  s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS);        \
-  out0 = __msa_pckev_h(s0_m, s1_m);                                \
-                                                                   \
-  s1_m = (v8i16)__msa_dotp_s_w(s3_m, k0_m);                        \
-  s0_m = (v8i16)__msa_dotp_s_w(s2_m, k0_m);                        \
-  s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS);        \
-  s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS);        \
-  out1 = __msa_pckev_h(s0_m, s1_m);                                \
-}
-
-#define VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride,     \
-                                              in0, in1, in2, in3) {  \
-  uint64_t out0_m, out1_m, out2_m, out3_m;                           \
-  v8i16 res0_m, res1_m, res2_m, res3_m;                              \
-  v16u8 dest0_m, dest1_m, dest2_m, dest3_m;                          \
-  v16i8 tmp0_m, tmp1_m;                                              \
-  v16i8 zero_m = { 0 };                                              \
-  uint8_t *dst_m = (uint8_t *)(dest);                                \
-                                                                     \
-  dest0_m = LOAD_UB(dst_m);                                          \
-  dest1_m = LOAD_UB(dst_m + 4 * dest_stride);                        \
-  dest2_m = LOAD_UB(dst_m + 8 * dest_stride);                        \
-  dest3_m = LOAD_UB(dst_m + 12 * dest_stride);                       \
-                                                                     \
-  res0_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest0_m);              \
-  res1_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest1_m);              \
-  res2_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest2_m);              \
-  res3_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest3_m);              \
-                                                                     \
-  res0_m += (v8i16)(in0);                                            \
-  res1_m += (v8i16)(in1);                                            \
-  res2_m += (v8i16)(in2);                                            \
-  res3_m += (v8i16)(in3);                                            \
-                                                                     \
-  res0_m = CLIP_UNSIGNED_CHAR_H(res0_m);                             \
-  res1_m = CLIP_UNSIGNED_CHAR_H(res1_m);                             \
-  res2_m = CLIP_UNSIGNED_CHAR_H(res2_m);                             \
-  res3_m = CLIP_UNSIGNED_CHAR_H(res3_m);                             \
-                                                                     \
-  tmp0_m = __msa_pckev_b((v16i8)res1_m, (v16i8)res0_m);              \
-  tmp1_m = __msa_pckev_b((v16i8)res3_m, (v16i8)res2_m);              \
-                                                                     \
-  out0_m = __msa_copy_u_d((v2i64)tmp0_m, 0);                         \
-  out1_m = __msa_copy_u_d((v2i64)tmp0_m, 1);                         \
-  out2_m = __msa_copy_u_d((v2i64)tmp1_m, 0);                         \
-  out3_m = __msa_copy_u_d((v2i64)tmp1_m, 1);                         \
-                                                                     \
-  STORE_DWORD(dst_m, out0_m);                                        \
-  dst_m += (4 * dest_stride);                                        \
-  STORE_DWORD(dst_m, out1_m);                                        \
-  dst_m += (4 * dest_stride);                                        \
-  STORE_DWORD(dst_m, out2_m);                                        \
-  dst_m += (4 * dest_stride);                                        \
-  STORE_DWORD(dst_m, out3_m);                                        \
-}
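
The clip-and-store helper deleted above (and the VP9_ADDBLK_ST8x4_UB that the
new code calls later in this patch) is the usual reconstruction step: widen
four 8-pixel rows of the destination to 16 bits, add the inverse-transform
residue, clip to [0, 255], repack and store. A scalar sketch of one row; the
MSA macros handle four rows and the stride bookkeeping per invocation:

#include <stdint.h>

/* Add an 8-element residue row to 8 destination pixels with clipping. */
static void addblk_clip_row(uint8_t *dst, const int16_t *res) {
  int i;
  for (i = 0; i < 8; ++i) {
    const int32_t v = dst[i] + res[i];
    dst[i] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
  }
}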
-
 static void vp9_idct32x8_row_transpose_store(const int16_t *input,
                                              int16_t *tmp_buf) {
-  v8i16 m0, m1, m2, m3, m4, m5, m6, m7;
-  v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
+  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
 
   /* 1st & 2nd 8x8 */
-  LOAD_8VECS_SH(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
-  LOAD_8VECS_SH((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                    m0, n0, m1, n1, m2, n2, m3, n3);
-  TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                    m4, n4, m5, n5, m6, n6, m7, n7);
-  STORE_4VECS_SH((tmp_buf), 8, m0, n0, m1, n1);
-  STORE_4VECS_SH((tmp_buf + 4 * 8), 8, m2, n2, m3, n3);
-  STORE_4VECS_SH((tmp_buf + 8 * 8), 8, m4, n4, m5, n5);
-  STORE_4VECS_SH((tmp_buf + 12 * 8), 8, m6, n6, m7, n7);
+  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
+  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
+                     m0, n0, m1, n1, m2, n2, m3, n3);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
+                     m4, n4, m5, n5, m6, n6, m7, n7);
+  ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
+  ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
+  ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);
 
   /* 3rd & 4th 8x8 */
-  LOAD_8VECS_SH((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
-  LOAD_8VECS_SH((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                    m0, n0, m1, n1, m2, n2, m3, n3);
-  TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                    m4, n4, m5, n5, m6, n6, m7, n7);
-  STORE_4VECS_SH((tmp_buf + 16 * 8), 8, m0, n0, m1, n1);
-  STORE_4VECS_SH((tmp_buf + 20 * 8), 8, m2, n2, m3, n3);
-  STORE_4VECS_SH((tmp_buf + 24 * 8), 8, m4, n4, m5, n5);
-  STORE_4VECS_SH((tmp_buf + 28 * 8), 8, m6, n6, m7, n7);
+  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
+  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
+                     m0, n0, m1, n1, m2, n2, m3, n3);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
+                     m4, n4, m5, n5, m6, n6, m7, n7);
+  ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
+  ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
+  ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
+  ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
 }
 
 static void vp9_idct32x8_row_even_process_store(int16_t *tmp_buf,
@@ -119,47 +45,29 @@
   v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
 
   /* Even stage 1 */
-  LOAD_8VECS_SH(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
 
-  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
-  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
+  VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
+  VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
+  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
+  VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
 
-  vec0 = reg1 - reg5;
-  vec1 = reg1 + reg5;
-  vec2 = reg7 - reg3;
-  vec3 = reg7 + reg3;
-
-  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
-
   loc1 = vec3;
   loc0 = vec1;
 
-  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
-  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
+  VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
+  VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
+  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
+  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
+  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
 
-  vec0 = reg4 - reg6;
-  vec1 = reg4 + reg6;
-  vec2 = reg0 - reg2;
-  vec3 = reg0 + reg2;
-
-  stp4 = vec0 - loc0;
-  stp3 = vec0 + loc0;
-  stp7 = vec1 - loc1;
-  stp0 = vec1 + loc1;
-  stp5 = vec2 - loc2;
-  stp2 = vec2 + loc2;
-  stp6 = vec3 - loc3;
-  stp1 = vec3 + loc3;
-
   /* Even stage 2 */
-  LOAD_8VECS_SH((tmp_buf + 16), 32,
-                reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
+  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
 
-  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
-  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
-
   vec0 = reg0 + reg4;
   reg0 = reg0 - reg4;
   reg4 = reg6 + reg2;
@@ -176,8 +84,8 @@
   reg4 = reg5 - vec1;
   reg5 = reg5 + vec1;
 
-  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
-  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
 
   vec0 = reg0 - reg6;
   reg0 = reg0 + reg6;
@@ -184,46 +92,34 @@
   vec1 = reg7 - reg1;
   reg7 = reg7 + reg1;
 
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
 
   /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
-  loc0 = stp0 - reg5;
-  loc1 = stp0 + reg5;
-  loc2 = stp1 - reg7;
-  loc3 = stp1 + reg7;
-  STORE_SH(loc0, (tmp_eve_buf + 15 * 8));
-  STORE_SH(loc1, (tmp_eve_buf));
-  STORE_SH(loc2, (tmp_eve_buf + 14 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 8));
+  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
+  ST_SH(loc0, (tmp_eve_buf + 15 * 8));
+  ST_SH(loc1, (tmp_eve_buf));
+  ST_SH(loc2, (tmp_eve_buf + 14 * 8));
+  ST_SH(loc3, (tmp_eve_buf + 8));
 
-  loc0 = stp2 - reg1;
-  loc1 = stp2 + reg1;
-  loc2 = stp3 - reg4;
-  loc3 = stp3 + reg4;
-  STORE_SH(loc0, (tmp_eve_buf + 13 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 2 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 12 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 3 * 8));
+  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
+  ST_SH(loc0, (tmp_eve_buf + 13 * 8));
+  ST_SH(loc1, (tmp_eve_buf + 2 * 8));
+  ST_SH(loc2, (tmp_eve_buf + 12 * 8));
+  ST_SH(loc3, (tmp_eve_buf + 3 * 8));
 
   /* Store 8 */
-  loc0 = stp4 - reg3;
-  loc1 = stp4 + reg3;
-  loc2 = stp5 - reg6;
-  loc3 = stp5 + reg6;
-  STORE_SH(loc0, (tmp_eve_buf + 11 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 4 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 10 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 5 * 8));
+  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
+  ST_SH(loc0, (tmp_eve_buf + 11 * 8));
+  ST_SH(loc1, (tmp_eve_buf + 4 * 8));
+  ST_SH(loc2, (tmp_eve_buf + 10 * 8));
+  ST_SH(loc3, (tmp_eve_buf + 5 * 8));
 
-  loc0 = stp6 - reg0;
-  loc1 = stp6 + reg0;
-  loc2 = stp7 - reg2;
-  loc3 = stp7 + reg2;
-  STORE_SH(loc0, (tmp_eve_buf + 9 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 6 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 8 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 7 * 8));
+  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
+  ST_SH(loc0, (tmp_eve_buf + 9 * 8));
+  ST_SH(loc1, (tmp_eve_buf + 6 * 8));
+  ST_SH(loc2, (tmp_eve_buf + 8 * 8));
+  ST_SH(loc3, (tmp_eve_buf + 7 * 8));
 }
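
A note on the BUTTERFLY_4 macro introduced here: matching the explicit add/sub
chains it replaces in this function (the loc0..loc3 and stp0..stp7 statements
above), BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) appears to
compute the 4-point butterfly below on each v8i16 lane. A sketch inferred from
the diff:

#include <stdint.h>

static void butterfly_4(int16_t in0, int16_t in1, int16_t in2, int16_t in3,
                        int16_t *out0, int16_t *out1,
                        int16_t *out2, int16_t *out3) {
  *out0 = in0 + in3;  /* e.g. loc1 = stp0 + reg5 */
  *out1 = in1 + in2;  /* e.g. loc3 = stp1 + reg7 */
  *out2 = in1 - in2;  /* e.g. loc2 = stp1 - reg7 */
  *out3 = in0 - in3;  /* e.g. loc0 = stp0 - reg5 */
}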
 
 static void vp9_idct32x8_row_odd_process_store(int16_t *tmp_buf,
@@ -232,19 +128,19 @@
   v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
 
   /* Odd stage 1 */
-  reg0 = LOAD_SH(tmp_buf + 8);
-  reg1 = LOAD_SH(tmp_buf + 7 * 8);
-  reg2 = LOAD_SH(tmp_buf + 9 * 8);
-  reg3 = LOAD_SH(tmp_buf + 15 * 8);
-  reg4 = LOAD_SH(tmp_buf + 17 * 8);
-  reg5 = LOAD_SH(tmp_buf + 23 * 8);
-  reg6 = LOAD_SH(tmp_buf + 25 * 8);
-  reg7 = LOAD_SH(tmp_buf + 31 * 8);
+  reg0 = LD_SH(tmp_buf + 8);
+  reg1 = LD_SH(tmp_buf + 7 * 8);
+  reg2 = LD_SH(tmp_buf + 9 * 8);
+  reg3 = LD_SH(tmp_buf + 15 * 8);
+  reg4 = LD_SH(tmp_buf + 17 * 8);
+  reg5 = LD_SH(tmp_buf + 23 * 8);
+  reg6 = LD_SH(tmp_buf + 25 * 8);
+  reg7 = LD_SH(tmp_buf + 31 * 8);
 
-  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
-  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
 
   vec0 = reg0 + reg3;
   reg0 = reg0 - reg3;
@@ -257,262 +153,192 @@
   reg5 = vec0;
 
   /* 4 Stores */
-  vec0 = reg5 + reg4;
-  vec1 = reg3 + reg2;
-  STORE_SH(vec0, (tmp_odd_buf + 4 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 5 * 8));
+  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
 
-  vec0 = reg5 - reg4;
-  vec1 = reg3 - reg2;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
-  STORE_SH(vec0, (tmp_odd_buf));
-  STORE_SH(vec1, (tmp_odd_buf + 8));
+  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
+  ST_SH2(vec0, vec1, (tmp_odd_buf), 8);
 
   /* 4 Stores */
-  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
+  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
 
-  vec0 = reg0 + reg1;
-  vec2 = reg7 - reg6;
-  vec1 = reg7 + reg6;
-  vec3 = reg0 - reg1;
-  STORE_SH(vec0, (tmp_odd_buf + 6 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 7 * 8));
+  VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
+  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);
 
-  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
-  STORE_SH(vec2, (tmp_odd_buf + 2 * 8));
-  STORE_SH(vec3, (tmp_odd_buf + 3 * 8));
-
   /* Odd stage 2 */
-
   /* 8 loads */
-  reg0 = LOAD_SH(tmp_buf + 3 * 8);
-  reg1 = LOAD_SH(tmp_buf + 5 * 8);
-  reg2 = LOAD_SH(tmp_buf + 11 * 8);
-  reg3 = LOAD_SH(tmp_buf + 13 * 8);
-  reg4 = LOAD_SH(tmp_buf + 19 * 8);
-  reg5 = LOAD_SH(tmp_buf + 21 * 8);
-  reg6 = LOAD_SH(tmp_buf + 27 * 8);
-  reg7 = LOAD_SH(tmp_buf + 29 * 8);
+  reg0 = LD_SH(tmp_buf + 3 * 8);
+  reg1 = LD_SH(tmp_buf + 5 * 8);
+  reg2 = LD_SH(tmp_buf + 11 * 8);
+  reg3 = LD_SH(tmp_buf + 13 * 8);
+  reg4 = LD_SH(tmp_buf + 19 * 8);
+  reg5 = LD_SH(tmp_buf + 21 * 8);
+  reg6 = LD_SH(tmp_buf + 27 * 8);
+  reg7 = LD_SH(tmp_buf + 29 * 8);
 
-  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
-  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
-  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
 
   /* 4 Stores */
-  vec0 = reg1 - reg2;
-  vec1 = reg6 - reg5;
-  vec2 = reg0 - reg3;
-  vec3 = reg7 - reg4;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
-  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
+  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
+       vec0, vec1, vec2, vec3);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
+  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
 
-  vec2 = loc2 - loc0;
-  vec3 = loc3 - loc1;
-  vec0 = loc2 + loc0;
-  vec1 = loc3 + loc1;
-  STORE_SH(vec0, (tmp_odd_buf + 12 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 15 * 8));
+  BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
 
-  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
 
-  STORE_SH(vec0, (tmp_odd_buf + 10 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 11 * 8));
-
   /* 4 Stores */
-  vec0 = reg0 + reg3;
-  vec1 = reg1 + reg2;
-  vec2 = reg6 + reg5;
-  vec3 = reg7 + reg4;
-  reg0 = vec0 + vec1;
-  reg1 = vec3 + vec2;
-  reg2 = vec0 - vec1;
-  reg3 = vec3 - vec2;
-  STORE_SH(reg0, (tmp_odd_buf + 13 * 8));
-  STORE_SH(reg1, (tmp_odd_buf + 14 * 8));
+  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
+       vec1, vec2, vec0, vec3);
+  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
+  ST_SH(reg0, (tmp_odd_buf + 13 * 8));
+  ST_SH(reg1, (tmp_odd_buf + 14 * 8));
 
-  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
+  VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
+  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);
 
-  STORE_SH(reg0, (tmp_odd_buf + 8 * 8));
-  STORE_SH(reg1, (tmp_odd_buf + 9 * 8));
-
   /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
 
   /* Load 8 & Store 8 */
-  reg0 = LOAD_SH(tmp_odd_buf);
-  reg1 = LOAD_SH(tmp_odd_buf + 1 * 8);
-  reg2 = LOAD_SH(tmp_odd_buf + 2 * 8);
-  reg3 = LOAD_SH(tmp_odd_buf + 3 * 8);
-  reg4 = LOAD_SH(tmp_odd_buf + 8 * 8);
-  reg5 = LOAD_SH(tmp_odd_buf + 9 * 8);
-  reg6 = LOAD_SH(tmp_odd_buf + 10 * 8);
-  reg7 = LOAD_SH(tmp_odd_buf + 11 * 8);
+  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
+  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
 
-  loc0 = reg0 + reg4;
-  loc1 = reg1 + reg5;
-  loc2 = reg2 + reg6;
-  loc3 = reg3 + reg7;
-  STORE_SH(loc0, (tmp_odd_buf));
-  STORE_SH(loc1, (tmp_odd_buf + 1 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 2 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 3 * 8));
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
+       loc0, loc1, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
 
-  vec0 = reg0 - reg4;
-  vec1 = reg1 - reg5;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
 
-  vec0 = reg2 - reg6;
-  vec1 = reg3 - reg7;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);
 
-  STORE_SH(loc0, (tmp_odd_buf + 8 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 9 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 10 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 11 * 8));
-
   /* Load 8 & Store 8 */
-  reg1 = LOAD_SH(tmp_odd_buf + 4 * 8);
-  reg2 = LOAD_SH(tmp_odd_buf + 5 * 8);
-  reg0 = LOAD_SH(tmp_odd_buf + 6 * 8);
-  reg3 = LOAD_SH(tmp_odd_buf + 7 * 8);
-  reg4 = LOAD_SH(tmp_odd_buf + 12 * 8);
-  reg5 = LOAD_SH(tmp_odd_buf + 13 * 8);
-  reg6 = LOAD_SH(tmp_odd_buf + 14 * 8);
-  reg7 = LOAD_SH(tmp_odd_buf + 15 * 8);
+  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
+  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
 
-  loc0 = reg0 + reg4;
-  loc1 = reg1 + reg5;
-  loc2 = reg2 + reg6;
-  loc3 = reg3 + reg7;
-  STORE_SH(loc0, (tmp_odd_buf + 4 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 5 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 6 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 7 * 8));
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
+       loc0, loc1, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
 
-  vec0 = reg0 - reg4;
-  vec1 = reg3 - reg7;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
 
-  vec0 = reg1 - reg5;
-  vec1 = reg2 - reg6;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
-
-  STORE_SH(loc0, (tmp_odd_buf + 12 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 13 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 14 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 15 * 8));
+  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
 }
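
The remaining new helpers in this function follow the same pattern, judging
from the scalar statements they replace: ADD2/SUB2 and ADD4/SUB4 pair their
arguments left to right, while LD_SH4/ST_SH4 move four v8i16 vectors spaced a
given number of int16 elements apart. Inferred expansions, per lane:

/* ADD2(a, b, c, d, o0, o1)                   o0 = a + b; o1 = c + d;
 * SUB2(a, b, c, d, o0, o1)                   o0 = a - b; o1 = c - d;
 * ADD4(a, b, c, d, e, f, g, h, o0, .., o3)   o0 = a + b; ... o3 = g + h;
 * SUB4(a, b, c, d, e, f, g, h, o0, .., o3)   o0 = a - b; ... o3 = g - h; */

Note also the 3 * 8 stride in ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8)
above, which lands vec1 at tmp_odd_buf + 15 * 8, exactly where the deleted code
put it with a separate STORE_SH.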
 
 static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
                                                int16_t *tmp_eve_buf,
                                                int16_t *tmp_odd_buf,
-                                               int16_t *dest) {
+                                               int16_t *dst) {
   v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
-  v8i16 m0, m1, m2, m3, m4, m5, m6, m7;
-  v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
+  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
 
   /* FINAL BUTTERFLY : Dependency on Even & Odd */
-  /* Total: 32 loads, 32 stores */
-  vec0 = LOAD_SH(tmp_odd_buf);
-  vec1 = LOAD_SH(tmp_odd_buf + 9 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 14 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 6 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf);
-  loc1 = LOAD_SH(tmp_eve_buf + 8 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 4 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 12 * 8);
+  vec0 = LD_SH(tmp_odd_buf);
+  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
+  loc0 = LD_SH(tmp_eve_buf);
+  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 12 * 8);
 
-  m0 = (loc0 + vec3);
-  STORE_SH((loc0 - vec3), (tmp_buf + 31 * 8));
-  STORE_SH((loc1 - vec2), (tmp_buf + 23 * 8));
-  m4 = (loc1 + vec2);
-  STORE_SH((loc2 - vec1), (tmp_buf + 27 * 8));
-  m2 = (loc2 + vec1);
-  STORE_SH((loc3 - vec0), (tmp_buf + 19 * 8));
-  m6 = (loc3 + vec0);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
 
+  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
+  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
+  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
+  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));
+
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 4 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 13 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 10 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 3 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 2 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 10 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 6 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 14 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 14 * 8);
 
-  m1 = (loc0 + vec3);
-  STORE_SH((loc0 - vec3), (tmp_buf + 29 * 8));
-  STORE_SH((loc1 - vec2), (tmp_buf + 21 * 8));
-  m5 = (loc1 + vec2);
-  STORE_SH((loc2 - vec1), (tmp_buf + 25 * 8));
-  m3 = (loc2 + vec1);
-  STORE_SH((loc3 - vec0), (tmp_buf + 17 * 8));
-  m7 = (loc3 + vec0);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
 
+  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
+  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
+  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
+  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));
+
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 2 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 11 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 12 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 7 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 1 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 9 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 5 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 13 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 13 * 8);
 
-  n0 = (loc0 + vec3);
-  STORE_SH((loc0 - vec3), (tmp_buf + 30 * 8));
-  STORE_SH((loc1 - vec2), (tmp_buf + 22 * 8));
-  n4 = (loc1 + vec2);
-  STORE_SH((loc2 - vec1), (tmp_buf + 26 * 8));
-  n2 = (loc2 + vec1);
-  STORE_SH((loc3 - vec0), (tmp_buf + 18 * 8));
-  n6 = (loc3 + vec0);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
 
+  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
+  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
+  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
+  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));
+
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 5 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 15 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 8 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 1 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 3 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 11 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 7 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 15 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 15 * 8);
 
-  n1 = (loc0 + vec3);
-  STORE_SH((loc0 - vec3), (tmp_buf + 28 * 8));
-  STORE_SH((loc1 - vec2), (tmp_buf + 20 * 8));
-  n5 = (loc1 + vec2);
-  STORE_SH((loc2 - vec1), (tmp_buf + 24 * 8));
-  n3 = (loc2 + vec1);
-  STORE_SH((loc3 - vec0), (tmp_buf + 16 * 8));
-  n7 = (loc3 + vec0);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
 
+  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
+  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
+  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
+  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));
+
   /* Transpose : 16 vectors */
   /* 1st & 2nd 8x8 */
-  TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                    m0, n0, m1, n1, m2, n2, m3, n3);
-  STORE_4VECS_SH((dest + 0), 32, m0, n0, m1, n1);
-  STORE_4VECS_SH((dest + 4 * 32), 32, m2, n2, m3, n3);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
+                     m0, n0, m1, n1, m2, n2, m3, n3);
+  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
+  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);
 
-  TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                    m4, n4, m5, n5, m6, n6, m7, n7);
-  STORE_4VECS_SH((dest + 8), 32, m4, n4, m5, n5);
-  STORE_4VECS_SH((dest + 8 + 4 * 32), 32, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
+                     m4, n4, m5, n5, m6, n6, m7, n7);
+  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
+  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);
 
   /* 3rd & 4th 8x8 */
-  LOAD_8VECS_SH((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
-  LOAD_8VECS_SH((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
-  TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3,
-                    m0, n0, m1, n1, m2, n2, m3, n3);
-  STORE_4VECS_SH((dest + 16), 32, m0, n0, m1, n1);
-  STORE_4VECS_SH((dest + 16 + 4 * 32), 32, m2, n2, m3, n3);
+  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
+  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
+                     m0, n0, m1, n1, m2, n2, m3, n3);
+  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
+  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);
 
-  TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7,
-                    m4, n4, m5, n5, m6, n6, m7, n7);
-  STORE_4VECS_SH((dest + 24), 32, m4, n4, m5, n5);
-  STORE_4VECS_SH((dest + 24 + 4 * 32), 32, m6, n6, m7, n7);
+  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
+                     m4, n4, m5, n5, m6, n6, m7, n7);
+  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
+  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
 }
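
One detail worth calling out in the function above: the (tmp_buf + 8 * 16) and
(tmp_buf + 12 * 16) loads in the "3rd & 4th 8x8" section alias the
(tmp_buf + 16 * 8) through (tmp_buf + 31 * 8) stores made earlier in the same
function (8 * 16 == 16 * 8 == 128 int16 elements), so the second half of the
butterfly output is simply read back for its final transpose.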
 
 static void vp9_idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
@@ -521,11 +347,8 @@
   DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);
 
   vp9_idct32x8_row_transpose_store(input, &tmp_buf[0]);
-
   vp9_idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
-
   vp9_idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
-
   vp9_idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0],
                                      &tmp_odd_buf[0], output);
 }
@@ -537,48 +360,31 @@
   v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
 
   /* Even stage 1 */
-  LOAD_8VECS_SH(tmp_buf, (4 * 32),
-                reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  tmp_buf += (2 * 32);
 
-  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
-  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
+  VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
+  VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
+  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
+  VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
 
-  vec0 = reg1 - reg5;
-  vec1 = reg1 + reg5;
-  vec2 = reg7 - reg3;
-  vec3 = reg7 + reg3;
-
-  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
-
   loc1 = vec3;
   loc0 = vec1;
 
-  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
-  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
+  VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
+  VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
+  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
+  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
+  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
 
-  vec0 = reg4 - reg6;
-  vec1 = reg4 + reg6;
-  vec2 = reg0 - reg2;
-  vec3 = reg0 + reg2;
-
-  stp4 = vec0 - loc0;
-  stp3 = vec0 + loc0;
-  stp7 = vec1 - loc1;
-  stp0 = vec1 + loc1;
-  stp5 = vec2 - loc2;
-  stp2 = vec2 + loc2;
-  stp6 = vec3 - loc3;
-  stp1 = vec3 + loc3;
-
   /* Even stage 2 */
   /* Load 8 */
-  LOAD_8VECS_SH((tmp_buf + 2 * 32), (4 * 32),
-                reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
 
-  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
-  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
+  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
+  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
 
   vec0 = reg0 + reg4;
   reg0 = reg0 - reg4;
@@ -596,8 +402,8 @@
   reg4 = reg5 - vec1;
   reg5 = reg5 + vec1;
 
-  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
-  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
 
   vec0 = reg0 - reg6;
   reg0 = reg0 + reg6;
@@ -604,47 +410,27 @@
   vec1 = reg7 - reg1;
   reg7 = reg7 + reg1;
 
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
 
   /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
   /* Store 8 */
-  loc0 = stp0 - reg5;
-  loc1 = stp0 + reg5;
-  loc2 = stp1 - reg7;
-  loc3 = stp1 + reg7;
-  STORE_SH(loc0, (tmp_eve_buf + 15 * 8));
-  STORE_SH(loc1, (tmp_eve_buf));
-  STORE_SH(loc2, (tmp_eve_buf + 14 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 1 * 8));
+  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
+  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
+  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);
 
-  loc0 = stp2 - reg1;
-  loc1 = stp2 + reg1;
-  loc2 = stp3 - reg4;
-  loc3 = stp3 + reg4;
-  STORE_SH(loc0, (tmp_eve_buf + 13 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 2 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 12 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 3 * 8));
+  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
+  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
+  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);
 
   /* Store 8 */
-  loc0 = stp4 - reg3;
-  loc1 = stp4 + reg3;
-  loc2 = stp5 - reg6;
-  loc3 = stp5 + reg6;
-  STORE_SH(loc0, (tmp_eve_buf + 11 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 4 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 10 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 5 * 8));
+  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
+  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
+  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);
 
-  loc0 = stp6 - reg0;
-  loc1 = stp6 + reg0;
-  loc2 = stp7 - reg2;
-  loc3 = stp7 + reg2;
-  STORE_SH(loc0, (tmp_eve_buf + 9 * 8));
-  STORE_SH(loc1, (tmp_eve_buf + 6 * 8));
-  STORE_SH(loc2, (tmp_eve_buf + 8 * 8));
-  STORE_SH(loc3, (tmp_eve_buf + 7 * 8));
+  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
+  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
+  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
 }
 
 static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
@@ -653,19 +439,19 @@
   v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
 
   /* Odd stage 1 */
-  reg0 = LOAD_SH(tmp_buf + 32);
-  reg1 = LOAD_SH(tmp_buf + 7 * 32);
-  reg2 = LOAD_SH(tmp_buf + 9 * 32);
-  reg3 = LOAD_SH(tmp_buf + 15 * 32);
-  reg4 = LOAD_SH(tmp_buf + 17 * 32);
-  reg5 = LOAD_SH(tmp_buf + 23 * 32);
-  reg6 = LOAD_SH(tmp_buf + 25 * 32);
-  reg7 = LOAD_SH(tmp_buf + 31 * 32);
+  reg0 = LD_SH(tmp_buf + 32);
+  reg1 = LD_SH(tmp_buf + 7 * 32);
+  reg2 = LD_SH(tmp_buf + 9 * 32);
+  reg3 = LD_SH(tmp_buf + 15 * 32);
+  reg4 = LD_SH(tmp_buf + 17 * 32);
+  reg5 = LD_SH(tmp_buf + 23 * 32);
+  reg6 = LD_SH(tmp_buf + 25 * 32);
+  reg7 = LD_SH(tmp_buf + 31 * 32);
 
-  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
-  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
 
   vec0 = reg0 + reg3;
   reg0 = reg0 - reg3;
@@ -678,278 +464,182 @@
   reg5 = vec0;
 
   /* 4 Stores */
-  vec0 = reg5 + reg4;
-  vec1 = reg3 + reg2;
-  STORE_SH(vec0, (tmp_odd_buf + 4 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 5 * 8));
+  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
+  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
+  ST_SH2(vec0, vec1, tmp_odd_buf, 8);
 
-  vec0 = reg5 - reg4;
-  vec1 = reg3 - reg2;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
-  STORE_SH(vec0, (tmp_odd_buf));
-  STORE_SH(vec1, (tmp_odd_buf + 1 * 8));
-
   /* 4 Stores */
-  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
-  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
+  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
+  VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
+  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);
 
-  vec0 = reg0 + reg1;
-  vec2 = reg7 - reg6;
-  vec1 = reg7 + reg6;
-  vec3 = reg0 - reg1;
-  STORE_SH(vec0, (tmp_odd_buf + 6 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 7 * 8));
-
-  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
-  STORE_SH(vec2, (tmp_odd_buf + 2 * 8));
-  STORE_SH(vec3, (tmp_odd_buf + 3 * 8));
-
   /* Odd stage 2 */
   /* 8 loads */
-  reg0 = LOAD_SH(tmp_buf + 3 * 32);
-  reg1 = LOAD_SH(tmp_buf + 5 * 32);
-  reg2 = LOAD_SH(tmp_buf + 11 * 32);
-  reg3 = LOAD_SH(tmp_buf + 13 * 32);
-  reg4 = LOAD_SH(tmp_buf + 19 * 32);
-  reg5 = LOAD_SH(tmp_buf + 21 * 32);
-  reg6 = LOAD_SH(tmp_buf + 27 * 32);
-  reg7 = LOAD_SH(tmp_buf + 29 * 32);
+  reg0 = LD_SH(tmp_buf + 3 * 32);
+  reg1 = LD_SH(tmp_buf + 5 * 32);
+  reg2 = LD_SH(tmp_buf + 11 * 32);
+  reg3 = LD_SH(tmp_buf + 13 * 32);
+  reg4 = LD_SH(tmp_buf + 19 * 32);
+  reg5 = LD_SH(tmp_buf + 21 * 32);
+  reg6 = LD_SH(tmp_buf + 27 * 32);
+  reg7 = LD_SH(tmp_buf + 29 * 32);
 
-  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
-  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
-  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
-  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
+  VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
+  VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
+  VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
+  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
 
   /* 4 Stores */
-  vec0 = reg1 - reg2;
-  vec1 = reg6 - reg5;
-  vec2 = reg0 - reg3;
-  vec3 = reg7 - reg4;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
-  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
+  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
+  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
+  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
+  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
+  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
 
-  vec2 = loc2 - loc0;
-  vec3 = loc3 - loc1;
-  vec0 = loc2 + loc0;
-  vec1 = loc3 + loc1;
-  STORE_SH(vec0, (tmp_odd_buf + 12 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 15 * 8));
-
-  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
-
-  STORE_SH(vec0, (tmp_odd_buf + 10 * 8));
-  STORE_SH(vec1, (tmp_odd_buf + 11 * 8));
-
   /* 4 Stores */
-  vec0 = reg0 + reg3;
-  vec1 = reg1 + reg2;
-  vec2 = reg6 + reg5;
-  vec3 = reg7 + reg4;
-  reg0 = vec0 + vec1;
-  reg1 = vec3 + vec2;
-  reg2 = vec0 - vec1;
-  reg3 = vec3 - vec2;
-  STORE_SH(reg0, (tmp_odd_buf + 13 * 8));
-  STORE_SH(reg1, (tmp_odd_buf + 14 * 8));
+  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
+  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
+  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
+  VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
+  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);
 
-  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
-
-  STORE_SH(reg0, (tmp_odd_buf + 8 * 8));
-  STORE_SH(reg1, (tmp_odd_buf + 9 * 8));
-
   /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
   /* Load 8 & Store 8 */
-  reg0 = LOAD_SH(tmp_odd_buf);
-  reg1 = LOAD_SH(tmp_odd_buf + 1 * 8);
-  reg2 = LOAD_SH(tmp_odd_buf + 2 * 8);
-  reg3 = LOAD_SH(tmp_odd_buf + 3 * 8);
-  reg4 = LOAD_SH(tmp_odd_buf + 8 * 8);
-  reg5 = LOAD_SH(tmp_odd_buf + 9 * 8);
-  reg6 = LOAD_SH(tmp_odd_buf + 10 * 8);
-  reg7 = LOAD_SH(tmp_odd_buf + 11 * 8);
+  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
+  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
 
-  loc0 = reg0 + reg4;
-  loc1 = reg1 + reg5;
-  loc2 = reg2 + reg6;
-  loc3 = reg3 + reg7;
-  STORE_SH(loc0, (tmp_odd_buf));
-  STORE_SH(loc1, (tmp_odd_buf + 1 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 2 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 3 * 8));
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
 
-  vec0 = reg0 - reg4;
-  vec1 = reg1 - reg5;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
 
-  vec0 = reg2 - reg6;
-  vec1 = reg3 - reg7;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);
 
-  STORE_SH(loc0, (tmp_odd_buf + 8 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 9 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 10 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 11 * 8));
-
   /* Load 8 & Store 8 */
-  reg1 = LOAD_SH(tmp_odd_buf + 4 * 8);
-  reg2 = LOAD_SH(tmp_odd_buf + 5 * 8);
-  reg0 = LOAD_SH(tmp_odd_buf + 6 * 8);
-  reg3 = LOAD_SH(tmp_odd_buf + 7 * 8);
-  reg4 = LOAD_SH(tmp_odd_buf + 12 * 8);
-  reg5 = LOAD_SH(tmp_odd_buf + 13 * 8);
-  reg6 = LOAD_SH(tmp_odd_buf + 14 * 8);
-  reg7 = LOAD_SH(tmp_odd_buf + 15 * 8);
+  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
+  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
 
-  loc0 = reg0 + reg4;
-  loc1 = reg1 + reg5;
-  loc2 = reg2 + reg6;
-  loc3 = reg3 + reg7;
-  STORE_SH(loc0, (tmp_odd_buf + 4 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 5 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 6 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 7 * 8));
+  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
 
-  vec0 = reg0 - reg4;
-  vec1 = reg3 - reg7;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
 
-  vec0 = reg1 - reg5;
-  vec1 = reg2 - reg6;
-  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
-
-  STORE_SH(loc0, (tmp_odd_buf + 12 * 8));
-  STORE_SH(loc1, (tmp_odd_buf + 13 * 8));
-  STORE_SH(loc2, (tmp_odd_buf + 14 * 8));
-  STORE_SH(loc3, (tmp_odd_buf + 15 * 8));
+  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
+  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
 }
 
 static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                                  int16_t *tmp_odd_buf,
-                                                 uint8_t *dest,
-                                                 int32_t dest_stride) {
+                                                 uint8_t *dst,
+                                                 int32_t dst_stride) {
   v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
-  v8i16 m0, m1, m2, m3, m4, m5, m6, m7;
-  v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
+  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
 
   /* FINAL BUTTERFLY : Dependency on Even & Odd */
-  vec0 = LOAD_SH(tmp_odd_buf);
-  vec1 = LOAD_SH(tmp_odd_buf + 9 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 14 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 6 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf);
-  loc1 = LOAD_SH(tmp_eve_buf + 8 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 4 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 12 * 8);
+  vec0 = LD_SH(tmp_odd_buf);
+  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
+  loc0 = LD_SH(tmp_eve_buf);
+  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 12 * 8);
 
-  m0 = (loc0 + vec3);
-  m4 = (loc1 + vec2);
-  m2 = (loc2 + vec1);
-  m6 = (loc3 + vec0);
-  SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride, m0, m2, m4, m6);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
+  SRARI_H4_SH(m0, m2, m4, m6, 6);
+  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
 
-  m6 = (loc0 - vec3);
-  m2 = (loc1 - vec2);
-  m4 = (loc2 - vec1);
-  m0 = (loc3 - vec0);
-  SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 19 * dest_stride),
-                                        dest_stride, m0, m2, m4, m6);
+  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
+  SRARI_H4_SH(m0, m2, m4, m6, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
+                      m0, m2, m4, m6);
 
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 4 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 13 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 10 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 3 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 2 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 10 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 6 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 14 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 14 * 8);
 
-  m1 = (loc0 + vec3);
-  m5 = (loc1 + vec2);
-  m3 = (loc2 + vec1);
-  m7 = (loc3 + vec0);
-  SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 2 * dest_stride),
-                                        dest_stride, m1, m3, m5, m7);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
+  SRARI_H4_SH(m1, m3, m5, m7, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
+                      m1, m3, m5, m7);
 
-  m7 = (loc0 - vec3);
-  m3 = (loc1 - vec2);
-  m5 = (loc2 - vec1);
-  m1 = (loc3 - vec0);
-  SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 17 * dest_stride),
-                                        dest_stride, m1, m3, m5, m7);
+  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
+  SRARI_H4_SH(m1, m3, m5, m7, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
+                      m1, m3, m5, m7);
 
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 2 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 11 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 12 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 7 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 1 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 9 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 5 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 13 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 13 * 8);
 
-  n0 = (loc0 + vec3);
-  n4 = (loc1 + vec2);
-  n2 = (loc2 + vec1);
-  n6 = (loc3 + vec0);
-  SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 1 * dest_stride),
-                                        dest_stride, n0, n2, n4, n6);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
+  SRARI_H4_SH(n0, n2, n4, n6, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
+                      n0, n2, n4, n6);
 
-  n6 = (loc0 - vec3);
-  n2 = (loc1 - vec2);
-  n4 = (loc2 - vec1);
-  n0 = (loc3 - vec0);
-  SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 18 * dest_stride),
-                                        dest_stride, n0, n2, n4, n6);
+  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
+  SRARI_H4_SH(n0, n2, n4, n6, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
+                      n0, n2, n4, n6);
 
   /* Load 8 & Store 8 */
-  vec0 = LOAD_SH(tmp_odd_buf + 5 * 8);
-  vec1 = LOAD_SH(tmp_odd_buf + 15 * 8);
-  vec2 = LOAD_SH(tmp_odd_buf + 8 * 8);
-  vec3 = LOAD_SH(tmp_odd_buf + 1 * 8);
-  loc0 = LOAD_SH(tmp_eve_buf + 3 * 8);
-  loc1 = LOAD_SH(tmp_eve_buf + 11 * 8);
-  loc2 = LOAD_SH(tmp_eve_buf + 7 * 8);
-  loc3 = LOAD_SH(tmp_eve_buf + 15 * 8);
+  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
+  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
+  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
+  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
+  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
+  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
+  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
+  loc3 = LD_SH(tmp_eve_buf + 15 * 8);
 
-  n1 = (loc0 + vec3);
-  n5 = (loc1 + vec2);
-  n3 = (loc2 + vec1);
-  n7 = (loc3 + vec0);
-  SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 3 * dest_stride),
-                                        dest_stride, n1, n3, n5, n7);
+  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
+  SRARI_H4_SH(n1, n3, n5, n7, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
+                      n1, n3, n5, n7);
 
-  n7 = (loc0 - vec3);
-  n3 = (loc1 - vec2);
-  n5 = (loc2 - vec1);
-  n1 = (loc3 - vec0);
-  SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6);
-  VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 16 * dest_stride),
-                                        dest_stride, n1, n3, n5, n7);
+  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
+  SRARI_H4_SH(n1, n3, n5, n7, 6);
+  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
+                      n1, n3, n5, n7);
 }
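
For orientation, the eight VP9_ADDBLK_ST8x4_UB calls above jointly cover all 32
output rows of the 8-column slice: each call writes four rows at a pitch of
4 * dst_stride, starting at row offsets 0, 19, 2, 17, 1, 18, 3 and 16
respectively:

/* call 1: rows  0  4  8 12    call 2: rows 19 23 27 31
 * call 3: rows  2  6 10 14    call 4: rows 17 21 25 29
 * call 5: rows  1  5  9 13    call 6: rows 18 22 26 30
 * call 7: rows  3  7 11 15    call 8: rows 16 20 24 28 */

so the union is exactly rows 0..31, with no row touched twice.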
 
-static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dest,
-                                               int32_t dest_stride) {
+static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+                                               int32_t dst_stride) {
   DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
   DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);
 
   vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
-
   vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
-
   vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
-                                       dest, dest_stride);
+                                       dst, dst_stride);
 }
 
-void vp9_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dest,
-                                int32_t dest_stride) {
+void vp9_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
+                                int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
   int16_t *out_ptr = out_arr;
@@ -963,13 +653,13 @@
   /* transform columns */
   for (i = 0; i < 4; ++i) {
     /* process 8 * 32 block */
-    vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)),
-                                       dest_stride);
+    vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+                                       dst_stride);
   }
 }
 
-void vp9_idct32x32_34_add_msa(const int16_t *input, uint8_t *dest,
-                              int32_t dest_stride) {
+void vp9_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
+                              int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
   int16_t *out_ptr = out_arr;
@@ -1008,70 +698,42 @@
   /* transform columns */
   for (i = 0; i < 4; ++i) {
     /* process 8 * 32 block */
-    vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)),
-                                       dest_stride);
+    vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
+                                       dst_stride);
   }
 }
 
-void vp9_idct32x32_1_add_msa(const int16_t *input, uint8_t *dest,
-                             int32_t dest_stride) {
-  int32_t i, const1;
-  v8i16 const2;
+void vp9_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
+                             int32_t dst_stride) {
+  int32_t i;
   int16_t out;
-  v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
-  v16u8 dest0, dest1, dest2, dest3;
-  v16u8 tmp0, tmp1, tmp2, tmp3;
-  v16i8 zero = { 0 };
+  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
+  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;
 
-  out = dct_const_round_shift(input[0] * cospi_16_64);
-  out = dct_const_round_shift(out * cospi_16_64);
-  const1 = ROUND_POWER_OF_TWO(out, 6);
+  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO(out, 6);
 
-  const2 = __msa_fill_h(const1);
+  vec = __msa_fill_h(out);
 
-  for (i = 0; i < 16; ++i) {
-    dest0 = LOAD_UB(dest);
-    dest1 = LOAD_UB(dest + 16);
-    dest2 = LOAD_UB(dest + dest_stride);
-    dest3 = LOAD_UB(dest + dest_stride + 16);
+  for (i = 16; i--;) {
+    LD_UB2(dst, 16, dst0, dst1);
+    LD_UB2(dst + dst_stride, 16, dst2, dst3);
 
-    res0 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest0);
-    res1 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest1);
-    res2 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest2);
-    res3 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest3);
-    res4 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest0);
-    res5 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest1);
-    res6 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest2);
-    res7 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest3);
+    UNPCK_UB_SH(dst0, res0, res4);
+    UNPCK_UB_SH(dst1, res1, res5);
+    UNPCK_UB_SH(dst2, res2, res6);
+    UNPCK_UB_SH(dst3, res3, res7);
+    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
+    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
+    CLIP_SH4_0_255(res0, res1, res2, res3);
+    CLIP_SH4_0_255(res4, res5, res6, res7);
+    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
+                tmp0, tmp1, tmp2, tmp3);
 
-    res0 += const2;
-    res1 += const2;
-    res2 += const2;
-    res3 += const2;
-    res4 += const2;
-    res5 += const2;
-    res6 += const2;
-    res7 += const2;
-
-    res0 = CLIP_UNSIGNED_CHAR_H(res0);
-    res1 = CLIP_UNSIGNED_CHAR_H(res1);
-    res2 = CLIP_UNSIGNED_CHAR_H(res2);
-    res3 = CLIP_UNSIGNED_CHAR_H(res3);
-    res4 = CLIP_UNSIGNED_CHAR_H(res4);
-    res5 = CLIP_UNSIGNED_CHAR_H(res5);
-    res6 = CLIP_UNSIGNED_CHAR_H(res6);
-    res7 = CLIP_UNSIGNED_CHAR_H(res7);
-
-    tmp0 = (v16u8)__msa_pckev_b((v16i8)res4, (v16i8)res0);
-    tmp1 = (v16u8)__msa_pckev_b((v16i8)res5, (v16i8)res1);
-    tmp2 = (v16u8)__msa_pckev_b((v16i8)res6, (v16i8)res2);
-    tmp3 = (v16u8)__msa_pckev_b((v16i8)res7, (v16i8)res3);
-
-    STORE_UB(tmp0, dest);
-    STORE_UB(tmp1, dest + 16);
-    dest += dest_stride;
-    STORE_UB(tmp2, dest);
-    STORE_UB(tmp3, dest + 16);
-    dest += dest_stride;
+    ST_UB2(tmp0, tmp1, dst, 16);
+    dst += dst_stride;
+    ST_UB2(tmp2, tmp3, dst, 16);
+    dst += dst_stride;
   }
 }
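
vp9_idct32x32_1_add_msa above is the DC-only path: input[0] is scaled through
both 1-D passes, rounded to the pixel domain, splatted into vec, and added to
every pixel with clipping. A scalar sketch of the splatted value, assuming
cospi_16_64 == 11585 and DCT_CONST_BITS == 14 as defined in vp9_idct.h:

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

/* DC value added to all 32x32 pixels (scalar model of the setup code). */
static int16_t idct32x32_dc(int16_t in0) {
  int32_t out = ROUND_POWER_OF_TWO(in0 * 11585, 14);  /* row pass */
  out = ROUND_POWER_OF_TWO(out * 11585, 14);          /* column pass */
  return (int16_t)ROUND_POWER_OF_TWO(out, 6);         /* final rounding */
}

Since (11585 / 16384)^2 is roughly 1/2, the net effect is approximately
in0 / 128 added to each of the 1024 pixels.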
--- a/vp9/common/mips/msa/vp9_idct8x8_msa.c
+++ b/vp9/common/mips/msa/vp9_idct8x8_msa.c
@@ -8,521 +8,122 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include <assert.h>
+#include "vp9/common/mips/msa/vp9_idct_msa.h"
 
-#include "vp9/common/vp9_idct.h"
-#include "vp9/common/mips/msa/vp9_macros_msa.h"
-
-#define SET_COSPI_PAIR(c0_h, c1_h) ({  \
-  v8i16 out0, r0_m, r1_m;              \
-                                       \
-  r0_m = __msa_fill_h(c0_h);           \
-  r1_m = __msa_fill_h(c1_h);           \
-  out0 = __msa_ilvev_h(r1_m, r0_m);    \
-                                       \
-  out0;                                \
-})
-
-#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) ({  \
-  v8i16 c0_m, c1_m;                                    \
-                                                       \
-  c0_m = __msa_splati_h((mask_h), (idx1_h));           \
-  c1_m = __msa_splati_h((mask_h), (idx2_h));           \
-  c0_m = __msa_ilvev_h(c1_m, c0_m);                    \
-                                                       \
-  c0_m;                                                \
-})
-
-#define VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride,     \
-                                            in0, in1, in2, in3) {  \
-  uint64_t out0_m, out1_m, out2_m, out3_m;                         \
-  v8i16 res0_m, res1_m, res2_m, res3_m;                            \
-  v16u8 dest0_m, dest1_m, dest2_m, dest3_m;                        \
-  v16i8 tmp0_m, tmp1_m;                                            \
-  v16i8 zero_m = { 0 };                                            \
-  uint8_t *dst_m = (uint8_t *)(dest);                              \
-                                                                   \
-  LOAD_4VECS_UB(dst_m, (dest_stride),                              \
-                dest0_m, dest1_m, dest2_m, dest3_m);               \
-                                                                   \
-  res0_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest0_m);            \
-  res1_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest1_m);            \
-  res2_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest2_m);            \
-  res3_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest3_m);            \
-                                                                   \
-  res0_m += (v8i16)(in0);                                          \
-  res1_m += (v8i16)(in1);                                          \
-  res2_m += (v8i16)(in2);                                          \
-  res3_m += (v8i16)(in3);                                          \
-                                                                   \
-  res0_m = CLIP_UNSIGNED_CHAR_H(res0_m);                           \
-  res1_m = CLIP_UNSIGNED_CHAR_H(res1_m);                           \
-  res2_m = CLIP_UNSIGNED_CHAR_H(res2_m);                           \
-  res3_m = CLIP_UNSIGNED_CHAR_H(res3_m);                           \
-                                                                   \
-  tmp0_m = __msa_pckev_b((v16i8)res1_m, (v16i8)res0_m);            \
-  tmp1_m = __msa_pckev_b((v16i8)res3_m, (v16i8)res2_m);            \
-                                                                   \
-  out0_m = __msa_copy_u_d((v2i64)tmp0_m, 0);                       \
-  out1_m = __msa_copy_u_d((v2i64)tmp0_m, 1);                       \
-  out2_m = __msa_copy_u_d((v2i64)tmp1_m, 0);                       \
-  out3_m = __msa_copy_u_d((v2i64)tmp1_m, 1);                       \
-                                                                   \
-  STORE_DWORD(dst_m, out0_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out1_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out2_m);                                      \
-  dst_m += (dest_stride);                                          \
-  STORE_DWORD(dst_m, out3_m);                                      \
-}
-
-/* multiply and add macro */
-#define VP9_MADD(inp0, inp1, inp2, inp3,                      \
-                 cst0, cst1, cst2, cst3,                      \
-                 out0, out1, out2, out3) {                    \
-  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;           \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                       \
-                                                              \
-  ILV_H_LRLR_SH(inp0, inp1, inp2, inp3,                       \
-                madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m);  \
-                                                              \
-  DOTP_S_W_4VECS_SW(madd_s1_m, cst0, madd_s0_m, cst0,         \
-                    madd_s1_m, cst1, madd_s0_m, cst1,         \
-                    tmp0_m, tmp1_m, tmp2_m, tmp3_m);          \
-                                                              \
-  SRARI_W_4VECS_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m,            \
-                   tmp0_m, tmp1_m, tmp2_m, tmp3_m,            \
-                   DCT_CONST_BITS);                           \
-                                                              \
-  PCKEV_H_2VECS_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m,            \
-                   out0, out1);                               \
-                                                              \
-  DOTP_S_W_4VECS_SW(madd_s3_m, cst2, madd_s2_m, cst2,         \
-                    madd_s3_m, cst3, madd_s2_m, cst3,         \
-                    tmp0_m, tmp1_m, tmp2_m, tmp3_m);          \
-                                                              \
-  SRARI_W_4VECS_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m,            \
-                   tmp0_m, tmp1_m, tmp2_m, tmp3_m,            \
-                   DCT_CONST_BITS);                           \
-                                                              \
-  PCKEV_H_2VECS_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m,            \
-                   out2, out3);                               \
-}
-
-/* idct 8x8 macro */
-#define VP9_IDCT8x8_1D_ODD(in1, in3, in5, in7,        \
-                           k0, k1, k2, k3, mask,      \
-                           out0, out1, out2, out3) {  \
-  v8i16 res0_m, res1_m, res2_m, res3_m;               \
-  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;               \
-                                                      \
-  VP9_MADD(in1, in7, in3, in5, k0, k1, k2, k3,        \
-           in1, in7, in3, in5);                       \
-                                                      \
-  res0_m = in1 - in3;                                 \
-  res1_m = in7 - in5;                                 \
-                                                      \
-  k0 = VP9_SET_CONST_PAIR(mask, 4, 7);                \
-  k1 = __msa_splati_h(mask, 4);                       \
-                                                      \
-  res2_m = __msa_ilvr_h(res0_m, res1_m);              \
-  res3_m = __msa_ilvl_h(res0_m, res1_m);              \
-                                                      \
-  DOTP_S_W_4VECS_SW(res2_m, k0, res3_m, k0,           \
-                    res2_m, k1, res3_m, k1,           \
-                    tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
-                                                      \
-  SRARI_W_4VECS_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m,    \
-                   tmp0_m, tmp1_m, tmp2_m, tmp3_m,    \
-                   DCT_CONST_BITS);                   \
-  out0 = in1 + in3;                                   \
-  PCKEV_H_2VECS_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m,    \
-                   out1, out2);                       \
-  out3 = in7 + in5;                                   \
-}
-
-#define VP9_IDCT8x8_1D_EVEN(in0, in2, in4, in6,        \
-                            k0, k1, k2, k3,            \
-                            out0, out1, out2, out3) {  \
-  k2 = SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);       \
-  k3 = SET_COSPI_PAIR(cospi_8_64, cospi_24_64);        \
-                                                       \
-  VP9_MADD(in0, in4, in2, in6, k1, k0, k2, k3,         \
-           in0, in4, in2, in6);                        \
-                                                       \
-  out0 = in0 + in6;                                    \
-  out1 = in4 + in2;                                    \
-  out2 = in4 - in2;                                    \
-  out3 = in0 - in6;                                    \
-}
-
-#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                       out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;   \
-  v8i16 k0_m, k1_m, k2_m, k3_m;                                           \
-  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,     \
-    cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64                  \
-  };                                                                      \
-                                                                          \
-  k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                \
-  k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                \
-  k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                \
-  k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                \
-                                                                          \
-  VP9_IDCT8x8_1D_ODD(in1, in3, in5, in7, k0_m, k1_m, k2_m, k3_m, mask_m,  \
-                     res4_m, res5_m, res6_m, res7_m);                     \
-                                                                          \
-  VP9_IDCT8x8_1D_EVEN(in0, in2, in4, in6, k0_m, k1_m, k2_m, k3_m,         \
-                      res0_m, res1_m, res2_m, res3_m);                    \
-                                                                          \
-  BUTTERFLY_8(res0_m, res1_m, res2_m, res3_m,                             \
-              res4_m, res5_m, res6_m, res7_m,                             \
-              out0, out1, out2, out3,                                     \
-              out4, out5, out6, out7);                                    \
-}
-
-#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,  \
-                              dst0, dst1, dst2, dst3) {                \
-  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;    \
-                                                                       \
-  tmp0 = __msa_dotp_s_w((in0), (in4));                                 \
-  tmp2 = __msa_dotp_s_w((in1), (in4));                                 \
-  tmp3 = __msa_dotp_s_w((in0), (in5));                                 \
-  tmp4 = __msa_dotp_s_w((in1), (in5));                                 \
-  tmp5 = __msa_dotp_s_w((in2), (in6));                                 \
-  tmp6 = __msa_dotp_s_w((in3), (in6));                                 \
-  tmp7 = __msa_dotp_s_w((in2), (in7));                                 \
-  tmp8 = __msa_dotp_s_w((in3), (in7));                                 \
-                                                                       \
-  BUTTERFLY_4(tmp0, tmp3, tmp7, tmp5, tmp1, tmp9, tmp7, tmp5);         \
-  BUTTERFLY_4(tmp2, tmp4, tmp8, tmp6, tmp3, tmp0, tmp4, tmp2);         \
-                                                                       \
-  SRARI_W_4VECS_SW(tmp1, tmp9, tmp7, tmp5, tmp1, tmp9, tmp7, tmp5,     \
-                   DCT_CONST_BITS);                                    \
-  SRARI_W_4VECS_SW(tmp3, tmp0, tmp4, tmp2, tmp3, tmp0, tmp4, tmp2,     \
-                   DCT_CONST_BITS);                                    \
-                                                                       \
-  PCKEV_H_4VECS_SH(tmp1, tmp3, tmp9, tmp0, tmp7, tmp4, tmp5, tmp2,     \
-                   dst0, dst1, dst2, dst3);                            \
-}
-
-#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({       \
-  v8i16 dst_m;                                        \
-  v4i32 tp0_m, tp1_m;                                 \
-                                                      \
-  tp1_m = __msa_dotp_s_w((in0), (in2));               \
-  tp0_m = __msa_dotp_s_w((in1), (in2));               \
-  tp1_m = __msa_srari_w(tp1_m, DCT_CONST_BITS);       \
-  tp0_m = __msa_srari_w(tp0_m, DCT_CONST_BITS);       \
-  dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m);  \
-                                                      \
-  dst_m;                                              \
-})
-
-#define VP9_ADST8_ROW(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                      out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 const0_m, const1_m, const2_m, const3_m, const4_m;                \
-  v8i16 temp0_m, temp1_m, temp2_m, temp3_m, s0_m, s1_m;                  \
-  v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64,                \
-    cospi_14_64, cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64      \
-  };                                                                     \
-  v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64,               \
-    -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0                        \
-  };                                                                     \
-                                                                         \
-  const0_m = __msa_splati_h(coeff0_m, 0);                                \
-  const1_m = __msa_splati_h(coeff0_m, 7);                                \
-  const2_m = -const0_m;                                                  \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                          \
-  const1_m = __msa_ilvev_h(const2_m, const1_m);                          \
-  const2_m = __msa_splati_h(coeff0_m, 4);                                \
-  const3_m = __msa_splati_h(coeff0_m, 3);                                \
-  const4_m = -const2_m;                                                  \
-  const2_m = __msa_ilvev_h(const3_m, const2_m);                          \
-  const3_m = __msa_ilvev_h(const4_m, const3_m);                          \
-                                                                         \
-  ILV_H_LRLR_SH(in7, in0, in3, in4,                                      \
-                temp0_m, temp1_m, temp2_m, temp3_m);                     \
-                                                                         \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,              \
-                        const0_m, const1_m, const2_m, const3_m,          \
-                        in7, in0, in4, in3);                             \
-                                                                         \
-  const0_m = __msa_splati_h(coeff0_m, 2);                                \
-  const1_m = __msa_splati_h(coeff0_m, 5);                                \
-  const2_m = -const0_m;                                                  \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                          \
-  const1_m = __msa_ilvev_h(const2_m, const1_m);                          \
-  const2_m = __msa_splati_h(coeff0_m, 6);                                \
-  const3_m = __msa_splati_h(coeff0_m, 1);                                \
-  const4_m = -const2_m;                                                  \
-  const2_m = __msa_ilvev_h(const3_m, const2_m);                          \
-  const3_m = __msa_ilvev_h(const4_m, const3_m);                          \
-                                                                         \
-  ILV_H_LRLR_SH(in5, in2, in1, in6,                                      \
-                temp0_m, temp1_m, temp2_m, temp3_m);                     \
-                                                                         \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,              \
-                        const0_m, const1_m, const2_m, const3_m,          \
-                        in5, in2, in6, in1);                             \
-                                                                         \
-  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                 \
-  out7 = -s0_m;                                                          \
-  out0 = s1_m;                                                           \
-                                                                         \
-  SPLATI_H_4VECS_SH(coeff1_m, 0, 4, 1, 5,                                \
-                    const0_m, const1_m, const2_m, const3_m);             \
-                                                                         \
-  const3_m = __msa_ilvev_h(const0_m, const3_m);                          \
-  const2_m = __msa_ilvev_h(const2_m, const1_m);                          \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                          \
-  const1_m = const0_m;                                                   \
-                                                                         \
-  ILV_H_LRLR_SH(in3, in4, in1, in6,                                      \
-                temp0_m, temp1_m, temp2_m, temp3_m);                     \
-                                                                         \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,              \
-                        const0_m, const2_m, const3_m, const1_m,          \
-                        out1, out6, s0_m, s1_m);                         \
-                                                                         \
-  const0_m = __msa_splati_h(coeff1_m, 2);                                \
-  const1_m = __msa_splati_h(coeff1_m, 3);                                \
-  const1_m = __msa_ilvev_h(const1_m, const0_m);                          \
-                                                                         \
-  ILV_H_LRLR_SH(in5, in2, s1_m, s0_m,                                    \
-             temp0_m, temp1_m, temp2_m, temp3_m);                        \
-                                                                         \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(temp0_m, temp1_m, const0_m);              \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(temp0_m, temp1_m, const1_m);              \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(temp2_m, temp3_m, const0_m);              \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(temp2_m, temp3_m, const1_m);              \
-                                                                         \
-  out1 = -out1;                                                          \
-  out3 = -out3;                                                          \
-  out5 = -out5;                                                          \
-}
-
-#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,            \
-                  out0, out1, out2, out3, out4, out5, out6, out7) {  \
-  v8i16 const0_m, const1_m, const2_m, const3_m, const4_m;            \
-  v8i16 temp0_m, temp1_m, temp2_m, temp3_m, s0_m, s1_m;              \
-                                                                     \
-  const0_m = __msa_fill_h(cospi_2_64);                               \
-  const1_m = __msa_fill_h(cospi_30_64);                              \
-  const2_m = -const0_m;                                              \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                      \
-  const1_m = __msa_ilvev_h(const2_m, const1_m);                      \
-  const2_m = __msa_fill_h(cospi_18_64);                              \
-  const3_m = __msa_fill_h(cospi_14_64);                              \
-  const4_m = -const2_m;                                              \
-  const2_m = __msa_ilvev_h(const3_m, const2_m);                      \
-  const3_m = __msa_ilvev_h(const4_m, const3_m);                      \
-                                                                     \
-  ILV_H_LRLR_SH(in7, in0, in3, in4,                                  \
-                temp0_m, temp1_m, temp2_m, temp3_m);                 \
-                                                                     \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,          \
-                        const0_m, const1_m, const2_m, const3_m,      \
-                        in7, in0, in4, in3);                         \
-                                                                     \
-  const0_m = __msa_fill_h(cospi_10_64);                              \
-  const1_m = __msa_fill_h(cospi_22_64);                              \
-  const2_m = -const0_m;                                              \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                      \
-  const1_m = __msa_ilvev_h(const2_m, const1_m);                      \
-  const2_m = __msa_fill_h(cospi_26_64);                              \
-  const3_m = __msa_fill_h(cospi_6_64);                               \
-  const4_m = -const2_m;                                              \
-  const2_m = __msa_ilvev_h(const3_m, const2_m);                      \
-  const3_m = __msa_ilvev_h(const4_m, const3_m);                      \
-                                                                     \
-  ILV_H_LRLR_SH(in5, in2, in1, in6,                                  \
-                temp0_m, temp1_m, temp2_m, temp3_m);                 \
-                                                                     \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,          \
-                        const0_m, const1_m, const2_m, const3_m,      \
-                        in5, in2, in6, in1);                         \
-                                                                     \
-  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);             \
-  out7 = -s0_m;                                                      \
-  out0 = s1_m;                                                       \
-                                                                     \
-  const1_m = __msa_fill_h(cospi_24_64);                              \
-  const0_m = __msa_fill_h(cospi_8_64);                               \
-  const3_m = -const1_m;                                              \
-  const2_m = -const0_m;                                              \
-                                                                     \
-  const3_m = __msa_ilvev_h(const0_m, const3_m);                      \
-  const2_m = __msa_ilvev_h(const2_m, const1_m);                      \
-  const0_m = __msa_ilvev_h(const1_m, const0_m);                      \
-  const1_m = const0_m;                                               \
-                                                                     \
-  ILV_H_LRLR_SH(in3, in4, in1, in6,                                  \
-                temp0_m, temp1_m, temp2_m, temp3_m);                 \
-                                                                     \
-  DOT_ADD_SUB_SRARI_PCK(temp0_m, temp1_m, temp2_m, temp3_m,          \
-                        const0_m, const2_m, const3_m, const1_m,      \
-                        out1, out6, s0_m, s1_m);                     \
-                                                                     \
-  const0_m = __msa_fill_h(cospi_16_64);                              \
-  const1_m = -const0_m;                                              \
-  const1_m = __msa_ilvev_h(const1_m, const0_m);                      \
-                                                                     \
-  ILV_H_LRLR_SH(in5, in2, s1_m, s0_m,                                \
-                temp0_m, temp1_m, temp2_m, temp3_m);                 \
-                                                                     \
-  out3 = DOT_SHIFT_RIGHT_PCK_H(temp0_m, temp1_m, const0_m);          \
-  out4 = DOT_SHIFT_RIGHT_PCK_H(temp0_m, temp1_m, const1_m);          \
-  out2 = DOT_SHIFT_RIGHT_PCK_H(temp2_m, temp3_m, const0_m);          \
-  out5 = DOT_SHIFT_RIGHT_PCK_H(temp2_m, temp3_m, const1_m);          \
-                                                                     \
-  out1 = -out1;                                                      \
-  out3 = -out3;                                                      \
-  out5 = -out5;                                                      \
-}
-
-void vp9_idct8x8_64_add_msa(const int16_t *input, uint8_t *dest,
-                            int32_t dest_stride) {
+void vp9_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+                            int32_t dst_stride) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   /* load vector elements of 8x8 block */
-  LOAD_8VECS_SH(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
 
   /* rows transform */
-  TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                    in0, in1, in2, in3, in4, in5, in6, in7);
-
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
   /* 1D idct8x8 */
   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
-
   /* columns transform */
-  TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                    in0, in1, in2, in3, in4, in5, in6, in7);
-
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
   /* 1D idct8x8 */
   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
-
   /* final rounding (add 2^4, divide by 2^5) and shift */
-  SRARI_H_4VECS_SH(in0, in1, in2, in3, in0, in1, in2, in3, 5);
-  SRARI_H_4VECS_SH(in4, in5, in6, in7, in4, in5, in6, in7, 5);
-
+  SRARI_H4_SH(in0, in1, in2, in3, 5);
+  SRARI_H4_SH(in4, in5, in6, in7, 5);
   /* add block and store 8x8 */
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in0, in1, in2, in3);
-  dest += (4 * dest_stride);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in4, in5, in6, in7);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  dst += (4 * dst_stride);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
-void vp9_idct8x8_12_add_msa(const int16_t *input, uint8_t *dest,
-                            int32_t dest_stride) {
+void vp9_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
+                            int32_t dst_stride) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 s0, s1, s2, s3, s4, s5, s6, s7;
-  v8i16 k0, k1, k2, k3, m0, m1, m2, m3;
+  v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
   v4i32 tmp0, tmp1, tmp2, tmp3;
   v8i16 zero = { 0 };
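+  /* Shortcut for blocks whose non-zero coefficients sit in the top-left
+   * 4x4 region: the first pass below runs a trimmed idct8 on four
+   * columns only, before the full-width second pass. */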
 
   /* load vector elements of 8x8 block */
-  LOAD_8VECS_SH(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
 
-  TRANSPOSE8X4_H(in0, in1, in2, in3, in0, in1, in2, in3);
-
   /* stage1 */
-  s0 = __msa_ilvl_h(in3, in0);
-  s1 = __msa_ilvl_h(in2, in1);
-
-  k0 = SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
-  k1 = SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
-  k2 = SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
-  k3 = SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
-  DOTP_S_W_4VECS_SW(s0, k0, s0, k1, s1, k2, s1, k3, tmp0, tmp1, tmp2, tmp3);
-
-  SRARI_W_4VECS_SW(tmp0, tmp1, tmp2, tmp3,
-                   tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
-
-  PCKEV_H_2VECS_SH(zero, tmp0, zero, tmp1, s0, s1);
-  PCKEV_H_2VECS_SH(zero, tmp2, zero, tmp3, s2, s3);
-
+  ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
+  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
+  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
+  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
+  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
   BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);
 
   /* stage2 */
-  s0 = __msa_ilvr_h(in2, in0);
-  s1 = __msa_ilvr_h(in3, in1);
-
-  k0 = SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
-  k1 = SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
-  k2 = SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
-  k3 = SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
-  DOTP_S_W_4VECS_SW(s0, k0, s0, k1, s1, k2, s1, k3, tmp0, tmp1, tmp2, tmp3);
-
-  SRARI_W_4VECS_SW(tmp0, tmp1, tmp2, tmp3,
-                   tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
-
-  PCKEV_H_2VECS_SH(zero, tmp0, zero, tmp1, s0, s1);
-  PCKEV_H_2VECS_SH(zero, tmp2, zero, tmp3, s2, s3);
-
+  ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
+  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+  k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+  k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
+  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
+  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
+  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
   BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);
 
   /* stage3 */
   s0 = __msa_ilvr_h(s6, s5);
 
-  k1 = SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
-  tmp0 = __msa_dotp_s_w(s0, k1);
-  tmp1 = __msa_dotp_s_w(s0, k0);
+  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+  DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
+  SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
+  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
 
-  tmp0 = __msa_srari_w(tmp0, DCT_CONST_BITS);
-  tmp1 = __msa_srari_w(tmp1, DCT_CONST_BITS);
-
-  PCKEV_H_2VECS_SH(zero, tmp0, zero, tmp1, s2, s3);
-
   /* stage4 */
   BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
               in0, in1, in2, in3, in4, in5, in6, in7);
-
-  TRANSPOSE4X8_H(in0, in1, in2, in3, in4, in5, in6, in7,
-                 in0, in1, in2, in3, in4, in5, in6, in7);
-
+  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                  in0, in1, in2, in3, in4, in5, in6, in7);
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
-  SRARI_H_4VECS_SH(in0, in1, in2, in3, in0, in1, in2, in3, 5);
-  SRARI_H_4VECS_SH(in4, in5, in6, in7, in4, in5, in6, in7, 5);
+  SRARI_H4_SH(in0, in1, in2, in3, 5);
+  SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in0, in1, in2, in3);
-  dest += (4 * dest_stride);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in4, in5, in6, in7);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  dst += (4 * dst_stride);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
 
-void vp9_idct8x8_1_add_msa(const int16_t *input, uint8_t *dest,
-                           int32_t dest_stride) {
+void vp9_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride) {
   int16_t out;
-  int32_t const1;
-  v8i16 const2;
+  int32_t val;
+  v8i16 vec;
 
-  out = dct_const_round_shift(input[0] * cospi_16_64);
-  out = dct_const_round_shift(out * cospi_16_64);
-  const1 = ROUND_POWER_OF_TWO(out, 5);
-  const2 = __msa_fill_h(const1);
+  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
+  val = ROUND_POWER_OF_TWO(out, 5);
+  vec = __msa_fill_h(val);
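+  /* DC-only shortcut: input[0] picks up the cospi_16_64 scaling of each
+   * 1D pass, then the same add-2^4/shift-by-5 rounding as the full path;
+   * the constant is replicated so two 8x4 stores rebuild all 64 pixels. */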
 
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride,
-                                      const2, const2, const2, const2);
-  dest += (4 * dest_stride);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride,
-                                      const2, const2, const2, const2);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+  dst += (4 * dst_stride);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
 }
 
-void vp9_iht8x8_64_add_msa(const int16_t *input, uint8_t *dest,
-                           int32_t dest_stride, int32_t tx_type) {
+void vp9_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
   /* load vector elements of 8x8 block */
-  LOAD_8VECS_SH(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
 
-  TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                    in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
 
   switch (tx_type) {
     case DCT_DCT:
@@ -529,10 +130,9 @@
       /* DCT in horizontal */
       VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
-
       /* DCT in vertical */
-      TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                        in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                         in0, in1, in2, in3, in4, in5, in6, in7);
       VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
       break;
@@ -540,21 +140,19 @@
       /* DCT in horizontal */
       VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
-
       /* ADST in vertical */
-      TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                        in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                         in0, in1, in2, in3, in4, in5, in6, in7);
       VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);
       break;
     case DCT_ADST:
       /* ADST in horizontal */
-      VP9_ADST8_ROW(in0, in1, in2, in3, in4, in5, in6, in7,
-                    in0, in1, in2, in3, in4, in5, in6, in7);
-
+      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
+                in0, in1, in2, in3, in4, in5, in6, in7);
       /* DCT in vertical */
-      TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                        in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                         in0, in1, in2, in3, in4, in5, in6, in7);
       VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                      in0, in1, in2, in3, in4, in5, in6, in7);
       break;
@@ -562,10 +160,9 @@
       /* ADST in horizontal */
       VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);
-
       /* ADST in vertical */
-      TRANSPOSE8x8_H_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                        in0, in1, in2, in3, in4, in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                         in0, in1, in2, in3, in4, in5, in6, in7);
       VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);
       break;
@@ -575,11 +172,11 @@
   }
 
   /* final rounding (add 2^4, divide by 2^5) and shift */
-  SRARI_H_4VECS_SH(in0, in1, in2, in3, in0, in1, in2, in3, 5);
-  SRARI_H_4VECS_SH(in4, in5, in6, in7, in4, in5, in6, in7, 5);
+  SRARI_H4_SH(in0, in1, in2, in3, 5);
+  SRARI_H4_SH(in4, in5, in6, in7, 5);
 
   /* add block and store 8x8 */
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in0, in1, in2, in3);
-  dest += (4 * dest_stride);
-  VP9_ADDBLK_CLIP_AND_STORE_8_BYTES_4(dest, dest_stride, in4, in5, in6, in7);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+  dst += (4 * dst_stride);
+  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
 }
--- /dev/null
+++ b/vp9/common/mips/msa/vp9_idct_msa.h
@@ -1,0 +1,483 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_MIPS_MSA_VP9_IDCT_MSA_H_
+#define VP9_COMMON_MIPS_MSA_VP9_IDCT_MSA_H_
+
+#include <assert.h>
+
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_idct.h"
+#include "vp9/common/mips/msa/vp9_macros_msa.h"
+
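+/* Rotation butterfly shared by the inverse transforms:
+ *   out0 = round_shift(reg0 * cnst0 - reg1 * cnst1, DCT_CONST_BITS)
+ *   out1 = round_shift(reg0 * cnst1 + reg1 * cnst0, DCT_CONST_BITS)
+ * evaluated per halfword lane with widening dot products. */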
+#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) {  \
+  v8i16 k0_m = __msa_fill_h(cnst0);                                  \
+  v4i32 s0_m, s1_m, s2_m, s3_m;                                      \
+                                                                     \
+  s0_m = (v4i32)__msa_fill_h(cnst1);                                 \
+  k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m);                           \
+                                                                     \
+  ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m);                            \
+  ILVRL_H2_SW(reg0, reg1, s3_m, s2_m);                               \
+  DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m);                   \
+  SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                           \
+  out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);                    \
+                                                                     \
+  DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m);                   \
+  SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS);                           \
+  out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m);                    \
+}
+
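+/* Sum/difference of rotation products, as in the first inverse-ADST
+ * stages: eight 32-bit dot products are butterflied pairwise, rounded by
+ * DCT_CONST_BITS and packed back to four halfword vectors. */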
+#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,  \
+                                  dst0, dst1, dst2, dst3) {                \
+  v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m;                                 \
+  v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m;                                 \
+                                                                           \
+  DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5,                      \
+              tp0_m, tp2_m, tp3_m, tp4_m);                                 \
+  DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7,                      \
+              tp5_m, tp6_m, tp7_m, tp8_m);                                 \
+  BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m);     \
+  BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m);     \
+  SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS);                 \
+  SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS);                 \
+  PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m,      \
+              dst0, dst1, dst2, dst3);                                     \
+}
+
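+/* Expression macro: dots the left/right interleaved halves (in0, in1)
+ * with one coefficient pair (in2), rounds by DCT_CONST_BITS and packs
+ * the two 4-word results into a single vector of eight halfwords. */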
+#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({   \
+  v8i16 dst_m;                                        \
+  v4i32 tp0_m, tp1_m;                                 \
+                                                      \
+  DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m);      \
+  SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS);          \
+  dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m);  \
+                                                      \
+  dst_m;                                              \
+})
+
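+/* 8-point inverse ADST over eight lanes.  coeff0_m/coeff1_m carry the
+ * cospi constants of the scalar iadst8; the flow below is rotation
+ * sum/difference stages, a butterfly, a final cospi_16_64 stage, and
+ * sign corrections on the odd-indexed outputs. */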
+#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,               \
+                  out0, out1, out2, out3, out4, out5, out6, out7) {     \
+  v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                    \
+  v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                     \
+  v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64,  \
+    cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 };               \
+  v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64,              \
+    -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 };                    \
+                                                                        \
+  SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                       \
+  cnst2_m = -cnst0_m;                                                   \
+  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
+  SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                       \
+  cnst4_m = -cnst2_m;                                                   \
+  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
+                                                                        \
+  ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                \
+  ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                \
+  VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
+                            cnst1_m, cnst2_m, cnst3_m, in7, in0,        \
+                            in4, in3);                                  \
+                                                                        \
+  SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                       \
+  cnst2_m = -cnst0_m;                                                   \
+  ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
+  SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                       \
+  cnst4_m = -cnst2_m;                                                   \
+  ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
+                                                                        \
+  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
+  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
+                                                                        \
+  VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
+                            cnst1_m, cnst2_m, cnst3_m, in5, in2,        \
+                            in6, in1);                                  \
+  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                \
+  out7 = -s0_m;                                                         \
+  out0 = s1_m;                                                          \
+                                                                        \
+  SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5,                                    \
+               cnst0_m, cnst1_m, cnst2_m, cnst3_m);                     \
+                                                                        \
+  ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);    \
+  cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
+  cnst1_m = cnst0_m;                                                    \
+                                                                        \
+  ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                \
+  ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
+  VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
+                            cnst2_m, cnst3_m, cnst1_m, out1, out6,      \
+                            s0_m, s1_m);                                \
+                                                                        \
+  SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                       \
+  cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
+                                                                        \
+  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
+  ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                              \
+  out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);            \
+  out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);            \
+  out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);            \
+  out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);            \
+                                                                        \
+  out1 = -out1;                                                         \
+  out3 = -out3;                                                         \
+  out5 = -out5;                                                         \
+}
+
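+/* Single-pair multiply-accumulate: the interleaved pair (m0, m1) is
+ * rotated by the two coefficient pairs c0 and c1, giving res0 and res1
+ * after the DCT_CONST_BITS rounding shift. */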
+#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) {                \
+  v4i32 madd0_m, madd1_m, madd2_m, madd3_m;                         \
+  v8i16 madd_s0_m, madd_s1_m;                                       \
+                                                                    \
+  ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m);                        \
+  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m,           \
+              c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m);  \
+  SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS);  \
+  PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1);      \
+}
+
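+/* Multiply-and-butterfly: the (inp0, inp1) and (inp2, inp3) rotation
+ * products are added and subtracted at full 32-bit precision before the
+ * rounding shift, so out0/out2 hold the sums and out1/out3 the
+ * differences. */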
+#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,     \
+                    out0, out1, out2, out3) {                           \
+  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
+  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m;                     \
+                                                                        \
+  ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m);                        \
+  ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m);                        \
+  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
+              cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
+  BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
+              m4_m, m5_m, tmp3_m, tmp2_m);                              \
+  SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
+  PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);                  \
+  DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
+              cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
+  BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
+              m4_m, m5_m, tmp3_m, tmp2_m);                              \
+  SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS);              \
+  PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                  \
+}
+
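+/* Builds the interleaved coefficient vector { c0_h, c1_h, c0_h, c1_h, ... }
+ * consumed by the dot-product macros above. */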
+#define VP9_SET_COSPI_PAIR(c0_h, c1_h) ({  \
+  v8i16 out0_m, r0_m, r1_m;                \
+                                           \
+  r0_m = __msa_fill_h(c0_h);               \
+  r1_m = __msa_fill_h(c1_h);               \
+  out0_m = __msa_ilvev_h(r1_m, r0_m);      \
+                                           \
+  out0_m;                                  \
+})
+
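+/* Reconstruction store: loads four rows of eight prediction pixels,
+ * widens them to halfwords, adds the residue vectors in0..in3, clips to
+ * [0, 255] and stores the packed 8x4 byte block back to dst. */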
+#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) {  \
+  uint8_t *dst_m = (uint8_t *) (dst);                               \
+  v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                             \
+  v16i8 tmp0_m, tmp1_m;                                             \
+  v16i8 zero_m = { 0 };                                             \
+  v8i16 res0_m, res1_m, res2_m, res3_m;                             \
+                                                                    \
+  LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m);        \
+  ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m,        \
+             zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);       \
+  ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3,          \
+       res0_m, res1_m, res2_m, res3_m);                             \
+  CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);                   \
+  PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);      \
+  ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                      \
+}
+
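+/* One 1D pass of the 4x4 IDCT: the even pair (in0, in2) is rotated by
+ * cospi_16_64 and the odd pair (in1, in3) by cospi_8_64/cospi_24_64,
+ * then a 4-way butterfly forms the outputs. */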
+#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) {   \
+  v8i16 c0_m, c1_m, c2_m, c3_m;                                     \
+  v8i16 step0_m, step1_m;                                           \
+  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
+                                                                    \
+  c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
+  c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
+  step0_m = __msa_ilvr_h(in2, in0);                                 \
+  DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);        \
+                                                                    \
+  c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
+  c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
+  step1_m = __msa_ilvr_h(in3, in1);                                 \
+  DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);        \
+  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);      \
+                                                                    \
+  PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m);      \
+  SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8);                  \
+  BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m,                         \
+              (v8i16)tmp2_m, (v8i16)tmp3_m,                         \
+              out0, out1, out2, out3);                              \
+}
+
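+/* 4-point inverse ADST built from the sinpi_x_9 constants in mask_m;
+ * the four outputs are accumulated as 32-bit dot-product sums before
+ * the rounding shift and pack. */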
+#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
+  v8i16 res0_m, res1_m, c0_m, c1_m;                                 \
+  v8i16 k1_m, k2_m, k3_m, k4_m;                                     \
+  v8i16 zero_m = { 0 };                                             \
+  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
+  v4i32 int0_m, int1_m, int2_m, int3_m;                             \
+  v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9,                 \
+    sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9,                  \
+    -sinpi_4_9 };                                                   \
+                                                                    \
+  SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m);         \
+  ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m);                  \
+  ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
+  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);          \
+  int0_m = tmp2_m + tmp1_m;                                         \
+                                                                    \
+  SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m);                           \
+  ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m);                  \
+  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
+  int1_m = tmp0_m + tmp1_m;                                         \
+                                                                    \
+  c0_m = __msa_splati_h(mask_m, 6);                                 \
+  ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m);                 \
+  ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
+  DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
+  int2_m = tmp0_m + tmp1_m;                                         \
+                                                                    \
+  c0_m = __msa_splati_h(mask_m, 6);                                 \
+  c0_m = __msa_ilvev_h(c0_m, k1_m);                                 \
+                                                                    \
+  res0_m = __msa_ilvr_h((in1), (in3));                              \
+  tmp0_m = __msa_dotp_s_w(res0_m, c0_m);                            \
+  int3_m = tmp2_m + tmp0_m;                                         \
+                                                                    \
+  res0_m = __msa_ilvr_h((in2), (in3));                              \
+  c1_m = __msa_ilvev_h(k4_m, k3_m);                                 \
+                                                                    \
+  tmp2_m = __msa_dotp_s_w(res0_m, c1_m);                            \
+  res1_m = __msa_ilvr_h((in0), (in2));                              \
+  c1_m = __msa_ilvev_h(k1_m, zero_m);                               \
+                                                                    \
+  tmp3_m = __msa_dotp_s_w(res1_m, c1_m);                            \
+  int3_m += tmp2_m;                                                 \
+  int3_m += tmp3_m;                                                 \
+                                                                    \
+  SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS);      \
+  PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1);          \
+  PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);          \
+}
+
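+/* Variant of VP9_SET_COSPI_PAIR that picks its two constants out of an
+ * existing mask vector by lane index instead of broadcasting them. */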
+#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) ({  \
+  v8i16 c0_m, c1_m;                                    \
+                                                       \
+  SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m);    \
+  c0_m = __msa_ilvev_h(c1_m, c0_m);                    \
+                                                       \
+  c0_m;                                                \
+})
+
+/* multiply and add macro */
+#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,        \
+                 out0, out1, out2, out3) {                              \
+  v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
+  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
+                                                                        \
+  ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m);                        \
+  ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m);                        \
+  DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m,               \
+              cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
+  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);          \
+  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1);              \
+  DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m,               \
+              cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
+  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);          \
+  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3);              \
+}
+
+/* idct 8x8 macro */
+#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,               \
+                       out0, out1, out2, out3, out4, out5, out6, out7) {     \
+  v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;              \
+  v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;              \
+  v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
+  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,        \
+    cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };                  \
+                                                                             \
+  k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                   \
+  k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                   \
+  k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                   \
+  k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                   \
+  VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5);  \
+  SUB2(in1, in3, in7, in5, res0_m, res1_m);                                  \
+  k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                   \
+  k1_m = __msa_splati_h(mask_m, 4);                                          \
+                                                                             \
+  ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                               \
+  DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m,        \
+              tmp0_m, tmp1_m, tmp2_m, tmp3_m);                               \
+  SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS);               \
+  tp4_m = in1 + in3;                                                         \
+  PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                 \
+  tp7_m = in7 + in5;                                                         \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
+  k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
+  VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                       \
+           in0, in4, in2, in6);                                              \
+  BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);               \
+  BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m,        \
+              out0, out1, out2, out3, out4, out5, out6, out7);               \
+}
+
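+/* Alternative 8-point inverse ADST whose rotation constants come from
+ * the mask1_m/mask2_m/mask3_m lookup vectors via VP9_SET_CONST_PAIR;
+ * the odd-indexed outputs are negated at the end, as in VP9_ADST8. */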
+#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,            \
+                        out0, out1, out2, out3, out4, out5, out6, out7) {  \
+  v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                    \
+  v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m;                                \
+  v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1;          \
+  v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64,                  \
+    cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 };    \
+  v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64,                \
+    cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 };      \
+  v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64,                 \
+    -cospi_16_64, 0, 0, 0, 0 };                                            \
+                                                                           \
+  k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                                \
+  ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                     \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              r0_m, r1_m, r2_m, r3_m);                                     \
+  k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                                \
+  ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                     \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              r4_m, r5_m, r6_m, r7_m);                                     \
+  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m);                     \
+  SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                         \
+  k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                                \
+  ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                     \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              r0_m, r1_m, r2_m, r3_m);                                     \
+  k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                                \
+  ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                     \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              r4_m, r5_m, r6_m, r7_m);                                     \
+  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m);                     \
+  SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                         \
+  ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                     \
+  BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);        \
+  k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                                \
+  ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                   \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              r0_m, r1_m, r2_m, r3_m);                                     \
+  k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                                \
+  DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
+              r4_m, r5_m, r6_m, r7_m);                                     \
+  ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6);                          \
+  SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
+       m0_m, m1_m, m2_m, m3_m);                                            \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                           \
+  k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                                \
+  k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                                \
+  ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                     \
+  DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
+              m0_m, m1_m, m2_m, m3_m);                                     \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4);                          \
+  ILVRL_H2_SW(in5, in2, m2_m, m3_m);                                       \
+  DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
+              m0_m, m1_m, m2_m, m3_m);                                     \
+  SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS);                     \
+  PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5);                          \
+                                                                           \
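+  /* odd-indexed ADST outputs carry a sign flip (cf. scalar iadst8) */      \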
+  out1 = -in1;                                                             \
+  out3 = -in3;                                                             \
+  out5 = -in5;                                                             \
+  out7 = -in7;                                                             \
+}
+
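+/* 16-point inverse ADST applied to eight columns of v8i16 data: four
+ * stages of cospi-pair rotations and butterflies, each rounded back to
+ * 16 bits via DCT_CONST_BITS, following the scalar iadst16 in
+ * vp9/common/vp9_idct.c. */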
+#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,        \
+                         r9, r10, r11, r12, r13, r14, r15,          \
+                         out0, out1, out2, out3, out4, out5,        \
+                         out6, out7, out8, out9, out10, out11,      \
+                         out12, out13, out14, out15) {              \
+  v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;             \
+  v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;       \
+  v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;             \
+  v8i16 h8_m, h9_m, h10_m, h11_m;                                   \
+  v8i16 k0_m, k1_m, k2_m, k3_m;                                     \
+                                                                    \
+  /* stage 1 */                                                     \
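+  /* eight cospi-pair rotations fold the 16 inputs into g0_m..g15_m */  \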
+  k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);               \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);              \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);              \
+  k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);             \
+  VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,              \
+              g0_m, g1_m, g2_m, g3_m);                              \
+  k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);               \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);              \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);              \
+  k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);             \
+  VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,             \
+              g4_m, g5_m, g6_m, g7_m);                              \
+  k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);               \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);              \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);               \
+  k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);              \
+  VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,             \
+              g8_m, g9_m, g10_m, g11_m);                            \
+  k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);              \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);             \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);               \
+  k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);              \
+  VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,              \
+              g12_m, g13_m, g14_m, g15_m);                          \
+                                                                    \
+  /* stage 2 */                                                     \
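+  /* rotations by (cospi_4, cospi_28) and (cospi_12, cospi_20), */   \
+  /* then butterflies that merge the two halves */                   \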
+  k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);               \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);              \
+  k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);              \
+  VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,      \
+              h0_m, h1_m, h2_m, h3_m);                              \
+  k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);              \
+  k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);             \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);             \
+  VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,     \
+              h4_m, h5_m, h6_m, h7_m);                              \
+  BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);    \
+  BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m,    \
+              h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);    \
+                                                                    \
+  /* stage 3 */                                                     \
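+  /* butterfly, then (cospi_8, cospi_24) rotations */                \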
+  BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);  \
+  k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
+  k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
+  k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);              \
+  VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,       \
+              out4, out6, out5, out7);                              \
+  VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,       \
+              out12, out14, out13, out15);                          \
+                                                                    \
+  /* stage 4 */                                                     \
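+  /* final cospi_16 rotations on the remaining sum/diff pairs */     \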
+  k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
+  k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);            \
+  k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
+  k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);             \
+  VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);             \
+  VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);               \
+  VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);           \
+  VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);           \
+}
+#endif  /* VP9_COMMON_MIPS_MSA_VP9_IDCT_MSA_H_ */
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -425,42 +425,42 @@
     specialize qw/vp9_idct4x4_16_add sse2 neon dspr2/;
 
     add_proto qw/void vp9_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct8x8_1_add sse2 neon dspr2/;
+    specialize qw/vp9_idct8x8_1_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct8x8_64_add sse2 neon dspr2/, "$ssse3_x86_64";
+    specialize qw/vp9_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
 
     add_proto qw/void vp9_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct8x8_12_add sse2 neon dspr2/, "$ssse3_x86_64";
+    specialize qw/vp9_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
 
     add_proto qw/void vp9_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct16x16_1_add sse2 neon dspr2/;
+    specialize qw/vp9_idct16x16_1_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct16x16_256_add sse2 neon dspr2/;
+    specialize qw/vp9_idct16x16_256_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct16x16_10_add sse2 neon dspr2/;
+    specialize qw/vp9_idct16x16_10_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct32x32_1024_add sse2 neon dspr2/;
+    specialize qw/vp9_idct32x32_1024_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct32x32_34_add sse2 neon_asm dspr2/;
+    specialize qw/vp9_idct32x32_34_add sse2 neon_asm dspr2 msa/;
     #is this a typo?
     $vp9_idct32x32_34_add_neon_asm=vp9_idct32x32_1024_add_neon;
 
     add_proto qw/void vp9_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct32x32_1_add sse2 neon dspr2/;
+    specialize qw/vp9_idct32x32_1_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
     specialize qw/vp9_iht4x4_16_add sse2 neon dspr2/;
 
     add_proto qw/void vp9_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp9_iht8x8_64_add sse2 neon dspr2/;
+    specialize qw/vp9_iht8x8_64_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp9_iht16x16_256_add sse2 dspr2/;
+    specialize qw/vp9_iht16x16_256_add sse2 dspr2 msa/;
 
     # dct and add
 
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -132,6 +132,10 @@
 
 # common (msa)
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_macros_msa.h
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct8x8_msa.c
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct16x16_msa.c
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct32x32_msa.c
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct_msa.h
 
 VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c
 VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.h