shithub: libvpx

ref: 07ff7fa8114d1d569e5b08d665c887af9bb9495f
parent: bb3d510a18350eb44038897187b1c4b28da86200
author: Dragan Mrdjan <dmrdjan@mips.com>
date: Wed Apr 11 05:53:15 EDT 2012

VP8 optimizations for MIPS dspr2

Signed-off-by: Raghu Gandham <raghu@mips.com>

Change-Id: I3a8bca425cd3dab746a6328c8fc8843c8e87aea6
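The change wires dspr2 support in through three pieces: a new "dspr2" architecture-extension flag in configure (which adds -mips32r2 -mdspr2 and disables fast_unaligned), a mips() generator in build/make/rtcd.sh that emits the run-time dispatch code and calls dsputil_static_init() when HAVE_DSPR2 is set, and the dspr2 implementations under vp8/common/mips/dspr2/. The sketch below is illustrative only and not part of the patch: the _c fallback, the bare function pointer and the stub bodies are hypothetical stand-ins for whatever set_function_pointers actually generates.

/* Minimal sketch of the dispatch idea, assuming hypothetical stand-in names. */
#include <stdio.h>

#ifndef HAVE_DSPR2
#define HAVE_DSPR2 1                  /* pretend the build enabled dspr2 */
#endif

static void dsputil_static_init(void) { /* builds ff_cropTbl in the real code */ }

static void vp8_dequant_idct_add_c(short *input, short *dq,
                                   unsigned char *dest, int stride)
{ (void)input; (void)dq; (void)dest; (void)stride; puts("C path"); }

static void vp8_dequant_idct_add_dspr2(short *input, short *dq,
                                       unsigned char *dest, int stride)
{ (void)input; (void)dq; (void)dest; (void)stride; puts("dspr2 path"); }

static void (*vp8_dequant_idct_add)(short *, short *, unsigned char *, int) =
    vp8_dequant_idct_add_c;           /* plain-C default */

static void vpx_rtcd(void)
{
#if HAVE_DSPR2
    vp8_dequant_idct_add = vp8_dequant_idct_add_dspr2;  /* pick the dspr2 path */
    dsputil_static_init();   /* one-time table setup, as in the mips() generator below */
#endif
}

int main(void)
{
    vpx_rtcd();
    vp8_dequant_idct_add(0, 0, 0, 0); /* prints "dspr2 path" */
    return 0;
}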

--- a/build/make/configure.sh
+++ b/build/make/configure.sh
@@ -943,13 +943,16 @@
         esac
     ;;
     mips*)
-        CROSS=${CROSS:-mipsel-linux-uclibc-}
         link_with_cc=gcc
         setup_gnu_toolchain
         tune_cflags="-mtune="
+        if enabled dspr2; then
+            check_add_cflags -mips32r2 -mdspr2
+            disable fast_unaligned
+        fi
         check_add_cflags -march=${tgt_isa}
-    check_add_asflags -march=${tgt_isa}
-    check_add_asflags -KPIC
+        check_add_asflags -march=${tgt_isa}
+        check_add_asflags -KPIC
     ;;
     ppc*)
         enable ppc
--- a/build/make/rtcd.sh
+++ b/build/make/rtcd.sh
@@ -278,6 +278,29 @@
 }
 
 
+mips() {
+  determine_indirection c $ALL_ARCHS
+  cat <<EOF
+$(common_top)
+#include "vpx_config.h"
+
+void ${symbol:-rtcd}(void);
+
+#ifdef RTCD_C
+void ${symbol:-rtcd}(void)
+{
+$(set_function_pointers c)
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+$(common_bottom)
+EOF
+
+}
+
 unoptimized() {
   determine_indirection c
   cat <<EOF
@@ -308,6 +331,15 @@
     REQUIRES=${REQUIRES:-mmx sse sse2}
     require $(filter $REQUIRES)
     x86
+    ;;
+  mips32)
+    ALL_ARCHS=$(filter mips32)
+    dspr2=$([ -f "$config_file" ] && eval echo $(grep HAVE_DSPR2 "$config_file"))
+    HAVE_DSPR2="${dspr2#*=}"
+    if [ "$HAVE_DSPR2" = "yes" ]; then
+        ALL_ARCHS=$(filter mips32 dspr2)
+    fi
+    mips
     ;;
   armv5te)
     ALL_ARCHS=$(filter edsp)
--- a/configure
+++ b/configure
@@ -209,6 +209,7 @@
     neon
 
     mips32
+    dspr2
 
     mmx
     sse
--- /dev/null
+++ b/vp8/common/mips/dspr2/dequantize_dspr2.c
@@ -1,0 +1,33 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vpx_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+
+#if HAVE_DSPR2
+void vp8_dequant_idct_add_dspr2(short *input, short *dq,
+                                unsigned char *dest, int stride)
+{
+    int i;
+
+    for (i = 0; i < 16; i++)
+    {
+        input[i] = dq[i] * input[i];
+    }
+
+    vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride);
+
+    vpx_memset(input, 0, 32);
+
+}
+
+#endif
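The filter file added next clamps every filter output with a single table lookup: dsputil_static_init() fills ff_cropTbl so that, through a pointer biased by CROP_WIDTH, negative results read 0, values in 0..255 read themselves, and overflowing values read 255; the assembly then clamps via lbux through that biased pointer (cm). A stand-alone sketch of the same idea, with a hypothetical table name and plain C loads in place of lbux:

/* Minimal sketch of the ff_cropTbl clamp-table technique (illustrative only). */
#include <stdio.h>

#define CROP_WIDTH 256
static unsigned char crop_tbl[256 + 2 * CROP_WIDTH];

static void crop_tbl_init(void)                 /* mirrors dsputil_static_init() */
{
    int i;

    for (i = 0; i < 256; i++)
        crop_tbl[i + CROP_WIDTH] = (unsigned char)i;  /* identity for 0..255 */

    for (i = 0; i < CROP_WIDTH; i++)
    {
        crop_tbl[i] = 0;                        /* anything below 0 clamps to 0 */
        crop_tbl[i + CROP_WIDTH + 256] = 255;   /* anything above 255 clamps to 255 */
    }
}

int main(void)
{
    /* biased pointer, like "cm = ff_cropTbl + CROP_WIDTH" in filter_dspr2.c */
    const unsigned char *cm = crop_tbl + CROP_WIDTH;

    crop_tbl_init();
    printf("%d %d %d\n", cm[-20], cm[100], cm[300]);  /* prints: 0 100 255 */
    return 0;
}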
--- /dev/null
+++ b/vp8/common/mips/dspr2/filter_dspr2.c
@@ -1,0 +1,2823 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include "vpx_rtcd.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+#define CROP_WIDTH 256
+unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
+
+static const unsigned short sub_pel_filterss[8][3] =
+{
+    {      0,      0,      0},
+    {      0, 0x0601, 0x7b0c},
+    { 0x0201, 0x0b08, 0x6c24},
+    {      0, 0x0906, 0x5d32},
+    { 0x0303, 0x1010, 0x4d4d},
+    {      0, 0x0609, 0x325d},
+    { 0x0102, 0x080b, 0x246c},
+    {      0, 0x0106, 0x0c7b},
+};
+
+
+static const int sub_pel_filters_int[8][3] =
+{
+    {          0,          0,          0},
+    { 0x0000fffa, 0x007b000c, 0xffff0000},
+    { 0x0002fff5, 0x006c0024, 0xfff80001},
+    { 0x0000fff7, 0x005d0032, 0xfffa0000},
+    { 0x0003fff0, 0x004d004d, 0xfff00003},
+    { 0x0000fffa, 0x0032005d, 0xfff70000},
+    { 0x0001fff8, 0x0024006c, 0xfff50002},
+    { 0x0000ffff, 0x000c007b, 0xfffa0000},
+};
+
+
+static const int sub_pel_filters_inv[8][3] =
+{
+    {          0,          0,          0},
+    { 0xfffa0000, 0x000c007b, 0x0000ffff},
+    { 0xfff50002, 0x0024006c, 0x0001fff8},
+    { 0xfff70000, 0x0032005d, 0x0000fffa},
+    { 0xfff00003, 0x004d004d, 0x0003fff0},
+    { 0xfffa0000, 0x005d0032, 0x0000fff7},
+    { 0xfff80001, 0x006c0024, 0x0002fff5},
+    { 0xffff0000, 0x007b000c, 0x0000fffa},
+};
+
+
+static const int sub_pel_filters_int_tap_4[8][2] =
+{
+    {          0,          0},
+    { 0xfffa007b, 0x000cffff},
+    {          0,          0},
+    { 0xfff7005d, 0x0032fffa},
+    {          0,          0},
+    { 0xfffa0032, 0x005dfff7},
+    {          0,          0},
+    { 0xffff000c, 0x007bfffa},
+};
+
+
+static const int sub_pel_filters_inv_tap_4[8][2] =
+{
+    {          0,          0},
+    { 0x007bfffa, 0xffff000c},
+    {          0,          0},
+    { 0x005dfff7, 0xfffa0032},
+    {          0,          0},
+    { 0x0032fffa, 0xfff7005d},
+    {          0,          0},
+    { 0x000cffff, 0xfffa007b},
+};
+
+inline void prefetch_load(unsigned char *src)
+{
+    __asm__ __volatile__ (
+        "pref   0,  0(%[src])   \n\t"
+        :
+        : [src] "r" (src)
+    );
+}
+
+
+inline void prefetch_store(unsigned char *dst)
+{
+    __asm__ __volatile__ (
+        "pref   1,  0(%[dst])   \n\t"
+        :
+        : [dst] "r" (dst)
+    );
+}
+
+void dsputil_static_init(void)
+{
+    int i;
+
+    for (i = 0; i < 256; i++) ff_cropTbl[i + CROP_WIDTH] = i;
+
+    for (i = 0; i < CROP_WIDTH; i++)
+    {
+        ff_cropTbl[i] = 0;
+        ff_cropTbl[i + CROP_WIDTH + 256] = 255;
+    }
+}
+
+void vp8_filter_block2d_first_pass_4
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT dst_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_height,
+    int xoffset,
+    int pitch
+)
+{
+    unsigned int i;
+    int Temp1, Temp2, Temp3, Temp4;
+
+    unsigned int vector4a = 64;
+    int vector1b, vector2b, vector3b;
+    unsigned int tp1, tp2, tn1, tn2;
+    unsigned int p1, p2, p3;
+    unsigned int n1, n2, n3;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    vector3b = sub_pel_filters_inv[xoffset][2];
+
+    /* if (xoffset == 0) we don't need any filtering */
+    if (vector3b == 0)
+    {
+        for (i = 0; i < output_height; i++)
+        {
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr + src_pixels_per_line);
+            dst_ptr[0] = src_ptr[0];
+            dst_ptr[1] = src_ptr[1];
+            dst_ptr[2] = src_ptr[2];
+            dst_ptr[3] = src_ptr[3];
+
+            /* next row... */
+            src_ptr += src_pixels_per_line;
+            dst_ptr += 4;
+        }
+    }
+    else
+    {
+        if (vector3b > 65536)
+        {
+            /* 6 tap filter */
+
+            vector1b = sub_pel_filters_inv[xoffset][0];
+            vector2b = sub_pel_filters_inv[xoffset][1];
+
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr + src_pixels_per_line);
+
+            for (i = output_height; i--;)
+            {
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp1],      -2(%[src_ptr])                 \n\t"
+                    "ulw              %[tp2],      2(%[src_ptr])                  \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
+
+                    /* even 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[p1],       %[tp2]                         \n\t"
+                    "balign           %[tp2],      %[tp1],         3              \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p1],          %[vector3b]    \n\t"
+
+                    /* odd 1. pixel */
+                    "ulw              %[tn2],      3(%[src_ptr])                  \n\t"
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    /* clamp */
+                    "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
+                    "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
+                    "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
+                    "lbux             %[n2],       %[Temp4](%[cm])                \n\t"
+
+                    /* store bytes */
+                    "sb               %[tp1],      0(%[dst_ptr])                  \n\t"
+                    "sb               %[tn1],      1(%[dst_ptr])                  \n\t"
+                    "sb               %[tp2],      2(%[dst_ptr])                  \n\t"
+                    "sb               %[n2],       3(%[dst_ptr])                  \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1),
+                      [tn2] "=&r" (tn2), [p1] "=&r" (p1), [p2] "=&r" (p2),
+                      [p3] "=&r" (p3), [n1] "=&r" (n1), [n2] "=&r" (n2),
+                      [n3] "=&r" (n3), [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr),
+                      [vector3b] "r" (vector3b), [src_ptr] "r" (src_ptr)
+                );
+
+                /* Next row... */
+                src_ptr += src_pixels_per_line;
+                dst_ptr += pitch;
+            }
+        }
+        else
+        {
+            /* 4 tap filter */
+
+            vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+            vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+            for (i = output_height; i--;)
+            {
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp1],      -1(%[src_ptr])                 \n\t"
+                    "ulw              %[tp2],      3(%[src_ptr])                  \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+
+                    /* even 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+
+                    /* odd 1. pixel */
+                    "srl              %[tn1],      %[tp2],         8              \n\t"
+                    "balign           %[tp2],      %[tp1],         3              \n\t"
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn1]                         \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    /* clamp and store results */
+                    "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
+                    "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
+                    "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
+                    "sb               %[tp1],      0(%[dst_ptr])                  \n\t"
+                    "sb               %[tn1],      1(%[dst_ptr])                  \n\t"
+                    "lbux             %[n2],       %[Temp4](%[cm])                \n\t"
+                    "sb               %[tp2],      2(%[dst_ptr])                  \n\t"
+                    "sb               %[n2],       3(%[dst_ptr])                  \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1),
+                      [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+                      [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr),
+                      [src_ptr] "r" (src_ptr)
+                );
+                /*  Next row... */
+                src_ptr += src_pixels_per_line;
+                dst_ptr += pitch;
+            }
+        }
+    }
+}
+
+void vp8_filter_block2d_first_pass_8_all
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT dst_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_height,
+    int xoffset,
+    int pitch
+)
+{
+    unsigned int i;
+    int Temp1, Temp2, Temp3, Temp4;
+
+    unsigned int vector4a = 64;
+    unsigned int vector1b, vector2b, vector3b;
+    unsigned int tp1, tp2, tn1, tn2;
+    unsigned int p1, p2, p3, p4;
+    unsigned int n1, n2, n3, n4;
+
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    /* if (xoffset == 0) we don't need any filtering */
+    if (xoffset == 0)
+    {
+        for (i = 0; i < output_height; i++)
+        {
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr + src_pixels_per_line);
+
+            dst_ptr[0] = src_ptr[0];
+            dst_ptr[1] = src_ptr[1];
+            dst_ptr[2] = src_ptr[2];
+            dst_ptr[3] = src_ptr[3];
+            dst_ptr[4] = src_ptr[4];
+            dst_ptr[5] = src_ptr[5];
+            dst_ptr[6] = src_ptr[6];
+            dst_ptr[7] = src_ptr[7];
+
+            /* next row... */
+            src_ptr += src_pixels_per_line;
+            dst_ptr += 8;
+        }
+    }
+    else
+    {
+        vector3b = sub_pel_filters_inv[xoffset][2];
+
+        if (vector3b > 65536)
+        {
+            /* 6 tap filter */
+
+            vector1b = sub_pel_filters_inv[xoffset][0];
+            vector2b = sub_pel_filters_inv[xoffset][1];
+
+            for (i = output_height; i--;)
+            {
+                /* prefetch src_ptr data to cache memory */
+                prefetch_load(src_ptr + src_pixels_per_line);
+
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp1],      -2(%[src_ptr])                 \n\t"
+                    "ulw              %[tp2],      2(%[src_ptr])                  \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
+
+                    /* even 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[p1],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p1],          %[vector3b]    \n\t"
+
+                    "balign           %[tp2],      %[tp1],         3              \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+                    "ulw              %[tn2],      3(%[src_ptr])                  \n\t"
+
+                    /* odd 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
+                    "ulw              %[tp1],      6(%[src_ptr])                  \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[p2],       %[tp1]                         \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn2] "=&r" (tn2),
+                      [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+                      [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [vector3b] "r" (vector3b),
+                      [src_ptr] "r" (src_ptr)
+                );
+
+                /* clamp and store results */
+                dst_ptr[0] = cm[Temp1];
+                dst_ptr[1] = cm[Temp2];
+                dst_ptr[2] = cm[Temp3];
+                dst_ptr[3] = cm[Temp4];
+
+                /* next 4 pixels */
+                __asm__ __volatile__ (
+                    /* even 3. pixel */
+                    "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],          %[vector3b]    \n\t"
+
+                    /* even 4. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[p4],       %[tp1]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[p1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
+
+                    "ulw              %[tn1],      7(%[src_ptr])                  \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+
+                    /* odd 3. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[n2],       %[tn1]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[n3],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],          %[vector3b]    \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+
+                    /* odd 4. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbl    %[n4],       %[tn1]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[n1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector2b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n4],          %[vector3b]    \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    : [tn1] "=&r" (tn1), [n2] "=&r" (n2),
+                      [p4] "=&r" (p4), [n4] "=&r" (n4),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+                    : [tp1] "r" (tp1), [vector1b] "r" (vector1b), [p2] "r" (p2),
+                      [vector2b] "r" (vector2b), [n1] "r" (n1), [p1] "r" (p1),
+                      [vector4a] "r" (vector4a), [vector3b] "r" (vector3b),
+                      [p3] "r" (p3), [n3] "r" (n3), [src_ptr] "r" (src_ptr)
+                );
+
+                /* clamp and store results */
+                dst_ptr[4] = cm[Temp1];
+                dst_ptr[5] = cm[Temp2];
+                dst_ptr[6] = cm[Temp3];
+                dst_ptr[7] = cm[Temp4];
+
+                src_ptr += src_pixels_per_line;
+                dst_ptr += pitch;
+            }
+        }
+        else
+        {
+            /* 4 tap filter */
+
+            vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+            vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+            for (i = output_height; i--;)
+            {
+                /* prefetch src_ptr data to cache memory */
+                prefetch_load(src_ptr + src_pixels_per_line);
+
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp1],      -1(%[src_ptr])                 \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+
+                    "ulw              %[tp2],      3(%[src_ptr])                  \n\t"
+
+                    /* even 2. pixel  */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+                    "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+
+                    "balign           %[tp2],      %[tp1],         3              \n\t"
+
+                    /* odd 1. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+
+                    "ulw              %[tn2],      4(%[src_ptr])                  \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
+                    "preceu.ph.qbl    %[n4],       %[tn2]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
+                    "ulw              %[tp1],      7(%[src_ptr])                  \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+                      [tn2] "=&r" (tn2), [p1] "=&r" (p1), [p2] "=&r" (p2),
+                      [p3] "=&r" (p3), [p4] "=&r" (p4), [n1] "=&r" (n1),
+                      [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+                );
+
+                /* clamp and store results */
+                dst_ptr[0] = cm[Temp1];
+                dst_ptr[1] = cm[Temp2];
+                dst_ptr[2] = cm[Temp3];
+                dst_ptr[3] = cm[Temp4];
+
+                /* next 4 pixels */
+                __asm__ __volatile__ (
+                    /* even 3. pixel */
+                    "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[p4],          %[vector2b]    \n\t"
+
+                    /* even 4. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbr    %[p2],       %[tp1]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],          %[vector2b]    \n\t"
+                    "extp             %[Temp1],    $ac3,           9              \n\t"
+
+                    /* odd 3. pixel */
+                    "mtlo             %[vector4a], $ac3                           \n\t"
+                    "dpa.w.ph         $ac3,        %[n3],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac3,        %[n4],          %[vector2b]    \n\t"
+                    "ulw              %[tn1],      8(%[src_ptr])                  \n\t"
+                    "extp             %[Temp3],    $ac2,           9              \n\t"
+
+                    /* odd 4. pixel */
+                    "mtlo             %[vector4a], $ac2                           \n\t"
+                    "preceu.ph.qbr    %[n2],       %[tn1]                         \n\t"
+                    "dpa.w.ph         $ac2,        %[n4],          %[vector1b]    \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],          %[vector2b]    \n\t"
+                    "extp             %[Temp2],    $ac3,           9              \n\t"
+                    "extp             %[Temp4],    $ac2,           9              \n\t"
+
+                    : [tn1] "=&r" (tn1), [p2] "=&r" (p2), [n2] "=&r" (n2),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+                    : [tp1] "r" (tp1), [p3] "r" (p3), [p4] "r" (p4),
+                      [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr),
+                      [n3] "r" (n3), [n4] "r" (n4)
+                );
+
+                /* clamp and store results */
+                dst_ptr[4] = cm[Temp1];
+                dst_ptr[5] = cm[Temp2];
+                dst_ptr[6] = cm[Temp3];
+                dst_ptr[7] = cm[Temp4];
+
+                /* next row... */
+                src_ptr += src_pixels_per_line;
+                dst_ptr += pitch;
+            }
+        }
+    }
+}
+
+
+void vp8_filter_block2d_first_pass16_6tap
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT dst_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_height,
+    int xoffset,
+    int pitch
+)
+{
+    unsigned int i;
+    int Temp1, Temp2, Temp3, Temp4;
+
+    unsigned int vector4a;
+    unsigned int vector1b, vector2b, vector3b;
+    unsigned int tp1, tp2, tn1, tn2;
+    unsigned int p1, p2, p3, p4;
+    unsigned int n1, n2, n3, n4;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    vector1b = sub_pel_filters_inv[xoffset][0];
+    vector2b = sub_pel_filters_inv[xoffset][1];
+    vector3b = sub_pel_filters_inv[xoffset][2];
+    vector4a = 64;
+
+    for (i = output_height; i--;)
+    {
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr + src_pixels_per_line);
+
+        /* apply filter with vectors pairs */
+        __asm__ __volatile__ (
+            "ulw                %[tp1],      -2(%[src_ptr])                 \n\t"
+            "ulw                %[tp2],      2(%[src_ptr])                  \n\t"
+
+            /* even 1. pixel */
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[p1],       %[tp1]                         \n\t"
+            "preceu.ph.qbl      %[p2],       %[tp1]                         \n\t"
+            "preceu.ph.qbr      %[p3],       %[tp2]                         \n\t"
+            "dpa.w.ph           $ac3,        %[p1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p2],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p3],           %[vector3b]   \n\t"
+
+            /* even 2. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[p1],       %[tp2]                         \n\t"
+            "dpa.w.ph           $ac2,        %[p2],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p3],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p1],           %[vector3b]   \n\t"
+
+            "balign             %[tp2],      %[tp1],          3             \n\t"
+            "ulw                %[tn2],      3(%[src_ptr])                  \n\t"
+            "extp               %[Temp1],    $ac3,            9             \n\t"
+
+            /* odd 1. pixel */
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[n1],       %[tp2]                         \n\t"
+            "preceu.ph.qbl      %[n2],       %[tp2]                         \n\t"
+            "preceu.ph.qbr      %[n3],       %[tn2]                         \n\t"
+            "extp               %[Temp3],    $ac2,            9             \n\t"
+            "dpa.w.ph           $ac3,        %[n1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n2],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n3],           %[vector3b]   \n\t"
+
+            /* odd 2. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[n1],       %[tn2]                         \n\t"
+            "dpa.w.ph           $ac2,        %[n2],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n3],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n1],           %[vector3b]   \n\t"
+            "ulw                %[tp1],      6(%[src_ptr])                  \n\t"
+            "extp               %[Temp2],    $ac3,            9             \n\t"
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[p2],       %[tp1]                         \n\t"
+            "extp               %[Temp4],    $ac2,            9             \n\t"
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn2] "=&r" (tn2),
+              [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+              [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3),
+              [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+              [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+            : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+              [vector4a] "r" (vector4a), [vector3b] "r" (vector3b),
+              [src_ptr] "r" (src_ptr)
+        );
+
+        /* clamp and store results */
+        dst_ptr[0] = cm[Temp1];
+        dst_ptr[1] = cm[Temp2];
+        dst_ptr[2] = cm[Temp3];
+        dst_ptr[3] = cm[Temp4];
+
+        /* next 4 pixels */
+        __asm__ __volatile__ (
+            /* even 3. pixel */
+            "dpa.w.ph           $ac3,        %[p3],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p1],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p2],           %[vector3b]   \n\t"
+
+            /* even 4. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[p4],       %[tp1]                         \n\t"
+            "dpa.w.ph           $ac2,        %[p1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p2],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p4],           %[vector3b]   \n\t"
+            "ulw                %[tn1],      7(%[src_ptr])                  \n\t"
+            "extp               %[Temp1],    $ac3,            9             \n\t"
+
+            /* odd 3. pixel */
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[n2],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac3,        %[n3],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n1],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n2],           %[vector3b]   \n\t"
+            "extp               %[Temp3],    $ac2,            9             \n\t"
+
+            /* odd 4. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[n4],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac2,        %[n1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n2],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n4],           %[vector3b]   \n\t"
+            "ulw                %[tp2],      10(%[src_ptr])                 \n\t"
+            "extp               %[Temp2],    $ac3,            9             \n\t"
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[p1],       %[tp2]                         \n\t"
+            "extp               %[Temp4],    $ac2,            9             \n\t"
+
+            : [tn1] "=&r" (tn1), [tp2] "=&r" (tp2), [n2] "=&r" (n2),
+              [p4] "=&r" (p4), [n4] "=&r" (n4),
+              [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+              [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+            : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+              [tp1] "r" (tp1), [n1] "r" (n1), [p1] "r" (p1),
+              [vector4a] "r" (vector4a), [p2] "r" (p2), [vector3b] "r" (vector3b),
+              [p3] "r" (p3), [n3] "r" (n3), [src_ptr] "r" (src_ptr)
+        );
+
+        /* clamp and store results */
+        dst_ptr[4] = cm[Temp1];
+        dst_ptr[5] = cm[Temp2];
+        dst_ptr[6] = cm[Temp3];
+        dst_ptr[7] = cm[Temp4];
+
+        /* next 4 pixels */
+        __asm__ __volatile__ (
+            /* even 5. pixel */
+            "dpa.w.ph           $ac3,        %[p2],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p4],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p1],           %[vector3b]   \n\t"
+
+            /* even 6. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[p3],       %[tp2]                         \n\t"
+            "dpa.w.ph           $ac2,        %[p4],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p1],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p3],           %[vector3b]   \n\t"
+
+            "ulw                %[tn1],      11(%[src_ptr])                 \n\t"
+            "extp               %[Temp1],    $ac3,            9             \n\t"
+
+            /* odd 5. pixel */
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[n1],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac3,        %[n2],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n4],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n1],           %[vector3b]   \n\t"
+            "extp               %[Temp3],    $ac2,            9             \n\t"
+
+            /* odd 6. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[n3],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac2,        %[n4],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n1],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n3],           %[vector3b]   \n\t"
+            "ulw                %[tp1],      14(%[src_ptr])                 \n\t"
+            "extp               %[Temp2],    $ac3,            9             \n\t"
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[p4],       %[tp1]                         \n\t"
+            "extp               %[Temp4],    $ac2,            9             \n\t"
+
+            : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1),
+              [n1] "=&r" (n1), [p3] "=&r" (p3), [n3] "=&r" (n3),
+              [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+              [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+            : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+              [tp2] "r" (tp2), [p2] "r" (p2), [n2] "r" (n2),
+              [p4] "r" (p4), [n4] "r" (n4), [p1] "r" (p1), [src_ptr] "r" (src_ptr),
+              [vector4a] "r" (vector4a), [vector3b] "r" (vector3b)
+        );
+
+        /* clamp and store results */
+        dst_ptr[8] = cm[Temp1];
+        dst_ptr[9] = cm[Temp2];
+        dst_ptr[10] = cm[Temp3];
+        dst_ptr[11] = cm[Temp4];
+
+        /* next 4 pixels */
+        __asm__ __volatile__ (
+            /* even 7. pixel */
+            "dpa.w.ph           $ac3,        %[p1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p3],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[p4],           %[vector3b]   \n\t"
+
+            /* even 8. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[p2],       %[tp1]                         \n\t"
+            "dpa.w.ph           $ac2,        %[p3],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p4],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[p2],           %[vector3b]   \n\t"
+            "ulw                %[tn1],      15(%[src_ptr])                 \n\t"
+            "extp               %[Temp1],    $ac3,            9             \n\t"
+
+            /* odd 7. pixel */
+            "mtlo               %[vector4a], $ac3                           \n\t"
+            "preceu.ph.qbr      %[n4],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac3,        %[n1],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n3],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac3,        %[n4],           %[vector3b]   \n\t"
+            "extp               %[Temp3],    $ac2,            9             \n\t"
+
+            /* odd 8. pixel */
+            "mtlo               %[vector4a], $ac2                           \n\t"
+            "preceu.ph.qbl      %[n2],       %[tn1]                         \n\t"
+            "dpa.w.ph           $ac2,        %[n3],           %[vector1b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n4],           %[vector2b]   \n\t"
+            "dpa.w.ph           $ac2,        %[n2],           %[vector3b]   \n\t"
+            "extp               %[Temp2],    $ac3,            9             \n\t"
+            "extp               %[Temp4],    $ac2,            9             \n\t"
+
+            /* clamp and store results */
+            "lbux               %[tp1],      %[Temp1](%[cm])                \n\t"
+            "lbux               %[tn1],      %[Temp2](%[cm])                \n\t"
+            "lbux               %[p2],       %[Temp3](%[cm])                \n\t"
+            "sb                 %[tp1],      12(%[dst_ptr])                 \n\t"
+            "sb                 %[tn1],      13(%[dst_ptr])                 \n\t"
+            "lbux               %[n2],       %[Temp4](%[cm])                \n\t"
+            "sb                 %[p2],       14(%[dst_ptr])                 \n\t"
+            "sb                 %[n2],       15(%[dst_ptr])                 \n\t"
+
+            : [tn1] "=&r" (tn1), [p2] "=&r" (p2), [n2] "=&r" (n2), [n4] "=&r" (n4),
+              [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+              [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4)
+            : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+              [tp1] "r" (tp1), [p4] "r" (p4), [n1] "r" (n1), [p1] "r" (p1),
+              [vector4a] "r" (vector4a), [vector3b] "r" (vector3b), [p3] "r" (p3),
+              [n3] "r" (n3), [src_ptr] "r" (src_ptr),
+              [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+        );
+
+        src_ptr += src_pixels_per_line;
+        dst_ptr += pitch;
+    }
+}
+
+
+void vp8_filter_block2d_first_pass16_0
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT output_ptr,
+    unsigned int src_pixels_per_line
+)
+{
+    int Temp1, Temp2, Temp3, Temp4;
+    int i;
+
+    /* prefetch output_ptr data to cache memory */
+    prefetch_store(output_ptr + 32);
+
+    /* copy memory from src buffer to dst buffer */
+    for (i = 0; i < 7; i++)
+    {
+        __asm__ __volatile__ (
+            "ulw    %[Temp1],   0(%[src_ptr])                               \n\t"
+            "ulw    %[Temp2],   4(%[src_ptr])                               \n\t"
+            "ulw    %[Temp3],   8(%[src_ptr])                               \n\t"
+            "ulw    %[Temp4],   12(%[src_ptr])                              \n\t"
+            "sw     %[Temp1],   0(%[output_ptr])                            \n\t"
+            "sw     %[Temp2],   4(%[output_ptr])                            \n\t"
+            "sw     %[Temp3],   8(%[output_ptr])                            \n\t"
+            "sw     %[Temp4],   12(%[output_ptr])                           \n\t"
+            "addu   %[src_ptr], %[src_ptr],        %[src_pixels_per_line]   \n\t"
+
+            : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+              [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr)
+            : [src_pixels_per_line] "r" (src_pixels_per_line),
+              [output_ptr] "r" (output_ptr)
+        );
+
+        __asm__ __volatile__ (
+            "ulw    %[Temp1],   0(%[src_ptr])                               \n\t"
+            "ulw    %[Temp2],   4(%[src_ptr])                               \n\t"
+            "ulw    %[Temp3],   8(%[src_ptr])                               \n\t"
+            "ulw    %[Temp4],   12(%[src_ptr])                              \n\t"
+            "sw     %[Temp1],   16(%[output_ptr])                           \n\t"
+            "sw     %[Temp2],   20(%[output_ptr])                           \n\t"
+            "sw     %[Temp3],   24(%[output_ptr])                           \n\t"
+            "sw     %[Temp4],   28(%[output_ptr])                           \n\t"
+            "addu   %[src_ptr], %[src_ptr],        %[src_pixels_per_line]   \n\t"
+
+            : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+              [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr)
+            : [src_pixels_per_line] "r" (src_pixels_per_line),
+              [output_ptr] "r" (output_ptr)
+        );
+
+        __asm__ __volatile__ (
+            "ulw    %[Temp1],   0(%[src_ptr])                               \n\t"
+            "ulw    %[Temp2],   4(%[src_ptr])                               \n\t"
+            "ulw    %[Temp3],   8(%[src_ptr])                               \n\t"
+            "ulw    %[Temp4],   12(%[src_ptr])                              \n\t"
+            "sw     %[Temp1],   32(%[output_ptr])                           \n\t"
+            "sw     %[Temp2],   36(%[output_ptr])                           \n\t"
+            "sw     %[Temp3],   40(%[output_ptr])                           \n\t"
+            "sw     %[Temp4],   44(%[output_ptr])                           \n\t"
+            "addu   %[src_ptr], %[src_ptr],        %[src_pixels_per_line]   \n\t"
+
+            : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+              [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr)
+            : [src_pixels_per_line] "r" (src_pixels_per_line),
+              [output_ptr] "r" (output_ptr)
+        );
+
+        output_ptr += 48;
+    }
+}
+
+
+void vp8_filter_block2d_first_pass16_4tap
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_width,
+    unsigned int output_height,
+    int xoffset,
+    int yoffset,
+    unsigned char *RESTRICT dst_ptr,
+    int pitch
+)
+{
+    unsigned int i, j;
+    int Temp1, Temp2, Temp3, Temp4;
+
+    unsigned int vector4a;
+    int vector1b, vector2b;
+    unsigned int tp1, tp2, tp3, tn1;
+    unsigned int p1, p2, p3;
+    unsigned int n1, n2, n3;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    vector4a = 64;
+
+    vector1b = sub_pel_filters_inv_tap_4[xoffset][0];
+    vector2b = sub_pel_filters_inv_tap_4[xoffset][1];
+
+    /* if (yoffset == 0) the temp buffer is not needed; data is written directly to dst_ptr */
+    if (yoffset == 0)
+    {
+        output_height -= 5;
+        src_ptr += (src_pixels_per_line + src_pixels_per_line);
+
+        for (i = output_height; i--;)
+        {
+            __asm__ __volatile__ (
+                "ulw     %[tp3],   -1(%[src_ptr])               \n\t"
+                : [tp3] "=&r" (tp3)
+                : [src_ptr] "r" (src_ptr)
+            );
+
+            /* processing 4 adjacent pixels */
+            for (j = 0; j < 16; j += 4)
+            {
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp2],      3(%[src_ptr])                    \n\t"
+                    "move             %[tp1],      %[tp3]                           \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                             \n\t"
+                    "mthi             $0,          $ac3                             \n\t"
+                    "move             %[tp3],      %[tp2]                           \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                           \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                           \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                           \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],           %[vector2b]     \n\t"
+
+                    /* even 2. pixel */
+                    "mtlo             %[vector4a], $ac2                             \n\t"
+                    "mthi             $0,          $ac2                             \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],           %[vector2b]     \n\t"
+                    "extr.w           %[Temp1],    $ac3,            7               \n\t"
+
+                    /* odd 1. pixel */
+                    "ulw              %[tn1],      4(%[src_ptr])                    \n\t"
+                    "balign           %[tp2],      %[tp1],          3               \n\t"
+                    "mtlo             %[vector4a], $ac3                             \n\t"
+                    "mthi             $0,          $ac3                             \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                           \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                           \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn1]                           \n\t"
+                    "extr.w           %[Temp3],    $ac2,            7               \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],           %[vector2b]     \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                             \n\t"
+                    "mthi             $0,          $ac2                             \n\t"
+                    "extr.w           %[Temp2],    $ac3,            7               \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],           %[vector2b]     \n\t"
+                    "extr.w           %[Temp4],    $ac2,            7               \n\t"
+
+                    /* clamp and store results */
+                    "lbux             %[tp1],      %[Temp1](%[cm])                  \n\t"
+                    "lbux             %[tn1],      %[Temp2](%[cm])                  \n\t"
+                    "lbux             %[tp2],      %[Temp3](%[cm])                  \n\t"
+                    "sb               %[tp1],      0(%[dst_ptr])                    \n\t"
+                    "sb               %[tn1],      1(%[dst_ptr])                    \n\t"
+                    "lbux             %[n2],       %[Temp4](%[cm])                  \n\t"
+                    "sb               %[tp2],      2(%[dst_ptr])                    \n\t"
+                    "sb               %[n2],       3(%[dst_ptr])                    \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
+                      [tn1] "=&r" (tn1), [p1] "=&r" (p1), [p2] "=&r" (p2),
+                      [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [p3] "=&r" (p3),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr),
+                      [src_ptr] "r" (src_ptr)
+                );
+
+                src_ptr += 4;
+            }
+
+            /* Next row... */
+            src_ptr += src_pixels_per_line - 16;
+            dst_ptr += pitch;
+        }
+    }
+    else
+    {
+        for (i = output_height; i--;)
+        {
+            /* processing 4 adjacent pixels */
+            for (j = 0; j < 16; j += 4)
+            {
+                /* apply filter with vectors pairs */
+                __asm__ __volatile__ (
+                    "ulw              %[tp1],      -1(%[src_ptr])                   \n\t"
+                    "ulw              %[tp2],      3(%[src_ptr])                    \n\t"
+
+                    /* even 1. pixel */
+                    "mtlo             %[vector4a], $ac3                             \n\t"
+                    "mthi             $0,          $ac3                             \n\t"
+                    "preceu.ph.qbr    %[p1],       %[tp1]                           \n\t"
+                    "preceu.ph.qbl    %[p2],       %[tp1]                           \n\t"
+                    "preceu.ph.qbr    %[p3],       %[tp2]                           \n\t"
+                    "dpa.w.ph         $ac3,        %[p1],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac3,        %[p2],           %[vector2b]     \n\t"
+
+                    /* even 2. pixel */
+                    "mtlo             %[vector4a], $ac2                             \n\t"
+                    "mthi             $0,          $ac2                             \n\t"
+                    "dpa.w.ph         $ac2,        %[p2],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac2,        %[p3],           %[vector2b]     \n\t"
+                    "extr.w           %[Temp1],    $ac3,            7               \n\t"
+
+                    /* odd 1. pixel */
+                    "ulw              %[tn1],      4(%[src_ptr])                    \n\t"
+                    "balign           %[tp2],      %[tp1],          3               \n\t"
+                    "mtlo             %[vector4a], $ac3                             \n\t"
+                    "mthi             $0,          $ac3                             \n\t"
+                    "preceu.ph.qbr    %[n1],       %[tp2]                           \n\t"
+                    "preceu.ph.qbl    %[n2],       %[tp2]                           \n\t"
+                    "preceu.ph.qbr    %[n3],       %[tn1]                           \n\t"
+                    "extr.w           %[Temp3],    $ac2,            7               \n\t"
+                    "dpa.w.ph         $ac3,        %[n1],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac3,        %[n2],           %[vector2b]     \n\t"
+
+                    /* odd 2. pixel */
+                    "mtlo             %[vector4a], $ac2                             \n\t"
+                    "mthi             $0,          $ac2                             \n\t"
+                    "extr.w           %[Temp2],    $ac3,            7               \n\t"
+                    "dpa.w.ph         $ac2,        %[n2],           %[vector1b]     \n\t"
+                    "dpa.w.ph         $ac2,        %[n3],           %[vector2b]     \n\t"
+                    "extr.w           %[Temp4],    $ac2,            7               \n\t"
+
+                    /* clamp and store results */
+                    "lbux             %[tp1],      %[Temp1](%[cm])                  \n\t"
+                    "lbux             %[tn1],      %[Temp2](%[cm])                  \n\t"
+                    "lbux             %[tp2],      %[Temp3](%[cm])                  \n\t"
+                    "sb               %[tp1],      0(%[output_ptr])                 \n\t"
+                    "sb               %[tn1],      1(%[output_ptr])                 \n\t"
+                    "lbux             %[n2],       %[Temp4](%[cm])                  \n\t"
+                    "sb               %[tp2],      2(%[output_ptr])                 \n\t"
+                    "sb               %[n2],       3(%[output_ptr])                 \n\t"
+
+                    : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1),
+                      [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+                      [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3),
+                      [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+                    : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                      [vector4a] "r" (vector4a), [cm] "r" (cm),
+                      [output_ptr] "r" (output_ptr), [src_ptr] "r" (src_ptr)
+                );
+
+                src_ptr += 4;
+            }
+
+            /* next row... */
+            src_ptr += src_pixels_per_line;
+            output_ptr += output_width;
+        }
+    }
+}
+
+
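+/*
+ * Vertical (second) pass over a 4-pixel-wide block.  Judging by the load
+ * offsets below (multiples of 4), the intermediate buffer written by the
+ * first pass has a pitch of 4 bytes, so the vertical taps for one output
+ * pixel sit 4 bytes apart.  Each of the two loop iterations produces two
+ * output rows, giving the 4x4 result.  The 6-tap path is used when the outer
+ * filter coefficient is non-zero, otherwise the cheaper 4-tap path is taken.
+ */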
+void vp8_filter_block2d_second_pass4
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT output_ptr,
+    int output_pitch,
+    int yoffset
+)
+{
+    unsigned int i;
+
+    int Temp1, Temp2, Temp3, Temp4;
+    unsigned int vector1b, vector2b, vector3b, vector4a;
+
+    unsigned char src_ptr_l2;
+    unsigned char src_ptr_l1;
+    unsigned char src_ptr_0;
+    unsigned char src_ptr_r1;
+    unsigned char src_ptr_r2;
+    unsigned char src_ptr_r3;
+
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
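+    /* cm[] saturates the filter result to the 0..255 pixel range; 64 below is
+       the rounding term for the 1 << 7 coefficient scale and is pre-loaded
+       into each accumulator before the dot products */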
+    vector4a = 64;
+
+    /* load filter coefficients */
+    vector1b = sub_pel_filterss[yoffset][0];
+    vector2b = sub_pel_filterss[yoffset][2];
+    vector3b = sub_pel_filterss[yoffset][1];
+
+    if (vector1b)
+    {
+        /* 6 tap filter */
+
+        for (i = 2; i--;)
+        {
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr);
+
+            /* do not allow compiler to reorder instructions */
+            __asm__ __volatile__ (
+                ".set noreorder                                                 \n\t"
+                :
+                :
+            );
+
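+            /* each "append" packs one left-side and one right-side tap sample
+               into adjacent byte lanes, so a single dpau.h.qbr accumulates two
+               filter products at once; dpsu.h.qbr subtracts the tap pair whose
+               coefficients are kept as positive magnitudes */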
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -8(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  -4(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r3],  12(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -7(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  -3(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r3],  13(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -6(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  -2(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  10(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  14(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -5(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  -1(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  11(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  15(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+
+            output_ptr += output_pitch;
+
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -4(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  12(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  16(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -3(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  13(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  17(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -2(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  10(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  14(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  18(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -1(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  11(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  15(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  19(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+
+            src_ptr += 8;
+            output_ptr += output_pitch;
+        }
+    }
+    else
+    {
+        /* 4 tap filter */
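+        /* the outer coefficient pair is zero, so only the center pair
+           (vector2b) and the subtracted pair (vector3b) are applied */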
+
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr);
+
+        for (i = 2; i--;)
+        {
+            /* do not allow compiler to reorder instructions */
+            __asm__ __volatile__ (
+                ".set noreorder                                                 \n\t"
+                :
+                :
+            );
+
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l1],  -4(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  8(%[src_ptr])                   \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  -3(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  9(%[src_ptr])                   \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  -2(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  10(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  -1(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  11(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+
+            output_ptr += output_pitch;
+
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l1],  0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  12(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  13(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  10(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  14(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l1],  3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_0],   7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  11(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  15(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+
+            src_ptr += 8;
+            output_ptr += output_pitch;
+        }
+    }
+}
+
+
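+/*
+ * Vertical (second) pass over an 8-pixel-wide block.  The tap loads step in
+ * multiples of 8, which suggests the first-pass intermediate buffer uses an
+ * 8-byte pitch here.  Each loop iteration filters one 8-pixel output row.
+ */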
+void vp8_filter_block2d_second_pass_8
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT output_ptr,
+    int output_pitch,
+    unsigned int output_height,
+    unsigned int output_width,
+    unsigned int yoffset
+)
+{
+    unsigned int i;
+
+    int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+    unsigned int vector1b, vector2b, vector3b, vector4a;
+
+    unsigned char src_ptr_l2;
+    unsigned char src_ptr_l1;
+    unsigned char src_ptr_0;
+    unsigned char src_ptr_r1;
+    unsigned char src_ptr_r2;
+    unsigned char src_ptr_r3;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    vector4a = 64;
+
+    vector1b = sub_pel_filterss[yoffset][0];
+    vector2b = sub_pel_filterss[yoffset][2];
+    vector3b = sub_pel_filterss[yoffset][1];
+
+    if (vector1b)
+    {
+        /* 6 tap filter */
+
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr);
+
+        for (i = output_height; i--;)
+        {
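+            /* one 8-pixel output row per iteration: the first asm block
+               computes pixels 0-2 and starts the dot product for pixel 3;
+               the second block finishes it and computes pixels 4-7 */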
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -16(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -8(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  16(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  24(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -15(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -7(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  17(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  25(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -14(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -6(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  10(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  18(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  26(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -13(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -5(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  11(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  19(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  27(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -12(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -4(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  12(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  20(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  28(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                "lbu            %[src_ptr_l2],  -11(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -3(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  13(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  21(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  29(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp5],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -10(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -2(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  14(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  22(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  30(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp6],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -9(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_l1],  -1(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  15(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  23(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  31(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp7],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp8],       $ac1,           9               \n\t"
+
+                : [Temp4] "=&r" (Temp4), [Temp5] "=&r" (Temp5),
+                  [Temp6] "=&r" (Temp6), [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2),[src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+            output_ptr[4] = cm[Temp5];
+            output_ptr[5] = cm[Temp6];
+            output_ptr[6] = cm[Temp7];
+            output_ptr[7] = cm[Temp8];
+
+            src_ptr += 8;
+            output_ptr += output_pitch;
+        }
+    }
+    else
+    {
+        /* 4 tap filter */
+
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr);
+
+        for (i = output_height; i--;)
+        {
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l1],  -8(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  16(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                : [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+            );
+
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l1],  -7(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r2],  17(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                : [Temp1] "=r" (Temp1),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+            );
+
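+            /* for the remaining pixels of the row the tap bytes are loaded in
+               C and passed to the asm blocks as inputs, equivalent to the lbu
+               sequences used above */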
+            src_ptr_l1 = src_ptr[-6];
+            src_ptr_0  = src_ptr[2];
+            src_ptr_r1 = src_ptr[10];
+            src_ptr_r2 = src_ptr[18];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                : [Temp2] "=r" (Temp2)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            src_ptr_l1 = src_ptr[-5];
+            src_ptr_0  = src_ptr[3];
+            src_ptr_r1 = src_ptr[11];
+            src_ptr_r2 = src_ptr[19];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp3],       $ac0,           9               \n\t"
+
+                : [Temp3] "=r" (Temp3)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            src_ptr_l1 = src_ptr[-4];
+            src_ptr_0  = src_ptr[4];
+            src_ptr_r1 = src_ptr[12];
+            src_ptr_r2 = src_ptr[20];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp4],       $ac1,           9               \n\t"
+
+                : [Temp4] "=r" (Temp4)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            src_ptr_l1 = src_ptr[-3];
+            src_ptr_0  = src_ptr[5];
+            src_ptr_r1 = src_ptr[13];
+            src_ptr_r2 = src_ptr[21];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp5],       $ac2,           9               \n\t"
+
+                : [Temp5] "=&r" (Temp5)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            src_ptr_l1 = src_ptr[-2];
+            src_ptr_0  = src_ptr[6];
+            src_ptr_r1 = src_ptr[14];
+            src_ptr_r2 = src_ptr[22];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp6],       $ac3,           9               \n\t"
+
+                : [Temp6] "=r" (Temp6)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            src_ptr_l1 = src_ptr[-1];
+            src_ptr_0  = src_ptr[7];
+            src_ptr_r1 = src_ptr[15];
+            src_ptr_r2 = src_ptr[23];
+
+            __asm__ __volatile__ (
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp7],       $ac0,           9               \n\t"
+                "extp           %[Temp8],       $ac1,           9               \n\t"
+
+                : [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8)
+                : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                  [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0),
+                  [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2),
+                  [vector4a] "r" (vector4a)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+            output_ptr[4] = cm[Temp5];
+            output_ptr[5] = cm[Temp6];
+            output_ptr[6] = cm[Temp7];
+            output_ptr[7] = cm[Temp8];
+
+            src_ptr += 8;
+            output_ptr += output_pitch;
+        }
+    }
+}
+
+
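+/*
+ * Vertical (second) pass over a 16x16 block.  The tap loads step in multiples
+ * of 16, so the first-pass intermediate buffer appears to use a 16-byte
+ * pitch.  Unlike the other second-pass routines, this one receives the filter
+ * coefficients directly instead of a yoffset index.
+ */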
+void vp8_filter_block2d_second_pass161
+(
+    unsigned char *RESTRICT src_ptr,
+    unsigned char *RESTRICT output_ptr,
+    int output_pitch,
+    const unsigned short *vp8_filter
+)
+{
+    unsigned int i, j;
+
+    int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+    unsigned int vector4a;
+    unsigned int vector1b, vector2b, vector3b;
+
+    unsigned char src_ptr_l2;
+    unsigned char src_ptr_l1;
+    unsigned char src_ptr_0;
+    unsigned char src_ptr_r1;
+    unsigned char src_ptr_r2;
+    unsigned char src_ptr_r3;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    vector4a = 64;
+
+    vector1b = vp8_filter[0];
+    vector2b = vp8_filter[2];
+    vector3b = vp8_filter[1];
+
+    if (vector1b == 0)
+    {
+        /* 4 tap filter */
+
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr + 16);
+
+        for (i = 16; i--;)
+        {
+            /* inner loop unrolled: 8 output pixels per iteration */
+            for (j = 0; j < 16; j += 8)
+            {
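+                /* 8 output pixels per iteration; the DSP accumulators are
+                   alternated, apparently so that each extp of a finished sum
+                   can overlap the loads and dot products of the next pixel */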
+                /* apply filter with vector pairs */
+                __asm__ __volatile__ (
+                    "lbu            %[src_ptr_l1],  -16(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  16(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  32(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac2                            \n\t"
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -15(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  17(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  33(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac3                            \n\t"
+                    "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -14(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   2(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  18(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  34(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac1                            \n\t"
+                    "extp           %[Temp2],       $ac3,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -13(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   3(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  19(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  35(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac3                            \n\t"
+                    "extp           %[Temp3],       $ac1,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -12(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   4(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  20(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  36(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac2                            \n\t"
+                    "extp           %[Temp4],       $ac3,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -11(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   5(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  21(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  37(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac3                            \n\t"
+                    "extp           %[Temp5],       $ac2,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -10(%[src_ptr])                 \n\t"
+                    "lbu            %[src_ptr_0],   6(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  22(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  38(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac1                            \n\t"
+                    "extp           %[Temp6],       $ac3,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                    "lbu            %[src_ptr_l1],  -9(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_0],   7(%[src_ptr])                   \n\t"
+                    "lbu            %[src_ptr_r1],  23(%[src_ptr])                  \n\t"
+                    "lbu            %[src_ptr_r2],  39(%[src_ptr])                  \n\t"
+                    "mtlo           %[vector4a],    $ac3                            \n\t"
+                    "extp           %[Temp7],       $ac1,           9               \n\t"
+
+                    "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                    "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                    "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                    "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                    "extp           %[Temp8],       $ac3,           9               \n\t"
+
+                    : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                      [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
+                      [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6),
+                      [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8),
+                      [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                      [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2)
+                    : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b),
+                      [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr)
+                );
+
+                /* clamp and store results */
+                output_ptr[j] = cm[Temp1];
+                output_ptr[j + 1] = cm[Temp2];
+                output_ptr[j + 2] = cm[Temp3];
+                output_ptr[j + 3] = cm[Temp4];
+                output_ptr[j + 4] = cm[Temp5];
+                output_ptr[j + 5] = cm[Temp6];
+                output_ptr[j + 6] = cm[Temp7];
+                output_ptr[j + 7] = cm[Temp8];
+
+                src_ptr += 8;
+            }
+
+            output_ptr += output_pitch;
+        }
+    }
+    else
+    {
+        /* 4 tap filter */
+
+        /* prefetch src_ptr data to cache memory */
+        prefetch_load(src_ptr + 16);
+
+        /* unroll for loop */
+        for (i = 16; i--;)
+        {
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -32(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -16(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   0(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  16(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  32(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  48(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -31(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -15(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   1(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  17(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  33(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  49(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -30(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -14(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   2(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  18(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  34(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  50(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp2],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -29(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -13(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   3(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  19(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  35(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  51(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp3],       $ac1,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -28(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -12(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   4(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  20(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  36(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  52(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "extp           %[Temp4],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -27(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -11(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   5(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  21(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  37(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  53(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp5],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -26(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -10(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_0],   6(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  22(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  38(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  54(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp6],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -25(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -9(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   7(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  23(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  39(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  55(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp7],       $ac1,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp8],       $ac3,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
+                  [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6),
+                  [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2),[src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            /* clamp and store results */
+            output_ptr[0] = cm[Temp1];
+            output_ptr[1] = cm[Temp2];
+            output_ptr[2] = cm[Temp3];
+            output_ptr[3] = cm[Temp4];
+            output_ptr[4] = cm[Temp5];
+            output_ptr[5] = cm[Temp6];
+            output_ptr[6] = cm[Temp7];
+            output_ptr[7] = cm[Temp8];
+
+            /* apply filter with vector pairs */
+            __asm__ __volatile__ (
+                "lbu            %[src_ptr_l2],  -24(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -8(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   8(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  24(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  40(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  56(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -23(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -7(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   9(%[src_ptr])                   \n\t"
+                "lbu            %[src_ptr_r1],  25(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  41(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  57(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp1],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -22(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -6(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   10(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  26(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  42(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  58(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp2],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -21(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -5(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   11(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  27(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  43(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  59(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp3],       $ac1,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -20(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -4(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   12(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  28(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  44(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  60(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac2                            \n\t"
+                "extp           %[Temp4],       $ac3,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac2,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac2,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -19(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -3(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   13(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  29(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  45(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  61(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac0                            \n\t"
+                "extp           %[Temp5],       $ac2,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac0,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac0,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -18(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -2(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   14(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  30(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  46(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  62(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac1                            \n\t"
+                "extp           %[Temp6],       $ac0,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac1,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac1,           %[src_ptr_l1],  %[vector3b]     \n\t"
+
+                "lbu            %[src_ptr_l2],  -17(%[src_ptr])                 \n\t"
+                "lbu            %[src_ptr_l1],  -1(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_0],   15(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r1],  31(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r2],  47(%[src_ptr])                  \n\t"
+                "lbu            %[src_ptr_r3],  63(%[src_ptr])                  \n\t"
+                "mtlo           %[vector4a],    $ac3                            \n\t"
+                "extp           %[Temp7],       $ac1,           9               \n\t"
+
+                "append         %[src_ptr_l2],  %[src_ptr_r3],  8               \n\t"
+                "append         %[src_ptr_0],   %[src_ptr_r1],  8               \n\t"
+                "append         %[src_ptr_l1],  %[src_ptr_r2],  8               \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_l2],  %[vector1b]     \n\t"
+                "dpau.h.qbr     $ac3,           %[src_ptr_0],   %[vector2b]     \n\t"
+                "dpsu.h.qbr     $ac3,           %[src_ptr_l1],  %[vector3b]     \n\t"
+                "extp           %[Temp8],       $ac3,           9               \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+                  [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
+                  [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6),
+                  [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8),
+                  [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0),
+                  [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2),
+                  [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3)
+                : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+                  [vector3b] "r" (vector3b), [vector4a] "r" (vector4a),
+                  [src_ptr] "r" (src_ptr)
+            );
+
+            src_ptr += 16;
+            output_ptr[8] = cm[Temp1];
+            output_ptr[9] = cm[Temp2];
+            output_ptr[10] = cm[Temp3];
+            output_ptr[11] = cm[Temp4];
+            output_ptr[12] = cm[Temp5];
+            output_ptr[13] = cm[Temp6];
+            output_ptr[14] = cm[Temp7];
+            output_ptr[15] = cm[Temp8];
+
+            output_ptr += output_pitch;
+        }
+    }
+}
+
+
+void vp8_sixtap_predict4x4_dspr2
+(
+    unsigned char *RESTRICT src_ptr,
+    int   src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *RESTRICT dst_ptr,
+    int dst_pitch
+)
+{
+    unsigned char FData[9 * 4]; /* Temp data buffer used in filtering */
+    unsigned int pos = 16;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+    );
+
+    if (yoffset)
+    {
+        /* First filter 1-D horizontally... */
+        vp8_filter_block2d_first_pass_4(src_ptr - (2 * src_pixels_per_line), FData,
+                                        src_pixels_per_line, 9, xoffset, 4);
+        /* then filter vertically... */
+        vp8_filter_block2d_second_pass4(FData + 8, dst_ptr, dst_pitch, yoffset);
+    }
+    else
+        /* if (yoffset == 0) the first pass filter writes directly to dst_ptr */
+        vp8_filter_block2d_first_pass_4(src_ptr, dst_ptr, src_pixels_per_line,
+                                        4, xoffset, dst_pitch);
+}
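+
+/*
+ * Illustrative sketch only (kept out of the build): the two-pass separable
+ * scheme used by the predictors above, written as plain C.  hfilter/vfilter
+ * stand for 6-tap coefficient sets selected by xoffset/yoffset; the helper
+ * and its details are hypothetical, and the per-pass rounding/shift/clamp of
+ * the real code is omitted.
+ */
+#if 0
+static void separable_sixtap_4x4_sketch(const unsigned char *src, int src_stride,
+                                        unsigned char *dst, int dst_stride,
+                                        const short hfilter[6], const short vfilter[6])
+{
+    int tmp[9 * 4];   /* 4 output rows + 2 rows above + 3 rows below */
+    int r, c, k, sum;
+
+    /* first pass: filter rows -2 .. 6 horizontally into the temp buffer */
+    for (r = 0; r < 9; r++)
+        for (c = 0; c < 4; c++)
+        {
+            sum = 0;
+            for (k = 0; k < 6; k++)
+                sum += hfilter[k] * src[(r - 2) * src_stride + (c + k - 2)];
+            tmp[r * 4 + c] = sum;
+        }
+
+    /* second pass: filter vertically over the temp buffer; output row r
+     * reads temp rows r .. r+5 (i.e. src rows r-2 .. r+3) */
+    for (r = 0; r < 4; r++)
+        for (c = 0; c < 4; c++)
+        {
+            sum = 0;
+            for (k = 0; k < 6; k++)
+                sum += vfilter[k] * tmp[(r + k) * 4 + c];
+            dst[r * dst_stride + c] = (unsigned char)sum;
+        }
+}
+#endif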
+
+
+void vp8_sixtap_predict8x8_dspr2
+(
+    unsigned char   *RESTRICT src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *RESTRICT dst_ptr,
+    int  dst_pitch
+)
+{
+
+    unsigned char FData[13 * 8]; /* Temp data buffer used in filtering */
+    unsigned int pos, Temp1, Temp2;
+
+    pos = 16;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+        "wrdsp      %[pos],     1               \n\t"
+        :
+        : [pos] "r" (pos)
+    );
+
+    if (yoffset)
+    {
+
+        src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+        if (xoffset)
+            /* filter 1-D horizontally... */
+            vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line,
+                                                13, xoffset, 8);
+
+        else
+        {
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr + 2 * src_pixels_per_line);
+
+            __asm__ __volatile__ (
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   0(%[FData])                             \n\t"
+                "sw     %[Temp2],   4(%[FData])                             \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   8(%[FData])                             \n\t"
+                "sw     %[Temp2],   12(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   16(%[FData])                            \n\t"
+                "sw     %[Temp2],   20(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   24(%[FData])                            \n\t"
+                "sw     %[Temp2],   28(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   32(%[FData])                            \n\t"
+                "sw     %[Temp2],   36(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   40(%[FData])                            \n\t"
+                "sw     %[Temp2],   44(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   48(%[FData])                            \n\t"
+                "sw     %[Temp2],   52(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   56(%[FData])                            \n\t"
+                "sw     %[Temp2],   60(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   64(%[FData])                            \n\t"
+                "sw     %[Temp2],   68(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   72(%[FData])                            \n\t"
+                "sw     %[Temp2],   76(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   80(%[FData])                            \n\t"
+                "sw     %[Temp2],   84(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   88(%[FData])                            \n\t"
+                "sw     %[Temp2],   92(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   96(%[FData])                            \n\t"
+                "sw     %[Temp2],   100(%[FData])                           \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2)
+                : [FData] "r" (FData), [src_ptr] "r" (src_ptr),
+                  [src_pixels_per_line] "r" (src_pixels_per_line)
+            );
+        }
+
+        /* filter vertically... */
+        vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 8, 8, yoffset);
+    }
+
+    /* if (yoffset == 0) the first pass filter writes directly to dst_ptr */
+    else
+    {
+        if (xoffset)
+            vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line,
+                                                8, xoffset, dst_pitch);
+
+        else
+        {
+            /* copy from src buffer to dst buffer */
+            __asm__ __volatile__ (
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   0(%[dst_ptr])                           \n\t"
+                "sw     %[Temp2],   4(%[dst_ptr])                           \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   8(%[dst_ptr])                           \n\t"
+                "sw     %[Temp2],   12(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   16(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   20(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   24(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   28(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],   %[src_pixels_per_line]    \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   32(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   36(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   40(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   44(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   48(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   52(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   56(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   60(%[dst_ptr])                          \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2)
+                : [dst_ptr] "r" (dst_ptr), [src_ptr] "r" (src_ptr),
+                  [src_pixels_per_line] "r" (src_pixels_per_line)
+            );
+        }
+    }
+}
+
+
+void vp8_sixtap_predict8x4_dspr2
+(
+    unsigned char   *RESTRICT src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *RESTRICT dst_ptr,
+    int  dst_pitch
+)
+{
+    unsigned char FData[9 * 8]; /* Temp data buffer used in filtering */
+    unsigned int pos, Temp1, Temp2;
+
+    pos = 16;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+    );
+
+    if (yoffset)
+    {
+
+        src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+        if (xoffset)
+            /* filter 1-D horizontally... */
+            vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line,
+                                                9, xoffset, 8);
+
+        else
+        {
+            /* prefetch src_ptr data to cache memory */
+            prefetch_load(src_ptr + 2 * src_pixels_per_line);
+
+            __asm__ __volatile__ (
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   0(%[FData])                             \n\t"
+                "sw     %[Temp2],   4(%[FData])                             \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   8(%[FData])                             \n\t"
+                "sw     %[Temp2],   12(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   16(%[FData])                            \n\t"
+                "sw     %[Temp2],   20(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   24(%[FData])                            \n\t"
+                "sw     %[Temp2],   28(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   32(%[FData])                            \n\t"
+                "sw     %[Temp2],   36(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   40(%[FData])                            \n\t"
+                "sw     %[Temp2],   44(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   48(%[FData])                            \n\t"
+                "sw     %[Temp2],   52(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   56(%[FData])                            \n\t"
+                "sw     %[Temp2],   60(%[FData])                            \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   64(%[FData])                            \n\t"
+                "sw     %[Temp2],   68(%[FData])                            \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2)
+                : [FData] "r" (FData), [src_ptr] "r" (src_ptr),
+                  [src_pixels_per_line] "r" (src_pixels_per_line)
+            );
+        }
+
+        /* filter vertically... */
+        vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 4, 8, yoffset);
+    }
+
+    /* if (yoffset == 0) the first pass filter writes directly to dst_ptr */
+    else
+    {
+        if (xoffset)
+            vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line,
+                                                4, xoffset, dst_pitch);
+
+        else
+        {
+            /* copy from src buffer to dst buffer */
+            __asm__ __volatile__ (
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   0(%[dst_ptr])                           \n\t"
+                "sw     %[Temp2],   4(%[dst_ptr])                           \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   8(%[dst_ptr])                           \n\t"
+                "sw     %[Temp2],   12(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   16(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   20(%[dst_ptr])                          \n\t"
+                "addu   %[src_ptr], %[src_ptr],    %[src_pixels_per_line]   \n\t"
+
+                "ulw    %[Temp1],   0(%[src_ptr])                           \n\t"
+                "ulw    %[Temp2],   4(%[src_ptr])                           \n\t"
+                "sw     %[Temp1],   24(%[dst_ptr])                          \n\t"
+                "sw     %[Temp2],   28(%[dst_ptr])                          \n\t"
+
+                : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2)
+                : [dst_ptr] "r" (dst_ptr), [src_ptr] "r" (src_ptr),
+                  [src_pixels_per_line] "r" (src_pixels_per_line)
+            );
+        }
+    }
+}
+
+
+void vp8_sixtap_predict16x16_dspr2
+(
+    unsigned char   *RESTRICT src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *RESTRICT dst_ptr,
+    int  dst_pitch
+)
+{
+    const unsigned short *VFilter;
+    unsigned char FData[21 * 16]; /* Temp data buffer used in filtering */
+    unsigned int pos;
+
+    VFilter = sub_pel_filterss[yoffset];
+
+    pos = 16;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+    );
+
+    if (yoffset)
+    {
+
+        src_ptr = src_ptr - (2 * src_pixels_per_line);
+
+        switch (xoffset)
+        {
+            /* filter 1-D horizontally... */
+        case 2:
+        case 4:
+        case 6:
+            /* 6 tap filter */
+            vp8_filter_block2d_first_pass16_6tap(src_ptr, FData, src_pixels_per_line,
+                                                 21, xoffset, 16);
+            break;
+
+        case 0:
+            /* only copy buffer */
+            vp8_filter_block2d_first_pass16_0(src_ptr, FData, src_pixels_per_line);
+            break;
+
+        case 1:
+        case 3:
+        case 5:
+        case 7:
+            /* 4 tap filter */
+            vp8_filter_block2d_first_pass16_4tap(src_ptr, FData, src_pixels_per_line, 16,
+                                                 21, xoffset, yoffset, dst_ptr, dst_pitch);
+            break;
+        }
+
+        /* filter vertically... */
+        vp8_filter_block2d_second_pass161(FData + 32, dst_ptr, dst_pitch, VFilter);
+    }
+    else
+    {
+        /* if (yoffset == 0) the first pass filter writes directly to dst_ptr */
+        switch (xoffset)
+        {
+        case 2:
+        case 4:
+        case 6:
+            /* 6 tap filter */
+            vp8_filter_block2d_first_pass16_6tap(src_ptr, dst_ptr, src_pixels_per_line,
+                                                 16, xoffset, dst_pitch);
+            break;
+
+        case 1:
+        case 3:
+        case 5:
+        case 7:
+            /* 4 tap filter */
+            vp8_filter_block2d_first_pass16_4tap(src_ptr, dst_ptr, src_pixels_per_line, 16,
+                                                 21, xoffset, yoffset, dst_ptr, dst_pitch);
+            break;
+        }
+    }
+}
+
+#endif
--- /dev/null
+++ b/vp8/common/mips/dspr2/idct_blk_dspr2.c
@@ -1,0 +1,88 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vpx_rtcd.h"
+
+#if HAVE_DSPR2
+
+void vp8_dequant_idct_add_y_block_dspr2
+(short *q, short *dq,
+ unsigned char *dst, int stride, char *eobs)
+{
+    int i, j;
+
+    for (i = 0; i < 4; i++)
+    {
+        for (j = 0; j < 4; j++)
+        {
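+            /* eobs carries the end-of-block index for each 4x4 sub-block:
+             * a value greater than 1 means AC coefficients are present and
+             * the full dequant + IDCT is needed; otherwise only the DC term
+             * is added and the first coefficient pair is cleared here (the
+             * full path clears the whole block itself). */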
+            if (*eobs++ > 1)
+                vp8_dequant_idct_add_dspr2(q, dq, dst, stride);
+            else
+            {
+                vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dst, stride, dst, stride);
+                ((int *)q)[0] = 0;
+            }
+
+            q   += 16;
+            dst += 4;
+        }
+
+        dst += 4 * stride - 16;
+    }
+}
+
+void vp8_dequant_idct_add_uv_block_dspr2
+(short *q, short *dq,
+ unsigned char *dstu, unsigned char *dstv, int stride, char *eobs)
+{
+    int i, j;
+
+    for (i = 0; i < 2; i++)
+    {
+        for (j = 0; j < 2; j++)
+        {
+            if (*eobs++ > 1)
+                vp8_dequant_idct_add_dspr2(q, dq, dstu, stride);
+            else
+            {
+                vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dstu, stride, dstu, stride);
+                ((int *)q)[0] = 0;
+            }
+
+            q    += 16;
+            dstu += 4;
+        }
+
+        dstu += 4 * stride - 8;
+    }
+
+    for (i = 0; i < 2; i++)
+    {
+        for (j = 0; j < 2; j++)
+        {
+            if (*eobs++ > 1)
+                vp8_dequant_idct_add_dspr2(q, dq, dstv, stride);
+            else
+            {
+                vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dstv, stride, dstv, stride);
+                ((int *)q)[0] = 0;
+            }
+
+            q    += 16;
+            dstv += 4;
+        }
+
+        dstv += 4 * stride - 8;
+    }
+}
+
+#endif
+
--- /dev/null
+++ b/vp8/common/mips/dspr2/idctllm_dspr2.c
@@ -1,0 +1,369 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_rtcd.h"
+
+#if HAVE_DSPR2
+#define CROP_WIDTH 256
+
+/******************************************************************************
+ * Notes:
+ *
+ * This implementation uses 16-bit fixed-point versions of two multiplicative
+ * constants:
+ *         1.   sqrt(2) * cos (pi/8)
+ *         2.   sqrt(2) * sin (pi/8)
+ * Since the first constant is greater than 1, to keep the same 16-bit
+ * fixed-point precision as the second one we use the identity
+ *         x * a = x + x * (a - 1)
+ * so
+ *         x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) * cos(pi/8) - 1).
+ ****************************************************************************/
+extern unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
+static const int cospi8sqrt2minus1 = 20091;
+static const int sinpi8sqrt2      = 35468;
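+
+/*
+ * Illustrative sketch only (kept out of the build): how the two constants
+ * above are applied to a single coefficient x.  The helper names are
+ * hypothetical.
+ */
+#if 0
+static int mul_sqrt2_cos_pi8(int x)
+{
+    /* sqrt(2) * cos(pi/8) is about 1.30656, which does not fit a Q16
+     * fraction, so compute x + x * (factor - 1) with
+     * cospi8sqrt2minus1 = round(0.30656 * 65536) = 20091 */
+    return x + ((x * cospi8sqrt2minus1) >> 16);
+}
+
+static int mul_sqrt2_sin_pi8(int x)
+{
+    /* sqrt(2) * sin(pi/8) is about 0.54120, so a plain Q16 multiply works
+     * with sinpi8sqrt2 = round(0.54120 * 65536) = 35468 */
+    return (x * sinpi8sqrt2) >> 16;
+}
+#endif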
+
+inline void prefetch_load_short(short *src)
+{
+    __asm__ __volatile__ (
+        "pref   0,  0(%[src])   \n\t"
+        :
+        : [src] "r" (src)
+    );
+}
+
+void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr,
+                                int pred_stride, unsigned char *dst_ptr,
+                                int dst_stride)
+{
+    int r, c;
+    int a1, b1, c1, d1;
+    short output[16];
+    short *ip = input;
+    short *op = output;
+    int temp1, temp2;
+    int shortpitch = 4;
+
+    int c2, d2;
+    int temp3, temp4;
+    unsigned char *cm = ff_cropTbl + CROP_WIDTH;
+
+    /* prepare data for load */
+    prefetch_load_short(ip + 8);
+
+    /* first loop is unrolled */
+    a1 = ip[0] + ip[8];
+    b1 = ip[0] - ip[8];
+
+    temp1 = (ip[4] * sinpi8sqrt2) >> 16;
+    temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+    c1 = temp1 - temp2;
+
+    temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
+    temp2 = (ip[12] * sinpi8sqrt2) >> 16;
+    d1 = temp1 + temp2;
+
+    temp3 = (ip[5] * sinpi8sqrt2) >> 16;
+    temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
+    c2 = temp3 - temp4;
+
+    temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
+    temp4 = (ip[13] * sinpi8sqrt2) >> 16;
+    d2 = temp3 + temp4;
+
+    op[0] = a1 + d1;
+    op[12] = a1 - d1;
+    op[4] = b1 + c1;
+    op[8] = b1 - c1;
+
+    a1 = ip[1] + ip[9];
+    b1 = ip[1] - ip[9];
+
+    op[1] = a1 + d2;
+    op[13] = a1 - d2;
+    op[5] = b1 + c2;
+    op[9] = b1 - c2;
+
+    a1 = ip[2] + ip[10];
+    b1 = ip[2] - ip[10];
+
+    temp1 = (ip[6] * sinpi8sqrt2) >> 16;
+    temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16);
+    c1 = temp1 - temp2;
+
+    temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16);
+    temp2 = (ip[14] * sinpi8sqrt2) >> 16;
+    d1 = temp1 + temp2;
+
+    temp3 = (ip[7] * sinpi8sqrt2) >> 16;
+    temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
+    c2 = temp3 - temp4;
+
+    temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
+    temp4 = (ip[15] * sinpi8sqrt2) >> 16;
+    d2 = temp3 + temp4;
+
+    op[2] = a1 + d1;
+    op[14] = a1 - d1;
+    op[6] = b1 + c1;
+    op[10] = b1 - c1;
+
+    a1 = ip[3] + ip[11];
+    b1 = ip[3] - ip[11];
+
+    op[3] = a1 + d2;
+    op[15] = a1 - d2;
+    op[7] = b1 + c2;
+    op[11] = b1 - c2;
+
+    ip = output;
+
+    /* prepare data for load */
+    prefetch_load_short(ip + shortpitch);
+
+    /* second loop is unrolled */
+    a1 = ip[0] + ip[2];
+    b1 = ip[0] - ip[2];
+
+    temp1 = (ip[1] * sinpi8sqrt2) >> 16;
+    temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
+    c1 = temp1 - temp2;
+
+    temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
+    temp2 = (ip[3] * sinpi8sqrt2) >> 16;
+    d1 = temp1 + temp2;
+
+    temp3 = (ip[5] * sinpi8sqrt2) >> 16;
+    temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
+    c2 = temp3 - temp4;
+
+    temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
+    temp4 = (ip[7] * sinpi8sqrt2) >> 16;
+    d2 = temp3 + temp4;
+
+    op[0] = (a1 + d1 + 4) >> 3;
+    op[3] = (a1 - d1 + 4) >> 3;
+    op[1] = (b1 + c1 + 4) >> 3;
+    op[2] = (b1 - c1 + 4) >> 3;
+
+    a1 = ip[4] + ip[6];
+    b1 = ip[4] - ip[6];
+
+    op[4] = (a1 + d2 + 4) >> 3;
+    op[7] = (a1 - d2 + 4) >> 3;
+    op[5] = (b1 + c2 + 4) >> 3;
+    op[6] = (b1 - c2 + 4) >> 3;
+
+    a1 = ip[8] + ip[10];
+    b1 = ip[8] - ip[10];
+
+    temp1 = (ip[9] * sinpi8sqrt2) >> 16;
+    temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16);
+    c1 = temp1 - temp2;
+
+    temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16);
+    temp2 = (ip[11] * sinpi8sqrt2) >> 16;
+    d1 = temp1 + temp2;
+
+    temp3 = (ip[13] * sinpi8sqrt2) >> 16;
+    temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
+    c2 = temp3 - temp4;
+
+    temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
+    temp4 = (ip[15] * sinpi8sqrt2) >> 16;
+    d2 = temp3 + temp4;
+
+    op[8] = (a1 + d1 + 4) >> 3;
+    op[11] = (a1 - d1 + 4) >> 3;
+    op[9] = (b1 + c1 + 4) >> 3;
+    op[10] = (b1 - c1 + 4) >> 3;
+
+    a1 = ip[12] + ip[14];
+    b1 = ip[12] - ip[14];
+
+    op[12] = (a1 + d2 + 4) >> 3;
+    op[15] = (a1 - d2 + 4) >> 3;
+    op[13] = (b1 + c2 + 4) >> 3;
+    op[14] = (b1 - c2 + 4) >> 3;
+
+    ip = output;
+
+    for (r = 0; r < 4; r++)
+    {
+        for (c = 0; c < 4; c++)
+        {
+            short a = ip[c] + pred_ptr[c] ;
+            dst_ptr[c] = cm[a] ;
+        }
+
+        ip += 4;
+        dst_ptr += dst_stride;
+        pred_ptr += pred_stride;
+    }
+}
+
+void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride)
+{
+    int a1;
+    int i, absa1;
+    int t2, vector_a1, vector_a;
+
+    /* a1 = ((input_dc + 4) >> 3); */
+    __asm__ __volatile__ (
+        "addi  %[a1], %[input_dc], 4   \n\t"
+        "sra   %[a1], %[a1],       3   \n\t"
+        : [a1] "=r" (a1)
+        : [input_dc] "r" (input_dc)
+    );
+
+    if (a1 < 0)
+    {
+        /* use quad-byte operations;
+         * input and output memory are four-byte aligned
+         */
+        __asm__ __volatile__ (
+            "abs        %[absa1],     %[a1]         \n\t"
+            "replv.qb   %[vector_a1], %[absa1]      \n\t"
+            : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1)
+            : [a1] "r" (a1)
+        );
+
+        /* use (pred_ptr[c] - abs(a1)) instead of (pred_ptr[c] + a1), since a1 is negative */
+        for (i = 4; i--;)
+        {
+            __asm__ __volatile__ (
+                "lw             %[t2],       0(%[pred_ptr])                     \n\t"
+                "add            %[pred_ptr], %[pred_ptr],    %[pred_stride]     \n\t"
+                "subu_s.qb      %[vector_a], %[t2],          %[vector_a1]       \n\t"
+                "sw             %[vector_a], 0(%[dst_ptr])                      \n\t"
+                "add            %[dst_ptr],  %[dst_ptr],     %[dst_stride]      \n\t"
+                : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
+                  [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" (pred_ptr)
+                : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1)
+            );
+        }
+    }
+    else
+    {
+        /* use quad-byte operations;
+         * input and output memory are four-byte aligned
+         */
+        __asm__ __volatile__ (
+            "replv.qb       %[vector_a1], %[a1]     \n\t"
+            : [vector_a1] "=r" (vector_a1)
+            : [a1] "r" (a1)
+        );
+
+        for (i = 4; i--;)
+        {
+            __asm__ __volatile__ (
+                "lw             %[t2],       0(%[pred_ptr])                 \n\t"
+                "add            %[pred_ptr], %[pred_ptr],    %[pred_stride] \n\t"
+                "addu_s.qb      %[vector_a], %[vector_a1],   %[t2]          \n\t"
+                "sw             %[vector_a], 0(%[dst_ptr])                  \n\t"
+                "add            %[dst_ptr],  %[dst_ptr],     %[dst_stride]  \n\t"
+                : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a),
+                  [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" (pred_ptr)
+                : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1)
+            );
+        }
+    }
+
+}
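+
+/*
+ * Illustrative sketch only (kept out of the build): scalar equivalent of the
+ * quad-byte paths above.  replv.qb replicates a1 (or abs(a1)) into all four
+ * byte lanes, and addu_s.qb / subu_s.qb then add or subtract it with per-byte
+ * unsigned saturation, so each word operation handles four pixels at once.
+ */
+#if 0
+static void dc_only_idct_add_sketch(int a1, const unsigned char *pred_ptr,
+                                    int pred_stride, unsigned char *dst_ptr,
+                                    int dst_stride)
+{
+    int r, c;
+
+    for (r = 0; r < 4; r++)
+    {
+        for (c = 0; c < 4; c++)
+        {
+            int v = pred_ptr[c] + a1;   /* a1 may be negative */
+            dst_ptr[c] = (v < 0) ? 0 : (v > 255) ? 255 : (unsigned char)v;
+        }
+
+        pred_ptr += pred_stride;
+        dst_ptr  += dst_stride;
+    }
+}
+#endif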
+
+void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff)
+{
+    short output[16];
+    int i;
+    int a1, b1, c1, d1;
+    int a2, b2, c2, d2;
+    short *ip = input;
+    short *op = output;
+
+    prefetch_load_short(ip);
+
+    for (i = 4; i--;)
+    {
+        a1 = ip[0] + ip[12];
+        b1 = ip[4] + ip[8];
+        c1 = ip[4] - ip[8];
+        d1 = ip[0] - ip[12];
+
+        op[0] = a1 + b1;
+        op[4] = c1 + d1;
+        op[8] = a1 - b1;
+        op[12] = d1 - c1;
+
+        ip++;
+        op++;
+    }
+
+    ip = output;
+    op = output;
+
+    prefetch_load_short(ip);
+
+    for (i = 4; i--;)
+    {
+        a1 = ip[0] + ip[3] + 3;
+        b1 = ip[1] + ip[2];
+        c1 = ip[1] - ip[2];
+        d1 = ip[0] - ip[3] + 3;
+
+        a2 = a1 + b1;
+        b2 = d1 + c1;
+        c2 = a1 - b1;
+        d2 = d1 - c1;
+
+        op[0] = a2 >> 3;
+        op[1] = b2 >> 3;
+        op[2] = c2 >> 3;
+        op[3] = d2 >> 3;
+
+        ip += 4;
+        op += 4;
+    }
+
+    for (i = 0; i < 16; i++)
+    {
+        mb_dqcoeff[i * 16] = output[i];
+    }
+}
+
+void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff)
+{
+    int a1;
+
+    a1 = ((input[0] + 3) >> 3);
+
+    __asm__ __volatile__ (
+        "sh             %[a1], 0(%[mb_dqcoeff])                    \n\t"
+        "sh             %[a1], 32(%[mb_dqcoeff])                   \n\t"
+        "sh             %[a1], 64(%[mb_dqcoeff])                   \n\t"
+        "sh             %[a1], 96(%[mb_dqcoeff])                   \n\t"
+        "sh             %[a1], 128(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 160(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 192(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 224(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 256(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 288(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 320(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 352(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 384(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 416(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 448(%[mb_dqcoeff])                  \n\t"
+        "sh             %[a1], 480(%[mb_dqcoeff])                  \n\t"
+
+        :
+        : [a1] "r" (a1), [mb_dqcoeff] "r" (mb_dqcoeff)
+    );
+}
+
+#endif
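The sh offsets in vp8_short_inv_walsh4x4_1_dspr2 step by 32 bytes (16 shorts), so the single DC value lands in the first coefficient of each of the 16 dequantized 4x4 blocks, matching the final store loop of vp8_short_inv_walsh4x4_dspr2. A plain-C sketch of that store pattern (function name is illustrative, not part of the patch):

/* Equivalent of the sh sequence above: a1 goes to mb_dqcoeff[i * 16],
 * i.e. byte offsets 0, 32, ..., 480. */
static void inv_walsh4x4_1_ref(short *input, short *mb_dqcoeff)
{
    int i;
    short a1 = (short)((input[0] + 3) >> 3);

    for (i = 0; i < 16; i++)
    {
        mb_dqcoeff[i * 16] = a1;
    }
}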
--- /dev/null
+++ b/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
@@ -1,0 +1,2622 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include "vpx_rtcd.h"
+#include "vp8/common/onyxc_int.h"
+
+#if HAVE_DSPR2
+typedef unsigned char uc;
+
+/* prefetch data for load */
+inline void prefetch_load_lf(unsigned char *src)
+{
+    __asm__ __volatile__ (
+        "pref   0,  0(%[src])   \n\t"
+        :
+        : [src] "r" (src)
+    );
+}
+
+
+/* prefetch data for store */
+inline void prefetch_store_lf(unsigned char *dst)
+{
+    __asm__ __volatile__ (
+        "pref   1,  0(%[dst])   \n\t"
+        :
+        : [dst] "r" (dst)
+    );
+}
+
+/* processing 4 pixels at the same time
+ * compute hev and mask in the same function
+ */
+static __inline void vp8_filter_mask_vec_mips
+(
+    uint32_t limit,
+    uint32_t flimit,
+    uint32_t p1,
+    uint32_t p0,
+    uint32_t p3,
+    uint32_t p2,
+    uint32_t q0,
+    uint32_t q1,
+    uint32_t q2,
+    uint32_t q3,
+    uint32_t thresh,
+    uint32_t *hev,
+    uint32_t *mask
+)
+{
+    uint32_t c, r, r3, r_k;
+    uint32_t s1, s2, s3;
+    uint32_t ones = 0xFFFFFFFF;
+    uint32_t hev1;
+
+    __asm__ __volatile__ (
+        /* mask |= (abs(p3 - p2) > limit) */
+        "subu_s.qb      %[c],   %[p3],     %[p2]        \n\t"
+        "subu_s.qb      %[r_k], %[p2],     %[p3]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   $0,        %[c]         \n\t"
+
+        /* mask |= (abs(p2 - p1) > limit) */
+        "subu_s.qb      %[c],   %[p2],     %[p1]        \n\t"
+        "subu_s.qb      %[r_k], %[p1],     %[p2]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   %[r],      %[c]         \n\t"
+
+        /* mask |= (abs(p1 - p0) > limit)
+         * hev  |= (abs(p1 - p0) > thresh)
+         */
+        "subu_s.qb      %[c],   %[p1],     %[p0]        \n\t"
+        "subu_s.qb      %[r_k], %[p0],     %[p1]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[thresh], %[r_k]       \n\t"
+        "or             %[r3],  $0,        %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   %[r],      %[c]         \n\t"
+
+        /* mask |= (abs(q1 - q0) > limit)
+         * hev  |= (abs(q1 - q0) > thresh)
+         */
+        "subu_s.qb      %[c],   %[q1],     %[q0]        \n\t"
+        "subu_s.qb      %[r_k], %[q0],     %[q1]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[thresh], %[r_k]       \n\t"
+        "or             %[r3],  %[r3],     %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   %[r],      %[c]         \n\t"
+
+        /* mask |= (abs(q2 - q1) > limit) */
+        "subu_s.qb      %[c],   %[q2],     %[q1]        \n\t"
+        "subu_s.qb      %[r_k], %[q1],     %[q2]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   %[r],      %[c]         \n\t"
+        "sll            %[r3],    %[r3],    24          \n\t"
+
+        /* mask |= (abs(q3 - q2) > limit) */
+        "subu_s.qb      %[c],   %[q3],     %[q2]        \n\t"
+        "subu_s.qb      %[r_k], %[q2],     %[q3]        \n\t"
+        "or             %[r_k], %[r_k],    %[c]         \n\t"
+        "cmpgu.lt.qb    %[c],   %[limit],  %[r_k]       \n\t"
+        "or             %[r],   %[r],      %[c]         \n\t"
+
+        : [c] "=&r" (c), [r_k] "=&r" (r_k),
+          [r] "=&r" (r), [r3] "=&r" (r3)
+        : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2),
+          [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),
+          [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh)
+    );
+
+    __asm__ __volatile__ (
+        /* abs(p0 - q0) */
+        "subu_s.qb      %[c],   %[p0],     %[q0]        \n\t"
+        "subu_s.qb      %[r_k], %[q0],     %[p0]        \n\t"
+        "wrdsp          %[r3]                           \n\t"
+        "or             %[s1],  %[r_k],    %[c]         \n\t"
+
+        /* abs(p1 - q1) */
+        "subu_s.qb      %[c],    %[p1],    %[q1]        \n\t"
+        "addu_s.qb      %[s3],   %[s1],    %[s1]        \n\t"
+        "pick.qb        %[hev1], %[ones],  $0           \n\t"
+        "subu_s.qb      %[r_k],  %[q1],    %[p1]        \n\t"
+        "or             %[s2],   %[r_k],   %[c]         \n\t"
+
+        /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > flimit * 2 + limit */
+        "shrl.qb        %[s2],   %[s2],     1           \n\t"
+        "addu_s.qb      %[s1],   %[s2],     %[s3]       \n\t"
+        "cmpgu.lt.qb    %[c],    %[flimit], %[s1]       \n\t"
+        "or             %[r],    %[r],      %[c]        \n\t"
+        "sll            %[r],    %[r],      24          \n\t"
+
+        "wrdsp          %[r]                            \n\t"
+        "pick.qb        %[s2],  $0,         %[ones]     \n\t"
+
+        : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1),
+          [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3)
+        : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),
+          [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit)
+    );
+
+    *hev = hev1;
+    *mask = s2;
+}
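Per byte lane, the two blocks above evaluate the standard VP8 limit and high-edge-variance tests: cmpgu.lt.qb produces the per-lane comparison bits and the pick.qb instructions expand them into 0x00/0xFF lanes. A scalar sketch of the same logic (illustrative; the helper name is not in the patch, abs() comes from the <stdlib.h> included above, and flimit is assumed to arrive pre-combined as the "flimit * 2 + limit" value mentioned in the comment):

/* Per-pixel version of the vectorized mask/hev computation. */
static void filter_mask_ref(int limit, int flimit, int thresh,
                            int p3, int p2, int p1, int p0,
                            int q0, int q1, int q2, int q3,
                            unsigned char *hev, unsigned char *mask)
{
    int over = 0;   /* any limit exceeded? */
    int hv = 0;     /* high edge variance? */

    over |= abs(p3 - p2) > limit;
    over |= abs(p2 - p1) > limit;
    over |= abs(p1 - p0) > limit;
    over |= abs(q1 - q0) > limit;
    over |= abs(q2 - q1) > limit;
    over |= abs(q3 - q2) > limit;
    over |= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit;

    hv |= abs(p1 - p0) > thresh;
    hv |= abs(q1 - q0) > thresh;

    *mask = over ? 0x00 : 0xFF;   /* filter only where every limit holds */
    *hev  = hv ? 0xFF : 0x00;
}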
+
+
+/* inputs & outputs are quad-byte vectors */
+static __inline void vp8_filter_mips
+(
+    uint32_t mask,
+    uint32_t hev,
+    uint32_t *ps1,
+    uint32_t *ps0,
+    uint32_t *qs0,
+    uint32_t *qs1
+)
+{
+    int32_t vp8_filter_l, vp8_filter_r;
+    int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r;
+    int32_t subr_r, subr_l;
+    uint32_t t1, t2, HWM, t3;
+    uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r;
+
+    int32_t vps1, vps0, vqs0, vqs1;
+    int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r;
+    uint32_t N128;
+
+    N128 = 0x80808080;
+    t1  = 0x03000300;
+    t2  = 0x04000400;
+    t3  = 0x01000100;
+    HWM = 0xFF00FF00;
+
+    vps0 = (*ps0) ^ N128;
+    vps1 = (*ps1) ^ N128;
+    vqs0 = (*qs0) ^ N128;
+    vqs1 = (*qs1) ^ N128;
+
+    /* use halfword pairs instead of quad-bytes for accuracy */
+    vps0_l = vps0 & HWM;
+    vps0_r = vps0 << 8;
+    vps0_r = vps0_r & HWM;
+
+    vps1_l = vps1 & HWM;
+    vps1_r = vps1 << 8;
+    vps1_r = vps1_r & HWM;
+
+    vqs0_l = vqs0 & HWM;
+    vqs0_r = vqs0 << 8;
+    vqs0_r = vqs0_r & HWM;
+
+    vqs1_l = vqs1 & HWM;
+    vqs1_r = vqs1 << 8;
+    vqs1_r = vqs1_r & HWM;
+
+    mask_l = mask & HWM;
+    mask_r = mask << 8;
+    mask_r = mask_r & HWM;
+
+    hev_l = hev & HWM;
+    hev_r = hev << 8;
+    hev_r = hev_r & HWM;
+
+    __asm__ __volatile__ (
+        /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */
+        "subq_s.ph    %[vp8_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
+        "subq_s.ph    %[vp8_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
+
+        /* qs0 - ps0 */
+        "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
+        "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
+
+        /* vp8_filter &= hev; */
+        "and          %[vp8_filter_l], %[vp8_filter_l], %[hev_l]        \n\t"
+        "and          %[vp8_filter_r], %[vp8_filter_r], %[hev_r]        \n\t"
+
+        /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+        "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+        "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+
+        /* vp8_filter &= mask; */
+        "and          %[vp8_filter_l], %[vp8_filter_l], %[mask_l]       \n\t"
+        "and          %[vp8_filter_r], %[vp8_filter_r], %[mask_r]       \n\t"
+
+        : [vp8_filter_l] "=&r" (vp8_filter_l), [vp8_filter_r] "=&r" (vp8_filter_r),
+          [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r),
+          [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r)
+
+        : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l),
+          [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r),
+          [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r),
+          [mask_l] "r" (mask_l), [mask_r] "r" (mask_r),
+          [hev_l] "r" (hev_l), [hev_r] "r" (hev_r),
+          [HWM] "r" (HWM)
+    );
+
+    /* save bottom 3 bits so that we round one side +4 and the other +3 */
+    __asm__ __volatile__ (
+        /* Filter1 = vp8_signed_char_clamp(vp8_filter + 4) >>= 3; */
+        "addq_s.ph    %[Filter1_l],    %[vp8_filter_l], %[t2]           \n\t"
+        "addq_s.ph    %[Filter1_r],    %[vp8_filter_r], %[t2]           \n\t"
+
+        /* Filter2 = vp8_signed_char_clamp(vp8_filter + 3) >>= 3; */
+        "addq_s.ph    %[Filter2_l],    %[vp8_filter_l], %[t1]           \n\t"
+        "addq_s.ph    %[Filter2_r],    %[vp8_filter_r], %[t1]           \n\t"
+        "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
+        "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
+
+        "shra.ph      %[Filter2_l],    %[Filter2_l],    3               \n\t"
+        "shra.ph      %[Filter2_r],    %[Filter2_r],    3               \n\t"
+
+        "and          %[Filter1_l],    %[Filter1_l],    %[HWM]          \n\t"
+        "and          %[Filter1_r],    %[Filter1_r],    %[HWM]          \n\t"
+
+        /* vps0 = vp8_signed_char_clamp(ps0 + Filter2); */
+        "addq_s.ph    %[vps0_l],       %[vps0_l],       %[Filter2_l]    \n\t"
+        "addq_s.ph    %[vps0_r],       %[vps0_r],       %[Filter2_r]    \n\t"
+
+        /* vqs0 = vp8_signed_char_clamp(qs0 - Filter1); */
+        "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
+        "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
+
+        : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r),
+          [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r),
+          [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
+          [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r)
+
+        : [t1] "r" (t1), [t2] "r" (t2),
+          [vp8_filter_l] "r" (vp8_filter_l), [vp8_filter_r] "r" (vp8_filter_r),
+          [HWM] "r" (HWM)
+    );
+
+    __asm__ __volatile__ (
+        /* (vp8_filter += 1) >>= 1 */
+        "addqh.ph    %[Filter1_l],    %[Filter1_l],     %[t3]           \n\t"
+        "addqh.ph    %[Filter1_r],    %[Filter1_r],     %[t3]           \n\t"
+
+        /* vp8_filter &= ~hev; */
+        "and          %[Filter1_l],    %[Filter1_l],    %[invhev_l]     \n\t"
+        "and          %[Filter1_r],    %[Filter1_r],    %[invhev_r]     \n\t"
+
+        /* vps1 = vp8_signed_char_clamp(ps1 + vp8_filter); */
+        "addq_s.ph    %[vps1_l],       %[vps1_l],       %[Filter1_l]    \n\t"
+        "addq_s.ph    %[vps1_r],       %[vps1_r],       %[Filter1_r]    \n\t"
+
+        /* vqs1 = vp8_signed_char_clamp(qs1 - vp8_filter); */
+        "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[Filter1_l]    \n\t"
+        "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[Filter1_r]    \n\t"
+
+        : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r),
+          [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r),
+          [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r)
+
+        : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r)
+    );
+
+    /* Create quad-bytes from halfword pairs */
+    vqs0_l = vqs0_l & HWM;
+    vqs1_l = vqs1_l & HWM;
+    vps0_l = vps0_l & HWM;
+    vps1_l = vps1_l & HWM;
+
+    __asm__ __volatile__ (
+        "shrl.ph      %[vqs0_r],       %[vqs0_r],       8               \n\t"
+        "shrl.ph      %[vps0_r],       %[vps0_r],       8               \n\t"
+        "shrl.ph      %[vqs1_r],       %[vqs1_r],       8               \n\t"
+        "shrl.ph      %[vps1_r],       %[vps1_r],       8               \n\t"
+
+        : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r),
+          [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r)
+        :
+    );
+
+    vqs0 = vqs0_l | vqs0_r;
+    vqs1 = vqs1_l | vqs1_r;
+    vps0 = vps0_l | vps0_r;
+    vps1 = vps1_l | vps1_r;
+
+    *ps0 = vps0 ^ N128;
+    *ps1 = vps1 ^ N128;
+    *qs0 = vqs0 ^ N128;
+    *qs1 = vqs1 ^ N128;
+}
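Expressed per pixel, the three blocks above apply the usual VP8 filter adjustment to values that were mapped into the signed range by the XOR with 0x80; a scalar sketch (helper names are illustrative, not from the patch; mask and hev here are plain flags, whereas the vector code applies them as 0x00/0xFF byte masks with AND):

static signed char sclamp(int t)
{
    return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
}

/* Scalar counterpart of vp8_filter_mips on signed (value ^ 0x80) pixels. */
static void filter_ref(int mask, int hev,
                       signed char *ps1, signed char *ps0,
                       signed char *qs0, signed char *qs1)
{
    int f, Filter1, Filter2;

    f = sclamp(*ps1 - *qs1);
    if (!hev)                        /* outer tap only on high edge variance */
        f = 0;
    f = sclamp(f + 3 * (*qs0 - *ps0));
    if (!mask)                       /* skip pixels that failed the limit tests */
        f = 0;

    Filter1 = sclamp(f + 4) >> 3;    /* round one side +4 ... */
    Filter2 = sclamp(f + 3) >> 3;    /* ... and the other +3 */
    *qs0 = sclamp(*qs0 - Filter1);
    *ps0 = sclamp(*ps0 + Filter2);

    f = (Filter1 + 1) >> 1;          /* half-strength adjustment for the outer pair */
    if (hev)
        f = 0;
    *ps1 = sclamp(*ps1 + f);
    *qs1 = sclamp(*qs1 - f);
}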
+
+void vp8_loop_filter_horizontal_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    uint32_t mask;
+    uint32_t hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+
+    mask = 0;
+    hev = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+
+    /* prefetch data for store */
+    prefetch_store_lf(s);
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    sm1 = s - (p << 2);
+    s0 = s - p - p - p;
+    s1 = s - p - p ;
+    s2 = s - p;
+    s3 = s;
+    s4 = s + p;
+    s5 = s + p + p;
+    s6 = s + p + p + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+
+    sm1 += 4;
+    s0  += 4;
+    s1  += 4;
+    s2  += 4;
+    s3  += 4;
+    s4  += 4;
+    s5  += 4;
+    s6  += 4;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+
+    sm1 += 4;
+    s0  += 4;
+    s1  += 4;
+    s2  += 4;
+    s3  += 4;
+    s4  += 4;
+    s5  += 4;
+    s6  += 4;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+
+    sm1 += 4;
+    s0  += 4;
+    s1  += 4;
+    s2  += 4;
+    s3  += 4;
+    s4  += 4;
+    s5  += 4;
+    s6  += 4;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+}
+
+void vp8_loop_filter_uvhorizontal_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    uint32_t mask;
+    uint32_t hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+
+    mask = 0;
+    hev = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    sm1 = s - (p << 2);
+    s0  = s - p - p - p;
+    s1  = s - p - p ;
+    s2  = s - p;
+    s3  = s;
+    s4  = s + p;
+    s5  = s + p + p;
+    s6  = s + p + p + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+
+    sm1 += 4;
+    s0  += 4;
+    s1  += 4;
+    s2  += 4;
+    s3  += 4;
+    s4  += 4;
+    s5  += 4;
+    s6  += 4;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood */
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+        }
+    }
+}
+
+void vp8_loop_filter_vertical_edge_mips
+(
+    unsigned char *s,
+    int p,
+    const unsigned int flimit,
+    const unsigned int limit,
+    const unsigned int thresh,
+    int count
+)
+{
+    int i;
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *s1, *s2, *s3, *s4;
+    uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+    hev = 0;
+    mask = 0;
+    i = 0;
+    pm1 = 0;
+    p0 = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+    p5 = 0;
+    p6 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    /* apply filter on 4 pixels at the same time */
+    do
+    {
+
+        /* prefetch data for store */
+        prefetch_store_lf(s + p);
+
+        s1 = s;
+        s2 = s + p;
+        s3 = s2 + p;
+        s4 = s3 + p;
+        s  = s4 + p;
+
+        /* load quad-byte vectors
+         * memory is 4 byte aligned
+         */
+        p2  = *((uint32_t *)(s1 - 4));
+        p6  = *((uint32_t *)(s1));
+        p1  = *((uint32_t *)(s2 - 4));
+        p5  = *((uint32_t *)(s2));
+        p0  = *((uint32_t *)(s3 - 4));
+        p4  = *((uint32_t *)(s3));
+        pm1 = *((uint32_t *)(s4 - 4));
+        p3  = *((uint32_t *)(s4));
+
+        /* transpose pm1, p0, p1, p2 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+            "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+            "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+            "append         %[p1],      %[sec3],    16          \n\t"
+            "append         %[pm1],     %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* transpose p3, p4, p5, p6 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+            "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+            "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+            "append         %[p5],      %[sec3],    16          \n\t"
+            "append         %[p3],      %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+         * mask will be zero and filtering is not needed
+         */
+        if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+        {
+
+            vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                     thresh, &hev, &mask);
+
+            /* if mask == 0, filtering is not needed */
+            if (mask)
+            {
+                /* filtering */
+                vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+                /* unpack processed 4x4 neighborhood
+                 * don't use transpose on output data
+                 * because memory isn't aligned
+                 */
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s4])    \n\t"
+                    "sb         %[p3],  0(%[s4])    \n\t"
+                    "sb         %[p2], -1(%[s4])    \n\t"
+                    "sb         %[p1], -2(%[s4])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s3])    \n\t"
+                    "sb         %[p3],  0(%[s3])    \n\t"
+                    "sb         %[p2], -1(%[s3])    \n\t"
+                    "sb         %[p1], -2(%[s3])    \n\t"
+                    : [p1] "+r" (p1)
+                    : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s2])    \n\t"
+                    "sb         %[p3],  0(%[s2])    \n\t"
+                    "sb         %[p2], -1(%[s2])    \n\t"
+                    "sb         %[p1], -2(%[s2])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s1])    \n\t"
+                    "sb         %[p3],  0(%[s1])    \n\t"
+                    "sb         %[p2], -1(%[s1])    \n\t"
+                    "sb         %[p1], -2(%[s1])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+            }
+        }
+
+        s1 = s;
+        s2 = s + p;
+        s3 = s2 + p;
+        s4 = s3 + p;
+        s  = s4 + p;
+
+        /* load quad-byte vectors
+         * memory is 4 byte aligned
+         */
+        p2  = *((uint32_t *)(s1 - 4));
+        p6  = *((uint32_t *)(s1));
+        p1  = *((uint32_t *)(s2 - 4));
+        p5  = *((uint32_t *)(s2));
+        p0  = *((uint32_t *)(s3 - 4));
+        p4  = *((uint32_t *)(s3));
+        pm1 = *((uint32_t *)(s4 - 4));
+        p3  = *((uint32_t *)(s4));
+
+        /* transpose pm1, p0, p1, p2 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+            "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+            "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+            "append         %[p1],      %[sec3],    16          \n\t"
+            "append         %[pm1],     %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* transpose p3, p4, p5, p6 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+            "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+            "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+            "append         %[p5],      %[sec3],    16          \n\t"
+            "append         %[p3],      %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+         * mask will be zero and filtering is not needed
+         */
+        if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+        {
+
+            vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                     thresh, &hev, &mask);
+
+            /* if mask == 0, filtering is not needed */
+            if (mask)
+            {
+                /* filtering */
+                vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+                /* unpack processed 4x4 neighborhood
+                 * don't use transpose on output data
+                 * because memory isn't aligned
+                 */
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s4])    \n\t"
+                    "sb         %[p3],  0(%[s4])    \n\t"
+                    "sb         %[p2], -1(%[s4])    \n\t"
+                    "sb         %[p1], -2(%[s4])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s3])    \n\t"
+                    "sb         %[p3],  0(%[s3])    \n\t"
+                    "sb         %[p2], -1(%[s3])    \n\t"
+                    "sb         %[p1], -2(%[s3])    \n\t"
+                    : [p1] "+r" (p1)
+                    : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s2])    \n\t"
+                    "sb         %[p3],  0(%[s2])    \n\t"
+                    "sb         %[p2], -1(%[s2])    \n\t"
+                    "sb         %[p1], -2(%[s2])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p4], %[p4], 8     \n\t"
+                    "srl        %[p3], %[p3], 8     \n\t"
+                    "srl        %[p2], %[p2], 8     \n\t"
+                    "srl        %[p1], %[p1], 8     \n\t"
+                    : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p4],  1(%[s1])    \n\t"
+                    "sb         %[p3],  0(%[s1])    \n\t"
+                    "sb         %[p2], -1(%[s1])    \n\t"
+                    "sb         %[p1], -2(%[s1])    \n\t"
+                    :
+                    : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                      [p2] "r" (p2), [p1] "r" (p1)
+                );
+            }
+        }
+
+        i += 8;
+    } while (i < count);
+}
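The precrq/precr/append sequences used above gather a 4x4 block of pixels from four rows into four word registers, which is effectively a 4x4 byte transpose; a plain-C sketch of the data movement, ignoring the packed-register representation (illustrative; little-endian byte order assumed):

static void transpose_4x4_ref(const unsigned char in[4][4], unsigned char out[4][4])
{
    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            out[c][r] = in[r][c];   /* row r of the input becomes column r of the output */
        }
    }
}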
+
+void vp8_loop_filter_uvvertical_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *s1, *s2, *s3, *s4;
+    uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    /* apply filter on 4 pixels at the same time */
+
+    s1 = s;
+    s2 = s + p;
+    s3 = s2 + p;
+    s4 = s3 + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p2  = *((uint32_t *)(s1 - 4));
+    p6  = *((uint32_t *)(s1));
+    p1  = *((uint32_t *)(s2 - 4));
+    p5  = *((uint32_t *)(s2));
+    p0  = *((uint32_t *)(s3 - 4));
+    p4  = *((uint32_t *)(s3));
+    pm1 = *((uint32_t *)(s4 - 4));
+    p3  = *((uint32_t *)(s4));
+
+    /* transpose pm1, p0, p1, p2 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+        "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+        "append         %[p1],      %[sec3],    16          \n\t"
+        "append         %[pm1],     %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* transpose p3, p4, p5, p6 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+        "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+        "append         %[p5],      %[sec3],    16          \n\t"
+        "append         %[p3],      %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood
+             * don't use transpose on output data
+             * because memory isn't aligned
+             */
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s4])    \n\t"
+                "sb         %[p3],  0(%[s4])    \n\t"
+                "sb         %[p2], -1(%[s4])    \n\t"
+                "sb         %[p1], -2(%[s4])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                  [p2] "r" (p2), [p1] "r" (p1)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s3])    \n\t"
+                "sb         %[p3],  0(%[s3])    \n\t"
+                "sb         %[p2], -1(%[s3])    \n\t"
+                "sb         %[p1], -2(%[s3])    \n\t"
+                : [p1] "+r" (p1)
+                : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s2])    \n\t"
+                "sb         %[p3],  0(%[s2])    \n\t"
+                "sb         %[p2], -1(%[s2])    \n\t"
+                "sb         %[p1], -2(%[s2])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                  [p2] "r" (p2), [p1] "r" (p1)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s1])    \n\t"
+                "sb         %[p3],  0(%[s1])    \n\t"
+                "sb         %[p2], -1(%[s1])    \n\t"
+                "sb         %[p1], -2(%[s1])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), [p2] "r" (p2), [p1] "r" (p1)
+            );
+        }
+    }
+
+    s1 = s4 + p;
+    s2 = s1 + p;
+    s3 = s2 + p;
+    s4 = s3 + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p2  = *((uint32_t *)(s1 - 4));
+    p6  = *((uint32_t *)(s1));
+    p1  = *((uint32_t *)(s2 - 4));
+    p5  = *((uint32_t *)(s2));
+    p0  = *((uint32_t *)(s3 - 4));
+    p4  = *((uint32_t *)(s3));
+    pm1 = *((uint32_t *)(s4 - 4));
+    p3  = *((uint32_t *)(s4));
+
+    /* transpose pm1, p0, p1, p2 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+        "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+        "append         %[p1],      %[sec3],    16          \n\t"
+        "append         %[pm1],     %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* transpose p3, p4, p5, p6 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+        "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+        "append         %[p5],      %[sec3],    16          \n\t"
+        "append         %[p3],      %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4);
+
+            /* unpack processed 4x4 neighborhood
+             * don't use transpose on output data
+             * because memory isn't aligned
+             */
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s4])    \n\t"
+                "sb         %[p3],  0(%[s4])    \n\t"
+                "sb         %[p2], -1(%[s4])    \n\t"
+                "sb         %[p1], -2(%[s4])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                  [p2] "r" (p2), [p1] "r" (p1)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s3])    \n\t"
+                "sb         %[p3],  0(%[s3])    \n\t"
+                "sb         %[p2], -1(%[s3])    \n\t"
+                "sb         %[p1], -2(%[s3])    \n\t"
+                : [p1] "+r" (p1)
+                : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s2])    \n\t"
+                "sb         %[p3],  0(%[s2])    \n\t"
+                "sb         %[p2], -1(%[s2])    \n\t"
+                "sb         %[p1], -2(%[s2])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                  [p2] "r" (p2), [p1] "r" (p1)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p4], %[p4], 8     \n\t"
+                "srl        %[p3], %[p3], 8     \n\t"
+                "srl        %[p2], %[p2], 8     \n\t"
+                "srl        %[p1], %[p1], 8     \n\t"
+                : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p4],  1(%[s1])    \n\t"
+                "sb         %[p3],  0(%[s1])    \n\t"
+                "sb         %[p2], -1(%[s1])    \n\t"
+                "sb         %[p1], -2(%[s1])    \n\t"
+                :
+                : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                  [p2] "r" (p2), [p1] "r" (p1)
+            );
+        }
+    }
+}
+
+/* inputs & outputs are quad-byte vectors */
+static __inline void vp8_mbfilter_mips
+(
+    uint32_t mask,
+    uint32_t hev,
+    uint32_t *ps2,
+    uint32_t *ps1,
+    uint32_t *ps0,
+    uint32_t *qs0,
+    uint32_t *qs1,
+    uint32_t *qs2
+)
+{
+    int32_t vps2, vps1, vps0, vqs0, vqs1, vqs2;
+    int32_t vps2_l, vps1_l, vps0_l, vqs0_l, vqs1_l, vqs2_l;
+    int32_t vps2_r, vps1_r, vps0_r, vqs0_r, vqs1_r, vqs2_r;
+    uint32_t HWM, vp8_filter_l, vp8_filter_r, mask_l, mask_r, hev_l, hev_r, subr_r, subr_l;
+    uint32_t Filter2_l, Filter2_r, t1, t2, Filter1_l, Filter1_r, invhev_l, invhev_r;
+    uint32_t N128, R63;
+    uint32_t u1_l, u1_r, u2_l, u2_r, u3_l, u3_r;
+
+    R63  = 0x003F003F;
+    HWM  = 0xFF00FF00;
+    N128 = 0x80808080;
+    t1   = 0x03000300;
+    t2   = 0x04000400;
+
+    vps0 = (*ps0) ^ N128;
+    vps1 = (*ps1) ^ N128;
+    vps2 = (*ps2) ^ N128;
+    vqs0 = (*qs0) ^ N128;
+    vqs1 = (*qs1) ^ N128;
+    vqs2 = (*qs2) ^ N128;
+
+    /* use halfword pairs instead of quad-bytes for accuracy */
+    vps0_l = vps0 & HWM;
+    vps0_r = vps0 << 8;
+    vps0_r = vps0_r & HWM;
+
+    vqs0_l = vqs0 & HWM;
+    vqs0_r = vqs0 << 8;
+    vqs0_r = vqs0_r & HWM;
+
+    vps1_l = vps1 & HWM;
+    vps1_r = vps1 << 8;
+    vps1_r = vps1_r & HWM;
+
+    vqs1_l = vqs1 & HWM;
+    vqs1_r = vqs1 << 8;
+    vqs1_r = vqs1_r & HWM;
+
+    vqs2_l = vqs2 & HWM;
+    vqs2_r = vqs2 << 8;
+    vqs2_r = vqs2_r & HWM;
+
+    __asm__ __volatile__ (
+        /* qs0 - ps0 */
+        "subq_s.ph    %[subr_l],       %[vqs0_l],       %[vps0_l]       \n\t"
+        "subq_s.ph    %[subr_r],       %[vqs0_r],       %[vps0_r]       \n\t"
+
+        /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */
+        "subq_s.ph    %[vp8_filter_l], %[vps1_l],       %[vqs1_l]       \n\t"
+        "subq_s.ph    %[vp8_filter_r], %[vps1_r],       %[vqs1_r]       \n\t"
+
+        : [vp8_filter_l] "=&r" (vp8_filter_l), [vp8_filter_r] "=r" (vp8_filter_r),
+          [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r)
+        : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l),
+          [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r),
+          [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r)
+    );
+
+    vps2_l = vps2 & HWM;
+    vps2_r = vps2 << 8;
+    vps2_r = vps2_r & HWM;
+
+    /* add outer taps if we have high edge variance */
+    __asm__ __volatile__ (
+        /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+        "and          %[mask_l],       %[HWM],          %[mask]         \n\t"
+        "sll          %[mask_r],       %[mask],         8               \n\t"
+        "and          %[mask_r],       %[HWM],          %[mask_r]       \n\t"
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+        "and          %[hev_l],        %[HWM],          %[hev]          \n\t"
+        "sll          %[hev_r],        %[hev],          8               \n\t"
+        "and          %[hev_r],        %[HWM],          %[hev_r]        \n\t"
+        "addq_s.ph    %[vp8_filter_l], %[vp8_filter_l], %[subr_l]       \n\t"
+        "addq_s.ph    %[vp8_filter_r], %[vp8_filter_r], %[subr_r]       \n\t"
+
+        /* vp8_filter &= mask; */
+        "and          %[vp8_filter_l], %[vp8_filter_l], %[mask_l]       \n\t"
+        "and          %[vp8_filter_r], %[vp8_filter_r], %[mask_r]       \n\t"
+
+        /* Filter2 = vp8_filter & hev; */
+        "and          %[Filter2_l],    %[vp8_filter_l], %[hev_l]        \n\t"
+        "and          %[Filter2_r],    %[vp8_filter_r], %[hev_r]        \n\t"
+
+        : [vp8_filter_l] "+r" (vp8_filter_l), [vp8_filter_r] "+r" (vp8_filter_r),
+          [hev_l] "=&r" (hev_l), [hev_r] "=&r" (hev_r),
+          [mask_l] "=&r" (mask_l), [mask_r] "=&r" (mask_r),
+          [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r)
+        : [subr_l] "r" (subr_l), [subr_r] "r" (subr_r),
+          [HWM] "r" (HWM), [hev]  "r" (hev), [mask] "r" (mask)
+    );
+
+    /* save bottom 3 bits so that we round one side +4 and the other +3 */
+    __asm__ __volatile__ (
+        /* Filter1 = vp8_signed_char_clamp(Filter2 + 4) >>= 3; */
+        "addq_s.ph    %[Filter1_l],    %[Filter2_l],    %[t2]           \n\t"
+        "xor          %[invhev_l],     %[hev_l],        %[HWM]          \n\t"
+        "addq_s.ph    %[Filter1_r],    %[Filter2_r],    %[t2]           \n\t"
+
+        /* Filter2 = vp8_signed_char_clamp(Filter2 + 3) >>= 3; */
+        "addq_s.ph    %[Filter2_l],    %[Filter2_l],    %[t1]           \n\t"
+        "addq_s.ph    %[Filter2_r],    %[Filter2_r],    %[t1]           \n\t"
+
+        "shra.ph      %[Filter1_l],    %[Filter1_l],    3               \n\t"
+        "shra.ph      %[Filter1_r],    %[Filter1_r],    3               \n\t"
+
+        "shra.ph      %[Filter2_l],    %[Filter2_l],    3               \n\t"
+        "shra.ph      %[Filter2_r],    %[Filter2_r],    3               \n\t"
+        "and          %[Filter1_l],    %[Filter1_l],    %[HWM]          \n\t"
+        "and          %[Filter1_r],    %[Filter1_r],    %[HWM]          \n\t"
+        "xor          %[invhev_r],     %[hev_r],        %[HWM]          \n\t"
+
+        /* qs0 = vp8_signed_char_clamp(qs0 - Filter1); */
+        "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[Filter1_l]    \n\t"
+        "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[Filter1_r]    \n\t"
+
+        /* ps0 = vp8_signed_char_clamp(ps0 + Filter2); */
+        "addq_s.ph    %[vps0_l],       %[vps0_l],       %[Filter2_l]    \n\t"
+        "addq_s.ph    %[vps0_r],       %[vps0_r],       %[Filter2_r]    \n\t"
+
+        : [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r),
+          [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r),
+          [Filter2_l] "+r" (Filter2_l), [Filter2_r] "+r" (Filter2_r),
+          [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
+          [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r)
+        : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM),
+          [hev_l] "r" (hev_l), [hev_r] "r" (hev_r)
+    );
+
+    /* only apply wider filter if not high edge variance */
+    __asm__ __volatile__ (
+        /* vp8_filter &= ~hev; */
+        "and          %[Filter2_l],    %[vp8_filter_l], %[invhev_l]     \n\t"
+        "and          %[Filter2_r],    %[vp8_filter_r], %[invhev_r]     \n\t"
+
+        "shra.ph      %[Filter2_l],    %[Filter2_l],    8               \n\t"
+        "shra.ph      %[Filter2_r],    %[Filter2_r],    8               \n\t"
+
+        : [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r)
+        : [vp8_filter_l] "r" (vp8_filter_l), [vp8_filter_r] "r" (vp8_filter_r),
+          [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r)
+    );
+
+    /* roughly 3/7th difference across boundary */
+    __asm__ __volatile__ (
+        "shll.ph      %[u3_l],         %[Filter2_l],    3               \n\t"
+        "shll.ph      %[u3_r],         %[Filter2_r],    3               \n\t"
+
+        "addq.ph      %[u3_l],         %[u3_l],         %[Filter2_l]    \n\t"
+        "addq.ph      %[u3_r],         %[u3_r],         %[Filter2_r]    \n\t"
+
+        "shll.ph      %[u2_l],         %[u3_l],         1               \n\t"
+        "shll.ph      %[u2_r],         %[u3_r],         1               \n\t"
+
+        "addq.ph      %[u1_l],         %[u3_l],         %[u2_l]         \n\t"
+        "addq.ph      %[u1_r],         %[u3_r],         %[u2_r]         \n\t"
+
+        "addq.ph      %[u2_l],         %[u2_l],         %[R63]          \n\t"
+        "addq.ph      %[u2_r],         %[u2_r],         %[R63]          \n\t"
+
+        "addq.ph      %[u3_l],         %[u3_l],         %[R63]          \n\t"
+        "addq.ph      %[u3_r],         %[u3_r],         %[R63]          \n\t"
+
+        /* vp8_signed_char_clamp((63 + Filter2 * 27) >> 7)
+         * vp8_signed_char_clamp((63 + Filter2 * 18) >> 7)
+         */
+        "addq.ph      %[u1_l],         %[u1_l],         %[R63]          \n\t"
+        "addq.ph      %[u1_r],         %[u1_r],         %[R63]          \n\t"
+        "shra.ph      %[u1_l],         %[u1_l],         7               \n\t"
+        "shra.ph      %[u1_r],         %[u1_r],         7               \n\t"
+        "shra.ph      %[u2_l],         %[u2_l],         7               \n\t"
+        "shra.ph      %[u2_r],         %[u2_r],         7               \n\t"
+        "shll.ph      %[u1_l],         %[u1_l],         8               \n\t"
+        "shll.ph      %[u1_r],         %[u1_r],         8               \n\t"
+        "shll.ph      %[u2_l],         %[u2_l],         8               \n\t"
+        "shll.ph      %[u2_r],         %[u2_r],         8               \n\t"
+
+        /* vqs0 = vp8_signed_char_clamp(qs0 - u); */
+        "subq_s.ph    %[vqs0_l],       %[vqs0_l],       %[u1_l]         \n\t"
+        "subq_s.ph    %[vqs0_r],       %[vqs0_r],       %[u1_r]         \n\t"
+
+        /* vps0 = vp8_signed_char_clamp(ps0 + u); */
+        "addq_s.ph    %[vps0_l],       %[vps0_l],       %[u1_l]         \n\t"
+        "addq_s.ph    %[vps0_r],       %[vps0_r],       %[u1_r]         \n\t"
+
+        : [u1_l] "=&r" (u1_l), [u1_r] "=&r" (u1_r), [u2_l] "=&r" (u2_l),
+          [u2_r] "=&r" (u2_r), [u3_l] "=&r" (u3_l), [u3_r] "=&r" (u3_r),
+          [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
+          [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r)
+        : [R63]  "r" (R63),
+          [Filter2_l] "r" (Filter2_l), [Filter2_r] "r" (Filter2_r)
+    );
+
+    __asm__ __volatile__ (
+        /* vqs1 = vp8_signed_char_clamp(qs1 - u);
+         * vps1 = vp8_signed_char_clamp(ps1 + u);
+         */
+        "subq_s.ph    %[vqs1_l],       %[vqs1_l],       %[u2_l]         \n\t"
+        "addq_s.ph    %[vps1_l],       %[vps1_l],       %[u2_l]         \n\t"
+        "addq_s.ph    %[vps1_r],       %[vps1_r],       %[u2_r]         \n\t"
+        "subq_s.ph    %[vqs1_r],       %[vqs1_r],       %[u2_r]         \n\t"
+
+        : [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r),
+          [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r)
+        : [u2_l] "r" (u2_l), [u2_r] "r" (u2_r)
+    );
+
+    /* roughly 1/7th difference across boundary */
+    __asm__ __volatile__ (
+        /* u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); */
+        "shra.ph      %[u3_l],         %[u3_l],         7               \n\t"
+        "shra.ph      %[u3_r],         %[u3_r],         7               \n\t"
+        "shll.ph      %[u3_l],         %[u3_l],         8               \n\t"
+        "shll.ph      %[u3_r],         %[u3_r],         8               \n\t"
+
+        /* vqs2 = vp8_signed_char_clamp(qs2 - u); */
+        "subq_s.ph    %[vqs2_l],       %[vqs2_l],       %[u3_l]         \n\t"
+        "subq_s.ph    %[vqs2_r],       %[vqs2_r],       %[u3_r]         \n\t"
+
+        /* vps2 = vp8_signed_char_clamp(ps2 + u); */
+        "addq_s.ph    %[vps2_l],       %[vps2_l],       %[u3_l]         \n\t"
+        "addq_s.ph    %[vps2_r],       %[vps2_r],       %[u3_r]         \n\t"
+
+        : [u3_l] "+r" (u3_l), [u3_r] "+r" (u3_r), [vps2_l] "+r" (vps2_l),
+          [vps2_r] "+r" (vps2_r), [vqs2_l] "+r" (vqs2_l), [vqs2_r] "+r" (vqs2_r)
+        :
+    );
+
+    /* Create quad-bytes from halfword pairs */
+    __asm__ __volatile__ (
+        "and          %[vqs0_l],       %[vqs0_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vqs0_r],       %[vqs0_r],       8               \n\t"
+
+        "and          %[vps0_l],       %[vps0_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vps0_r],       %[vps0_r],       8               \n\t"
+
+        "and          %[vqs1_l],       %[vqs1_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vqs1_r],       %[vqs1_r],       8               \n\t"
+
+        "and          %[vps1_l],       %[vps1_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vps1_r],       %[vps1_r],       8               \n\t"
+
+        "and          %[vqs2_l],       %[vqs2_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vqs2_r],       %[vqs2_r],       8               \n\t"
+
+        "and          %[vps2_l],       %[vps2_l],       %[HWM]          \n\t"
+        "shrl.ph      %[vps2_r],       %[vps2_r],       8               \n\t"
+
+        "or           %[vqs0_r],       %[vqs0_l],       %[vqs0_r]       \n\t"
+        "or           %[vps0_r],       %[vps0_l],       %[vps0_r]       \n\t"
+        "or           %[vqs1_r],       %[vqs1_l],       %[vqs1_r]       \n\t"
+        "or           %[vps1_r],       %[vps1_l],       %[vps1_r]       \n\t"
+        "or           %[vqs2_r],       %[vqs2_l],       %[vqs2_r]       \n\t"
+        "or           %[vps2_r],       %[vps2_l],       %[vps2_r]       \n\t"
+
+        : [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), [vqs1_l] "+r" (vqs1_l),
+          [vqs1_r] "+r" (vqs1_r), [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r),
+          [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r), [vqs2_l] "+r" (vqs2_l),
+          [vqs2_r] "+r" (vqs2_r), [vps2_r] "+r" (vps2_r), [vps2_l] "+r" (vps2_l)
+        : [HWM] "r" (HWM)
+    );
+
+    *ps0 = vps0_r ^ N128;
+    *ps1 = vps1_r ^ N128;
+    *ps2 = vps2_r ^ N128;
+    *qs0 = vqs0_r ^ N128;
+    *qs1 = vqs1_r ^ N128;
+    *qs2 = vqs2_r ^ N128;
+}
+
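For reference, the arithmetic that the packed-halfword sequences above perform on four pixels at once can be written per pixel in plain C roughly as follows. This is an illustrative sketch only: clamp128 and mbfilter_core_sketch are our names (not part of the patch), the masked filter value and the per-pixel hev flag are assumed to have been computed earlier in the function (as in the reference vp8_mbfilter C code), and the byte packing plus the final "^ N128" sign-offset conversion are omitted.

    /* vp8_signed_char_clamp() equivalent: clamp to [-128, 127] */
    static signed char clamp128(int t)
    {
        return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    /* One pixel of the wide (macroblock) filter, mirroring the comments in
     * the asm above.  "hev" is assumed to be 0 or ~0 for this pixel.
     */
    static void mbfilter_core_sketch(signed char filter, signed char hev,
                                     signed char *ps2, signed char *ps1,
                                     signed char *ps0, signed char *qs0,
                                     signed char *qs1, signed char *qs2)
    {
        signed char Filter1, Filter2, u;

        /* narrow adjustment of the two pixels nearest the edge */
        Filter2 = filter & hev;
        Filter1 = clamp128(Filter2 + 4) >> 3;
        Filter2 = clamp128(Filter2 + 3) >> 3;
        *qs0 = clamp128(*qs0 - Filter1);
        *ps0 = clamp128(*ps0 + Filter2);

        /* only apply wider filter if not high edge variance */
        filter &= ~hev;

        u = clamp128((63 + filter * 27) >> 7);   /* roughly 3/7 of the difference */
        *qs0 = clamp128(*qs0 - u);
        *ps0 = clamp128(*ps0 + u);

        u = clamp128((63 + filter * 18) >> 7);   /* roughly 2/7 */
        *qs1 = clamp128(*qs1 - u);
        *ps1 = clamp128(*ps1 + u);

        u = clamp128((63 + filter * 9) >> 7);    /* roughly 1/7 */
        *qs2 = clamp128(*qs2 - u);
        *ps2 = clamp128(*ps2 + u);
    }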
+void vp8_mbloop_filter_horizontal_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    int i;
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+
+    mask = 0;
+    hev = 0;
+    i = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    sm1 = s - (p << 2);
+    s0  = s - p - p - p;
+    s1  = s - p - p;
+    s2  = s - p;
+    s3  = s;
+    s4  = s + p;
+    s5  = s + p + p;
+    s6  = s + p + p + p;
+
+    /* prefetch data for load */
+    prefetch_load_lf(s + p);
+
+    /* apply filter on 4 pixels at a time */
+    do
+    {
+        /* load quad-byte vectors
+         * memory is 4 byte aligned
+         */
+        p1 = *((uint32_t *)(s1));
+        p2 = *((uint32_t *)(s2));
+        p3 = *((uint32_t *)(s3));
+        p4 = *((uint32_t *)(s4));
+
+        /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+         * mask will be zero and filtering is not needed
+         */
+        if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+        {
+
+            pm1 = *((uint32_t *)(sm1));
+            p0  = *((uint32_t *)(s0));
+            p5  = *((uint32_t *)(s5));
+            p6  = *((uint32_t *)(s6));
+
+            vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                     thresh, &hev, &mask);
+
+            /* if mask == 0, filtering is not needed */
+            if (mask)
+            {
+                /* filtering */
+                vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+                /* unpack processed 4x4 neighborhood
+                 * memory is 4 byte aligned
+                 */
+                *((uint32_t *)s0) = p0;
+                *((uint32_t *)s1) = p1;
+                *((uint32_t *)s2) = p2;
+                *((uint32_t *)s3) = p3;
+                *((uint32_t *)s4) = p4;
+                *((uint32_t *)s5) = p5;
+            }
+        }
+
+        sm1 += 4;
+        s0  += 4;
+        s1  += 4;
+        s2  += 4;
+        s3  += 4;
+        s4  += 4;
+        s5  += 4;
+        s6  += 4;
+
+        /* load quad-byte vectors
+         * memory is 4 byte aligned
+         */
+        p1 = *((uint32_t *)(s1));
+        p2 = *((uint32_t *)(s2));
+        p3 = *((uint32_t *)(s3));
+        p4 = *((uint32_t *)(s4));
+
+        /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+         * mask will be zero and filtering is not needed
+         */
+        if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+        {
+
+            pm1 = *((uint32_t *)(sm1));
+            p0  = *((uint32_t *)(s0));
+            p5  = *((uint32_t *)(s5));
+            p6  = *((uint32_t *)(s6));
+
+            vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                     thresh, &hev, &mask);
+
+            /* if mask == 0, filtering is not needed */
+            if (mask)
+            {
+                /* filtering */
+                vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+                /* unpack processed 4x4 neighborhood
+                 * memory is 4 byte aligned
+                 */
+                *((uint32_t *)s0) = p0;
+                *((uint32_t *)s1) = p1;
+                *((uint32_t *)s2) = p2;
+                *((uint32_t *)s3) = p3;
+                *((uint32_t *)s4) = p4;
+                *((uint32_t *)s5) = p5;
+            }
+        }
+
+        sm1 += 4;
+        s0  += 4;
+        s1  += 4;
+        s2  += 4;
+        s3  += 4;
+        s4  += 4;
+        s5  += 4;
+        s6  += 4;
+
+        i += 8;
+    }
+
+    while (i < count);
+}
+
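The do/while body above is unrolled twice, with each pass handling a group of four horizontally adjacent pixels. The cheap equality test at the top of each pass exploits the fact that when the row pairs straddling the edge are identical, the filter value is zero and the pixels would come out unchanged, so the mask computation and filtering can be skipped. A scalar sketch of that early-out (group_needs_filtering is our name, not part of the patch; the casts assume the 4-byte alignment noted in the comments):

    #include <stdint.h>

    /* Returns non-zero when the 4-pixel group around the horizontal edge at
     * s may actually change; when both row pairs across the edge are equal,
     * the filter value is zero and the group can be skipped.
     */
    static int group_needs_filtering(const unsigned char *s, int p)
    {
        uint32_t p1 = *(const uint32_t *)(s - 2 * p);   /* two rows above the edge */
        uint32_t p2 = *(const uint32_t *)(s - p);       /* row above the edge      */
        uint32_t p3 = *(const uint32_t *)(s);           /* row below the edge      */
        uint32_t p4 = *(const uint32_t *)(s + p);       /* two rows below the edge */

        return !((p1 == p4) && (p2 == p3));
    }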
+void vp8_mbloop_filter_uvhorizontal_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6;
+
+    mask = 0;
+    hev = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    sm1 = s - (p << 2);
+    s0  = s - p - p - p;
+    s1  = s - p - p;
+    s2  = s - p;
+    s3  = s;
+    s4  = s + p;
+    s5  = s + p + p;
+    s6  = s + p + p + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        /* if mask == 0, filtering is not needed */
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        if (mask)
+        {
+            /* filtering */
+            vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+            /* unpack processed 4x4 neighborhood
+             * memory is 4 byte aligned
+             */
+            *((uint32_t *)s0) = p0;
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+            *((uint32_t *)s5) = p5;
+        }
+    }
+
+    sm1 += 4;
+    s0  += 4;
+    s1  += 4;
+    s2  += 4;
+    s3  += 4;
+    s4  += 4;
+    s5  += 4;
+    s6  += 4;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p1 = *((uint32_t *)(s1));
+    p2 = *((uint32_t *)(s2));
+    p3 = *((uint32_t *)(s3));
+    p4 = *((uint32_t *)(s4));
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        pm1 = *((uint32_t *)(sm1));
+        p0  = *((uint32_t *)(s0));
+        p5  = *((uint32_t *)(s5));
+        p6  = *((uint32_t *)(s6));
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+            /* unpack processed 4x4 neighborhood
+             * memory is 4 byte aligned
+             */
+            *((uint32_t *)s0) = p0;
+            *((uint32_t *)s1) = p1;
+            *((uint32_t *)s2) = p2;
+            *((uint32_t *)s3) = p3;
+            *((uint32_t *)s4) = p4;
+            *((uint32_t *)s5) = p5;
+        }
+    }
+}
+
+
+void vp8_mbloop_filter_vertical_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+
+    int i;
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *s1, *s2, *s3, *s4;
+    uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+    mask = 0;
+    hev = 0;
+    i = 0;
+    pm1 = 0;
+    p0 = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+    p5 = 0;
+    p6 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    /* apply filter on 4 pixels at a time */
+    do
+    {
+        s1 = s;
+        s2 = s + p;
+        s3 = s2 + p;
+        s4 = s3 + p;
+        s  = s4 + p;
+
+        /* load quad-byte vectors
+         * memory is 4 byte aligned
+         */
+        p2  = *((uint32_t *)(s1 - 4));
+        p6  = *((uint32_t *)(s1));
+        p1  = *((uint32_t *)(s2 - 4));
+        p5  = *((uint32_t *)(s2));
+        p0  = *((uint32_t *)(s3 - 4));
+        p4  = *((uint32_t *)(s3));
+        pm1 = *((uint32_t *)(s4 - 4));
+        p3  = *((uint32_t *)(s4));
+
+        /* transpose pm1, p0, p1, p2 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+            "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+            "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+            "append         %[p1],      %[sec3],    16          \n\t"
+            "append         %[pm1],     %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* transpose p3, p4, p5, p6 */
+        __asm__ __volatile__ (
+            "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+            "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+            "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+            "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+            "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+            "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+            "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+            "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+            "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+            "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+            "append         %[p5],      %[sec3],    16          \n\t"
+            "append         %[p3],      %[sec4],    16          \n\t"
+
+            : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+              [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+              [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+              [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+            :
+        );
+
+        /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+         * mask will be zero and filtering is not needed
+         */
+        if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+        {
+
+            vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                     thresh, &hev, &mask);
+
+            /* if mask == 0, filtering is not needed */
+            if (mask)
+            {
+                /* filtering */
+                vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+                /* don't use transpose on output data
+                 * because memory isn't aligned
+                 */
+                __asm__ __volatile__ (
+                    "sb         %[p5],  2(%[s4])        \n\t"
+                    "sb         %[p4],  1(%[s4])        \n\t"
+                    "sb         %[p3],  0(%[s4])        \n\t"
+                    "sb         %[p2], -1(%[s4])        \n\t"
+                    "sb         %[p1], -2(%[s4])        \n\t"
+                    "sb         %[p0], -3(%[s4])        \n\t"
+                    :
+                    : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                      [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p5], %[p5], 8         \n\t"
+                    "srl        %[p4], %[p4], 8         \n\t"
+                    "srl        %[p3], %[p3], 8         \n\t"
+                    "srl        %[p2], %[p2], 8         \n\t"
+                    "srl        %[p1], %[p1], 8         \n\t"
+                    "srl        %[p0], %[p0], 8         \n\t"
+                    : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                      [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p5],  2(%[s3])        \n\t"
+                    "sb         %[p4],  1(%[s3])        \n\t"
+                    "sb         %[p3],  0(%[s3])        \n\t"
+                    "sb         %[p2], -1(%[s3])        \n\t"
+                    "sb         %[p1], -2(%[s3])        \n\t"
+                    "sb         %[p0], -3(%[s3])        \n\t"
+                    :
+                    : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3),
+                      [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p5], %[p5], 8         \n\t"
+                    "srl        %[p4], %[p4], 8         \n\t"
+                    "srl        %[p3], %[p3], 8         \n\t"
+                    "srl        %[p2], %[p2], 8         \n\t"
+                    "srl        %[p1], %[p1], 8         \n\t"
+                    "srl        %[p0], %[p0], 8         \n\t"
+                    : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                      [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p5],  2(%[s2])        \n\t"
+                    "sb         %[p4],  1(%[s2])        \n\t"
+                    "sb         %[p3],  0(%[s2])        \n\t"
+                    "sb         %[p2], -1(%[s2])        \n\t"
+                    "sb         %[p1], -2(%[s2])        \n\t"
+                    "sb         %[p0], -3(%[s2])        \n\t"
+                    :
+                    : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                      [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+                );
+
+                __asm__ __volatile__ (
+                    "srl        %[p5], %[p5], 8         \n\t"
+                    "srl        %[p4], %[p4], 8         \n\t"
+                    "srl        %[p3], %[p3], 8         \n\t"
+                    "srl        %[p2], %[p2], 8         \n\t"
+                    "srl        %[p1], %[p1], 8         \n\t"
+                    "srl        %[p0], %[p0], 8         \n\t"
+                    : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                      [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                    :
+                );
+
+                __asm__ __volatile__ (
+                    "sb         %[p5],  2(%[s1])        \n\t"
+                    "sb         %[p4],  1(%[s1])        \n\t"
+                    "sb         %[p3],  0(%[s1])        \n\t"
+                    "sb         %[p2], -1(%[s1])        \n\t"
+                    "sb         %[p1], -2(%[s1])        \n\t"
+                    "sb         %[p0], -3(%[s1])        \n\t"
+                    :
+                    : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                      [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+                );
+            }
+        }
+
+        i += 4;
+    }
+
+    while (i < count);
+}
+
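The precrq/precr/append sequences in the vertical-edge functions implement a 4x4 byte transpose in registers: four words that each hold four horizontally adjacent pixels of one row are rearranged so that each word holds one column, which lets the same packed filter code be reused for vertical edges. A plain-C sketch of the data movement (transpose4x4_bytes_sketch is our name; the exact byte-lane order in the real code depends on the DSP instructions and endianness, so this only shows the row/column exchange):

    #include <stdint.h>

    /* Transpose a 4x4 block of bytes held in four 32-bit words
     * (little-endian lane numbering assumed for illustration).
     */
    static void transpose4x4_bytes_sketch(const uint32_t in[4], uint32_t out[4])
    {
        int row, col;

        for (col = 0; col < 4; col++)
        {
            uint32_t w = 0;

            for (row = 0; row < 4; row++)
            {
                /* byte 'col' of input row 'row' becomes byte 'row' of output word 'col' */
                w |= ((in[row] >> (8 * col)) & 0xffu) << (8 * row);
            }

            out[col] = w;
        }
    }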
+void vp8_mbloop_filter_uvvertical_edge_mips
+(
+    unsigned char *s,
+    int p,
+    unsigned int flimit,
+    unsigned int limit,
+    unsigned int thresh,
+    int count
+)
+{
+    uint32_t mask, hev;
+    uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;
+    unsigned char *s1, *s2, *s3, *s4;
+    uint32_t prim1, prim2, sec3, sec4, prim3, prim4;
+
+    mask = 0;
+    hev = 0;
+    pm1 = 0;
+    p0 = 0;
+    p1 = 0;
+    p2 = 0;
+    p3 = 0;
+    p4 = 0;
+    p5 = 0;
+    p6 = 0;
+
+    /* loop filter designed to work using chars so that we can make maximum use
+     * of 8 bit simd instructions.
+     */
+
+    /* apply filter on 4 pixels at a time */
+
+    s1 = s;
+    s2 = s + p;
+    s3 = s2 + p;
+    s4 = s3 + p;
+
+    /* prefetch data for load */
+    prefetch_load_lf(s + 2 * p);
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p2  = *((uint32_t *)(s1 - 4));
+    p6  = *((uint32_t *)(s1));
+    p1  = *((uint32_t *)(s2 - 4));
+    p5  = *((uint32_t *)(s2));
+    p0  = *((uint32_t *)(s3 - 4));
+    p4  = *((uint32_t *)(s3));
+    pm1 = *((uint32_t *)(s4 - 4));
+    p3  = *((uint32_t *)(s4));
+
+    /* transpose pm1, p0, p1, p2 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+        "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+        "append         %[p1],      %[sec3],    16          \n\t"
+        "append         %[pm1],     %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* transpose p3, p4, p5, p6 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+        "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+        "append         %[p5],      %[sec3],    16          \n\t"
+        "append         %[p3],      %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,
+                                 thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+            /* don't use transpose on output data
+             * because memory isn't aligned
+             */
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s4])        \n\t"
+                "sb         %[p4],  1(%[s4])        \n\t"
+                "sb         %[p3],  0(%[s4])        \n\t"
+                "sb         %[p2], -1(%[s4])        \n\t"
+                "sb         %[p1], -2(%[s4])        \n\t"
+                "sb         %[p0], -3(%[s4])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s3])        \n\t"
+                "sb         %[p4],  1(%[s3])        \n\t"
+                "sb         %[p3],  0(%[s3])        \n\t"
+                "sb         %[p2], -1(%[s3])        \n\t"
+                "sb         %[p1], -2(%[s3])        \n\t"
+                "sb         %[p0], -3(%[s3])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s2])        \n\t"
+                "sb         %[p4],  1(%[s2])        \n\t"
+                "sb         %[p3],  0(%[s2])        \n\t"
+                "sb         %[p2], -1(%[s2])        \n\t"
+                "sb         %[p1], -2(%[s2])        \n\t"
+                "sb         %[p0], -3(%[s2])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s1])        \n\t"
+                "sb         %[p4],  1(%[s1])        \n\t"
+                "sb         %[p3],  0(%[s1])        \n\t"
+                "sb         %[p2], -1(%[s1])        \n\t"
+                "sb         %[p1], -2(%[s1])        \n\t"
+                "sb         %[p0], -3(%[s1])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+        }
+    }
+
+    s1 = s4 + p;
+    s2 = s1 + p;
+    s3 = s2 + p;
+    s4 = s3 + p;
+
+    /* load quad-byte vectors
+     * memory is 4 byte aligned
+     */
+    p2  = *((uint32_t *)(s1 - 4));
+    p6  = *((uint32_t *)(s1));
+    p1  = *((uint32_t *)(s2 - 4));
+    p5  = *((uint32_t *)(s2));
+    p0  = *((uint32_t *)(s3 - 4));
+    p4  = *((uint32_t *)(s3));
+    pm1 = *((uint32_t *)(s4 - 4));
+    p3  = *((uint32_t *)(s4));
+
+    /* transpose pm1, p0, p1, p2 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p2],      %[p1]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p2],      %[p1]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p0],      %[pm1]      \n\t"
+        "precr.qb.ph    %[prim4],   %[p0],      %[pm1]      \n\t"
+
+        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[pm1],     %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p2],      %[p1],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p0],      %[pm1],     %[sec4]     \n\t"
+        "append         %[p1],      %[sec3],    16          \n\t"
+        "append         %[pm1],     %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* transpose p3, p4, p5, p6 */
+    __asm__ __volatile__ (
+        "precrq.qb.ph   %[prim1],   %[p6],      %[p5]       \n\t"
+        "precr.qb.ph    %[prim2],   %[p6],      %[p5]       \n\t"
+        "precrq.qb.ph   %[prim3],   %[p4],      %[p3]       \n\t"
+        "precr.qb.ph    %[prim4],   %[p4],      %[p3]       \n\t"
+
+        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
+        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
+        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
+        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"
+
+        "precrq.ph.w    %[p6],      %[p5],      %[sec3]     \n\t"
+        "precrq.ph.w    %[p4],      %[p3],      %[sec4]     \n\t"
+        "append         %[p5],      %[sec3],    16          \n\t"
+        "append         %[p3],      %[sec4],    16          \n\t"
+
+        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
+          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
+          [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
+        :
+    );
+
+    /* if (p1 - p4 == 0) and (p2 - p3 == 0)
+     * mask will be zero and filtering is not needed
+     */
+    if (!(((p1 - p4) == 0) && ((p2 - p3) == 0)))
+    {
+
+        vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, thresh, &hev, &mask);
+
+        /* if mask == 0, filtering is not needed */
+        if (mask)
+        {
+            /* filtering */
+            vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5);
+
+            /* don't use transpose on output data
+             * because memory isn't aligned
+             */
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s4])        \n\t"
+                "sb         %[p4],  1(%[s4])        \n\t"
+                "sb         %[p3],  0(%[s4])        \n\t"
+                "sb         %[p2], -1(%[s4])        \n\t"
+                "sb         %[p1], -2(%[s4])        \n\t"
+                "sb         %[p0], -3(%[s4])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s3])        \n\t"
+                "sb         %[p4],  1(%[s3])        \n\t"
+                "sb         %[p3],  0(%[s3])        \n\t"
+                "sb         %[p2], -1(%[s3])        \n\t"
+                "sb         %[p1], -2(%[s3])        \n\t"
+                "sb         %[p0], -3(%[s3])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s2])        \n\t"
+                "sb         %[p4],  1(%[s2])        \n\t"
+                "sb         %[p3],  0(%[s2])        \n\t"
+                "sb         %[p2], -1(%[s2])        \n\t"
+                "sb         %[p1], -2(%[s2])        \n\t"
+                "sb         %[p0], -3(%[s2])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+
+            __asm__ __volatile__ (
+                "srl        %[p5], %[p5], 8         \n\t"
+                "srl        %[p4], %[p4], 8         \n\t"
+                "srl        %[p3], %[p3], 8         \n\t"
+                "srl        %[p2], %[p2], 8         \n\t"
+                "srl        %[p1], %[p1], 8         \n\t"
+                "srl        %[p0], %[p0], 8         \n\t"
+                : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3),
+                  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0)
+                :
+            );
+
+            __asm__ __volatile__ (
+                "sb         %[p5],  2(%[s1])        \n\t"
+                "sb         %[p4],  1(%[s1])        \n\t"
+                "sb         %[p3],  0(%[s1])        \n\t"
+                "sb         %[p2], -1(%[s1])        \n\t"
+                "sb         %[p1], -2(%[s1])        \n\t"
+                "sb         %[p0], -3(%[s1])        \n\t"
+                :
+                : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1),
+                  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0)
+            );
+        }
+    }
+}
+
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi)
+{
+    unsigned int thresh_vec, flimit_vec, limit_vec;
+    unsigned char thresh, flimit, limit, flimit_temp;
+
+    /* use direct values instead of pointers */
+    limit = *(lfi->lim);
+    flimit_temp = *(lfi->mblim);
+    thresh = *(lfi->hev_thr);
+    flimit = flimit_temp;
+
+    /* create quad-byte */
+    __asm__ __volatile__ (
+        "replv.qb       %[thresh_vec], %[thresh]    \n\t"
+        "replv.qb       %[flimit_vec], %[flimit]    \n\t"
+        "replv.qb       %[limit_vec],  %[limit]     \n\t"
+        : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec)
+        : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit)
+    );
+
+    vp8_mbloop_filter_horizontal_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+
+    if (u_ptr)
+    {
+        vp8_mbloop_filter_uvhorizontal_edge_mips(u_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+    }
+
+    if (v_ptr)
+    {
+        vp8_mbloop_filter_uvhorizontal_edge_mips(v_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+    }
+}
+
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi)
+{
+    unsigned int thresh_vec, flimit_vec, limit_vec;
+    unsigned char thresh, flimit, limit, flimit_temp;
+
+    /* use direct values instead of pointers */
+    limit = *(lfi->lim);
+    flimit_temp = *(lfi->mblim);
+    thresh = *(lfi->hev_thr);
+    flimit = flimit_temp;
+
+    /* create quad-byte */
+    __asm__ __volatile__ (
+        "replv.qb       %[thresh_vec], %[thresh]    \n\t"
+        "replv.qb       %[flimit_vec], %[flimit]    \n\t"
+        "replv.qb       %[limit_vec],  %[limit]     \n\t"
+        : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec)
+        : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit)
+    );
+
+    vp8_mbloop_filter_vertical_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+
+    if (u_ptr)
+        vp8_mbloop_filter_uvvertical_edge_mips(u_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+
+    if (v_ptr)
+        vp8_mbloop_filter_uvvertical_edge_mips(v_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+}
+
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi)
+{
+    unsigned int thresh_vec, flimit_vec, limit_vec;
+    unsigned char thresh, flimit, limit, flimit_temp;
+
+    /* use direct values instead of pointers */
+    limit = *(lfi->lim);
+    flimit_temp = *(lfi->blim);
+    thresh = *(lfi->hev_thr);
+    flimit = flimit_temp;
+
+    /* create quad-byte */
+    __asm__ __volatile__ (
+        "replv.qb       %[thresh_vec], %[thresh]    \n\t"
+        "replv.qb       %[flimit_vec], %[flimit]    \n\t"
+        "replv.qb       %[limit_vec],  %[limit]     \n\t"
+        : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec)
+        : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit)
+    );
+
+    vp8_loop_filter_horizontal_edge_mips(y_ptr + 4 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+    vp8_loop_filter_horizontal_edge_mips(y_ptr + 8 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+    vp8_loop_filter_horizontal_edge_mips(y_ptr + 12 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+
+    if (u_ptr)
+        vp8_loop_filter_uvhorizontal_edge_mips(u_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+
+    if (v_ptr)
+        vp8_loop_filter_uvhorizontal_edge_mips(v_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+}
+
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi)
+{
+    unsigned int thresh_vec, flimit_vec, limit_vec;
+    unsigned char thresh, flimit, limit, flimit_temp;
+
+    /* use direct values instead of pointers */
+    limit = *(lfi->lim);
+    flimit_temp = *(lfi->blim);
+    thresh = *(lfi->hev_thr);
+    flimit = flimit_temp;
+
+    /* create quad-byte */
+    __asm__ __volatile__ (
+        "replv.qb       %[thresh_vec], %[thresh]    \n\t"
+        "replv.qb       %[flimit_vec], %[flimit]    \n\t"
+        "replv.qb       %[limit_vec],  %[limit]     \n\t"
+        : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec)
+        : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit)
+    );
+
+    vp8_loop_filter_vertical_edge_mips(y_ptr + 4, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+    vp8_loop_filter_vertical_edge_mips(y_ptr + 8, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+    vp8_loop_filter_vertical_edge_mips(y_ptr + 12, y_stride, flimit_vec, limit_vec, thresh_vec, 16);
+
+    if (u_ptr)
+        vp8_loop_filter_uvvertical_edge_mips(u_ptr + 4, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+
+    if (v_ptr)
+        vp8_loop_filter_uvvertical_edge_mips(v_ptr + 4, uv_stride, flimit_vec, limit_vec, thresh_vec, 0);
+}
+
+#endif
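The replv.qb instructions in the *_dspr2 entry points above replicate a single byte (lim, mblim/blim, hev_thr) into all four byte lanes of a register, so the edge routines can test four pixels per operation. The same value can be produced in plain C as shown below (replicate_byte_sketch is our name, shown only for illustration):

    /* Replicate one byte into all four lanes of a 32-bit word,
     * e.g. 0x3f -> 0x3f3f3f3f.
     */
    static unsigned int replicate_byte_sketch(unsigned char v)
    {
        return (unsigned int)v * 0x01010101u;
    }

    /* e.g. limit_vec corresponds to replicate_byte_sketch(*(lfi->lim)) */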
--- /dev/null
+++ b/vp8/common/mips/dspr2/reconinter_dspr2.c
@@ -1,0 +1,121 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vpx_rtcd.h"
+#include "vpx/vpx_integer.h"
+
+#if HAVE_DSPR2
+inline void prefetch_load_int(unsigned char *src)
+{
+    __asm__ __volatile__ (
+        "pref   0,  0(%[src])   \n\t"
+        :
+        : [src] "r" (src)
+    );
+}
+
+
+__inline void vp8_copy_mem16x16_dspr2(
+    unsigned char *RESTRICT src,
+    int src_stride,
+    unsigned char *RESTRICT dst,
+    int dst_stride)
+{
+    int r;
+    unsigned int a0, a1, a2, a3;
+
+    for (r = 16; r--;)
+    {
+        /* load src data in cache memory */
+        prefetch_load_int(src + src_stride);
+
+        /* use unaligned memory load and store */
+        __asm__ __volatile__ (
+            "ulw    %[a0], 0(%[src])            \n\t"
+            "ulw    %[a1], 4(%[src])            \n\t"
+            "ulw    %[a2], 8(%[src])            \n\t"
+            "ulw    %[a3], 12(%[src])           \n\t"
+            "sw     %[a0], 0(%[dst])            \n\t"
+            "sw     %[a1], 4(%[dst])            \n\t"
+            "sw     %[a2], 8(%[dst])            \n\t"
+            "sw     %[a3], 12(%[dst])           \n\t"
+            : [a0] "=&r" (a0), [a1] "=&r" (a1),
+              [a2] "=&r" (a2), [a3] "=&r" (a3)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+
+__inline void vp8_copy_mem8x8_dspr2(
+    unsigned char *RESTRICT src,
+    int src_stride,
+    unsigned char *RESTRICT dst,
+    int dst_stride)
+{
+    int r;
+    unsigned int a0, a1;
+
+    /* load src data in cache memory */
+    prefetch_load_int(src + src_stride);
+
+    for (r = 8; r--;)
+    {
+        /* use unaligned memory load and store */
+        __asm__ __volatile__ (
+            "ulw    %[a0], 0(%[src])            \n\t"
+            "ulw    %[a1], 4(%[src])            \n\t"
+            "sw     %[a0], 0(%[dst])            \n\t"
+            "sw     %[a1], 4(%[dst])            \n\t"
+            : [a0] "=&r" (a0), [a1] "=&r" (a1)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+
+__inline void vp8_copy_mem8x4_dspr2(
+    unsigned char *RESTRICT src,
+    int src_stride,
+    unsigned char *RESTRICT dst,
+    int dst_stride)
+{
+    int r;
+    unsigned int a0, a1;
+
+    /* load src data in cache memory */
+    prefetch_load_int(src + src_stride);
+
+    for (r = 4; r--;)
+    {
+        /* use unaligned memory load and store */
+        __asm__ __volatile__ (
+            "ulw    %[a0], 0(%[src])            \n\t"
+            "ulw    %[a1], 4(%[src])            \n\t"
+            "sw     %[a0], 0(%[dst])            \n\t"
+            "sw     %[a1], 4(%[dst])            \n\t"
+            : [a0] "=&r" (a0), [a1] "=&r" (a1)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+#endif
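For comparison, the copy routines above are functionally a per-row memcpy; the DSPr2 versions replace it with unaligned 32-bit loads (ulw), word stores, and a prefetch of the next source row, and they rely on the destination being word-aligned. A portable sketch (copy_mem_sketch is our name, not part of the patch):

    #include <string.h>

    /* Copy a width x height block of bytes between two strided buffers. */
    static void copy_mem_sketch(const unsigned char *src, int src_stride,
                                unsigned char *dst, int dst_stride,
                                int width, int height)
    {
        int r;

        for (r = 0; r < height; r++)
        {
            memcpy(dst, src, (size_t)width);   /* width is 16 or 8 here */
            src += src_stride;
            dst += dst_stride;
        }
    }

    /* e.g. vp8_copy_mem8x4_dspr2 corresponds to copy_mem_sketch(src, sp, dst, dp, 8, 4) */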
--- a/vp8/common/rtcd_defs.sh
+++ b/vp8/common/rtcd_defs.sh
@@ -22,35 +22,42 @@
 vp8_dequantize_b_media=vp8_dequantize_b_v6
 
 prototype void vp8_dequant_idct_add "short *input, short *dq, unsigned char *output, int stride"
-specialize vp8_dequant_idct_add mmx media neon
+specialize vp8_dequant_idct_add mmx media neon dspr2
 vp8_dequant_idct_add_media=vp8_dequant_idct_add_v6
+vp8_dequant_idct_add_dspr2=vp8_dequant_idct_add_dspr2
 
 prototype void vp8_dequant_idct_add_y_block "short *q, short *dq, unsigned char *dst, int stride, char *eobs"
-specialize vp8_dequant_idct_add_y_block mmx sse2 media neon
+specialize vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2
 vp8_dequant_idct_add_y_block_media=vp8_dequant_idct_add_y_block_v6
+vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2
 
 prototype void vp8_dequant_idct_add_uv_block "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs"
-specialize vp8_dequant_idct_add_uv_block mmx sse2 media neon
+specialize vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2
 vp8_dequant_idct_add_uv_block_media=vp8_dequant_idct_add_uv_block_v6
+vp8_dequant_idct_add_uv_block_dspr2=vp8_dequant_idct_add_uv_block_dspr2
 
 #
 # Loopfilter
 #
 prototype void vp8_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
-specialize vp8_loop_filter_mbv mmx sse2 media neon
+specialize vp8_loop_filter_mbv mmx sse2 media neon dspr2
 vp8_loop_filter_mbv_media=vp8_loop_filter_mbv_armv6
+vp8_loop_filter_mbv_dspr2=vp8_loop_filter_mbv_dspr2
 
 prototype void vp8_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
-specialize vp8_loop_filter_bv mmx sse2 media neon
+specialize vp8_loop_filter_bv mmx sse2 media neon dspr2
 vp8_loop_filter_bv_media=vp8_loop_filter_bv_armv6
+vp8_loop_filter_bv_dspr2=vp8_loop_filter_bv_dspr2
 
 prototype void vp8_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
-specialize vp8_loop_filter_mbh mmx sse2 media neon
+specialize vp8_loop_filter_mbh mmx sse2 media neon dspr2
 vp8_loop_filter_mbh_media=vp8_loop_filter_mbh_armv6
+vp8_loop_filter_mbh_dspr2=vp8_loop_filter_mbh_dspr2
 
 prototype void vp8_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
-specialize vp8_loop_filter_bh mmx sse2 media neon
+specialize vp8_loop_filter_bh mmx sse2 media neon dspr2
 vp8_loop_filter_bh_media=vp8_loop_filter_bh_armv6
+vp8_loop_filter_bh_dspr2=vp8_loop_filter_bh_dspr2
 
 
 prototype void vp8_loop_filter_simple_mbv "unsigned char *y, int ystride, const unsigned char *blimit"
@@ -90,37 +97,45 @@
 #
 #idct16
 prototype void vp8_short_idct4x4llm "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride"
-specialize vp8_short_idct4x4llm mmx media neon
+specialize vp8_short_idct4x4llm mmx media neon dspr2
 vp8_short_idct4x4llm_media=vp8_short_idct4x4llm_v6_dual
+vp8_short_idct4x4llm_dspr2=vp8_short_idct4x4llm_dspr2
 
 #iwalsh1
 prototype void vp8_short_inv_walsh4x4_1 "short *input, short *output"
+specialize vp8_short_inv_walsh4x4_1 dspr2
+vp8_short_inv_walsh4x4_1_dspr2=vp8_short_inv_walsh4x4_1_dspr2
 # no asm yet
 
 #iwalsh16
 prototype void vp8_short_inv_walsh4x4 "short *input, short *output"
-specialize vp8_short_inv_walsh4x4 mmx sse2 media neon
+specialize vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2
 vp8_short_inv_walsh4x4_media=vp8_short_inv_walsh4x4_v6
+vp8_short_inv_walsh4x4_dspr2=vp8_short_inv_walsh4x4_dspr2
 
 #idct1_scalar_add
 prototype void vp8_dc_only_idct_add "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride"
-specialize vp8_dc_only_idct_add	mmx media neon
+specialize vp8_dc_only_idct_add	mmx media neon dspr2
 vp8_dc_only_idct_add_media=vp8_dc_only_idct_add_v6
+vp8_dc_only_idct_add_dspr2=vp8_dc_only_idct_add_dspr2
 
 #
 # RECON
 #
 prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_copy_mem16x16 mmx sse2 media neon
+specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
 vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
+vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
 
 prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_copy_mem8x8 mmx media neon
+specialize vp8_copy_mem8x8 mmx media neon dspr2
 vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
+vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
 
 prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_copy_mem8x4 mmx media neon
+specialize vp8_copy_mem8x4 mmx media neon dspr2
 vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
+vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
 
 prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x, unsigned char * yabove_row, unsigned char * yleft, int left_stride, unsigned char * ypred_ptr, int y_stride"
 specialize vp8_build_intra_predictors_mby_s sse2 ssse3
@@ -177,20 +192,24 @@
 # Subpixel
 #
 prototype void vp8_sixtap_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
-specialize vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon
+specialize vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2
 vp8_sixtap_predict16x16_media=vp8_sixtap_predict16x16_armv6
+vp8_sixtap_predict16x16_dspr2=vp8_sixtap_predict16x16_dspr2
 
 prototype void vp8_sixtap_predict8x8 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
-specialize vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon
+specialize vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2
 vp8_sixtap_predict8x8_media=vp8_sixtap_predict8x8_armv6
+vp8_sixtap_predict8x8_dspr2=vp8_sixtap_predict8x8_dspr2
 
 prototype void vp8_sixtap_predict8x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
-specialize vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon
+specialize vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2
 vp8_sixtap_predict8x4_media=vp8_sixtap_predict8x4_armv6
+vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2
 
 prototype void vp8_sixtap_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
-specialize vp8_sixtap_predict4x4 mmx ssse3 media neon
+specialize vp8_sixtap_predict4x4 mmx ssse3 media neon dspr2
 vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6
+vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2
 
 prototype void vp8_bilinear_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
 specialize vp8_bilinear_predict16x16 mmx sse2 ssse3 media neon
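Each "specialize ... dspr2" line above registers a DSPr2-specific implementation for the corresponding prototype, and the "name_dspr2=name_dspr2" lines bind that specialization to its symbol. Conceptually (this is only a sketch of the effect, not the literal header generated by rtcd.sh), a DSPr2-enabled build ends up resolving the generic entry points to the _dspr2 functions:

    /* Conceptual effect only -- the real header is generated by rtcd.sh. */
    #if HAVE_DSPR2
    #define vp8_loop_filter_mbh vp8_loop_filter_mbh_dspr2
    #define vp8_copy_mem16x16   vp8_copy_mem16x16_dspr2
    #else
    #define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
    #define vp8_copy_mem16x16   vp8_copy_mem16x16_c
    #endif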
--- a/vp8/vp8_common.mk
+++ b/vp8/vp8_common.mk
@@ -120,6 +120,14 @@
 endif
 
 # common (c)
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/idctllm_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/filter_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/loopfilter_filters_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/reconinter_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/idct_blk_dspr2.c
+VP8_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/dequantize_dspr2.c
+
+# common (c)
 VP8_COMMON_SRCS-$(ARCH_ARM)  += common/arm/filter_arm.c
 VP8_COMMON_SRCS-$(ARCH_ARM)  += common/arm/loopfilter_arm.c
 VP8_COMMON_SRCS-$(ARCH_ARM)  += common/arm/reconintra_arm.c